1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2015 MediaTek Inc.
4 */
5
6 #include <linux/clk.h>
7 #include <linux/component.h>
8 #include <linux/iopoll.h>
9 #include <linux/irq.h>
10 #include <linux/of.h>
11 #include <linux/of_platform.h>
12 #include <linux/phy/phy.h>
13 #include <linux/platform_device.h>
14
15 #include <video/mipi_display.h>
16 #include <video/videomode.h>
17
18 #include <drm/drm_atomic_helper.h>
19 #include <drm/drm_bridge.h>
20 #include <drm/drm_bridge_connector.h>
21 #include <drm/drm_mipi_dsi.h>
22 #include <drm/drm_of.h>
23 #include <drm/drm_panel.h>
24 #include <drm/drm_print.h>
25 #include <drm/drm_probe_helper.h>
26 #include <drm/drm_simple_kms_helper.h>
27
28 #include "mtk_drm_ddp_comp.h"
29
30 #define DSI_START 0x00
31
32 #define DSI_INTEN 0x08
33
34 #define DSI_INTSTA 0x0c
35 #define LPRX_RD_RDY_INT_FLAG BIT(0)
36 #define CMD_DONE_INT_FLAG BIT(1)
37 #define TE_RDY_INT_FLAG BIT(2)
38 #define VM_DONE_INT_FLAG BIT(3)
39 #define EXT_TE_RDY_INT_FLAG BIT(4)
40 #define DSI_BUSY BIT(31)
41
42 #define DSI_CON_CTRL 0x10
43 #define DSI_RESET BIT(0)
44 #define DSI_EN BIT(1)
45 #define DPHY_RESET BIT(2)
46
47 #define DSI_MODE_CTRL 0x14
48 #define MODE (3)
49 #define CMD_MODE 0
50 #define SYNC_PULSE_MODE 1
51 #define SYNC_EVENT_MODE 2
52 #define BURST_MODE 3
53 #define FRM_MODE BIT(16)
54 #define MIX_MODE BIT(17)
55
56 #define DSI_TXRX_CTRL 0x18
57 #define VC_NUM BIT(1)
58 #define LANE_NUM (0xf << 2)
59 #define DIS_EOT BIT(6)
60 #define NULL_EN BIT(7)
61 #define TE_FREERUN BIT(8)
62 #define EXT_TE_EN BIT(9)
63 #define EXT_TE_EDGE BIT(10)
64 #define MAX_RTN_SIZE (0xf << 12)
65 #define HSTX_CKLP_EN BIT(16)
66
67 #define DSI_PSCTRL 0x1c
68 #define DSI_PS_WC 0x3fff
69 #define DSI_PS_SEL (3 << 16)
70 #define PACKED_PS_16BIT_RGB565 (0 << 16)
71 #define LOOSELY_PS_18BIT_RGB666 (1 << 16)
72 #define PACKED_PS_18BIT_RGB666 (2 << 16)
73 #define PACKED_PS_24BIT_RGB888 (3 << 16)
74
75 #define DSI_VSA_NL 0x20
76 #define DSI_VBP_NL 0x24
77 #define DSI_VFP_NL 0x28
78 #define DSI_VACT_NL 0x2C
79 #define DSI_SIZE_CON 0x38
80 #define DSI_HSA_WC 0x50
81 #define DSI_HBP_WC 0x54
82 #define DSI_HFP_WC 0x58
83
84 #define DSI_CMDQ_SIZE 0x60
85 #define CMDQ_SIZE 0x3f
86
87 #define DSI_HSTX_CKL_WC 0x64
88
89 #define DSI_RX_DATA0 0x74
90 #define DSI_RX_DATA1 0x78
91 #define DSI_RX_DATA2 0x7c
92 #define DSI_RX_DATA3 0x80
93
94 #define DSI_RACK 0x84
95 #define RACK BIT(0)
96
97 #define DSI_PHY_LCCON 0x104
98 #define LC_HS_TX_EN BIT(0)
99 #define LC_ULPM_EN BIT(1)
100 #define LC_WAKEUP_EN BIT(2)
101
102 #define DSI_PHY_LD0CON 0x108
103 #define LD0_HS_TX_EN BIT(0)
104 #define LD0_ULPM_EN BIT(1)
105 #define LD0_WAKEUP_EN BIT(2)
106
107 #define DSI_PHY_TIMECON0 0x110
108 #define LPX (0xff << 0)
109 #define HS_PREP (0xff << 8)
110 #define HS_ZERO (0xff << 16)
111 #define HS_TRAIL (0xff << 24)
112
113 #define DSI_PHY_TIMECON1 0x114
114 #define TA_GO (0xff << 0)
115 #define TA_SURE (0xff << 8)
116 #define TA_GET (0xff << 16)
117 #define DA_HS_EXIT (0xff << 24)
118
119 #define DSI_PHY_TIMECON2 0x118
120 #define CONT_DET (0xff << 0)
121 #define CLK_ZERO (0xff << 16)
122 #define CLK_TRAIL (0xff << 24)
123
124 #define DSI_PHY_TIMECON3 0x11c
125 #define CLK_HS_PREP (0xff << 0)
126 #define CLK_HS_POST (0xff << 8)
127 #define CLK_HS_EXIT (0xff << 16)
128
129 #define DSI_VM_CMD_CON 0x130
130 #define VM_CMD_EN BIT(0)
131 #define TS_VFP_EN BIT(5)
132
133 #define DSI_SHADOW_DEBUG 0x190U
134 #define FORCE_COMMIT BIT(0)
135 #define BYPASS_SHADOW BIT(1)
136
137 #define CONFIG (0xff << 0)
138 #define SHORT_PACKET 0
139 #define LONG_PACKET 2
140 #define BTA BIT(2)
141 #define DATA_ID (0xff << 8)
142 #define DATA_0 (0xff << 16)
143 #define DATA_1 (0xff << 24)
144
145 #define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0))
146
147 #define MTK_DSI_HOST_IS_READ(type) \
148 ((type == MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM) || \
149 (type == MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM) || \
150 (type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM) || \
151 (type == MIPI_DSI_DCS_READ))
152
/*
 * D-PHY timing parameters, all expressed in byte-clock (lane-byte) cycles.
 * Computed from the data rate in mtk_dsi_phy_timconfig() and programmed
 * into DSI_PHY_TIMECON0..3; also reused by mtk_dsi_config_vdo_timing()
 * to compensate the horizontal porches.
 */
struct mtk_phy_timing {
	u32 lpx;		/* low-power Tx period */
	u32 da_hs_prepare;	/* data lane HS prepare time */
	u32 da_hs_zero;		/* data lane HS-zero time */
	u32 da_hs_trail;	/* data lane HS trail time */

	u32 ta_go;		/* bus turn-around: drive-low period */
	u32 ta_sure;		/* bus turn-around: wait period */
	u32 ta_get;		/* bus turn-around: get period */
	u32 da_hs_exit;		/* data lane HS exit time */

	u32 clk_hs_zero;	/* clock lane HS-zero time */
	u32 clk_hs_trail;	/* clock lane HS trail time */

	u32 clk_hs_prepare;	/* clock lane HS prepare time */
	u32 clk_hs_post;	/* clock lane HS post time */
	u32 clk_hs_exit;	/* clock lane HS exit time */
};
171
172 struct phy;
173
/* Per-SoC quirks, selected through the OF match data. */
struct mtk_dsi_driver_data {
	const u32 reg_cmdq_off;	/* offset of the command queue registers */
	bool has_shadow_ctl;	/* SoC has the DSI_SHADOW_DEBUG register */
	bool has_size_ctl;	/* SoC has the DSI_SIZE_CON register */
};
179
/* Driver state for one DSI host/encoder instance. */
struct mtk_dsi {
	struct mtk_ddp_comp ddp_comp;	/* DDP pipeline component */
	struct device *dev;
	struct mipi_dsi_host host;	/* DSI host we register with the core */
	struct drm_encoder encoder;
	struct drm_bridge bridge;	/* our own bridge in the chain */
	struct drm_bridge *next_bridge;	/* panel bridge or downstream bridge */
	struct drm_connector *connector;
	struct phy *phy;		/* MIPI D-PHY */

	void __iomem *regs;		/* DSI controller register base */

	struct clk *engine_clk;
	struct clk *digital_clk;
	struct clk *hs_clk;		/* high-speed (bit) clock */

	u32 data_rate;			/* per-lane bit rate in Hz */

	unsigned long mode_flags;	/* MIPI_DSI_MODE_* from the peripheral */
	enum mipi_dsi_pixel_format format;
	unsigned int lanes;
	struct videomode vm;
	struct mtk_phy_timing phy_timing;
	int refcount;			/* poweron/poweroff nesting count */
	bool enabled;			/* output started (DSI_START written) */
	bool lanes_ready;		/* lanes brought out of ULP mode */
	u32 irq_data;			/* latched interrupt status bits */
	wait_queue_head_t irq_wait_queue;
	const struct mtk_dsi_driver_data *driver_data;
};
210
/* Map an embedded drm_bridge back to its containing mtk_dsi. */
static inline struct mtk_dsi *bridge_to_dsi(struct drm_bridge *b)
{
	return container_of(b, struct mtk_dsi, bridge);
}
215
/* Map an embedded mipi_dsi_host back to its containing mtk_dsi. */
static inline struct mtk_dsi *host_to_dsi(struct mipi_dsi_host *h)
{
	return container_of(h, struct mtk_dsi, host);
}
220
/*
 * Read-modify-write helper: replace the bits selected by @mask in the
 * register at @offset with the corresponding bits of @data.
 */
static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
{
	u32 val;

	val = readl(dsi->regs + offset);
	val &= ~mask;
	val |= data & mask;
	writel(val, dsi->regs + offset);
}
227
/*
 * Derive the D-PHY timing parameters from the per-lane data rate and
 * program DSI_PHY_TIMECON0..3.  The formulas convert the MIPI D-PHY
 * minimum times (in ns) to byte-clock cycles; the exact constants are
 * vendor-tuned — do not "simplify" them without hardware validation.
 */
static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
{
	u32 timcon0, timcon1, timcon2, timcon3;
	u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, 1000000);
	struct mtk_phy_timing *timing = &dsi->phy_timing;

	timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1;
	timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000;
	timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 -
			     timing->da_hs_prepare;
	timing->da_hs_trail = timing->da_hs_prepare + 1;

	timing->ta_go = 4 * timing->lpx - 2;
	timing->ta_sure = timing->lpx + 2;
	timing->ta_get = 4 * timing->lpx;
	timing->da_hs_exit = 2 * timing->lpx + 1;

	timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000);
	timing->clk_hs_post = timing->clk_hs_prepare + 8;
	timing->clk_hs_trail = timing->clk_hs_prepare;
	timing->clk_hs_zero = timing->clk_hs_trail * 4;
	timing->clk_hs_exit = 2 * timing->clk_hs_trail;

	/* Pack the fields into the four timing registers (8 bits each). */
	timcon0 = timing->lpx | timing->da_hs_prepare << 8 |
		  timing->da_hs_zero << 16 | timing->da_hs_trail << 24;
	timcon1 = timing->ta_go | timing->ta_sure << 8 |
		  timing->ta_get << 16 | timing->da_hs_exit << 24;
	/* CONT_DET is left 0; the "1 << 8" field is an undocumented
	 * vendor setting carried over from the reference code. */
	timcon2 = 1 << 8 | timing->clk_hs_zero << 16 |
		  timing->clk_hs_trail << 24;
	timcon3 = timing->clk_hs_prepare | timing->clk_hs_post << 8 |
		  timing->clk_hs_exit << 16;

	writel(timcon0, dsi->regs + DSI_PHY_TIMECON0);
	writel(timcon1, dsi->regs + DSI_PHY_TIMECON1);
	writel(timcon2, dsi->regs + DSI_PHY_TIMECON2);
	writel(timcon3, dsi->regs + DSI_PHY_TIMECON3);
}
265
/* Enable the DSI controller (DSI_EN bit in DSI_CON_CTRL). */
static void mtk_dsi_enable(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, DSI_EN);
}
270
/* Disable the DSI controller (clear DSI_EN in DSI_CON_CTRL). */
static void mtk_dsi_disable(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, 0);
}
275
/* Pulse the DSI engine soft-reset bit (assert then de-assert). */
static void mtk_dsi_reset_engine(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, DSI_RESET);
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, 0);
}
281
/* Pulse the D-PHY soft-reset bit (assert then de-assert). */
static void mtk_dsi_reset_dphy(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, DPHY_RESET);
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, 0);
}
287
/*
 * Put the clock lane into ultra-low-power mode.
 * NOTE(review): the second write clears LC_ULPM_EN rather than setting it,
 * which looks inverted for an "enter" helper — it matches the vendor
 * reference code, so confirm against the datasheet before changing.
 */
static void mtk_dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
}
293
/* Bring the clock lane out of ULP mode by pulsing the wakeup bit. */
static void mtk_dsi_clk_ulp_mode_leave(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, LC_WAKEUP_EN);
	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, 0);
}
300
/*
 * Put data lane 0 into ultra-low-power mode.
 * NOTE(review): as with the clock-lane variant, the second write clears
 * LD0_ULPM_EN instead of setting it; kept as-is to match the reference code.
 */
static void mtk_dsi_lane0_ulp_mode_enter(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_HS_TX_EN, 0);
	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
}
306
/* Bring data lane 0 out of ULP mode by pulsing the wakeup bit. */
static void mtk_dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, LD0_WAKEUP_EN);
	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, 0);
}
313
/* Return true if the clock lane is currently in high-speed Tx mode. */
static bool mtk_dsi_clk_hs_state(struct mtk_dsi *dsi)
{
	return readl(dsi->regs + DSI_PHY_LCCON) & LC_HS_TX_EN;
}
318
/*
 * Switch the clock lane into (@enter = true) or out of (@enter = false)
 * high-speed mode, writing the register only when the state would change.
 */
static void mtk_dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter)
{
	bool in_hs = mtk_dsi_clk_hs_state(dsi);

	if (enter == in_hs)
		return;

	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN,
		     enter ? LC_HS_TX_EN : 0);
}
326
mtk_dsi_set_mode(struct mtk_dsi * dsi)327 static void mtk_dsi_set_mode(struct mtk_dsi *dsi)
328 {
329 u32 vid_mode = CMD_MODE;
330
331 if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
332 if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
333 vid_mode = BURST_MODE;
334 else if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
335 vid_mode = SYNC_PULSE_MODE;
336 else
337 vid_mode = SYNC_EVENT_MODE;
338 }
339
340 writel(vid_mode, dsi->regs + DSI_MODE_CTRL);
341 }
342
/* Allow command transmission during video mode, in the vertical front porch. */
static void mtk_dsi_set_vm_cmd(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_VM_CMD_CON, VM_CMD_EN, VM_CMD_EN);
	mtk_dsi_mask(dsi, DSI_VM_CMD_CON, TS_VFP_EN, TS_VFP_EN);
}
348
mtk_dsi_ps_control_vact(struct mtk_dsi * dsi)349 static void mtk_dsi_ps_control_vact(struct mtk_dsi *dsi)
350 {
351 struct videomode *vm = &dsi->vm;
352 u32 dsi_buf_bpp, ps_wc;
353 u32 ps_bpp_mode;
354
355 if (dsi->format == MIPI_DSI_FMT_RGB565)
356 dsi_buf_bpp = 2;
357 else
358 dsi_buf_bpp = 3;
359
360 ps_wc = vm->hactive * dsi_buf_bpp;
361 ps_bpp_mode = ps_wc;
362
363 switch (dsi->format) {
364 case MIPI_DSI_FMT_RGB888:
365 ps_bpp_mode |= PACKED_PS_24BIT_RGB888;
366 break;
367 case MIPI_DSI_FMT_RGB666:
368 ps_bpp_mode |= PACKED_PS_18BIT_RGB666;
369 break;
370 case MIPI_DSI_FMT_RGB666_PACKED:
371 ps_bpp_mode |= LOOSELY_PS_18BIT_RGB666;
372 break;
373 case MIPI_DSI_FMT_RGB565:
374 ps_bpp_mode |= PACKED_PS_16BIT_RGB565;
375 break;
376 }
377
378 writel(vm->vactive, dsi->regs + DSI_VACT_NL);
379 writel(ps_bpp_mode, dsi->regs + DSI_PSCTRL);
380 writel(ps_wc, dsi->regs + DSI_HSTX_CKL_WC);
381 }
382
mtk_dsi_rxtx_control(struct mtk_dsi * dsi)383 static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
384 {
385 u32 tmp_reg;
386
387 switch (dsi->lanes) {
388 case 1:
389 tmp_reg = 1 << 2;
390 break;
391 case 2:
392 tmp_reg = 3 << 2;
393 break;
394 case 3:
395 tmp_reg = 7 << 2;
396 break;
397 case 4:
398 tmp_reg = 0xf << 2;
399 break;
400 default:
401 tmp_reg = 0xf << 2;
402 break;
403 }
404
405 tmp_reg |= (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) << 6;
406 tmp_reg |= (dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET) >> 3;
407
408 writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL);
409 }
410
/*
 * Program DSI_PSCTRL for video mode: select the PS_SEL pixel-packing
 * encoding for the current format and set the per-line word count.
 */
static void mtk_dsi_ps_control(struct mtk_dsi *dsi)
{
	u32 dsi_tmp_buf_bpp;
	u32 tmp_reg;

	switch (dsi->format) {
	case MIPI_DSI_FMT_RGB888:
		tmp_reg = PACKED_PS_24BIT_RGB888;
		dsi_tmp_buf_bpp = 3;
		break;
	case MIPI_DSI_FMT_RGB666:
		/* RGB666 is the loosely packed 18-in-24-bit format. */
		tmp_reg = LOOSELY_PS_18BIT_RGB666;
		dsi_tmp_buf_bpp = 3;
		break;
	case MIPI_DSI_FMT_RGB666_PACKED:
		tmp_reg = PACKED_PS_18BIT_RGB666;
		dsi_tmp_buf_bpp = 3;
		break;
	case MIPI_DSI_FMT_RGB565:
		tmp_reg = PACKED_PS_16BIT_RGB565;
		dsi_tmp_buf_bpp = 2;
		break;
	default:
		tmp_reg = PACKED_PS_24BIT_RGB888;
		dsi_tmp_buf_bpp = 3;
		break;
	}

	/* Word count = hactive * bytes-per-pixel, masked to the WC field
	 * (precedence: '*' binds tighter than '&', so only the product is
	 * masked before being added to the PS_SEL bits). */
	tmp_reg += dsi->vm.hactive * dsi_tmp_buf_bpp & DSI_PS_WC;
	writel(tmp_reg, dsi->regs + DSI_PSCTRL);
}
442
/*
 * Program the video-mode timing registers (vertical counts in lines,
 * horizontal counts in bytes) from the stored videomode, then shrink the
 * horizontal porches to make room for the D-PHY line overhead so the
 * refresh rate is preserved.
 */
static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
{
	u32 horizontal_sync_active_byte;
	u32 horizontal_backporch_byte;
	u32 horizontal_frontporch_byte;
	u32 horizontal_front_back_byte;
	u32 data_phy_cycles_byte;
	u32 dsi_tmp_buf_bpp, data_phy_cycles;
	u32 delta;
	struct mtk_phy_timing *timing = &dsi->phy_timing;

	struct videomode *vm = &dsi->vm;

	/* Bytes per pixel on the link: 2 for RGB565, else 3. */
	if (dsi->format == MIPI_DSI_FMT_RGB565)
		dsi_tmp_buf_bpp = 2;
	else
		dsi_tmp_buf_bpp = 3;

	writel(vm->vsync_len, dsi->regs + DSI_VSA_NL);
	writel(vm->vback_porch, dsi->regs + DSI_VBP_NL);
	writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL);
	writel(vm->vactive, dsi->regs + DSI_VACT_NL);

	if (dsi->driver_data->has_size_ctl)
		writel(vm->vactive << 16 | vm->hactive,
		       dsi->regs + DSI_SIZE_CON);

	/* The "- 10" accounts for packet header/footer overhead per line. */
	horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);

	/* Without sync pulses the sync length is folded into the back porch. */
	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
		horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp - 10;
	else
		horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) *
					    dsi_tmp_buf_bpp - 10;

	/* D-PHY cycles consumed entering/leaving HS mode on each line. */
	data_phy_cycles = timing->lpx + timing->da_hs_prepare +
			  timing->da_hs_zero + timing->da_hs_exit + 3;

	delta = dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST ? 18 : 12;

	horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp;
	horizontal_front_back_byte = horizontal_frontporch_byte + horizontal_backporch_byte;
	data_phy_cycles_byte = data_phy_cycles * dsi->lanes + delta;

	/* Distribute the PHY overhead over front and back porch in
	 * proportion to their sizes; if it doesn't fit, leave the porches
	 * alone and accept a lower refresh rate. */
	if (horizontal_front_back_byte > data_phy_cycles_byte) {
		horizontal_frontporch_byte -= data_phy_cycles_byte *
					      horizontal_frontporch_byte /
					      horizontal_front_back_byte;

		horizontal_backporch_byte -= data_phy_cycles_byte *
					     horizontal_backporch_byte /
					     horizontal_front_back_byte;
	} else {
		DRM_WARN("HFP + HBP less than d-phy, FPS will under 60Hz\n");
	}

	writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
	writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
	writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC);

	mtk_dsi_ps_control(dsi);
}
505
/* Kick the engine: DSI_START triggers on a 0 -> 1 transition. */
static void mtk_dsi_start(struct mtk_dsi *dsi)
{
	writel(0, dsi->regs + DSI_START);
	writel(1, dsi->regs + DSI_START);
}
511
/* Stop the engine by clearing DSI_START. */
static void mtk_dsi_stop(struct mtk_dsi *dsi)
{
	writel(0, dsi->regs + DSI_START);
}
516
/* Switch the controller to command mode. */
static void mtk_dsi_set_cmd_mode(struct mtk_dsi *dsi)
{
	writel(CMD_MODE, dsi->regs + DSI_MODE_CTRL);
}
521
/* Unmask the interrupts the driver waits on: LP-RX ready, command done,
 * video-frame done. */
static void mtk_dsi_set_interrupt_enable(struct mtk_dsi *dsi)
{
	u32 inten = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG;

	writel(inten, dsi->regs + DSI_INTEN);
}
528
/* Latch interrupt status bits for waiters (called from the IRQ handler). */
static void mtk_dsi_irq_data_set(struct mtk_dsi *dsi, u32 irq_bit)
{
	dsi->irq_data |= irq_bit;
}
533
/* Clear latched interrupt status bits before waiting for a new event. */
static void mtk_dsi_irq_data_clear(struct mtk_dsi *dsi, u32 irq_bit)
{
	dsi->irq_data &= ~irq_bit;
}
538
mtk_dsi_wait_for_irq_done(struct mtk_dsi * dsi,u32 irq_flag,unsigned int timeout)539 static s32 mtk_dsi_wait_for_irq_done(struct mtk_dsi *dsi, u32 irq_flag,
540 unsigned int timeout)
541 {
542 s32 ret = 0;
543 unsigned long jiffies = msecs_to_jiffies(timeout);
544
545 ret = wait_event_interruptible_timeout(dsi->irq_wait_queue,
546 dsi->irq_data & irq_flag,
547 jiffies);
548 if (ret == 0) {
549 DRM_WARN("Wait DSI IRQ(0x%08x) Timeout\n", irq_flag);
550
551 mtk_dsi_enable(dsi);
552 mtk_dsi_reset_engine(dsi);
553 }
554
555 return ret;
556 }
557
/*
 * Interrupt handler: acknowledge the status bits the driver cares about,
 * latch them in dsi->irq_data and wake any waiter.
 */
static irqreturn_t mtk_dsi_irq(int irq, void *dev_id)
{
	struct mtk_dsi *dsi = dev_id;
	u32 status, tmp;
	u32 flag = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG;

	status = readl(dsi->regs + DSI_INTSTA) & flag;

	if (status) {
		/* Issue RACK and spin until the engine deasserts DSI_BUSY
		 * (bit 31 of DSI_INTSTA) before clearing the status. */
		do {
			mtk_dsi_mask(dsi, DSI_RACK, RACK, RACK);
			tmp = readl(dsi->regs + DSI_INTSTA);
		} while (tmp & DSI_BUSY);

		mtk_dsi_mask(dsi, DSI_INTSTA, status, 0);
		mtk_dsi_irq_data_set(dsi, status);
		wake_up_interruptible(&dsi->irq_wait_queue);
	}

	return IRQ_HANDLED;
}
579
mtk_dsi_switch_to_cmd_mode(struct mtk_dsi * dsi,u8 irq_flag,u32 t)580 static s32 mtk_dsi_switch_to_cmd_mode(struct mtk_dsi *dsi, u8 irq_flag, u32 t)
581 {
582 mtk_dsi_irq_data_clear(dsi, irq_flag);
583 mtk_dsi_set_cmd_mode(dsi);
584
585 if (!mtk_dsi_wait_for_irq_done(dsi, irq_flag, t)) {
586 DRM_ERROR("failed to switch cmd mode\n");
587 return -ETIME;
588 } else {
589 return 0;
590 }
591 }
592
/*
 * Reference-counted power-up: on the first call, compute and set the
 * per-lane data rate, power the PHY, enable the engine/digital clocks and
 * fully configure the controller for the current mode.  Nested calls only
 * bump the refcount.  Returns 0 or a negative errno; on failure the
 * refcount is rolled back.
 */
static int mtk_dsi_poweron(struct mtk_dsi *dsi)
{
	struct device *dev = dsi->host.dev;
	int ret;
	u32 bit_per_pixel;

	if (++dsi->refcount != 1)
		return 0;

	switch (dsi->format) {
	case MIPI_DSI_FMT_RGB565:
		bit_per_pixel = 16;
		break;
	case MIPI_DSI_FMT_RGB666_PACKED:
		bit_per_pixel = 18;
		break;
	case MIPI_DSI_FMT_RGB666:
	case MIPI_DSI_FMT_RGB888:
	default:
		bit_per_pixel = 24;
		break;
	}

	/* Per-lane bit rate: pixel clock * bpp spread across the lanes. */
	dsi->data_rate = DIV_ROUND_UP_ULL(dsi->vm.pixelclock * bit_per_pixel,
					  dsi->lanes);

	ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
	if (ret < 0) {
		dev_err(dev, "Failed to set data rate: %d\n", ret);
		goto err_refcount;
	}

	phy_power_on(dsi->phy);

	ret = clk_prepare_enable(dsi->engine_clk);
	if (ret < 0) {
		dev_err(dev, "Failed to enable engine clock: %d\n", ret);
		goto err_phy_power_off;
	}

	ret = clk_prepare_enable(dsi->digital_clk);
	if (ret < 0) {
		dev_err(dev, "Failed to enable digital clock: %d\n", ret);
		goto err_disable_engine_clk;
	}

	mtk_dsi_enable(dsi);

	/* Bypass the register shadowing on SoCs that have it, so writes
	 * take effect immediately. */
	if (dsi->driver_data->has_shadow_ctl)
		writel(FORCE_COMMIT | BYPASS_SHADOW,
		       dsi->regs + DSI_SHADOW_DEBUG);

	mtk_dsi_reset_engine(dsi);
	mtk_dsi_phy_timconfig(dsi);

	mtk_dsi_ps_control_vact(dsi);
	mtk_dsi_set_vm_cmd(dsi);
	mtk_dsi_config_vdo_timing(dsi);
	mtk_dsi_set_interrupt_enable(dsi);

	return 0;
err_disable_engine_clk:
	clk_disable_unprepare(dsi->engine_clk);
err_phy_power_off:
	phy_power_off(dsi->phy);
err_refcount:
	dsi->refcount--;
	return ret;
}
662
/*
 * Reference-counted power-down, the counterpart of mtk_dsi_poweron().
 * Only the last call actually stops the engine, parks the lanes and
 * releases the clocks and PHY.
 */
static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
{
	if (WARN_ON(dsi->refcount == 0))
		return;

	if (--dsi->refcount != 0)
		return;

	/*
	 * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since
	 * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
	 * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
	 * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
	 * after dsi is fully set.
	 */
	mtk_dsi_stop(dsi);

	mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
	mtk_dsi_reset_engine(dsi);
	mtk_dsi_lane0_ulp_mode_enter(dsi);
	mtk_dsi_clk_ulp_mode_enter(dsi);
	/* set the lane number as 0 to pull down mipi */
	writel(0, dsi->regs + DSI_TXRX_CTRL);

	mtk_dsi_disable(dsi);

	clk_disable_unprepare(dsi->engine_clk);
	clk_disable_unprepare(dsi->digital_clk);

	phy_power_off(dsi->phy);

	/* Lanes must be re-initialized on the next power-up / transfer. */
	dsi->lanes_ready = false;
}
696
/*
 * Bring the DSI lanes out of ULP mode exactly once per power cycle.
 * Needed both before enabling the output and before the first host
 * transfer, whichever comes first.
 */
static void mtk_dsi_lane_ready(struct mtk_dsi *dsi)
{
	if (!dsi->lanes_ready) {
		dsi->lanes_ready = true;
		mtk_dsi_rxtx_control(dsi);
		usleep_range(30, 100);
		mtk_dsi_reset_dphy(dsi);
		mtk_dsi_clk_ulp_mode_leave(dsi);
		mtk_dsi_lane0_ulp_mode_leave(dsi);
		mtk_dsi_clk_hs_mode(dsi, 0);
		msleep(20);
		/* The reaction time after pulling up the mipi signal for dsi_rx */
	}
}
711
/*
 * Start scanning out: ready the lanes, program the operating mode, put
 * the clock lane into HS mode and kick the engine.  Idempotent while
 * already enabled.
 */
static void mtk_output_dsi_enable(struct mtk_dsi *dsi)
{
	if (dsi->enabled)
		return;

	mtk_dsi_lane_ready(dsi);
	mtk_dsi_set_mode(dsi);
	mtk_dsi_clk_hs_mode(dsi, 1);

	mtk_dsi_start(dsi);

	dsi->enabled = true;
}
725
/*
 * Mark the output disabled.  The hardware itself is torn down later in
 * mtk_dsi_poweroff() (see the asymmetry comment there).
 */
static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
{
	if (!dsi->enabled)
		return;

	dsi->enabled = false;
}
733
/* drm_bridge .attach: chain the downstream panel/bridge after ours. */
static int mtk_dsi_bridge_attach(struct drm_bridge *bridge,
				 enum drm_bridge_attach_flags flags)
{
	struct mtk_dsi *dsi = bridge_to_dsi(bridge);

	/* Attach the panel or bridge to the dsi bridge */
	return drm_bridge_attach(bridge->encoder, dsi->next_bridge,
				 &dsi->bridge, flags);
}
743
/* drm_bridge .mode_set: cache the adjusted mode as a videomode for the
 * timing programming done at enable time. */
static void mtk_dsi_bridge_mode_set(struct drm_bridge *bridge,
				    const struct drm_display_mode *mode,
				    const struct drm_display_mode *adjusted)
{
	struct mtk_dsi *dsi = bridge_to_dsi(bridge);

	drm_display_mode_to_videomode(adjusted, &dsi->vm);
}
752
/* drm_bridge .atomic_disable: stop the DSI output. */
static void mtk_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
					  struct drm_bridge_state *old_bridge_state)
{
	struct mtk_dsi *dsi = bridge_to_dsi(bridge);

	mtk_output_dsi_disable(dsi);
}
760
/* drm_bridge .atomic_enable: start the output, but only if pre_enable's
 * power-on succeeded (refcount > 0). */
static void mtk_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
					 struct drm_bridge_state *old_bridge_state)
{
	struct mtk_dsi *dsi = bridge_to_dsi(bridge);

	if (dsi->refcount == 0)
		return;

	mtk_output_dsi_enable(dsi);
}
771
/* drm_bridge .atomic_pre_enable: power up clocks/PHY before the output
 * is started. */
static void mtk_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
					     struct drm_bridge_state *old_bridge_state)
{
	struct mtk_dsi *dsi = bridge_to_dsi(bridge);
	int ret;

	ret = mtk_dsi_poweron(dsi);
	if (ret < 0)
		DRM_ERROR("failed to power on dsi\n");
}
782
/* drm_bridge .atomic_post_disable: drop the power reference taken in
 * pre_enable. */
static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
					       struct drm_bridge_state *old_bridge_state)
{
	struct mtk_dsi *dsi = bridge_to_dsi(bridge);

	mtk_dsi_poweroff(dsi);
}
790
/* drm_bridge callbacks for the DSI output bridge. */
static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = {
	.attach = mtk_dsi_bridge_attach,
	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
	.atomic_disable = mtk_dsi_bridge_atomic_disable,
	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_enable = mtk_dsi_bridge_atomic_enable,
	.atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable,
	.atomic_post_disable = mtk_dsi_bridge_atomic_post_disable,
	.atomic_reset = drm_atomic_helper_bridge_reset,
	.mode_set = mtk_dsi_bridge_mode_set,
};
802
/* DDP component .start: take a power reference when the pipeline starts. */
static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp)
{
	struct mtk_dsi *dsi = container_of(comp, struct mtk_dsi, ddp_comp);

	mtk_dsi_poweron(dsi);
}
809
/* DDP component .stop: drop the power reference taken in .start. */
static void mtk_dsi_ddp_stop(struct mtk_ddp_comp *comp)
{
	struct mtk_dsi *dsi = container_of(comp, struct mtk_dsi, ddp_comp);

	mtk_dsi_poweroff(dsi);
}
816
/* DDP component callbacks for the DSI block. */
static const struct mtk_ddp_comp_funcs mtk_dsi_funcs = {
	.start = mtk_dsi_ddp_start,
	.stop = mtk_dsi_ddp_stop,
};
821
/* mipi_dsi_host .attach: record the peripheral's lane count, pixel format
 * and mode flags for later configuration. */
static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
			       struct mipi_dsi_device *device)
{
	struct mtk_dsi *dsi = host_to_dsi(host);

	dsi->lanes = device->lanes;
	dsi->format = device->format;
	dsi->mode_flags = device->mode_flags;

	return 0;
}
833
/*
 * Poll DSI_INTSTA until the DSI_BUSY bit clears (up to 2 s, 4 us step).
 * On timeout, re-enable and reset the engine to recover.
 */
static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
{
	int ret;
	u32 val;

	ret = readl_poll_timeout(dsi->regs + DSI_INTSTA, val, !(val & DSI_BUSY),
				 4, 2000000);
	if (ret) {
		DRM_WARN("polling dsi wait not busy timeout!\n");

		mtk_dsi_enable(dsi);
		mtk_dsi_reset_engine(dsi);
	}
}
848
/*
 * Decode the payload byte count of a received DSI packet from its data
 * type byte; @read_data holds the raw bytes from DSI_RX_DATA0..3.
 * Returns 0 for ACK/error reports and unknown types.
 */
static u32 mtk_dsi_recv_cnt(u8 type, u8 *read_data)
{
	switch (type) {
	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
		return 1;
	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
		return 2;
	case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
	case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
		/* NOTE(review): a long-packet word count is normally
		 * lsb + msb * 256; the "* 16" here looks suspicious but is
		 * harmless since callers cap the count at 10 — confirm
		 * against the datasheet before relying on larger reads. */
		return read_data[1] + read_data[2] * 16;
	case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
		DRM_INFO("type is 0x02, try again\n");
		break;
	default:
		DRM_INFO("type(0x%x) not recognized\n", type);
		break;
	}

	return 0;
}
871
/*
 * Stage a DSI message into the command queue registers.  The first CMDQ
 * word holds config/type (and, for long packets, the 16-bit word count);
 * payload bytes are packed four to a 32-bit register starting at
 * @cmdq_off, using read-modify-write per byte lane.
 */
static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg)
{
	const char *tx_buf = msg->tx_buf;
	u8 config, cmdq_size, cmdq_off, type = msg->type;
	u32 reg_val, cmdq_mask, i;
	u32 reg_cmdq_off = dsi->driver_data->reg_cmdq_off;

	if (MTK_DSI_HOST_IS_READ(type))
		config = BTA;	/* read: request bus turn-around */
	else
		config = (msg->tx_len > 2) ? LONG_PACKET : SHORT_PACKET;

	if (msg->tx_len > 2) {
		/* Long packet: 1 header word + payload rounded up to words. */
		cmdq_size = 1 + (msg->tx_len + 3) / 4;
		cmdq_off = 4;
		cmdq_mask = CONFIG | DATA_ID | DATA_0 | DATA_1;
		reg_val = (msg->tx_len << 16) | (type << 8) | config;
	} else {
		/* Short packet: up to 2 parameter bytes share the header word. */
		cmdq_size = 1;
		cmdq_off = 2;
		cmdq_mask = CONFIG | DATA_ID;
		reg_val = (type << 8) | config;
	}

	/* Place each payload byte in its 32-bit-aligned register at the
	 * correct byte lane ((i + cmdq_off) & 3). */
	for (i = 0; i < msg->tx_len; i++)
		mtk_dsi_mask(dsi, (reg_cmdq_off + cmdq_off + i) & (~0x3U),
			     (0xffUL << (((i + cmdq_off) & 3U) * 8U)),
			     tx_buf[i] << (((i + cmdq_off) & 3U) * 8U));

	mtk_dsi_mask(dsi, reg_cmdq_off, cmdq_mask, reg_val);
	mtk_dsi_mask(dsi, DSI_CMDQ_SIZE, CMDQ_SIZE, cmdq_size);
}
904
mtk_dsi_host_send_cmd(struct mtk_dsi * dsi,const struct mipi_dsi_msg * msg,u8 flag)905 static ssize_t mtk_dsi_host_send_cmd(struct mtk_dsi *dsi,
906 const struct mipi_dsi_msg *msg, u8 flag)
907 {
908 mtk_dsi_wait_for_idle(dsi);
909 mtk_dsi_irq_data_clear(dsi, flag);
910 mtk_dsi_cmdq(dsi, msg);
911 mtk_dsi_start(dsi);
912
913 if (!mtk_dsi_wait_for_irq_done(dsi, flag, 2000))
914 return -ETIME;
915 else
916 return 0;
917 }
918
/*
 * mipi_dsi_host .transfer: send @msg and, for read requests, copy up to
 * 10 bytes of response from DSI_RX_DATA0..3 into msg->rx_buf.  If the
 * controller is in video mode it is temporarily switched to command mode
 * and restored afterwards.  Returns the number of bytes received (0 for
 * writes) or a negative errno.
 */
static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
				     const struct mipi_dsi_msg *msg)
{
	struct mtk_dsi *dsi = host_to_dsi(host);
	u32 recv_cnt, i;
	u8 read_data[16];
	void *src_addr;
	u8 irq_flag = CMD_DONE_INT_FLAG;
	u32 dsi_mode;
	int ret;

	/* Leave video mode for the duration of the transfer. */
	dsi_mode = readl(dsi->regs + DSI_MODE_CTRL);
	if (dsi_mode & MODE) {
		mtk_dsi_stop(dsi);
		ret = mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
		if (ret)
			goto restore_dsi_mode;
	}

	if (MTK_DSI_HOST_IS_READ(msg->type))
		irq_flag |= LPRX_RD_RDY_INT_FLAG;

	mtk_dsi_lane_ready(dsi);

	ret = mtk_dsi_host_send_cmd(dsi, msg, irq_flag);
	if (ret)
		goto restore_dsi_mode;

	/* Write transfers are done; nothing to receive. */
	if (!MTK_DSI_HOST_IS_READ(msg->type)) {
		recv_cnt = 0;
		goto restore_dsi_mode;
	}

	if (!msg->rx_buf) {
		DRM_ERROR("dsi receive buffer size may be NULL\n");
		ret = -EINVAL;
		goto restore_dsi_mode;
	}

	/* Snapshot the four RX data registers byte by byte. */
	for (i = 0; i < 16; i++)
		*(read_data + i) = readb(dsi->regs + DSI_RX_DATA0 + i);

	recv_cnt = mtk_dsi_recv_cnt(read_data[0], read_data);

	/* Long responses carry payload from byte 4; short ones from byte 1. */
	if (recv_cnt > 2)
		src_addr = &read_data[4];
	else
		src_addr = &read_data[1];

	/* The RX FIFO exposes at most 10 payload bytes. */
	if (recv_cnt > 10)
		recv_cnt = 10;

	if (recv_cnt > msg->rx_len)
		recv_cnt = msg->rx_len;

	if (recv_cnt)
		memcpy(msg->rx_buf, src_addr, recv_cnt);

	DRM_INFO("dsi get %d byte data from the panel address(0x%x)\n",
		 recv_cnt, *((u8 *)(msg->tx_buf)));

restore_dsi_mode:
	/* Put the controller back into video mode if that's where it was. */
	if (dsi_mode & MODE) {
		mtk_dsi_set_mode(dsi);
		mtk_dsi_start(dsi);
	}

	return ret < 0 ? ret : recv_cnt;
}
988
/* mipi_dsi_host callbacks. */
static const struct mipi_dsi_host_ops mtk_dsi_ops = {
	.attach = mtk_dsi_host_attach,
	.transfer = mtk_dsi_host_transfer,
};
993
/*
 * Create the DSI encoder, attach our bridge chain to it (connector
 * creation deferred to us via DRM_BRIDGE_ATTACH_NO_CONNECTOR), and build
 * the bridge connector.  Returns 0 or a negative errno; the encoder is
 * cleaned up on failure.
 */
static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
{
	int ret;

	ret = drm_simple_encoder_init(drm, &dsi->encoder,
				      DRM_MODE_ENCODER_DSI);
	if (ret) {
		DRM_ERROR("Failed to encoder init to drm\n");
		return ret;
	}

	dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->ddp_comp);

	ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		goto err_cleanup_encoder;

	dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder);
	if (IS_ERR(dsi->connector)) {
		DRM_ERROR("Unable to create bridge connector\n");
		ret = PTR_ERR(dsi->connector);
		goto err_cleanup_encoder;
	}
	drm_connector_attach_encoder(dsi->connector, &dsi->encoder);

	return 0;

err_cleanup_encoder:
	drm_encoder_cleanup(&dsi->encoder);
	return ret;
}
1026
/*
 * Component .bind: register the DDP component with the DRM device and
 * create the encoder/connector.  Unregisters the component on failure.
 */
static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
{
	int ret;
	struct drm_device *drm = data;
	struct mtk_dsi *dsi = dev_get_drvdata(dev);

	ret = mtk_ddp_comp_register(drm, &dsi->ddp_comp);
	if (ret < 0) {
		dev_err(dev, "Failed to register component %pOF: %d\n",
			dev->of_node, ret);
		return ret;
	}

	ret = mtk_dsi_encoder_init(drm, dsi);
	if (ret)
		goto err_unregister;

	return 0;

err_unregister:
	mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
	return ret;
}
1050
/* Component .unbind: tear down the encoder and unregister the component. */
static void mtk_dsi_unbind(struct device *dev, struct device *master,
			   void *data)
{
	struct drm_device *drm = data;
	struct mtk_dsi *dsi = dev_get_drvdata(dev);

	drm_encoder_cleanup(&dsi->encoder);
	mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
}
1060
/* Component framework callbacks. */
static const struct component_ops mtk_dsi_component_ops = {
	.bind = mtk_dsi_bind,
	.unbind = mtk_dsi_unbind,
};
1065
mtk_dsi_probe(struct platform_device * pdev)1066 static int mtk_dsi_probe(struct platform_device *pdev)
1067 {
1068 struct mtk_dsi *dsi;
1069 struct device *dev = &pdev->dev;
1070 struct drm_panel *panel;
1071 struct resource *regs;
1072 int irq_num;
1073 int comp_id;
1074 int ret;
1075
1076 dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
1077 if (!dsi)
1078 return -ENOMEM;
1079
1080 dsi->host.ops = &mtk_dsi_ops;
1081 dsi->host.dev = dev;
1082 ret = mipi_dsi_host_register(&dsi->host);
1083 if (ret < 0) {
1084 dev_err(dev, "failed to register DSI host: %d\n", ret);
1085 return ret;
1086 }
1087
1088 ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
1089 &panel, &dsi->next_bridge);
1090 if (ret)
1091 goto err_unregister_host;
1092
1093 if (panel) {
1094 dsi->next_bridge = devm_drm_panel_bridge_add(dev, panel);
1095 if (IS_ERR(dsi->next_bridge)) {
1096 ret = PTR_ERR(dsi->next_bridge);
1097 goto err_unregister_host;
1098 }
1099 }
1100
1101 dsi->driver_data = of_device_get_match_data(dev);
1102
1103 dsi->engine_clk = devm_clk_get(dev, "engine");
1104 if (IS_ERR(dsi->engine_clk)) {
1105 ret = PTR_ERR(dsi->engine_clk);
1106
1107 if (ret != -EPROBE_DEFER)
1108 dev_err(dev, "Failed to get engine clock: %d\n", ret);
1109 goto err_unregister_host;
1110 }
1111
1112 dsi->digital_clk = devm_clk_get(dev, "digital");
1113 if (IS_ERR(dsi->digital_clk)) {
1114 ret = PTR_ERR(dsi->digital_clk);
1115
1116 if (ret != -EPROBE_DEFER)
1117 dev_err(dev, "Failed to get digital clock: %d\n", ret);
1118 goto err_unregister_host;
1119 }
1120
1121 dsi->hs_clk = devm_clk_get(dev, "hs");
1122 if (IS_ERR(dsi->hs_clk)) {
1123 ret = PTR_ERR(dsi->hs_clk);
1124 dev_err(dev, "Failed to get hs clock: %d\n", ret);
1125 goto err_unregister_host;
1126 }
1127
1128 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1129 dsi->regs = devm_ioremap_resource(dev, regs);
1130 if (IS_ERR(dsi->regs)) {
1131 ret = PTR_ERR(dsi->regs);
1132 dev_err(dev, "Failed to ioremap memory: %d\n", ret);
1133 goto err_unregister_host;
1134 }
1135
1136 dsi->phy = devm_phy_get(dev, "dphy");
1137 if (IS_ERR(dsi->phy)) {
1138 ret = PTR_ERR(dsi->phy);
1139 dev_err(dev, "Failed to get MIPI-DPHY: %d\n", ret);
1140 goto err_unregister_host;
1141 }
1142
1143 comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DSI);
1144 if (comp_id < 0) {
1145 dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
1146 ret = comp_id;
1147 goto err_unregister_host;
1148 }
1149
1150 ret = mtk_ddp_comp_init(dev, dev->of_node, &dsi->ddp_comp, comp_id,
1151 &mtk_dsi_funcs);
1152 if (ret) {
1153 dev_err(dev, "Failed to initialize component: %d\n", ret);
1154 goto err_unregister_host;
1155 }
1156
1157 irq_num = platform_get_irq(pdev, 0);
1158 if (irq_num < 0) {
1159 dev_err(&pdev->dev, "failed to get dsi irq_num: %d\n", irq_num);
1160 ret = irq_num;
1161 goto err_unregister_host;
1162 }
1163
1164 irq_set_status_flags(irq_num, IRQ_TYPE_LEVEL_LOW);
1165 ret = devm_request_irq(&pdev->dev, irq_num, mtk_dsi_irq,
1166 IRQF_TRIGGER_LOW, dev_name(&pdev->dev), dsi);
1167 if (ret) {
1168 dev_err(&pdev->dev, "failed to request mediatek dsi irq\n");
1169 goto err_unregister_host;
1170 }
1171
1172 init_waitqueue_head(&dsi->irq_wait_queue);
1173
1174 platform_set_drvdata(pdev, dsi);
1175
1176 dsi->bridge.funcs = &mtk_dsi_bridge_funcs;
1177 dsi->bridge.of_node = dev->of_node;
1178 dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
1179
1180 drm_bridge_add(&dsi->bridge);
1181
1182 ret = component_add(&pdev->dev, &mtk_dsi_component_ops);
1183 if (ret) {
1184 dev_err(&pdev->dev, "failed to add component: %d\n", ret);
1185 goto err_unregister_host;
1186 }
1187
1188 return 0;
1189
1190 err_unregister_host:
1191 mipi_dsi_host_unregister(&dsi->host);
1192 return ret;
1193 }
1194
mtk_dsi_remove(struct platform_device * pdev)1195 static int mtk_dsi_remove(struct platform_device *pdev)
1196 {
1197 struct mtk_dsi *dsi = platform_get_drvdata(pdev);
1198
1199 mtk_output_dsi_disable(dsi);
1200 drm_bridge_remove(&dsi->bridge);
1201 component_del(&pdev->dev, &mtk_dsi_component_ops);
1202 mipi_dsi_host_unregister(&dsi->host);
1203
1204 return 0;
1205 }
1206
/* Per-SoC data: reg_cmdq_off is the offset of the DSI command-queue
 * registers, which differs between SoC generations.
 */
static const struct mtk_dsi_driver_data mt8173_dsi_driver_data = {
	.reg_cmdq_off = 0x200,
};

static const struct mtk_dsi_driver_data mt2701_dsi_driver_data = {
	.reg_cmdq_off = 0x180,
};

/* MT8183 additionally has shadow-register and frame-size control. */
static const struct mtk_dsi_driver_data mt8183_dsi_driver_data = {
	.reg_cmdq_off = 0x200,
	.has_shadow_ctl = true,
	.has_size_ctl = true,
};
1220
/* Device-tree match table mapping each supported SoC to its config. */
static const struct of_device_id mtk_dsi_of_match[] = {
	{ .compatible = "mediatek,mt2701-dsi",
	  .data = &mt2701_dsi_driver_data },
	{ .compatible = "mediatek,mt8173-dsi",
	  .data = &mt8173_dsi_driver_data },
	{ .compatible = "mediatek,mt8183-dsi",
	  .data = &mt8183_dsi_driver_data },
	{ },
};
1230
/* Platform driver; registered by the MediaTek DRM core driver. */
struct platform_driver mtk_dsi_driver = {
	.probe = mtk_dsi_probe,
	.remove = mtk_dsi_remove,
	.driver = {
		.name = "mtk-dsi",
		.of_match_table = mtk_dsi_of_match,
	},
};
1239