// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 */

#include <linux/irqdomain.h>
#include <linux/irq.h>

#include "msm_drv.h"
#include "mdp5_kms.h"

#define to_mdp5_mdss(x) container_of(x, struct mdp5_mdss, base)

struct mdp5_mdss {
	struct msm_mdss base;

	void __iomem *mmio, *vbif;

	struct regulator *vdd;

	struct clk *ahb_clk;
	struct clk *axi_clk;
	struct clk *vsync_clk;

	struct {
		volatile unsigned long enabled_mask;
		struct irq_domain *domain;
	} irqcontroller;
};

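/* Accessors for the MDSS wrapper registers mapped at ->mmio */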
static inline void mdss_write(struct mdp5_mdss *mdp5_mdss, u32 reg, u32 data)
{
	msm_writel(data, mdp5_mdss->mmio + reg);
}

static inline u32 mdss_read(struct mdp5_mdss *mdp5_mdss, u32 reg)
{
	return msm_readl(mdp5_mdss->mmio + reg);
}

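/*
 * Top-level MDSS interrupt handler: reads the HW interrupt status and
 * dispatches each pending bit into the sub-block irq domain.
 */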
static irqreturn_t mdss_irq(int irq, void *arg)
{
	struct mdp5_mdss *mdp5_mdss = arg;
	u32 intr;

	intr = mdss_read(mdp5_mdss, REG_MDSS_HW_INTR_STATUS);

	VERB("intr=%08x", intr);

	while (intr) {
		/* fls() is 1-based, so the highest pending hwirq is fls() - 1 */
		irq_hw_number_t hwirq = fls(intr) - 1;

		generic_handle_domain_irq(mdp5_mdss->irqcontroller.domain, hwirq);
		intr &= ~(1 << hwirq);
	}

	return IRQ_HANDLED;
}

/*
 * Interrupt-controller implementation, so that sub-blocks (MDP/HDMI/eDP/
 * DSI/etc.) can register to get their irqs delivered
 */

#define VALID_IRQS  (MDSS_HW_INTR_STATUS_INTR_MDP | \
		MDSS_HW_INTR_STATUS_INTR_DSI0 | \
		MDSS_HW_INTR_STATUS_INTR_DSI1 | \
		MDSS_HW_INTR_STATUS_INTR_HDMI | \
		MDSS_HW_INTR_STATUS_INTR_EDP)

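/*
 * The irq_chip mask/unmask callbacks only track which sub-block irqs are
 * currently enabled in enabled_mask; the barriers keep the bitops ordered
 * with respect to other CPUs.
 */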
static void mdss_hw_mask_irq(struct irq_data *irqd)
{
	struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);

	smp_mb__before_atomic();
	clear_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
	smp_mb__after_atomic();
}

static void mdss_hw_unmask_irq(struct irq_data *irqd)
{
	struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);

	smp_mb__before_atomic();
	set_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
	smp_mb__after_atomic();
}

static struct irq_chip mdss_hw_irq_chip = {
	.name		= "mdss",
	.irq_mask	= mdss_hw_mask_irq,
	.irq_unmask	= mdss_hw_unmask_irq,
};

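/*
 * .map callback for the sub-block irq domain: reject hwirqs outside
 * VALID_IRQS, and attach the mdss irq_chip with a level-type handler.
 */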
static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
				 irq_hw_number_t hwirq)
{
	struct mdp5_mdss *mdp5_mdss = d->host_data;

	if (!(VALID_IRQS & (1 << hwirq)))
		return -EPERM;

	irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, mdp5_mdss);

	return 0;
}

static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
	.map = mdss_hw_irqdomain_map,
	.xlate = irq_domain_xlate_onecell,
};

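/*
 * Register a linear irq domain wide enough for every bit of the HW
 * interrupt status register, through which sub-blocks map their irqs.
 */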
static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss)
{
	struct device *dev = mdp5_mdss->base.dev->dev;
	struct irq_domain *d;

	d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
				  mdp5_mdss);
	if (!d) {
		DRM_DEV_ERROR(dev, "mdss irq domain add failed\n");
		return -ENXIO;
	}

	mdp5_mdss->irqcontroller.enabled_mask = 0;
	mdp5_mdss->irqcontroller.domain = d;

	return 0;
}

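/*
 * Clock handling for the mdss_funcs enable/disable hooks: the AHB
 * (register interface) clock is enabled first and disabled last, with
 * the AXI and vsync clocks nested inside.
 */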
static int mdp5_mdss_enable(struct msm_mdss *mdss)
{
	struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
	DBG("");

	clk_prepare_enable(mdp5_mdss->ahb_clk);
	if (mdp5_mdss->axi_clk)
		clk_prepare_enable(mdp5_mdss->axi_clk);
	if (mdp5_mdss->vsync_clk)
		clk_prepare_enable(mdp5_mdss->vsync_clk);

	return 0;
}

static int mdp5_mdss_disable(struct msm_mdss *mdss)
{
	struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
	DBG("");

	if (mdp5_mdss->vsync_clk)
		clk_disable_unprepare(mdp5_mdss->vsync_clk);
	if (mdp5_mdss->axi_clk)
		clk_disable_unprepare(mdp5_mdss->axi_clk);
	clk_disable_unprepare(mdp5_mdss->ahb_clk);

	return 0;
}

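/*
 * All three clocks are treated as optional: a failed lookup leaves the
 * pointer NULL, which the enable/disable paths either check for or rely
 * on the clk API treating a NULL clk as a no-op.
 */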
static int msm_mdss_get_clocks(struct mdp5_mdss *mdp5_mdss)
{
	struct platform_device *pdev =
			to_platform_device(mdp5_mdss->base.dev->dev);

	mdp5_mdss->ahb_clk = msm_clk_get(pdev, "iface");
	if (IS_ERR(mdp5_mdss->ahb_clk))
		mdp5_mdss->ahb_clk = NULL;

	mdp5_mdss->axi_clk = msm_clk_get(pdev, "bus");
	if (IS_ERR(mdp5_mdss->axi_clk))
		mdp5_mdss->axi_clk = NULL;

	mdp5_mdss->vsync_clk = msm_clk_get(pdev, "vsync");
	if (IS_ERR(mdp5_mdss->vsync_clk))
		mdp5_mdss->vsync_clk = NULL;

	return 0;
}

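/*
 * Tear down in roughly the reverse order of mdp5_mdss_init(): remove the
 * sub-block irq domain, drop the vdd regulator and disable runtime PM.
 */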
static void mdp5_mdss_destroy(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(priv->mdss);

	if (!mdp5_mdss)
		return;

	irq_domain_remove(mdp5_mdss->irqcontroller.domain);
	mdp5_mdss->irqcontroller.domain = NULL;

	regulator_disable(mdp5_mdss->vdd);

	pm_runtime_disable(dev->dev);
}

static const struct msm_mdss_funcs mdss_funcs = {
	.enable	= mdp5_mdss_enable,
	.disable = mdp5_mdss_disable,
	.destroy = mdp5_mdss_destroy,
};

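/*
 * Set up the qcom,mdss wrapper: map the MDSS and VBIF register regions,
 * look up clocks and the vdd regulator, request the top-level irq and
 * create the sub-block irq domain.
 */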
int mdp5_mdss_init(struct drm_device *dev)
{
	struct platform_device *pdev = to_platform_device(dev->dev);
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_mdss *mdp5_mdss;
	int ret;

	DBG("");

	if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss"))
		return 0;

	mdp5_mdss = devm_kzalloc(dev->dev, sizeof(*mdp5_mdss), GFP_KERNEL);
	if (!mdp5_mdss) {
		ret = -ENOMEM;
		goto fail;
	}

	mdp5_mdss->base.dev = dev;

	mdp5_mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
	if (IS_ERR(mdp5_mdss->mmio)) {
		ret = PTR_ERR(mdp5_mdss->mmio);
		goto fail;
	}

	mdp5_mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
	if (IS_ERR(mdp5_mdss->vbif)) {
		ret = PTR_ERR(mdp5_mdss->vbif);
		goto fail;
	}

	ret = msm_mdss_get_clocks(mdp5_mdss);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to get clocks: %d\n", ret);
		goto fail;
	}

	/* Regulator to enable GDSCs in downstream kernels */
	mdp5_mdss->vdd = devm_regulator_get(dev->dev, "vdd");
	if (IS_ERR(mdp5_mdss->vdd)) {
		ret = PTR_ERR(mdp5_mdss->vdd);
		goto fail;
	}

	ret = regulator_enable(mdp5_mdss->vdd);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n",
			ret);
		goto fail;
	}

	ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
			       mdss_irq, 0, "mdss_isr", mdp5_mdss);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to init irq: %d\n", ret);
		goto fail_irq;
	}

	ret = mdss_irq_domain_init(mdp5_mdss);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to init sub-block irqs: %d\n", ret);
		goto fail_irq;
	}

	mdp5_mdss->base.funcs = &mdss_funcs;
	priv->mdss = &mdp5_mdss->base;

	pm_runtime_enable(dev->dev);

	return 0;
fail_irq:
	regulator_disable(mdp5_mdss->vdd);
fail:
	return ret;
}