// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2017 NXP
 * Copyright (C) 2018 Pengutronix, Lucas Stach <kernel@pengutronix.de>
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>

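/*
 * Register layout, as implied by the macros below: CHANCTRL sits at
 * offset 0, followed by consecutive banks of mask, set and status
 * registers.  Each bank is reg_num 32-bit words wide (one word per
 * group of 32 inputs), so CTRL_STRIDE_OFF(reg_num, bank) gives the
 * byte offset at which a given bank starts.
 */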
#define CTRL_STRIDE_OFF(_t, _r)	(_t * 4 * _r)
#define CHANCTRL		0x0
#define CHANMASK(n, t)		(CTRL_STRIDE_OFF(t, 0) + 0x4 * (n) + 0x4)
#define CHANSET(n, t)		(CTRL_STRIDE_OFF(t, 1) + 0x4 * (n) + 0x4)
#define CHANSTATUS(n, t)	(CTRL_STRIDE_OFF(t, 2) + 0x4 * (n) + 0x4)
#define CHAN_MINTDIS(t)		(CTRL_STRIDE_OFF(t, 3) + 0x4)
#define CHAN_MASTRSTAT(t)	(CTRL_STRIDE_OFF(t, 3) + 0x8)

#define CHAN_MAX_OUTPUT_INT	0x8

struct irqsteer_data {
	void __iomem		*regs;
	struct clk		*ipg_clk;
	int			irq[CHAN_MAX_OUTPUT_INT];
	int			irq_count;
	raw_spinlock_t		lock;
	int			reg_num;
	int			channel;
	struct irq_domain	*domain;
	u32			*saved_reg;
};

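/*
 * The per-channel mask/set/status words are laid out in reverse order
 * with respect to the input numbering: inputs 0-31 live in the word at
 * the highest offset of a bank, hence the reversed index calculation.
 */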
static int imx_irqsteer_get_reg_index(struct irqsteer_data *data,
				      unsigned long irqnum)
{
	return (data->reg_num - irqnum / 32 - 1);
}

static void imx_irqsteer_irq_unmask(struct irq_data *d)
{
	struct irqsteer_data *data = d->chip_data;
	int idx = imx_irqsteer_get_reg_index(data, d->hwirq);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&data->lock, flags);
	val = readl_relaxed(data->regs + CHANMASK(idx, data->reg_num));
	val |= BIT(d->hwirq % 32);
	writel_relaxed(val, data->regs + CHANMASK(idx, data->reg_num));
	raw_spin_unlock_irqrestore(&data->lock, flags);
}

static void imx_irqsteer_irq_mask(struct irq_data *d)
{
	struct irqsteer_data *data = d->chip_data;
	int idx = imx_irqsteer_get_reg_index(data, d->hwirq);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&data->lock, flags);
	val = readl_relaxed(data->regs + CHANMASK(idx, data->reg_num));
	val &= ~BIT(d->hwirq % 32);
	writel_relaxed(val, data->regs + CHANMASK(idx, data->reg_num));
	raw_spin_unlock_irqrestore(&data->lock, flags);
}

static struct irq_chip imx_irqsteer_irq_chip = {
	.name		= "irqsteer",
	.irq_mask	= imx_irqsteer_irq_mask,
	.irq_unmask	= imx_irqsteer_irq_unmask,
};

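/*
 * Inputs are treated as level interrupts: each virq is marked IRQ_LEVEL
 * and wired to the level flow handler, with the chip data pointing at
 * the per-instance irqsteer_data.
 */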
static int imx_irqsteer_irq_map(struct irq_domain *h, unsigned int irq,
				irq_hw_number_t hwirq)
{
	irq_set_status_flags(irq, IRQ_LEVEL);
	irq_set_chip_data(irq, h->host_data);
	irq_set_chip_and_handler(irq, &imx_irqsteer_irq_chip, handle_level_irq);

	return 0;
}

static const struct irq_domain_ops imx_irqsteer_domain_ops = {
	.map		= imx_irqsteer_irq_map,
	.xlate		= irq_domain_xlate_onecell,
};

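/*
 * Each output interrupt services a group of 64 inputs, so the position
 * of an output line in the "interrupts" property determines the hwirq
 * base of the inputs routed behind it.
 */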
static int imx_irqsteer_get_hwirq_base(struct irqsteer_data *data, u32 irq)
{
	int i;

	for (i = 0; i < data->irq_count; i++) {
		if (data->irq[i] == irq)
			return i * 64;
	}

	return -EINVAL;
}

static void imx_irqsteer_irq_handler(struct irq_desc *desc)
{
	struct irqsteer_data *data = irq_desc_get_handler_data(desc);
	int hwirq;
	int irq, i;

	chained_irq_enter(irq_desc_get_chip(desc), desc);

	irq = irq_desc_get_irq(desc);
	hwirq = imx_irqsteer_get_hwirq_base(data, irq);
	if (hwirq < 0) {
		pr_warn("%s: unable to get hwirq base for irq %d\n",
			__func__, irq);
		return;
	}

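	/*
	 * One output covers up to 64 inputs, i.e. two 32-bit status
	 * words starting at the hwirq base of this output line.
	 */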
	for (i = 0; i < 2; i++, hwirq += 32) {
		int idx = imx_irqsteer_get_reg_index(data, hwirq);
		unsigned long irqmap;
		int pos, virq;

		if (hwirq >= data->reg_num * 32)
			break;

		irqmap = readl_relaxed(data->regs +
				       CHANSTATUS(idx, data->reg_num));

		for_each_set_bit(pos, &irqmap, 32) {
			virq = irq_find_mapping(data->domain, pos + hwirq);
			if (virq)
				generic_handle_irq(virq);
		}
	}

	chained_irq_exit(irq_desc_get_chip(desc), desc);
}

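/*
 * Illustrative example (not taken verbatim from any real devicetree) of
 * a node this driver could bind against, based on the compatible string,
 * clock name and properties parsed below; the address, parent interrupt
 * and clock reference are placeholders:
 *
 *	irqsteer: interrupt-controller@32e2d000 {
 *		compatible = "fsl,imx-irqsteer";
 *		reg = <0x32e2d000 0x1000>;
 *		interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&clk_disp_apb>;
 *		clock-names = "ipg";
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		fsl,channel = <0>;
 *		fsl,num-irqs = <64>;
 *	};
 */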
static int imx_irqsteer_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct irqsteer_data *data;
	u32 irqs_num;
	int i, ret;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(data->regs)) {
		dev_err(&pdev->dev, "failed to initialize reg\n");
		return PTR_ERR(data->regs);
	}

	data->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(data->ipg_clk)) {
		ret = PTR_ERR(data->ipg_clk);
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret);
		return ret;
	}

	raw_spin_lock_init(&data->lock);

	ret = of_property_read_u32(np, "fsl,num-irqs", &irqs_num);
	if (ret)
		return ret;
	ret = of_property_read_u32(np, "fsl,channel", &data->channel);
	if (ret)
		return ret;

	/*
	 * There is one output irq for each group of 64 inputs.
	 * One register bit map can represent 32 input interrupts.
	 */
	data->irq_count = DIV_ROUND_UP(irqs_num, 64);
	data->reg_num = irqs_num / 32;

	if (IS_ENABLED(CONFIG_PM_SLEEP)) {
		data->saved_reg = devm_kzalloc(&pdev->dev,
					sizeof(u32) * data->reg_num,
					GFP_KERNEL);
		if (!data->saved_reg)
			return -ENOMEM;
	}

	ret = clk_prepare_enable(data->ipg_clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret);
		return ret;
	}

	/* steer all IRQs into configured channel */
	writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);

	data->domain = irq_domain_add_linear(np, data->reg_num * 32,
					     &imx_irqsteer_domain_ops, data);
	if (!data->domain) {
		dev_err(&pdev->dev, "failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto out;
	}

	if (!data->irq_count || data->irq_count > CHAN_MAX_OUTPUT_INT) {
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < data->irq_count; i++) {
		data->irq[i] = irq_of_parse_and_map(np, i);
		if (!data->irq[i]) {
			ret = -EINVAL;
			goto out;
		}

		irq_set_chained_handler_and_data(data->irq[i],
						 imx_irqsteer_irq_handler,
						 data);
	}

	platform_set_drvdata(pdev, data);

	return 0;
out:
	clk_disable_unprepare(data->ipg_clk);
	return ret;
}

static int imx_irqsteer_remove(struct platform_device *pdev)
{
	struct irqsteer_data *irqsteer_data = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < irqsteer_data->irq_count; i++)
		irq_set_chained_handler_and_data(irqsteer_data->irq[i],
						 NULL, NULL);

	irq_domain_remove(irqsteer_data->domain);

	clk_disable_unprepare(irqsteer_data->ipg_clk);

	return 0;
}

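/*
 * Across system suspend the steer block may lose its state, so the
 * per-channel mask registers are saved on suspend and written back,
 * together with the CHANCTRL routing, on resume.
 */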
#ifdef CONFIG_PM_SLEEP
static void imx_irqsteer_save_regs(struct irqsteer_data *data)
{
	int i;

	for (i = 0; i < data->reg_num; i++)
		data->saved_reg[i] = readl_relaxed(data->regs +
						CHANMASK(i, data->reg_num));
}

static void imx_irqsteer_restore_regs(struct irqsteer_data *data)
{
	int i;

	writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);
	for (i = 0; i < data->reg_num; i++)
		writel_relaxed(data->saved_reg[i],
			       data->regs + CHANMASK(i, data->reg_num));
}

static int imx_irqsteer_suspend(struct device *dev)
{
	struct irqsteer_data *irqsteer_data = dev_get_drvdata(dev);

	imx_irqsteer_save_regs(irqsteer_data);
	clk_disable_unprepare(irqsteer_data->ipg_clk);

	return 0;
}

static int imx_irqsteer_resume(struct device *dev)
{
	struct irqsteer_data *irqsteer_data = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(irqsteer_data->ipg_clk);
	if (ret) {
		dev_err(dev, "failed to enable ipg clk: %d\n", ret);
		return ret;
	}
	imx_irqsteer_restore_regs(irqsteer_data);

	return 0;
}
#endif

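/*
 * noirq sleep ops are used so the mask and routing state is back in
 * place before client device interrupts are re-enabled on resume.
 */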
static const struct dev_pm_ops imx_irqsteer_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_irqsteer_suspend, imx_irqsteer_resume)
};

static const struct of_device_id imx_irqsteer_dt_ids[] = {
	{ .compatible = "fsl,imx-irqsteer", },
	{},
};

static struct platform_driver imx_irqsteer_driver = {
	.driver = {
		.name = "imx-irqsteer",
		.of_match_table = imx_irqsteer_dt_ids,
		.pm = &imx_irqsteer_pm_ops,
	},
	.probe = imx_irqsteer_probe,
	.remove = imx_irqsteer_remove,
};
builtin_platform_driver(imx_irqsteer_driver);