// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 MediaTek Inc.
 * Author: Youlin.Pei <youlin.pei@mediatek.com>
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

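/*
 * Register map (offsets below are relative to the CIRQ base):
 * - CIRQ_ACK and the *_SET/*_CLR offsets each start an array of 32-bit
 *   registers, one bit per external interrupt (see mtk_cirq_write_mask()).
 * - CIRQ_CONTROL holds the global control bits: CIRQ_EN enables the block,
 *   CIRQ_EDGE selects edge-only recording, and CIRQ_FLUSH replays the
 *   recorded interrupts to the parent controller.
 */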
#define CIRQ_ACK        0x40
#define CIRQ_MASK_SET   0xc0
#define CIRQ_MASK_CLR   0x100
#define CIRQ_SENS_SET   0x180
#define CIRQ_SENS_CLR   0x1c0
#define CIRQ_POL_SET    0x240
#define CIRQ_POL_CLR    0x280
#define CIRQ_CONTROL    0x300

#define CIRQ_EN         0x1
#define CIRQ_EDGE       0x2
#define CIRQ_FLUSH      0x4

struct mtk_cirq_chip_data {
        void __iomem *base;
        unsigned int ext_irq_start;
        unsigned int ext_irq_end;
        struct irq_domain *domain;
};

static struct mtk_cirq_chip_data *cirq_data;

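/*
 * The ACK/MASK/SENS/POL register banks are bitmap arrays: bit (hwirq % 32)
 * of 32-bit word (hwirq / 32) corresponds to one external interrupt.
 */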
static void mtk_cirq_write_mask(struct irq_data *data, unsigned int offset)
{
        struct mtk_cirq_chip_data *chip_data = data->chip_data;
        unsigned int cirq_num = data->hwirq;
        u32 mask = 1 << (cirq_num % 32);

        writel_relaxed(mask, chip_data->base + offset + (cirq_num / 32) * 4);
}

static void mtk_cirq_mask(struct irq_data *data)
{
        mtk_cirq_write_mask(data, CIRQ_MASK_SET);
        irq_chip_mask_parent(data);
}

static void mtk_cirq_unmask(struct irq_data *data)
{
        mtk_cirq_write_mask(data, CIRQ_MASK_CLR);
        irq_chip_unmask_parent(data);
}

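/*
 * Program the CIRQ sense and polarity for the requested trigger type, then
 * forward the request to the parent controller: SENS set selects level
 * sensing (clear selects edge), POL set selects rising/high (clear selects
 * falling/low).
 */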
static int mtk_cirq_set_type(struct irq_data *data, unsigned int type)
{
        int ret;

        switch (type & IRQ_TYPE_SENSE_MASK) {
        case IRQ_TYPE_EDGE_FALLING:
                mtk_cirq_write_mask(data, CIRQ_POL_CLR);
                mtk_cirq_write_mask(data, CIRQ_SENS_CLR);
                break;
        case IRQ_TYPE_EDGE_RISING:
                mtk_cirq_write_mask(data, CIRQ_POL_SET);
                mtk_cirq_write_mask(data, CIRQ_SENS_CLR);
                break;
        case IRQ_TYPE_LEVEL_LOW:
                mtk_cirq_write_mask(data, CIRQ_POL_CLR);
                mtk_cirq_write_mask(data, CIRQ_SENS_SET);
                break;
        case IRQ_TYPE_LEVEL_HIGH:
                mtk_cirq_write_mask(data, CIRQ_POL_SET);
                mtk_cirq_write_mask(data, CIRQ_SENS_SET);
                break;
        default:
                break;
        }

        data = data->parent_data;
        ret = data->chip->irq_set_type(data, type);
        return ret;
}

static struct irq_chip mtk_cirq_chip = {
        .name                   = "MT_CIRQ",
        .irq_mask               = mtk_cirq_mask,
        .irq_unmask             = mtk_cirq_unmask,
        .irq_eoi                = irq_chip_eoi_parent,
        .irq_set_type           = mtk_cirq_set_type,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
#ifdef CONFIG_SMP
        .irq_set_affinity       = irq_chip_set_affinity_parent,
#endif
};

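/*
 * The firmware node uses a GIC-style three-cell interrupt specifier:
 * <interrupt-type hwirq flags>, where interrupt-type 0 denotes an SPI.
 * Only SPIs within the supported external interrupt range are accepted.
 */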
static int mtk_cirq_domain_translate(struct irq_domain *d,
                                     struct irq_fwspec *fwspec,
                                     unsigned long *hwirq,
                                     unsigned int *type)
{
        if (is_of_node(fwspec->fwnode)) {
                if (fwspec->param_count != 3)
                        return -EINVAL;

                /* No PPI should point to this domain */
                if (fwspec->param[0] != 0)
                        return -EINVAL;

                /* Reject interrupts outside the supported external irq range */
                if (fwspec->param[1] < cirq_data->ext_irq_start ||
                    fwspec->param[1] > cirq_data->ext_irq_end)
                        return -EINVAL;

                *hwirq = fwspec->param[1] - cirq_data->ext_irq_start;
                *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
                return 0;
        }

        return -EINVAL;
}

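/*
 * Hierarchical allocation: set the hwirq and chip at the CIRQ level, then
 * allocate the corresponding interrupt in the parent domain by re-targeting
 * the same specifier at the parent's fwnode.
 */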
static int mtk_cirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                 unsigned int nr_irqs, void *arg)
{
        int ret;
        irq_hw_number_t hwirq;
        unsigned int type;
        struct irq_fwspec *fwspec = arg;
        struct irq_fwspec parent_fwspec = *fwspec;

        ret = mtk_cirq_domain_translate(domain, fwspec, &hwirq, &type);
        if (ret)
                return ret;

        if (WARN_ON(nr_irqs != 1))
                return -EINVAL;

        irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
                                      &mtk_cirq_chip,
                                      domain->host_data);

        parent_fwspec.fwnode = domain->parent->fwnode;
        return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
                                            &parent_fwspec);
}

static const struct irq_domain_ops cirq_domain_ops = {
        .translate      = mtk_cirq_domain_translate,
        .alloc          = mtk_cirq_domain_alloc,
        .free           = irq_domain_free_irqs_common,
};

#ifdef CONFIG_PM_SLEEP
static int mtk_cirq_suspend(void)
{
        u32 value, mask;
        unsigned int irq, hwirq_num;
        bool pending, masked;
        int i, pendret, maskret;

        /*
         * When an external interrupt fires, CIRQ records its status even if
         * CIRQ itself is not enabled. When the flush command is later
         * executed, CIRQ resends signals according to that recorded status,
         * so stale status must be cleared here or CIRQ will resend the wrong
         * signals.
         *
         * arch_suspend_disable_irqs() is called before this suspend
         * callback. If all the status bits were simply cleared, external
         * interrupts that arrived between arch_suspend_disable_irqs() and
         * this callback would be lost. Avoid that as follows:
         *
         * - Iterate over all the CIRQ supported interrupts;
         * - For each interrupt, inspect its pending and masked status at GIC
         *   level;
         * - If pending and unmasked, it arrived between
         *   arch_suspend_disable_irqs() and this callback, so don't ACK it.
         *   Otherwise, ACK it.
         */
        hwirq_num = cirq_data->ext_irq_end - cirq_data->ext_irq_start + 1;
        for (i = 0; i < hwirq_num; i++) {
                irq = irq_find_mapping(cirq_data->domain, i);
                if (irq) {
                        pendret = irq_get_irqchip_state(irq,
                                                        IRQCHIP_STATE_PENDING,
                                                        &pending);

                        maskret = irq_get_irqchip_state(irq,
                                                        IRQCHIP_STATE_MASKED,
                                                        &masked);

                        if (pendret == 0 && maskret == 0 &&
                            (pending && !masked))
                                continue;
                }

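                /*
                 * Either the interrupt is not pending-and-unmasked at GIC
                 * level, or its state could not be read: clear any status
                 * CIRQ may already have recorded for it.
                 */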
                mask = 1 << (i % 32);
                writel_relaxed(mask, cirq_data->base + CIRQ_ACK + (i / 32) * 4);
        }

        /* Enable CIRQ in edge-only mode to record edge-triggered interrupts */
        value = readl_relaxed(cirq_data->base + CIRQ_CONTROL);
        value |= (CIRQ_EDGE | CIRQ_EN);
        writel_relaxed(value, cirq_data->base + CIRQ_CONTROL);

        return 0;
}


static void mtk_cirq_resume(void)
{
        u32 value;

        /* Flush recorded interrupts; this resends them to the parent controller */
        value = readl_relaxed(cirq_data->base + CIRQ_CONTROL);
        writel_relaxed(value | CIRQ_FLUSH, cirq_data->base + CIRQ_CONTROL);

        /* disable cirq */
        value = readl_relaxed(cirq_data->base + CIRQ_CONTROL);
        value &= ~(CIRQ_EDGE | CIRQ_EN);
        writel_relaxed(value, cirq_data->base + CIRQ_CONTROL);
}

static struct syscore_ops mtk_cirq_syscore_ops = {
        .suspend        = mtk_cirq_suspend,
        .resume         = mtk_cirq_resume,
};

static void mtk_cirq_syscore_init(void)
{
        register_syscore_ops(&mtk_cirq_syscore_ops);
}
#else
static inline void mtk_cirq_syscore_init(void) {}
#endif

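/*
 * Probe the CIRQ from the device tree. The node is expected to provide a
 * register window, an interrupt parent, and a "mediatek,ext-irq-range"
 * property giving the first and last parent interrupt handled by the CIRQ.
 *
 * Illustrative (hypothetical) device-tree node; the unit address, register
 * range, and ext-irq-range values are placeholders only:
 *
 *      cirq: interrupt-controller@10204000 {
 *              compatible = "mediatek,mtk-cirq";
 *              reg = <0x10204000 0x1000>;
 *              interrupt-controller;
 *              #interrupt-cells = <3>;
 *              interrupt-parent = <&gic>;
 *              mediatek,ext-irq-range = <32 200>;
 *      };
 */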
static int __init mtk_cirq_of_init(struct device_node *node,
                                   struct device_node *parent)
{
        struct irq_domain *domain, *domain_parent;
        unsigned int irq_num;
        int ret;

        domain_parent = irq_find_host(parent);
        if (!domain_parent) {
                pr_err("mtk_cirq: interrupt-parent not found\n");
                return -EINVAL;
        }

        cirq_data = kzalloc(sizeof(*cirq_data), GFP_KERNEL);
        if (!cirq_data)
                return -ENOMEM;

        cirq_data->base = of_iomap(node, 0);
        if (!cirq_data->base) {
                pr_err("mtk_cirq: unable to map cirq register\n");
                ret = -ENXIO;
                goto out_free;
        }

        ret = of_property_read_u32_index(node, "mediatek,ext-irq-range", 0,
                                         &cirq_data->ext_irq_start);
        if (ret)
                goto out_unmap;

        ret = of_property_read_u32_index(node, "mediatek,ext-irq-range", 1,
                                         &cirq_data->ext_irq_end);
        if (ret)
                goto out_unmap;

        irq_num = cirq_data->ext_irq_end - cirq_data->ext_irq_start + 1;
        domain = irq_domain_add_hierarchy(domain_parent, 0,
                                          irq_num, node,
                                          &cirq_domain_ops, cirq_data);
        if (!domain) {
                ret = -ENOMEM;
                goto out_unmap;
        }
        cirq_data->domain = domain;

        mtk_cirq_syscore_init();

        return 0;

out_unmap:
        iounmap(cirq_data->base);
out_free:
        kfree(cirq_data);
        return ret;
}

IRQCHIP_DECLARE(mtk_cirq, "mediatek,mtk-cirq", mtk_cirq_of_init);