// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Freescale Semiconductor, Inc.
 */

#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/irqchip.h>
#include <linux/syscore_ops.h>

#define IMR_NUM			4
#define GPC_MAX_IRQS		(IMR_NUM * 32)

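/*
 * Offset of the first IRQ mask register (IMR1) for each core within the
 * GPC register block; the four consecutive 32-bit IMRs per core cover
 * the GPC_MAX_IRQS (128) interrupts.
 */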
#define GPC_IMR1_CORE0		0x30
#define GPC_IMR1_CORE1		0x40
#define GPC_IMR1_CORE2		0x1c0
#define GPC_IMR1_CORE3		0x1d0

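/*
 * Per-instance state: MMIO base of the GPC block, the cached wake-up
 * source masks and the RUN-mode masks saved across suspend, plus the
 * IMR1 offset of the CPU that handles wake-up (CORE0 by default).
 */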
struct gpcv2_irqchip_data {
	struct raw_spinlock	rlock;
	void __iomem		*gpc_base;
	u32			wakeup_sources[IMR_NUM];
	u32			saved_irq_mask[IMR_NUM];
	u32			cpu2wakeup;
};

static struct gpcv2_irqchip_data *imx_gpcv2_instance;

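/* Address of the i-th IMR for the CPU currently selected as wake-up CPU. */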
static void __iomem *gpcv2_idx_to_reg(struct gpcv2_irqchip_data *cd, int i)
{
	return cd->gpc_base + cd->cpu2wakeup + i * 4;
}

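/*
 * Syscore suspend hook: save the RUN-mode IRQ masks and program the
 * wake-up source masks, so that only configured wake-up sources can
 * bring the system out of the low-power state.
 */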
static int gpcv2_wakeup_source_save(void)
{
	struct gpcv2_irqchip_data *cd;
	void __iomem *reg;
	int i;

	cd = imx_gpcv2_instance;
	if (!cd)
		return 0;

	for (i = 0; i < IMR_NUM; i++) {
		reg = gpcv2_idx_to_reg(cd, i);
		cd->saved_irq_mask[i] = readl_relaxed(reg);
		writel_relaxed(cd->wakeup_sources[i], reg);
	}

	return 0;
}

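/* Syscore resume hook: restore the RUN-mode IRQ masks saved at suspend. */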
static void gpcv2_wakeup_source_restore(void)
{
	struct gpcv2_irqchip_data *cd;
	int i;

	cd = imx_gpcv2_instance;
	if (!cd)
		return;

	for (i = 0; i < IMR_NUM; i++)
		writel_relaxed(cd->saved_irq_mask[i], gpcv2_idx_to_reg(cd, i));
}

static struct syscore_ops imx_gpcv2_syscore_ops = {
	.suspend	= gpcv2_wakeup_source_save,
	.resume		= gpcv2_wakeup_source_restore,
};

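/*
 * Record whether an interrupt may wake the system: a cleared bit in
 * wakeup_sources[] means the interrupt stays unmasked in the wake-up
 * IMR written at suspend time and can therefore wake the SoC.
 */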
static int imx_gpcv2_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct gpcv2_irqchip_data *cd = d->chip_data;
	unsigned int idx = d->hwirq / 32;
	unsigned long flags;
	u32 mask, val;

	raw_spin_lock_irqsave(&cd->rlock, flags);
	mask = BIT(d->hwirq % 32);
	val = cd->wakeup_sources[idx];

	cd->wakeup_sources[idx] = on ? (val & ~mask) : (val | mask);
	raw_spin_unlock_irqrestore(&cd->rlock, flags);

	/*
	 * Do *not* call into the parent, as the GIC doesn't have any
	 * wake-up facility...
	 */

	return 0;
}

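/* Clear the mask bit in the RUN-mode IMR, then unmask at the parent (GIC). */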
static void imx_gpcv2_irq_unmask(struct irq_data *d)
{
	struct gpcv2_irqchip_data *cd = d->chip_data;
	void __iomem *reg;
	u32 val;

	raw_spin_lock(&cd->rlock);
	reg = gpcv2_idx_to_reg(cd, d->hwirq / 32);
	val = readl_relaxed(reg);
	val &= ~BIT(d->hwirq % 32);
	writel_relaxed(val, reg);
	raw_spin_unlock(&cd->rlock);

	irq_chip_unmask_parent(d);
}

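/* Set the mask bit in the RUN-mode IMR, then mask at the parent (GIC). */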
static void imx_gpcv2_irq_mask(struct irq_data *d)
{
	struct gpcv2_irqchip_data *cd = d->chip_data;
	void __iomem *reg;
	u32 val;

	raw_spin_lock(&cd->rlock);
	reg = gpcv2_idx_to_reg(cd, d->hwirq / 32);
	val = readl_relaxed(reg);
	val |= BIT(d->hwirq % 32);
	writel_relaxed(val, reg);
	raw_spin_unlock(&cd->rlock);

	irq_chip_mask_parent(d);
}

static struct irq_chip gpcv2_irqchip_data_chip = {
	.name			= "GPCv2",
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_mask		= imx_gpcv2_irq_mask,
	.irq_unmask		= imx_gpcv2_irq_unmask,
	.irq_set_wake		= imx_gpcv2_irq_set_wake,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= irq_chip_set_type_parent,
#ifdef CONFIG_SMP
	.irq_set_affinity	= irq_chip_set_affinity_parent,
#endif
};

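/*
 * Translate a three-cell GIC-style firmware specifier; only SPIs
 * (first cell 0) are routed through this domain.
 */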
static int imx_gpcv2_domain_translate(struct irq_domain *d,
				      struct irq_fwspec *fwspec,
				      unsigned long *hwirq,
				      unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count != 3)
			return -EINVAL;

		/* No PPI should point to this domain */
		if (fwspec->param[0] != 0)
			return -EINVAL;

		*hwirq = fwspec->param[1];
		*type = fwspec->param[2];
		return 0;
	}

	return -EINVAL;
}

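/*
 * Hierarchical allocation: install the GPCv2 chip at this level and
 * forward the re-parented fwspec to the parent (GIC) domain.
 */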
static int imx_gpcv2_domain_alloc(struct irq_domain *domain,
				  unsigned int irq, unsigned int nr_irqs,
				  void *data)
{
	struct irq_fwspec *fwspec = data;
	struct irq_fwspec parent_fwspec;
	irq_hw_number_t hwirq;
	unsigned int type;
	int err;
	int i;

	err = imx_gpcv2_domain_translate(domain, fwspec, &hwirq, &type);
	if (err)
		return err;

	if (hwirq >= GPC_MAX_IRQS)
		return -EINVAL;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i,
				&gpcv2_irqchip_data_chip, domain->host_data);
	}

	parent_fwspec = *fwspec;
	parent_fwspec.fwnode = domain->parent->fwnode;
	return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs,
					    &parent_fwspec);
}

static const struct irq_domain_ops gpcv2_irqchip_data_domain_ops = {
	.translate	= imx_gpcv2_domain_translate,
	.alloc		= imx_gpcv2_domain_alloc,
	.free		= irq_domain_free_irqs_common,
};

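/* The .data field encodes the number of cores (and thus IMR sets) per SoC. */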
static const struct of_device_id gpcv2_of_match[] = {
	{ .compatible = "fsl,imx7d-gpc",  .data = (const void *) 2 },
	{ .compatible = "fsl,imx8mq-gpc", .data = (const void *) 4 },
	{ /* END */ }
};

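/*
 * Early init entry point, invoked via IRQCHIP_DECLARE once the parent
 * (GIC) interrupt controller has been set up.
 */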
static int __init imx_gpcv2_irqchip_init(struct device_node *node,
					 struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	struct gpcv2_irqchip_data *cd;
	const struct of_device_id *id;
	unsigned long core_num;
	int i;

	if (!parent) {
		pr_err("%pOF: no parent, giving up\n", node);
		return -ENODEV;
	}

	id = of_match_node(gpcv2_of_match, node);
	if (!id) {
		pr_err("%pOF: unknown compatibility string\n", node);
		return -ENODEV;
	}

	core_num = (unsigned long)id->data;

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%pOF: unable to get parent domain\n", node);
		return -ENXIO;
	}

	cd = kzalloc(sizeof(struct gpcv2_irqchip_data), GFP_KERNEL);
	if (!cd) {
		pr_err("%pOF: kzalloc failed!\n", node);
		return -ENOMEM;
	}

	raw_spin_lock_init(&cd->rlock);

	cd->gpc_base = of_iomap(node, 0);
	if (!cd->gpc_base) {
		pr_err("%pOF: unable to map gpc registers\n", node);
		kfree(cd);
		return -ENOMEM;
	}

	domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS,
				node, &gpcv2_irqchip_data_domain_ops, cd);
	if (!domain) {
		iounmap(cd->gpc_base);
		kfree(cd);
		return -ENOMEM;
	}
	irq_set_default_host(domain);

	/* Initially mask all interrupts */
	for (i = 0; i < IMR_NUM; i++) {
		void __iomem *reg = cd->gpc_base + i * 4;

		switch (core_num) {
		case 4:
			writel_relaxed(~0, reg + GPC_IMR1_CORE2);
			writel_relaxed(~0, reg + GPC_IMR1_CORE3);
			/* fall through */
		case 2:
			writel_relaxed(~0, reg + GPC_IMR1_CORE0);
			writel_relaxed(~0, reg + GPC_IMR1_CORE1);
		}
		cd->wakeup_sources[i] = ~0;
	}

	/* Let CORE0 be the default CPU woken up by the GPC */
	cd->cpu2wakeup = GPC_IMR1_CORE0;

	/*
	 * Due to a hardware design flaw, the GPR interrupt (#32) must be
	 * kept unmasked during RUN mode to avoid entering DSM by mistake.
	 */
	writel_relaxed(~0x1, cd->gpc_base + cd->cpu2wakeup);

	imx_gpcv2_instance = cd;
	register_syscore_ops(&imx_gpcv2_syscore_ops);

	/*
	 * Clear the OF_POPULATED flag set in of_irq_init so that
	 * later the GPC power domain driver will not be skipped.
	 */
	of_node_clear_flag(node, OF_POPULATED);
	return 0;
}

IRQCHIP_DECLARE(imx_gpcv2_imx7d, "fsl,imx7d-gpc", imx_gpcv2_irqchip_init);
IRQCHIP_DECLARE(imx_gpcv2_imx8mq, "fsl,imx8mq-gpc", imx_gpcv2_irqchip_init);