// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP WakeupGen Source file
 *
 * OMAP WakeupGen is the interrupt controller extension used along
 * with the ARM GIC to wake the CPU out of low power states on
 * external interrupts. It is responsible for generating wakeup
 * events from the incoming interrupts and enable bits. It is
 * implemented in the MPU always-ON power domain. During normal operation,
 * WakeupGen delivers external interrupts directly to the GIC.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/cpu_pm.h>

#include "omap-wakeupgen.h"
#include "omap-secure.h"

#include "soc.h"
#include "omap4-sar-layout.h"
#include "common.h"
#include "pm.h"

#define AM43XX_NR_REG_BANKS	7
#define AM43XX_IRQS		224
#define MAX_NR_REG_BANKS	AM43XX_NR_REG_BANKS
#define MAX_IRQS		AM43XX_IRQS
#define DEFAULT_NR_REG_BANKS	5
#define DEFAULT_IRQS		160
#define WKG_MASK_ALL		0x00000000
#define WKG_UNMASK_ALL		0xffffffff
#define CPU_ENA_OFFSET		0x400
#define CPU0_ID			0x0
#define CPU1_ID			0x1
#define OMAP4_NR_BANKS		4
#define OMAP4_NR_IRQS		128

#define SYS_NIRQ1_EXT_SYS_IRQ_1	7
#define SYS_NIRQ2_EXT_SYS_IRQ_2	119

static void __iomem *wakeupgen_base;
static void __iomem *sar_base;
static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
static unsigned int irq_target_cpu[MAX_IRQS];
static unsigned int irq_banks = DEFAULT_NR_REG_BANKS;
static unsigned int max_irqs = DEFAULT_IRQS;
static unsigned int omap_secure_apis;

#ifdef CONFIG_CPU_PM
static unsigned int wakeupgen_context[MAX_NR_REG_BANKS];
#endif

struct omap_wakeupgen_ops {
	void (*save_context)(void);
	void (*restore_context)(void);
};

static struct omap_wakeupgen_ops *wakeupgen_ops;

/*
 * Static helper functions.
 */
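/*
 * The per-CPU WakeupGen enable registers are banked at a stride of
 * CPU_ENA_OFFSET, with one 32-bit register per bank of 32 SPIs.
 */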
static inline u32 wakeupgen_readl(u8 idx, u32 cpu)
{
	return readl_relaxed(wakeupgen_base + OMAP_WKG_ENB_A_0 +
				(cpu * CPU_ENA_OFFSET) + (idx * 4));
}

static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu)
{
	writel_relaxed(val, wakeupgen_base + OMAP_WKG_ENB_A_0 +
				(cpu * CPU_ENA_OFFSET) + (idx * 4));
}

static inline void sar_writel(u32 val, u32 offset, u8 idx)
{
	writel_relaxed(val, sar_base + offset + (idx * 4));
}

static inline int _wakeupgen_get_irq_info(u32 irq, u32 *bit_posn, u8 *reg_index)
{
	/*
	 * Each WakeupGen register controls 32 interrupts,
	 * i.e. 1 bit per SPI IRQ.
	 */
	*reg_index = irq >> 5;
	*bit_posn = irq %= 32;

	return 0;
}

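/* Clear (mask) the WakeupGen enable bit for 'irq' on 'cpu'. */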
static void _wakeupgen_clear(unsigned int irq, unsigned int cpu)
{
	u32 val, bit_number;
	u8 i;

	if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
		return;

	val = wakeupgen_readl(i, cpu);
	val &= ~BIT(bit_number);
	wakeupgen_writel(val, i, cpu);
}

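/* Set (unmask) the WakeupGen enable bit for 'irq' on 'cpu'. */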
static void _wakeupgen_set(unsigned int irq, unsigned int cpu)
{
	u32 val, bit_number;
	u8 i;

	if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
		return;

	val = wakeupgen_readl(i, cpu);
	val |= BIT(bit_number);
	wakeupgen_writel(val, i, cpu);
}

/*
 * Architecture specific Mask extension
 */
static void wakeupgen_mask(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	_wakeupgen_clear(d->hwirq, irq_target_cpu[d->hwirq]);
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
	irq_chip_mask_parent(d);
}

/*
 * Architecture specific Unmask extension
 */
static void wakeupgen_unmask(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	_wakeupgen_set(d->hwirq, irq_target_cpu[d->hwirq]);
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
	irq_chip_unmask_parent(d);
}

/*
 * The sys_nirq pins bypass peripheral modules and are wired directly
 * to MPUSS wakeupgen. They get automatically inverted for GIC.
 */
static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type)
{
	bool inverted = false;

	switch (type) {
	case IRQ_TYPE_LEVEL_LOW:
		type &= ~IRQ_TYPE_LEVEL_MASK;
		type |= IRQ_TYPE_LEVEL_HIGH;
		inverted = true;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		type &= ~IRQ_TYPE_EDGE_BOTH;
		type |= IRQ_TYPE_EDGE_RISING;
		inverted = true;
		break;
	default:
		break;
	}

	if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 &&
	    d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2)
		pr_warn("wakeupgen: irq%li polarity inverted in dts\n",
			d->hwirq);

	return irq_chip_set_type_parent(d, type);
}

#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);

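/* Snapshot the WakeupGen masks of 'cpu' into its per-CPU irqmasks copy. */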
static void _wakeupgen_save_masks(unsigned int cpu)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu);
}

static void _wakeupgen_restore_masks(unsigned int cpu)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu);
}

static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(reg, i, cpu);
}

/*
 * Mask or unmask all interrupts on a given CPU.
 *	1 = Save the current masks and mask all interrupts on the 'cpu'
 *	0 = Unmask all interrupts and restore the saved masks on the 'cpu'
 * Ensure that the initial mask is maintained. This is faster than
 * iterating through GIC registers to arrive at the correct masks.
 */
static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	if (set) {
		_wakeupgen_save_masks(cpu);
		_wakeupgen_set_all(cpu, WKG_MASK_ALL);
	} else {
		_wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
		_wakeupgen_restore_masks(cpu);
	}
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
}
#endif

#ifdef CONFIG_CPU_PM
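/*
 * Save the OMAP4 WakeupGen context to SAR RAM so that the ROM code can
 * restore it on wakeup from a deep low power state.
 */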
static inline void omap4_irq_save_context(void)
{
	u32 i, val;

	if (omap_rev() == OMAP4430_REV_ES1_0)
		return;

	for (i = 0; i < irq_banks; i++) {
		/* Save the CPUx interrupt mask for IRQ 0 to 127 */
		val = wakeupgen_readl(i, 0);
		sar_writel(val, WAKEUPGENENB_OFFSET_CPU0, i);
		val = wakeupgen_readl(i, 1);
		sar_writel(val, WAKEUPGENENB_OFFSET_CPU1, i);

		/*
		 * Disable the secure interrupts for CPUx. The restore
		 * code blindly restores secure and non-secure interrupt
		 * masks from SAR RAM. Secure interrupts are not supposed
		 * to be enabled from HLOS. So overwrite the SAR location
		 * so that the secure interrupt remains disabled.
		 */
		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
	}

	/* Save AuxBoot* registers */
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
	writel_relaxed(val, sar_base + AUXCOREBOOT0_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
	writel_relaxed(val, sar_base + AUXCOREBOOT1_OFFSET);

	/* Save SyncReq generation logic */
	val = readl_relaxed(wakeupgen_base + OMAP_PTMSYNCREQ_MASK);
	writel_relaxed(val, sar_base + PTMSYNCREQ_MASK_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_PTMSYNCREQ_EN);
	writel_relaxed(val, sar_base + PTMSYNCREQ_EN_OFFSET);

	/* Set the Backup Bit Mask status */
	val = readl_relaxed(sar_base + SAR_BACKUP_STATUS_OFFSET);
	val |= SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
}

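/*
 * OMAP5 variant of the WakeupGen context save, using the OMAP5 SAR
 * layout offsets.
 */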
static inline void omap5_irq_save_context(void)
{
	u32 i, val;

	for (i = 0; i < irq_banks; i++) {
		/* Save the CPUx interrupt mask for IRQ 0 to 159 */
		val = wakeupgen_readl(i, 0);
		sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU0, i);
		val = wakeupgen_readl(i, 1);
		sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU1, i);
		sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
		sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
	}

	/* Save AuxBoot* registers */
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
	writel_relaxed(val, sar_base + OMAP5_AUXCOREBOOT0_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
	writel_relaxed(val, sar_base + OMAP5_AUXCOREBOOT1_OFFSET);

	/* Set the Backup Bit Mask status */
	val = readl_relaxed(sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
	val |= SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
}

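/*
 * On AM43xx the masks are kept in a software copy and the hardware
 * registers are cleared; the kernel restores them itself rather than
 * relying on a SAR/ROM assisted restore.
 */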
static inline void am43xx_irq_save_context(void)
{
	u32 i;

	for (i = 0; i < irq_banks; i++) {
		wakeupgen_context[i] = wakeupgen_readl(i, 0);
		wakeupgen_writel(0, i, CPU0_ID);
	}
}

/*
 * Save WakeupGen interrupt context in SAR BANK3. Restore is done by
 * ROM code. WakeupGen IP is integrated along with GIC to manage the
 * interrupt wakeups from CPU low power states. It manages
 * masking/unmasking of Shared Peripheral Interrupts (SPI). So the
 * interrupt enable/disable control should be in sync and consistent
 * at WakeupGen and GIC so that interrupts are not lost.
 */
static void irq_save_context(void)
{
	/* DRA7 has no SAR to save */
	if (soc_is_dra7xx())
		return;

	if (wakeupgen_ops && wakeupgen_ops->save_context)
		wakeupgen_ops->save_context();
}

/*
 * Clear WakeupGen SAR backup status.
 */
static void irq_sar_clear(void)
{
	u32 val;
	u32 offset = SAR_BACKUP_STATUS_OFFSET;

	/* DRA7 has no SAR to save */
	if (soc_is_dra7xx())
		return;

	if (soc_is_omap54xx())
		offset = OMAP5_SAR_BACKUP_STATUS_OFFSET;

	val = readl_relaxed(sar_base + offset);
	val &= ~SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + offset);
}

static void am43xx_irq_restore_context(void)
{
	u32 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(wakeupgen_context[i], i, CPU0_ID);
}

static void irq_restore_context(void)
{
	if (wakeupgen_ops && wakeupgen_ops->restore_context)
		wakeupgen_ops->restore_context();
}

/*
 * Save GIC and Wakeupgen interrupt context using secure API
 * for HS/EMU devices.
 */
static void irq_save_secure_context(void)
{
	u32 ret;

	ret = omap_secure_dispatcher(OMAP4_HAL_SAVEGIC_INDEX,
				FLAG_START_CRITICAL,
				0, 0, 0, 0, 0);
	if (ret != API_HAL_RET_VALUE_OK)
		pr_err("GIC and Wakeupgen context save failed\n");
}

/* Define ops for context save and restore for each SoC */
static struct omap_wakeupgen_ops omap4_wakeupgen_ops = {
	.save_context = omap4_irq_save_context,
	.restore_context = irq_sar_clear,
};

static struct omap_wakeupgen_ops omap5_wakeupgen_ops = {
	.save_context = omap5_irq_save_context,
	.restore_context = irq_sar_clear,
};

static struct omap_wakeupgen_ops am43xx_wakeupgen_ops = {
	.save_context = am43xx_irq_save_context,
	.restore_context = am43xx_irq_restore_context,
};
#else
static struct omap_wakeupgen_ops omap4_wakeupgen_ops = {};
static struct omap_wakeupgen_ops omap5_wakeupgen_ops = {};
static struct omap_wakeupgen_ops am43xx_wakeupgen_ops = {};
#endif

#ifdef CONFIG_HOTPLUG_CPU
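/*
 * CPU hotplug callbacks: restore the saved wakeup masks when a CPU comes
 * online, and save the masks and mask everything when it is taken down.
 */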
static int omap_wakeupgen_cpu_online(unsigned int cpu)
{
	wakeupgen_irqmask_all(cpu, 0);
	return 0;
}

static int omap_wakeupgen_cpu_dead(unsigned int cpu)
{
	wakeupgen_irqmask_all(cpu, 1);
	return 0;
}

static void __init irq_hotplug_init(void)
{
	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "arm/omap-wake:online",
				  omap_wakeupgen_cpu_online, NULL);
	cpuhp_setup_state_nocalls(CPUHP_ARM_OMAP_WAKE_DEAD,
				  "arm/omap-wake:dead", NULL,
				  omap_wakeupgen_cpu_dead);
}
#else
static void __init irq_hotplug_init(void)
{}
#endif

#ifdef CONFIG_CPU_PM
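/*
 * CPU PM notifier: on cluster power transitions, save the WakeupGen
 * context (through the secure API on HS/EMU devices) and restore it
 * on exit for GP and AM43xx devices.
 */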
static int irq_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		if (omap_type() == OMAP2_DEVICE_TYPE_GP || soc_is_am43xx())
			irq_save_context();
		else
			irq_save_secure_context();
		break;
	case CPU_CLUSTER_PM_EXIT:
		if (omap_type() == OMAP2_DEVICE_TYPE_GP || soc_is_am43xx())
			irq_restore_context();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block irq_notifier_block = {
	.notifier_call = irq_notifier,
};

static void __init irq_pm_init(void)
{
	/* FIXME: Remove this when MPU OSWR support is added */
	if (!IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE))
		cpu_pm_register_notifier(&irq_notifier_block);
}
#else
static void __init irq_pm_init(void)
{}
#endif

void __iomem *omap_get_wakeupgen_base(void)
{
	return wakeupgen_base;
}

int omap_secure_apis_support(void)
{
	return omap_secure_apis;
}

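/*
 * WakeupGen irq_chip: mask/unmask update the wakeup enable bits here and
 * then forward the operation to the parent GIC chip.
 */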
static struct irq_chip wakeupgen_chip = {
	.name			= "WUGEN",
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_mask		= wakeupgen_mask,
	.irq_unmask		= wakeupgen_unmask,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= wakeupgen_irq_set_type,
	.flags			= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
#ifdef CONFIG_SMP
	.irq_set_affinity	= irq_chip_set_affinity_parent,
#endif
};

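/*
 * WakeupGen is stacked on the GIC as a hierarchical irq domain: SPIs are
 * translated here and then allocated in the parent (GIC) domain.
 */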
static int wakeupgen_domain_translate(struct irq_domain *d,
				      struct irq_fwspec *fwspec,
				      unsigned long *hwirq,
				      unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count != 3)
			return -EINVAL;

		/* No PPI should point to this domain */
		if (fwspec->param[0] != 0)
			return -EINVAL;

		*hwirq = fwspec->param[1];
		*type = fwspec->param[2];
		return 0;
	}

	return -EINVAL;
}

static int wakeupgen_domain_alloc(struct irq_domain *domain,
				  unsigned int virq,
				  unsigned int nr_irqs, void *data)
{
	struct irq_fwspec *fwspec = data;
	struct irq_fwspec parent_fwspec;
	irq_hw_number_t hwirq;
	int i;

	if (fwspec->param_count != 3)
		return -EINVAL;	/* Not GIC compliant */
	if (fwspec->param[0] != 0)
		return -EINVAL;	/* No PPI should point to this domain */

	hwirq = fwspec->param[1];
	if (hwirq >= MAX_IRQS)
		return -EINVAL;	/* Can't deal with this */

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &wakeupgen_chip, NULL);

	parent_fwspec = *fwspec;
	parent_fwspec.fwnode = domain->parent->fwnode;
	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
					    &parent_fwspec);
}

static const struct irq_domain_ops wakeupgen_domain_ops = {
	.translate	= wakeupgen_domain_translate,
	.alloc		= wakeupgen_domain_alloc,
	.free		= irq_domain_free_irqs_common,
};

/*
 * Initialise the wakeupgen module.
 */
static int __init wakeupgen_init(struct device_node *node,
				 struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	int i;
	unsigned int boot_cpu = smp_processor_id();
	u32 val;

	if (!parent) {
		pr_err("%pOF: no parent, giving up\n", node);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%pOF: unable to obtain parent domain\n", node);
		return -ENXIO;
	}

	/* Not supported on OMAP4 ES1.0 silicon */
	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n");
		return -EPERM;
	}

	/* Static mapping, never released */
	wakeupgen_base = of_iomap(node, 0);
	if (WARN_ON(!wakeupgen_base))
		return -ENOMEM;

	if (cpu_is_omap44xx()) {
		irq_banks = OMAP4_NR_BANKS;
		max_irqs = OMAP4_NR_IRQS;
		omap_secure_apis = 1;
		wakeupgen_ops = &omap4_wakeupgen_ops;
	} else if (soc_is_omap54xx()) {
		wakeupgen_ops = &omap5_wakeupgen_ops;
	} else if (soc_is_am43xx()) {
		irq_banks = AM43XX_NR_REG_BANKS;
		max_irqs = AM43XX_IRQS;
		wakeupgen_ops = &am43xx_wakeupgen_ops;
	}

	domain = irq_domain_add_hierarchy(parent_domain, 0, max_irqs,
					  node, &wakeupgen_domain_ops,
					  NULL);
	if (!domain) {
		iounmap(wakeupgen_base);
		return -ENOMEM;
	}

	/* Clear all IRQ bitmasks at WakeupGen level */
	for (i = 0; i < irq_banks; i++) {
		wakeupgen_writel(0, i, CPU0_ID);
		if (!soc_is_am43xx())
			wakeupgen_writel(0, i, CPU1_ID);
	}

	/*
	 * FIXME: Add support to set_smp_affinity() once the core
	 * GIC code has necessary hooks in place.
	 */

	/* Associate all the IRQs to boot CPU like GIC init does. */
	for (i = 0; i < max_irqs; i++)
		irq_target_cpu[i] = boot_cpu;

	/*
	 * Enable OMAP5 ES2 PM mode using ES2_PM_MODE in AMBA_IF_MODE:
	 * 0x0:	ES1 behavior, CPU cores enter and exit OFF mode together.
	 * 0x1:	ES2 behavior, CPU cores are allowed to enter/exit OFF mode
	 * independently.
	 * This only needs to be set once, since the register lives in the
	 * always-ON domain.
	 *
	 * We do not support ES1 behavior anymore. OMAP5 is assumed to be
	 * ES2.0, and the same is applicable for DRA7.
	 */
	if (soc_is_omap54xx() || soc_is_dra7xx()) {
		val = __raw_readl(wakeupgen_base + OMAP_AMBA_IF_MODE);
		val |= BIT(5);
		omap_smc1(OMAP5_MON_AMBA_IF_INDEX, val);
	}

	irq_hotplug_init();
	irq_pm_init();

	sar_base = omap4_get_sar_ram_base();

	return 0;
}
IRQCHIP_DECLARE(ti_wakeupgen, "ti,omap4-wugen-mpu", wakeupgen_init);