// SPDX-License-Identifier: GPL-2.0
/*
 * Library implementing the most common irq chip callback functions
 *
 * Copyright (C) 2011, Thomas Gleixner
 */
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/syscore_ops.h>

#include "internals.h"

static LIST_HEAD(gc_list);
static DEFINE_RAW_SPINLOCK(gc_lock);

/**
 * irq_gc_noop - NOOP function
 * @d: irq_data
 */
void irq_gc_noop(struct irq_data *d)
{
}

/**
 * irq_gc_mask_disable_reg - Mask chip via disable register
 * @d: irq_data
 *
 * Chip has separate enable/disable registers instead of a single mask
 * register.
 */
void irq_gc_mask_disable_reg(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.disable);
	*ct->mask_cache &= ~mask;
	irq_gc_unlock(gc);
}

/**
 * irq_gc_mask_set_bit - Mask chip via setting bit in mask register
 * @d: irq_data
 *
 * Chip has a single mask register. Values of this register are cached
 * and protected by gc->lock
 */
void irq_gc_mask_set_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	*ct->mask_cache |= mask;
	irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit);

/**
 * irq_gc_mask_clr_bit - Mask chip via clearing bit in mask register
 * @d: irq_data
 *
 * Chip has a single mask register. Values of this register are cached
 * and protected by gc->lock
 */
void irq_gc_mask_clr_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	*ct->mask_cache &= ~mask;
	irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit);
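
/*
 * Usage sketch (not part of the upstream file): a driver whose chip has a
 * single mask register can plug the two helpers above into its
 * irq_chip_type. The register offset and the bit polarity below are
 * assumptions for illustration only.
 *
 *	struct irq_chip_type *ct = gc->chip_types;
 *
 *	ct->regs.mask = 0x10;				// assumed MASK register offset
 *	ct->chip.irq_mask = irq_gc_mask_set_bit;	// 1 = line masked
 *	ct->chip.irq_unmask = irq_gc_mask_clr_bit;	// 0 = line unmasked
 *
 * For hardware where a set bit means "enabled", swap the two callbacks:
 * mask with irq_gc_mask_clr_bit() and unmask with irq_gc_mask_set_bit().
 */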

/**
 * irq_gc_unmask_enable_reg - Unmask chip via enable register
 * @d: irq_data
 *
 * Chip has separate enable/disable registers instead of a single mask
 * register.
 */
void irq_gc_unmask_enable_reg(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.enable);
	*ct->mask_cache |= mask;
	irq_gc_unlock(gc);
}
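
/*
 * Usage sketch (not part of the upstream file): a chip with separate
 * ENABLE and DISABLE registers typically pairs irq_gc_mask_disable_reg()
 * with irq_gc_unmask_enable_reg(). The register offsets are made up for
 * illustration.
 *
 *	struct irq_chip_type *ct = gc->chip_types;
 *
 *	ct->regs.enable = 0x08;				// assumed ENABLE offset
 *	ct->regs.disable = 0x0c;			// assumed DISABLE offset
 *	ct->chip.irq_mask = irq_gc_mask_disable_reg;
 *	ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
 *
 * Both helpers keep *ct->mask_cache in sync with what was written, so the
 * cached mask state stays usable for suspend/resume code.
 */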

/**
 * irq_gc_ack_set_bit - Ack pending interrupt via setting bit
 * @d: irq_data
 */
void irq_gc_ack_set_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.ack);
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit);

/**
 * irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit
 * @d: irq_data
 */
void irq_gc_ack_clr_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = ~d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.ack);
	irq_gc_unlock(gc);
}
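
/*
 * Usage sketch (not part of the upstream file): the ack helper to use
 * depends on the write polarity of the (hypothetical) ACK register.
 *
 *	ct->regs.ack = 0x04;				// assumed ACK register offset
 *	ct->chip.irq_ack = irq_gc_ack_set_bit;		// write 1 to clear pending
 *	// or, for write-0-to-ack hardware:
 *	// ct->chip.irq_ack = irq_gc_ack_clr_bit;
 *	ct->handler = handle_edge_irq;			// flow handler that acks
 */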

/**
 * irq_gc_mask_disable_and_ack_set - Mask and ack pending interrupt
 * @d: irq_data
 *
 * This generic implementation of the irq_mask_ack method is for chips
 * with separate enable/disable registers instead of a single mask
 * register and where a pending interrupt is acknowledged by setting a
 * bit.
 *
 * Note: This is the only permutation currently used. Similar generic
 * functions should be added here if other permutations are required.
 */
void irq_gc_mask_disable_and_ack_set(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.disable);
	*ct->mask_cache &= ~mask;
	irq_reg_writel(gc, mask, ct->regs.ack);
	irq_gc_unlock(gc);
}

/**
 * irq_gc_eoi - EOI interrupt
 * @d: irq_data
 */
void irq_gc_eoi(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.eoi);
	irq_gc_unlock(gc);
}

/**
 * irq_gc_set_wake - Set/clr wake bit for an interrupt
 * @d: irq_data
 * @on: Indicates whether the wake bit should be set or cleared
 *
 * For chips where the wake from suspend functionality is not
 * configured in a separate register and the wakeup active state is
 * just stored in a bitmask.
 */
int irq_gc_set_wake(struct irq_data *d, unsigned int on)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	u32 mask = d->mask;

	if (!(mask & gc->wake_enabled))
		return -EINVAL;

	irq_gc_lock(gc);
	if (on)
		gc->wake_active |= mask;
	else
		gc->wake_active &= ~mask;
	irq_gc_unlock(gc);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_gc_set_wake);
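
/*
 * Usage sketch (not part of the upstream file): irq_gc_set_wake() only
 * records the state in gc->wake_active. The driver declares which lines
 * may request wakeup via gc->wake_enabled and applies gc->wake_active to
 * the hardware in its own suspend path. Values are illustrative.
 *
 *	gc->wake_enabled = IRQ_MSK(32);			// all 32 lines may wake
 *	ct->chip.irq_set_wake = irq_gc_set_wake;
 *
 * A gc->suspend() hook could then write gc->wake_active to a
 * (hypothetical) wake-enable register.
 */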

static u32 irq_readl_be(void __iomem *addr)
{
	return ioread32be(addr);
}

static void irq_writel_be(u32 val, void __iomem *addr)
{
	iowrite32be(val, addr);
}

void irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
			   int num_ct, unsigned int irq_base,
			   void __iomem *reg_base, irq_flow_handler_t handler)
{
	raw_spin_lock_init(&gc->lock);
	gc->num_ct = num_ct;
	gc->irq_base = irq_base;
	gc->reg_base = reg_base;
	gc->chip_types->chip.name = name;
	gc->chip_types->handler = handler;
}

/**
 * irq_alloc_generic_chip - Allocate a generic chip and initialize it
 * @name: Name of the irq chip
 * @num_ct: Number of irq_chip_type instances associated with this chip
 * @irq_base: Interrupt base nr for this chip
 * @reg_base: Register base address (virtual)
 * @handler: Default flow handler associated with this chip
 *
 * Returns an initialized irq_chip_generic structure. The chip defaults
 * to the primary (index 0) irq_chip_type and @handler.
 */
struct irq_chip_generic *
irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base,
		       void __iomem *reg_base, irq_flow_handler_t handler)
{
	struct irq_chip_generic *gc;

	gc = kzalloc(struct_size(gc, chip_types, num_ct), GFP_KERNEL);
	if (gc) {
		irq_init_generic_chip(gc, name, num_ct, irq_base, reg_base,
				      handler);
	}
	return gc;
}
EXPORT_SYMBOL_GPL(irq_alloc_generic_chip);
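
/*
 * Usage sketch (not part of the upstream file): the classic, non-domain
 * flow for a bank of 32 linearly mapped Linux interrupts. The names,
 * register offsets and interrupt base are assumptions for illustration.
 *
 *	struct irq_chip_generic *gc;
 *	struct irq_chip_type *ct;
 *
 *	gc = irq_alloc_generic_chip("FOO_INTC", 1, FOO_IRQ_BASE,
 *				    foo_reg_base, handle_level_irq);
 *	if (!gc)
 *		return -ENOMEM;
 *
 *	ct = gc->chip_types;
 *	ct->regs.mask = FOO_MASK_OFF;
 *	ct->regs.ack = FOO_ACK_OFF;
 *	ct->chip.irq_mask = irq_gc_mask_set_bit;
 *	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
 *	ct->chip.irq_ack = irq_gc_ack_set_bit;
 *
 *	irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
 *			       IRQ_NOREQUEST, 0);
 */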

static void
irq_gc_init_mask_cache(struct irq_chip_generic *gc, enum irq_gc_flags flags)
{
	struct irq_chip_type *ct = gc->chip_types;
	u32 *mskptr = &gc->mask_cache, mskreg = ct->regs.mask;
	int i;

	for (i = 0; i < gc->num_ct; i++) {
		if (flags & IRQ_GC_MASK_CACHE_PER_TYPE) {
			mskptr = &ct[i].mask_cache_priv;
			mskreg = ct[i].regs.mask;
		}
		ct[i].mask_cache = mskptr;
		if (flags & IRQ_GC_INIT_MASK_CACHE)
			*mskptr = irq_reg_readl(gc, mskreg);
	}
}

/**
 * __irq_alloc_domain_generic_chips - Allocate generic chips for an irq domain
 * @d: irq domain for which to allocate chips
 * @irqs_per_chip: Number of interrupts each chip handles (max 32)
 * @num_ct: Number of irq_chip_type instances associated with this chip
 * @name: Name of the irq chip
 * @handler: Default flow handler associated with these chips
 * @clr: IRQ_* bits to clear in the mapping function
 * @set: IRQ_* bits to set in the mapping function
 * @gcflags: Generic chip specific setup flags
 */
int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
				     int num_ct, const char *name,
				     irq_flow_handler_t handler,
				     unsigned int clr, unsigned int set,
				     enum irq_gc_flags gcflags)
{
	struct irq_domain_chip_generic *dgc;
	struct irq_chip_generic *gc;
	unsigned long flags;
	int numchips, i;
	size_t dgc_sz;
	size_t gc_sz;
	size_t sz;
	void *tmp;

	if (d->gc)
		return -EBUSY;

	numchips = DIV_ROUND_UP(d->revmap_size, irqs_per_chip);
	if (!numchips)
		return -EINVAL;

	/* Allocate a pointer, generic chip and chiptypes for each chip */
	gc_sz = struct_size(gc, chip_types, num_ct);
	dgc_sz = struct_size(dgc, gc, numchips);
	sz = dgc_sz + numchips * gc_sz;

	tmp = dgc = kzalloc(sz, GFP_KERNEL);
	if (!dgc)
		return -ENOMEM;
	dgc->irqs_per_chip = irqs_per_chip;
	dgc->num_chips = numchips;
	dgc->irq_flags_to_set = set;
	dgc->irq_flags_to_clear = clr;
	dgc->gc_flags = gcflags;
	d->gc = dgc;

	/* Calc pointer to the first generic chip */
	tmp += dgc_sz;
	for (i = 0; i < numchips; i++) {
		/* Store the pointer to the generic chip */
		dgc->gc[i] = gc = tmp;
		irq_init_generic_chip(gc, name, num_ct, i * irqs_per_chip,
				      NULL, handler);

		gc->domain = d;
		if (gcflags & IRQ_GC_BE_IO) {
			gc->reg_readl = &irq_readl_be;
			gc->reg_writel = &irq_writel_be;
		}

		raw_spin_lock_irqsave(&gc_lock, flags);
		list_add_tail(&gc->list, &gc_list);
		raw_spin_unlock_irqrestore(&gc_lock, flags);
		/* Calc pointer to the next generic chip */
		tmp += gc_sz;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__irq_alloc_domain_generic_chips);
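
/*
 * Usage sketch (not part of the upstream file): the irq domain based flow.
 * Drivers normally call the irq_alloc_domain_generic_chips() wrapper and
 * then fill in the per-chip register layout. Names and offsets are
 * illustrative; np is an assumed device tree node.
 *
 *	struct irq_domain *domain;
 *	struct irq_chip_generic *gc;
 *	int ret;
 *
 *	domain = irq_domain_add_linear(np, 64, &irq_generic_chip_ops, NULL);
 *	if (!domain)
 *		return -ENOMEM;
 *
 *	ret = irq_alloc_domain_generic_chips(domain, 32, 1, "FOO_INTC",
 *					     handle_level_irq, 0, 0,
 *					     IRQ_GC_INIT_MASK_CACHE);
 *	if (ret)
 *		return ret;
 *
 *	gc = irq_get_domain_generic_chip(domain, 0);
 *	gc->reg_base = foo_reg_base;
 *	gc->chip_types[0].regs.mask = FOO_MASK_OFF;
 *	gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
 *	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
 *
 * With 64 hardware interrupts and 32 per chip, two chips are allocated;
 * the second one is reached via irq_get_domain_generic_chip(domain, 32).
 */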

static struct irq_chip_generic *
__irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
{
	struct irq_domain_chip_generic *dgc = d->gc;
	int idx;

	if (!dgc)
		return ERR_PTR(-ENODEV);
	idx = hw_irq / dgc->irqs_per_chip;
	if (idx >= dgc->num_chips)
		return ERR_PTR(-EINVAL);
	return dgc->gc[idx];
}

/**
 * irq_get_domain_generic_chip - Get a pointer to the generic chip of a hw_irq
 * @d: irq domain pointer
 * @hw_irq: Hardware interrupt number
 */
struct irq_chip_generic *
irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
{
	struct irq_chip_generic *gc = __irq_get_domain_generic_chip(d, hw_irq);

	return !IS_ERR(gc) ? gc : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip);

/*
 * Separate lockdep classes for interrupt chips which can nest irq_desc
 * lock and request mutex.
 */
static struct lock_class_key irq_nested_lock_class;
static struct lock_class_key irq_nested_request_class;

/*
 * irq_map_generic_chip - Map a generic chip for an irq domain
 */
int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
			 irq_hw_number_t hw_irq)
{
	struct irq_data *data = irq_domain_get_irq_data(d, virq);
	struct irq_domain_chip_generic *dgc = d->gc;
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;
	struct irq_chip *chip;
	unsigned long flags;
	int idx;

	gc = __irq_get_domain_generic_chip(d, hw_irq);
	if (IS_ERR(gc))
		return PTR_ERR(gc);

	idx = hw_irq % dgc->irqs_per_chip;

	if (test_bit(idx, &gc->unused))
		return -ENOTSUPP;

	if (test_bit(idx, &gc->installed))
		return -EBUSY;

	ct = gc->chip_types;
	chip = &ct->chip;

	/* We only init the cache for the first mapping of a generic chip */
	if (!gc->installed) {
		raw_spin_lock_irqsave(&gc->lock, flags);
		irq_gc_init_mask_cache(gc, dgc->gc_flags);
		raw_spin_unlock_irqrestore(&gc->lock, flags);
	}

	/* Mark the interrupt as installed */
	set_bit(idx, &gc->installed);

	if (dgc->gc_flags & IRQ_GC_INIT_NESTED_LOCK)
		irq_set_lockdep_class(virq, &irq_nested_lock_class,
				      &irq_nested_request_class);

	if (chip->irq_calc_mask)
		chip->irq_calc_mask(data);
	else
		data->mask = 1 << idx;

	irq_domain_set_info(d, virq, hw_irq, chip, gc, ct->handler, NULL, NULL);
	irq_modify_status(virq, dgc->irq_flags_to_clear, dgc->irq_flags_to_set);
	return 0;
}

static void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq)
{
	struct irq_data *data = irq_domain_get_irq_data(d, virq);
	struct irq_domain_chip_generic *dgc = d->gc;
	unsigned int hw_irq = data->hwirq;
	struct irq_chip_generic *gc;
	int irq_idx;

	gc = irq_get_domain_generic_chip(d, hw_irq);
	if (!gc)
		return;

	irq_idx = hw_irq % dgc->irqs_per_chip;

	clear_bit(irq_idx, &gc->installed);
	irq_domain_set_info(d, virq, hw_irq, &no_irq_chip, NULL, NULL, NULL,
			    NULL);
}

struct irq_domain_ops irq_generic_chip_ops = {
	.map	= irq_map_generic_chip,
	.unmap	= irq_unmap_generic_chip,
	.xlate	= irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_generic_chip_ops);

/**
 * irq_setup_generic_chip - Setup a range of interrupts with a generic chip
 * @gc: Generic irq chip holding all data
 * @msk: Bitmask holding the irqs to initialize relative to gc->irq_base
 * @flags: Flags for initialization
 * @clr: IRQ_* bits to clear
 * @set: IRQ_* bits to set
 *
 * Set up max. 32 interrupts starting from gc->irq_base. Note, this
 * initializes all interrupts to the primary irq_chip_type and its
 * associated handler.
 */
void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
			    enum irq_gc_flags flags, unsigned int clr,
			    unsigned int set)
{
	struct irq_chip_type *ct = gc->chip_types;
	struct irq_chip *chip = &ct->chip;
	unsigned int i;

	raw_spin_lock(&gc_lock);
	list_add_tail(&gc->list, &gc_list);
	raw_spin_unlock(&gc_lock);

	irq_gc_init_mask_cache(gc, flags);

	for (i = gc->irq_base; msk; msk >>= 1, i++) {
		if (!(msk & 0x01))
			continue;

		if (flags & IRQ_GC_INIT_NESTED_LOCK)
			irq_set_lockdep_class(i, &irq_nested_lock_class,
					      &irq_nested_request_class);

		if (!(flags & IRQ_GC_NO_MASK)) {
			struct irq_data *d = irq_get_irq_data(i);

			if (chip->irq_calc_mask)
				chip->irq_calc_mask(d);
			else
				d->mask = 1 << (i - gc->irq_base);
		}
		irq_set_chip_and_handler(i, chip, ct->handler);
		irq_set_chip_data(i, gc);
		irq_modify_status(i, clr, set);
	}
	gc->irq_cnt = i - gc->irq_base;
}
EXPORT_SYMBOL_GPL(irq_setup_generic_chip);
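
/*
 * Usage sketch (not part of the upstream file): @msk selects which of the
 * up to 32 interrupts relative to gc->irq_base get initialized, so holes
 * in a bank can be skipped. For example, to set up lines 0-3 and 8-11 only:
 *
 *	irq_setup_generic_chip(gc, 0x0f0f, IRQ_GC_INIT_MASK_CACHE,
 *			       IRQ_NOREQUEST | IRQ_NOPROBE, 0);
 */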

/**
 * irq_setup_alt_chip - Switch to alternative chip
 * @d: irq_data for this interrupt
 * @type: Flow type to be initialized
 *
 * Only to be called from chip->irq_set_type() callbacks.
 */
int irq_setup_alt_chip(struct irq_data *d, unsigned int type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = gc->chip_types;
	unsigned int i;

	for (i = 0; i < gc->num_ct; i++, ct++) {
		if (ct->type & type) {
			d->chip = &ct->chip;
			irq_data_to_desc(d)->handle_irq = ct->handler;
			return 0;
		}
	}
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(irq_setup_alt_chip);
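
/*
 * Usage sketch (not part of the upstream file): a generic chip registered
 * with two irq_chip_type entries (selected via ct->type) can switch
 * between them from its irq_set_type() callback. foo_irq_set_type() is a
 * hypothetical driver callback.
 *
 *	static int foo_irq_set_type(struct irq_data *d, unsigned int type)
 *	{
 *		// program the (hypothetical) trigger registers here, then
 *		// switch chip and handler to the matching irq_chip_type
 *		return irq_setup_alt_chip(d, type);
 *	}
 *
 * assuming gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK and
 * gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH were set at init time.
 */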

/**
 * irq_remove_generic_chip - Remove a chip
 * @gc: Generic irq chip holding all data
 * @msk: Bitmask holding the irqs to remove relative to gc->irq_base
 * @clr: IRQ_* bits to clear
 * @set: IRQ_* bits to set
 *
 * Remove up to 32 interrupts starting from gc->irq_base.
 */
void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
			     unsigned int clr, unsigned int set)
{
	unsigned int i, virq;

	raw_spin_lock(&gc_lock);
	list_del(&gc->list);
	raw_spin_unlock(&gc_lock);

	for (i = 0; msk; msk >>= 1, i++) {
		if (!(msk & 0x01))
			continue;

		/*
		 * Interrupt domain based chips store the base hardware
		 * interrupt number in gc::irq_base. Otherwise gc::irq_base
		 * contains the base Linux interrupt number.
		 */
		if (gc->domain) {
			virq = irq_find_mapping(gc->domain, gc->irq_base + i);
			if (!virq)
				continue;
		} else {
			virq = gc->irq_base + i;
		}

		/* Remove handler first. That will mask the irq line */
		irq_set_handler(virq, NULL);
		irq_set_chip(virq, &no_irq_chip);
		irq_set_chip_data(virq, NULL);
		irq_modify_status(virq, clr, set);
	}
}
EXPORT_SYMBOL_GPL(irq_remove_generic_chip);
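
/*
 * Usage sketch (not part of the upstream file): teardown mirrors the setup
 * shown above. The same bitmask passed to irq_setup_generic_chip() is used
 * again, and IRQ_NOREQUEST/IRQ_NOPROBE are typically restored.
 *
 *	irq_remove_generic_chip(gc, IRQ_MSK(32), 0,
 *				IRQ_NOREQUEST | IRQ_NOPROBE);
 *	kfree(gc);			// only for irq_alloc_generic_chip() chips
 */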

static struct irq_data *irq_gc_get_irq_data(struct irq_chip_generic *gc)
{
	unsigned int virq;

	if (!gc->domain)
		return irq_get_irq_data(gc->irq_base);

	/*
	 * We don't know which of the irqs has actually been installed.
	 * Use the first one.
	 */
	if (!gc->installed)
		return NULL;

	virq = irq_find_mapping(gc->domain, gc->irq_base + __ffs(gc->installed));
	return virq ? irq_get_irq_data(virq) : NULL;
}

#ifdef CONFIG_PM
static int irq_gc_suspend(void)
{
	struct irq_chip_generic *gc;

	list_for_each_entry(gc, &gc_list, list) {
		struct irq_chip_type *ct = gc->chip_types;

		if (ct->chip.irq_suspend) {
			struct irq_data *data = irq_gc_get_irq_data(gc);

			if (data)
				ct->chip.irq_suspend(data);
		}

		if (gc->suspend)
			gc->suspend(gc);
	}
	return 0;
}

static void irq_gc_resume(void)
{
	struct irq_chip_generic *gc;

	list_for_each_entry(gc, &gc_list, list) {
		struct irq_chip_type *ct = gc->chip_types;

		if (gc->resume)
			gc->resume(gc);

		if (ct->chip.irq_resume) {
			struct irq_data *data = irq_gc_get_irq_data(gc);

			if (data)
				ct->chip.irq_resume(data);
		}
	}
}
#else
#define irq_gc_suspend NULL
#define irq_gc_resume NULL
#endif

static void irq_gc_shutdown(void)
{
	struct irq_chip_generic *gc;

	list_for_each_entry(gc, &gc_list, list) {
		struct irq_chip_type *ct = gc->chip_types;

		if (ct->chip.irq_pm_shutdown) {
			struct irq_data *data = irq_gc_get_irq_data(gc);

			if (data)
				ct->chip.irq_pm_shutdown(data);
		}
	}
}

static struct syscore_ops irq_gc_syscore_ops = {
	.suspend = irq_gc_suspend,
	.resume = irq_gc_resume,
	.shutdown = irq_gc_shutdown,
};

static int __init irq_gc_init_ops(void)
{
	register_syscore_ops(&irq_gc_syscore_ops);
	return 0;
}
device_initcall(irq_gc_init_ops);