1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Support functions for OMAP GPIO
4 *
5 * Copyright (C) 2003-2005 Nokia Corporation
6 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
7 *
8 * Copyright (C) 2009 Texas Instruments
9 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
10 */
11
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/interrupt.h>
15 #include <linux/syscore_ops.h>
16 #include <linux/err.h>
17 #include <linux/clk.h>
18 #include <linux/io.h>
19 #include <linux/cpu_pm.h>
20 #include <linux/device.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/pm.h>
23 #include <linux/of.h>
24 #include <linux/of_device.h>
25 #include <linux/gpio/driver.h>
26 #include <linux/bitops.h>
27 #include <linux/platform_data/gpio-omap.h>
28
29 #define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF
30
31 struct gpio_regs {
32 u32 sysconfig;
33 u32 irqenable1;
34 u32 irqenable2;
35 u32 wake_en;
36 u32 ctrl;
37 u32 oe;
38 u32 leveldetect0;
39 u32 leveldetect1;
40 u32 risingdetect;
41 u32 fallingdetect;
42 u32 dataout;
43 u32 debounce;
44 u32 debounce_en;
45 };
46
47 struct gpio_bank {
48 void __iomem *base;
49 const struct omap_gpio_reg_offs *regs;
50
51 int irq;
52 u32 non_wakeup_gpios;
53 u32 enabled_non_wakeup_gpios;
54 struct gpio_regs context;
55 u32 saved_datain;
56 u32 level_mask;
57 u32 toggle_mask;
58 raw_spinlock_t lock;
59 raw_spinlock_t wa_lock;
60 struct gpio_chip chip;
61 struct clk *dbck;
62 struct notifier_block nb;
63 unsigned int is_suspended:1;
64 unsigned int needs_resume:1;
65 u32 mod_usage;
66 u32 irq_usage;
67 u32 dbck_enable_mask;
68 bool dbck_enabled;
69 bool is_mpuio;
70 bool dbck_flag;
71 bool loses_context;
72 bool context_valid;
73 int stride;
74 u32 width;
75 int context_loss_count;
76
77 void (*set_dataout)(struct gpio_bank *bank, unsigned gpio, int enable);
78 int (*get_context_loss_count)(struct device *dev);
79 };
80
81 #define GPIO_MOD_CTRL_BIT BIT(0)
82
83 #define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
84 #define LINE_USED(line, offset) (line & (BIT(offset)))
85
86 static void omap_gpio_unmask_irq(struct irq_data *d);
87
88 static inline struct gpio_bank *omap_irq_data_get_bank(struct irq_data *d)
89 {
90 struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
91 return gpiochip_get_data(chip);
92 }
93
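/* Read-modify-write helper: set or clear @mask in @reg and return the new value */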
94 static inline u32 omap_gpio_rmw(void __iomem *reg, u32 mask, bool set)
95 {
96 u32 val = readl_relaxed(reg);
97
98 if (set)
99 val |= mask;
100 else
101 val &= ~mask;
102
103 writel_relaxed(val, reg);
104
105 return val;
106 }
107
108 static void omap_set_gpio_direction(struct gpio_bank *bank, int gpio,
109 int is_input)
110 {
111 bank->context.oe = omap_gpio_rmw(bank->base + bank->regs->direction,
112 BIT(gpio), is_input);
113 }
114
115
116 /* set data out value using dedicated set/clear registers */
117 static void omap_set_gpio_dataout_reg(struct gpio_bank *bank, unsigned offset,
118 int enable)
119 {
120 void __iomem *reg = bank->base;
121 u32 l = BIT(offset);
122
123 if (enable) {
124 reg += bank->regs->set_dataout;
125 bank->context.dataout |= l;
126 } else {
127 reg += bank->regs->clr_dataout;
128 bank->context.dataout &= ~l;
129 }
130
131 writel_relaxed(l, reg);
132 }
133
134 /* set data out value using mask register */
135 static void omap_set_gpio_dataout_mask(struct gpio_bank *bank, unsigned offset,
136 int enable)
137 {
138 bank->context.dataout = omap_gpio_rmw(bank->base + bank->regs->dataout,
139 BIT(offset), enable);
140 }
141
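/*
 * Enable the debounce functional clock and re-program the per-bank
 * debounce enable bits cached in dbck_enable_mask.
 */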
142 static inline void omap_gpio_dbck_enable(struct gpio_bank *bank)
143 {
144 if (bank->dbck_enable_mask && !bank->dbck_enabled) {
145 clk_enable(bank->dbck);
146 bank->dbck_enabled = true;
147
148 writel_relaxed(bank->dbck_enable_mask,
149 bank->base + bank->regs->debounce_en);
150 }
151 }
152
153 static inline void omap_gpio_dbck_disable(struct gpio_bank *bank)
154 {
155 if (bank->dbck_enable_mask && bank->dbck_enabled) {
156 /*
157 * Disable debounce before cutting its clock. If debounce is
158 * enabled but the clock is not, the GPIO module seems to be unable
159 * to detect events and generate interrupts, at least on OMAP3.
160 */
161 writel_relaxed(0, bank->base + bank->regs->debounce_en);
162
163 clk_disable(bank->dbck);
164 bank->dbck_enabled = false;
165 }
166 }
167
168 /**
169 * omap2_set_gpio_debounce - low level gpio debounce time
170 * @bank: the gpio bank we're acting upon
171 * @offset: the gpio number on this @bank
172 * @debounce: debounce time to use
173 *
174 * OMAP's debounce time is in 31us steps
175 * <debounce time> = (GPIO_DEBOUNCINGTIME[7:0].DEBOUNCETIME + 1) x 31
176 * so we need to convert and round up to the closest unit.
177 *
178 * Return: 0 on success, negative error otherwise.
179 */
180 static int omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
181 unsigned debounce)
182 {
183 u32 val;
184 u32 l;
185 bool enable = !!debounce;
186
187 if (!bank->dbck_flag)
188 return -ENOTSUPP;
189
190 if (enable) {
191 debounce = DIV_ROUND_UP(debounce, 31) - 1;
192 if ((debounce & OMAP4_GPIO_DEBOUNCINGTIME_MASK) != debounce)
193 return -EINVAL;
194 }
195
196 l = BIT(offset);
197
198 clk_enable(bank->dbck);
199 writel_relaxed(debounce, bank->base + bank->regs->debounce);
200
201 val = omap_gpio_rmw(bank->base + bank->regs->debounce_en, l, enable);
202 bank->dbck_enable_mask = val;
203
204 clk_disable(bank->dbck);
205 /*
206 * Enable debounce clock per module.
207 * This call is mandatory because in omap_gpio_request(), when
208 * *_runtime_get_sync() is called, _gpio_dbck_enable() within the
209 * runtime callback fails to turn on dbck because dbck_enable_mask
210 * used within _gpio_dbck_enable() is not yet initialized at
211 * that point. Therefore we have to enable dbck here.
212 */
213 omap_gpio_dbck_enable(bank);
214 if (bank->dbck_enable_mask) {
215 bank->context.debounce = debounce;
216 bank->context.debounce_en = val;
217 }
218
219 return 0;
220 }
221
222 /**
223 * omap_clear_gpio_debounce - clear debounce settings for a gpio
224 * @bank: the gpio bank we're acting upon
225 * @offset: the gpio number on this @bank
226 *
227 * If a gpio is using debounce, clear its debounce enable bit. If this is
228 * the only gpio in this bank using debounce, also clear the debounce time.
229 * The debounce clock will also be disabled when calling this function
230 * if this is the only gpio in the bank using debounce.
231 */
232 static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
233 {
234 u32 gpio_bit = BIT(offset);
235
236 if (!bank->dbck_flag)
237 return;
238
239 if (!(bank->dbck_enable_mask & gpio_bit))
240 return;
241
242 bank->dbck_enable_mask &= ~gpio_bit;
243 bank->context.debounce_en &= ~gpio_bit;
244 writel_relaxed(bank->context.debounce_en,
245 bank->base + bank->regs->debounce_en);
246
247 if (!bank->dbck_enable_mask) {
248 bank->context.debounce = 0;
249 writel_relaxed(bank->context.debounce, bank->base +
250 bank->regs->debounce);
251 clk_disable(bank->dbck);
252 bank->dbck_enabled = false;
253 }
254 }
255
256 /*
257 * Off mode wake-up capable GPIOs in bank(s) that are in the wakeup domain.
258 * See the TRM GPIO section on "Wake-Up Generation" for the list of GPIOs
259 * in the wakeup domain. If bank->non_wakeup_gpios is not configured, assume
260 * none are capable of waking up the system from off mode.
261 */
262 static bool omap_gpio_is_off_wakeup_capable(struct gpio_bank *bank, u32 gpio_mask)
263 {
264 u32 no_wake = bank->non_wakeup_gpios;
265
266 if (no_wake)
267 return !!(~no_wake & gpio_mask);
268
269 return false;
270 }
271
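/*
 * Program the level and edge detection registers for @gpio according to
 * @trigger and cache the resulting register values in bank->context.
 */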
272 static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
273 unsigned trigger)
274 {
275 void __iomem *base = bank->base;
276 u32 gpio_bit = BIT(gpio);
277
278 omap_gpio_rmw(base + bank->regs->leveldetect0, gpio_bit,
279 trigger & IRQ_TYPE_LEVEL_LOW);
280 omap_gpio_rmw(base + bank->regs->leveldetect1, gpio_bit,
281 trigger & IRQ_TYPE_LEVEL_HIGH);
282
283 /*
284 * We need edge detection enabled to allow the GPIO block to be
285 * woken from the idle state. Set the appropriate edge detection
286 * in addition to the level detection.
287 */
288 omap_gpio_rmw(base + bank->regs->risingdetect, gpio_bit,
289 trigger & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH));
290 omap_gpio_rmw(base + bank->regs->fallingdetect, gpio_bit,
291 trigger & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW));
292
293 bank->context.leveldetect0 =
294 readl_relaxed(bank->base + bank->regs->leveldetect0);
295 bank->context.leveldetect1 =
296 readl_relaxed(bank->base + bank->regs->leveldetect1);
297 bank->context.risingdetect =
298 readl_relaxed(bank->base + bank->regs->risingdetect);
299 bank->context.fallingdetect =
300 readl_relaxed(bank->base + bank->regs->fallingdetect);
301
302 bank->level_mask = bank->context.leveldetect0 |
303 bank->context.leveldetect1;
304
305 /* This part must always be executed for OMAP{34xx, 44xx} */
306 if (!bank->regs->irqctrl && !omap_gpio_is_off_wakeup_capable(bank, gpio)) {
307 /*
308 * Log the edge gpio and manually trigger the IRQ
309 * after resume if the input level changes,
310 * to avoid losing IRQs during PER RET/OFF mode.
311 * Applies to omap2 non-wakeup gpios and all omap3 gpios.
312 */
313 if (trigger & IRQ_TYPE_EDGE_BOTH)
314 bank->enabled_non_wakeup_gpios |= gpio_bit;
315 else
316 bank->enabled_non_wakeup_gpios &= ~gpio_bit;
317 }
318 }
319
320 /*
321 * This only applies to chips that can't do both rising and falling edge
322 * detection at once. For all other chips, this function is a noop.
323 */
324 static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
325 {
326 if (IS_ENABLED(CONFIG_ARCH_OMAP1) && bank->regs->irqctrl) {
327 void __iomem *reg = bank->base + bank->regs->irqctrl;
328
329 writel_relaxed(readl_relaxed(reg) ^ BIT(gpio), reg);
330 }
331 }
332
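/*
 * Select the trigger type for @gpio using whichever register layout the
 * bank provides: level/edge detect registers, a single irqctrl register,
 * or the split edgectrl1/edgectrl2 registers.
 */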
333 static int omap_set_gpio_triggering(struct gpio_bank *bank, int gpio,
334 unsigned trigger)
335 {
336 void __iomem *reg = bank->base;
337 u32 l = 0;
338
339 if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
340 omap_set_gpio_trigger(bank, gpio, trigger);
341 } else if (bank->regs->irqctrl) {
342 reg += bank->regs->irqctrl;
343
344 l = readl_relaxed(reg);
345 if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
346 bank->toggle_mask |= BIT(gpio);
347 if (trigger & IRQ_TYPE_EDGE_RISING)
348 l |= BIT(gpio);
349 else if (trigger & IRQ_TYPE_EDGE_FALLING)
350 l &= ~(BIT(gpio));
351 else
352 return -EINVAL;
353
354 writel_relaxed(l, reg);
355 } else if (bank->regs->edgectrl1) {
356 if (gpio & 0x08)
357 reg += bank->regs->edgectrl2;
358 else
359 reg += bank->regs->edgectrl1;
360
361 gpio &= 0x07;
362 l = readl_relaxed(reg);
363 l &= ~(3 << (gpio << 1));
364 if (trigger & IRQ_TYPE_EDGE_RISING)
365 l |= 2 << (gpio << 1);
366 if (trigger & IRQ_TYPE_EDGE_FALLING)
367 l |= BIT(gpio << 1);
368 writel_relaxed(l, reg);
369 }
370 return 0;
371 }
372
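/*
 * Claim @offset for the MPU if the bank has a pinctrl register, and make
 * sure the module is enabled (clocks ungated) when the first line in the
 * bank comes into use.
 */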
373 static void omap_enable_gpio_module(struct gpio_bank *bank, unsigned offset)
374 {
375 if (bank->regs->pinctrl) {
376 void __iomem *reg = bank->base + bank->regs->pinctrl;
377
378 /* Claim the pin for MPU */
379 writel_relaxed(readl_relaxed(reg) | (BIT(offset)), reg);
380 }
381
382 if (bank->regs->ctrl && !BANK_USED(bank)) {
383 void __iomem *reg = bank->base + bank->regs->ctrl;
384 u32 ctrl;
385
386 ctrl = readl_relaxed(reg);
387 /* Module is enabled, clocks are not gated */
388 ctrl &= ~GPIO_MOD_CTRL_BIT;
389 writel_relaxed(ctrl, reg);
390 bank->context.ctrl = ctrl;
391 }
392 }
393
394 static void omap_disable_gpio_module(struct gpio_bank *bank, unsigned offset)
395 {
396 if (bank->regs->ctrl && !BANK_USED(bank)) {
397 void __iomem *reg = bank->base + bank->regs->ctrl;
398 u32 ctrl;
399
400 ctrl = readl_relaxed(reg);
401 /* Module is disabled, clocks are gated */
402 ctrl |= GPIO_MOD_CTRL_BIT;
403 writel_relaxed(ctrl, reg);
404 bank->context.ctrl = ctrl;
405 }
406 }
407
408 static int omap_gpio_is_input(struct gpio_bank *bank, unsigned offset)
409 {
410 void __iomem *reg = bank->base + bank->regs->direction;
411
412 return readl_relaxed(reg) & BIT(offset);
413 }
414
415 static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned offset)
416 {
417 if (!LINE_USED(bank->mod_usage, offset)) {
418 omap_enable_gpio_module(bank, offset);
419 omap_set_gpio_direction(bank, offset, 1);
420 }
421 bank->irq_usage |= BIT(offset);
422 }
423
424 static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
425 {
426 struct gpio_bank *bank = omap_irq_data_get_bank(d);
427 int retval;
428 unsigned long flags;
429 unsigned offset = d->hwirq;
430
431 if (type & ~IRQ_TYPE_SENSE_MASK)
432 return -EINVAL;
433
434 if (!bank->regs->leveldetect0 &&
435 (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
436 return -EINVAL;
437
438 raw_spin_lock_irqsave(&bank->lock, flags);
439 retval = omap_set_gpio_triggering(bank, offset, type);
440 if (retval) {
441 raw_spin_unlock_irqrestore(&bank->lock, flags);
442 goto error;
443 }
444 omap_gpio_init_irq(bank, offset);
445 if (!omap_gpio_is_input(bank, offset)) {
446 raw_spin_unlock_irqrestore(&bank->lock, flags);
447 retval = -EINVAL;
448 goto error;
449 }
450 raw_spin_unlock_irqrestore(&bank->lock, flags);
451
452 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
453 irq_set_handler_locked(d, handle_level_irq);
454 else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
455 /*
456 * Edge IRQs are already cleared/acked in the irq handler and
457 * do not need to be masked; as a result the handle_edge_irq()
458 * logic is excessive here and may cause loss of interrupts.
459 * So just use handle_simple_irq.
460 */
461 irq_set_handler_locked(d, handle_simple_irq);
462
463 return 0;
464
465 error:
466 return retval;
467 }
468
469 static void omap_clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
470 {
471 void __iomem *reg = bank->base;
472
473 reg += bank->regs->irqstatus;
474 writel_relaxed(gpio_mask, reg);
475
476 /* Workaround for clearing DSP GPIO interrupts to allow retention */
477 if (bank->regs->irqstatus2) {
478 reg = bank->base + bank->regs->irqstatus2;
479 writel_relaxed(gpio_mask, reg);
480 }
481
482 /* Flush posted write for the irq status to avoid spurious interrupts */
483 readl_relaxed(reg);
484 }
485
486 static inline void omap_clear_gpio_irqstatus(struct gpio_bank *bank,
487 unsigned offset)
488 {
489 omap_clear_gpio_irqbank(bank, BIT(offset));
490 }
491
492 static u32 omap_get_gpio_irqbank_mask(struct gpio_bank *bank)
493 {
494 void __iomem *reg = bank->base;
495 u32 l;
496 u32 mask = (BIT(bank->width)) - 1;
497
498 reg += bank->regs->irqenable;
499 l = readl_relaxed(reg);
500 if (bank->regs->irqenable_inv)
501 l = ~l;
502 l &= mask;
503 return l;
504 }
505
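/*
 * Enable or disable the interrupt for @offset, using the dedicated
 * set/clear registers when available, and keep the wakeup enable
 * register in sync for wakeup-capable lines.
 */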
506 static inline void omap_set_gpio_irqenable(struct gpio_bank *bank,
507 unsigned offset, int enable)
508 {
509 void __iomem *reg = bank->base;
510 u32 gpio_mask = BIT(offset);
511
512 if (bank->regs->set_irqenable && bank->regs->clr_irqenable) {
513 if (enable) {
514 reg += bank->regs->set_irqenable;
515 bank->context.irqenable1 |= gpio_mask;
516 } else {
517 reg += bank->regs->clr_irqenable;
518 bank->context.irqenable1 &= ~gpio_mask;
519 }
520 writel_relaxed(gpio_mask, reg);
521 } else {
522 bank->context.irqenable1 =
523 omap_gpio_rmw(reg + bank->regs->irqenable, gpio_mask,
524 enable ^ bank->regs->irqenable_inv);
525 }
526
527 /*
528 * Program GPIO wakeup along with IRQ enable to satisfy OMAP4430 TRM
529 * note requiring correlation between the IRQ enable registers and
530 * the wakeup registers. In any case, we want wakeup from idle
531 * enabled for the GPIOs which support this feature.
532 */
533 if (bank->regs->wkup_en &&
534 (bank->regs->edgectrl1 || !(bank->non_wakeup_gpios & gpio_mask))) {
535 bank->context.wake_en =
536 omap_gpio_rmw(bank->base + bank->regs->wkup_en,
537 gpio_mask, enable);
538 }
539 }
540
541 /* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
542 static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable)
543 {
544 struct gpio_bank *bank = omap_irq_data_get_bank(d);
545
546 return irq_set_irq_wake(bank->irq, enable);
547 }
548
549 /*
550 * We need to unmask the GPIO bank interrupt as soon as possible to
551 * avoid missing GPIO interrupts for other lines in the bank.
552 * Then we need to mask-read-clear-unmask the triggered GPIO lines
553 * in the bank to avoid missing nested interrupts for a GPIO line.
554 * If we wait to unmask individual GPIO lines in the bank after the
555 * line's interrupt handler has been run, we may miss some nested
556 * interrupts.
557 */
558 static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
559 {
560 void __iomem *isr_reg = NULL;
561 u32 enabled, isr, edge;
562 unsigned int bit;
563 struct gpio_bank *bank = gpiobank;
564 unsigned long wa_lock_flags;
565 unsigned long lock_flags;
566
567 isr_reg = bank->base + bank->regs->irqstatus;
568 if (WARN_ON(!isr_reg))
569 goto exit;
570
571 if (WARN_ONCE(!pm_runtime_active(bank->chip.parent),
572 "gpio irq%i while runtime suspended?\n", irq))
573 return IRQ_NONE;
574
575 while (1) {
576 raw_spin_lock_irqsave(&bank->lock, lock_flags);
577
578 enabled = omap_get_gpio_irqbank_mask(bank);
579 isr = readl_relaxed(isr_reg) & enabled;
580
581 /*
582 * Clear edge sensitive interrupts before calling handler(s)
583 * so subsequent edge transitions are not missed while the
584 * handlers are running.
585 */
586 edge = isr & ~bank->level_mask;
587 if (edge)
588 omap_clear_gpio_irqbank(bank, edge);
589
590 raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
591
592 if (!isr)
593 break;
594
595 while (isr) {
596 bit = __ffs(isr);
597 isr &= ~(BIT(bit));
598
599 raw_spin_lock_irqsave(&bank->lock, lock_flags);
600 /*
601 * Some chips can't respond to both rising and falling edges
602 * at the same time. If this irq was requested with
603 * both flags, we need to flip the ICR data for the IRQ
604 * to respond to the IRQ for the opposite direction.
605 * This will be indicated in the bank toggle_mask.
606 */
607 if (bank->toggle_mask & (BIT(bit)))
608 omap_toggle_gpio_edge_triggering(bank, bit);
609
610 raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
611
612 raw_spin_lock_irqsave(&bank->wa_lock, wa_lock_flags);
613
614 generic_handle_irq(irq_find_mapping(bank->chip.irq.domain,
615 bit));
616
617 raw_spin_unlock_irqrestore(&bank->wa_lock,
618 wa_lock_flags);
619 }
620 }
621 exit:
622 return IRQ_HANDLED;
623 }
624
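/*
 * Prepare a line for use as an interrupt: set it as input if it is not
 * already requested as a GPIO, enable the module, mark the line used and
 * unmask the interrupt.
 */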
625 static unsigned int omap_gpio_irq_startup(struct irq_data *d)
626 {
627 struct gpio_bank *bank = omap_irq_data_get_bank(d);
628 unsigned long flags;
629 unsigned offset = d->hwirq;
630
631 raw_spin_lock_irqsave(&bank->lock, flags);
632
633 if (!LINE_USED(bank->mod_usage, offset))
634 omap_set_gpio_direction(bank, offset, 1);
635 omap_enable_gpio_module(bank, offset);
636 bank->irq_usage |= BIT(offset);
637
638 raw_spin_unlock_irqrestore(&bank->lock, flags);
639 omap_gpio_unmask_irq(d);
640
641 return 0;
642 }
643
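/*
 * Undo omap_gpio_irq_startup(): drop the trigger, clear and disable the
 * interrupt, and release debounce and module resources if the line is no
 * longer in use as a GPIO.
 */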
644 static void omap_gpio_irq_shutdown(struct irq_data *d)
645 {
646 struct gpio_bank *bank = omap_irq_data_get_bank(d);
647 unsigned long flags;
648 unsigned offset = d->hwirq;
649
650 raw_spin_lock_irqsave(&bank->lock, flags);
651 bank->irq_usage &= ~(BIT(offset));
652 omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
653 omap_clear_gpio_irqstatus(bank, offset);
654 omap_set_gpio_irqenable(bank, offset, 0);
655 if (!LINE_USED(bank->mod_usage, offset))
656 omap_clear_gpio_debounce(bank, offset);
657 omap_disable_gpio_module(bank, offset);
658 raw_spin_unlock_irqrestore(&bank->lock, flags);
659 }
660
661 static void omap_gpio_irq_bus_lock(struct irq_data *data)
662 {
663 struct gpio_bank *bank = omap_irq_data_get_bank(data);
664
665 pm_runtime_get_sync(bank->chip.parent);
666 }
667
668 static void gpio_irq_bus_sync_unlock(struct irq_data *data)
669 {
670 struct gpio_bank *bank = omap_irq_data_get_bank(data);
671
672 pm_runtime_put(bank->chip.parent);
673 }
674
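/* Mask an interrupt: drop the trigger and clear the IRQ enable bit */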
675 static void omap_gpio_mask_irq(struct irq_data *d)
676 {
677 struct gpio_bank *bank = omap_irq_data_get_bank(d);
678 unsigned offset = d->hwirq;
679 unsigned long flags;
680
681 raw_spin_lock_irqsave(&bank->lock, flags);
682 omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
683 omap_set_gpio_irqenable(bank, offset, 0);
684 raw_spin_unlock_irqrestore(&bank->lock, flags);
685 }
686
687 static void omap_gpio_unmask_irq(struct irq_data *d)
688 {
689 struct gpio_bank *bank = omap_irq_data_get_bank(d);
690 unsigned offset = d->hwirq;
691 u32 trigger = irqd_get_trigger_type(d);
692 unsigned long flags;
693
694 raw_spin_lock_irqsave(&bank->lock, flags);
695 omap_set_gpio_irqenable(bank, offset, 1);
696
697 /*
698 * For level-triggered GPIOs, clearing must be done after the source
699 * is cleared, thus after the handler has run. OMAP4 needs this done
700 * after enabling the interrupt to clear the wakeup status.
701 */
702 if (bank->regs->leveldetect0 && bank->regs->wkup_en &&
703 trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
704 omap_clear_gpio_irqstatus(bank, offset);
705
706 if (trigger)
707 omap_set_gpio_triggering(bank, offset, trigger);
708
709 raw_spin_unlock_irqrestore(&bank->lock, flags);
710 }
711
712 /*---------------------------------------------------------------------*/
713
714 static int omap_mpuio_suspend_noirq(struct device *dev)
715 {
716 struct gpio_bank *bank = dev_get_drvdata(dev);
717 void __iomem *mask_reg = bank->base +
718 OMAP_MPUIO_GPIO_MASKIT / bank->stride;
719 unsigned long flags;
720
721 raw_spin_lock_irqsave(&bank->lock, flags);
722 writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg);
723 raw_spin_unlock_irqrestore(&bank->lock, flags);
724
725 return 0;
726 }
727
728 static int omap_mpuio_resume_noirq(struct device *dev)
729 {
730 struct gpio_bank *bank = dev_get_drvdata(dev);
731 void __iomem *mask_reg = bank->base +
732 OMAP_MPUIO_GPIO_MASKIT / bank->stride;
733 unsigned long flags;
734
735 raw_spin_lock_irqsave(&bank->lock, flags);
736 writel_relaxed(bank->context.wake_en, mask_reg);
737 raw_spin_unlock_irqrestore(&bank->lock, flags);
738
739 return 0;
740 }
741
742 static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
743 .suspend_noirq = omap_mpuio_suspend_noirq,
744 .resume_noirq = omap_mpuio_resume_noirq,
745 };
746
747 /* use a platform_driver so the MPUIO bank gets the noirq PM callbacks above */
748 static struct platform_driver omap_mpuio_driver = {
749 .driver = {
750 .name = "mpuio",
751 .pm = &omap_mpuio_dev_pm_ops,
752 },
753 };
754
755 static struct platform_device omap_mpuio_device = {
756 .name = "mpuio",
757 .id = -1,
758 .dev = {
759 .driver = &omap_mpuio_driver.driver,
760 }
761 /* could list the /proc/iomem resources */
762 };
763
764 static inline void omap_mpuio_init(struct gpio_bank *bank)
765 {
766 platform_set_drvdata(&omap_mpuio_device, bank);
767
768 if (platform_driver_register(&omap_mpuio_driver) == 0)
769 (void) platform_device_register(&omap_mpuio_device);
770 }
771
772 /*---------------------------------------------------------------------*/
773
774 static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
775 {
776 struct gpio_bank *bank = gpiochip_get_data(chip);
777 unsigned long flags;
778
779 pm_runtime_get_sync(chip->parent);
780
781 raw_spin_lock_irqsave(&bank->lock, flags);
782 omap_enable_gpio_module(bank, offset);
783 bank->mod_usage |= BIT(offset);
784 raw_spin_unlock_irqrestore(&bank->lock, flags);
785
786 return 0;
787 }
788
789 static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
790 {
791 struct gpio_bank *bank = gpiochip_get_data(chip);
792 unsigned long flags;
793
794 raw_spin_lock_irqsave(&bank->lock, flags);
795 bank->mod_usage &= ~(BIT(offset));
796 if (!LINE_USED(bank->irq_usage, offset)) {
797 omap_set_gpio_direction(bank, offset, 1);
798 omap_clear_gpio_debounce(bank, offset);
799 }
800 omap_disable_gpio_module(bank, offset);
801 raw_spin_unlock_irqrestore(&bank->lock, flags);
802
803 pm_runtime_put(chip->parent);
804 }
805
806 static int omap_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
807 {
808 struct gpio_bank *bank = gpiochip_get_data(chip);
809
810 if (readl_relaxed(bank->base + bank->regs->direction) & BIT(offset))
811 return GPIO_LINE_DIRECTION_IN;
812
813 return GPIO_LINE_DIRECTION_OUT;
814 }
815
816 static int omap_gpio_input(struct gpio_chip *chip, unsigned offset)
817 {
818 struct gpio_bank *bank;
819 unsigned long flags;
820
821 bank = gpiochip_get_data(chip);
822 raw_spin_lock_irqsave(&bank->lock, flags);
823 omap_set_gpio_direction(bank, offset, 1);
824 raw_spin_unlock_irqrestore(&bank->lock, flags);
825 return 0;
826 }
827
828 static int omap_gpio_get(struct gpio_chip *chip, unsigned offset)
829 {
830 struct gpio_bank *bank = gpiochip_get_data(chip);
831 void __iomem *reg;
832
833 if (omap_gpio_is_input(bank, offset))
834 reg = bank->base + bank->regs->datain;
835 else
836 reg = bank->base + bank->regs->dataout;
837
838 return (readl_relaxed(reg) & BIT(offset)) != 0;
839 }
840
841 static int omap_gpio_output(struct gpio_chip *chip, unsigned offset, int value)
842 {
843 struct gpio_bank *bank;
844 unsigned long flags;
845
846 bank = gpiochip_get_data(chip);
847 raw_spin_lock_irqsave(&bank->lock, flags);
848 bank->set_dataout(bank, offset, value);
849 omap_set_gpio_direction(bank, offset, 0);
850 raw_spin_unlock_irqrestore(&bank->lock, flags);
851 return 0;
852 }
853
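/*
 * Read multiple lines at once: input lines come from the datain
 * register, output lines from the dataout register.
 */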
854 static int omap_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
855 unsigned long *bits)
856 {
857 struct gpio_bank *bank = gpiochip_get_data(chip);
858 void __iomem *base = bank->base;
859 u32 direction, m, val = 0;
860
861 direction = readl_relaxed(base + bank->regs->direction);
862
863 m = direction & *mask;
864 if (m)
865 val |= readl_relaxed(base + bank->regs->datain) & m;
866
867 m = ~direction & *mask;
868 if (m)
869 val |= readl_relaxed(base + bank->regs->dataout) & m;
870
871 *bits = val;
872
873 return 0;
874 }
875
876 static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
877 unsigned debounce)
878 {
879 struct gpio_bank *bank;
880 unsigned long flags;
881 int ret;
882
883 bank = gpiochip_get_data(chip);
884
885 raw_spin_lock_irqsave(&bank->lock, flags);
886 ret = omap2_set_gpio_debounce(bank, offset, debounce);
887 raw_spin_unlock_irqrestore(&bank->lock, flags);
888
889 if (ret)
890 dev_info(chip->parent,
891 "Could not set line %u debounce to %u microseconds (%d)",
892 offset, debounce, ret);
893
894 return ret;
895 }
896
897 static int omap_gpio_set_config(struct gpio_chip *chip, unsigned offset,
898 unsigned long config)
899 {
900 u32 debounce;
901 int ret = -ENOTSUPP;
902
903 switch (pinconf_to_config_param(config)) {
904 case PIN_CONFIG_BIAS_DISABLE:
905 case PIN_CONFIG_BIAS_PULL_UP:
906 case PIN_CONFIG_BIAS_PULL_DOWN:
907 ret = gpiochip_generic_config(chip, offset, config);
908 break;
909 case PIN_CONFIG_INPUT_DEBOUNCE:
910 debounce = pinconf_to_config_argument(config);
911 ret = omap_gpio_debounce(chip, offset, debounce);
912 break;
913 default:
914 break;
915 }
916
917 return ret;
918 }
919
920 static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
921 {
922 struct gpio_bank *bank;
923 unsigned long flags;
924
925 bank = gpiochip_get_data(chip);
926 raw_spin_lock_irqsave(&bank->lock, flags);
927 bank->set_dataout(bank, offset, value);
928 raw_spin_unlock_irqrestore(&bank->lock, flags);
929 }
930
931 static void omap_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
932 unsigned long *bits)
933 {
934 struct gpio_bank *bank = gpiochip_get_data(chip);
935 void __iomem *reg = bank->base + bank->regs->dataout;
936 unsigned long flags;
937 u32 l;
938
939 raw_spin_lock_irqsave(&bank->lock, flags);
940 l = (readl_relaxed(reg) & ~*mask) | (*bits & *mask);
941 writel_relaxed(l, reg);
942 bank->context.dataout = l;
943 raw_spin_unlock_irqrestore(&bank->lock, flags);
944 }
945
946 /*---------------------------------------------------------------------*/
947
948 static void omap_gpio_show_rev(struct gpio_bank *bank)
949 {
950 static bool called;
951 u32 rev;
952
953 if (called || bank->regs->revision == USHRT_MAX)
954 return;
955
956 rev = readw_relaxed(bank->base + bank->regs->revision);
957 pr_info("OMAP GPIO hardware version %d.%d\n",
958 (rev >> 4) & 0x0f, rev & 0x0f);
959
960 called = true;
961 }
962
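/*
 * One-time module init: mask and clear all interrupts, disable debounce
 * and make sure the module starts out enabled with its interface clock
 * ungated.
 */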
963 static void omap_gpio_mod_init(struct gpio_bank *bank)
964 {
965 void __iomem *base = bank->base;
966 u32 l = 0xffffffff;
967
968 if (bank->width == 16)
969 l = 0xffff;
970
971 if (bank->is_mpuio) {
972 writel_relaxed(l, bank->base + bank->regs->irqenable);
973 return;
974 }
975
976 omap_gpio_rmw(base + bank->regs->irqenable, l,
977 bank->regs->irqenable_inv);
978 omap_gpio_rmw(base + bank->regs->irqstatus, l,
979 !bank->regs->irqenable_inv);
980 if (bank->regs->debounce_en)
981 writel_relaxed(0, base + bank->regs->debounce_en);
982
983 /* Save OE default value (0xffffffff) in the context */
984 bank->context.oe = readl_relaxed(bank->base + bank->regs->direction);
985 /* Initialize interface clk ungated, module enabled */
986 if (bank->regs->ctrl)
987 writel_relaxed(0, base + bank->regs->ctrl);
988 }
989
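/*
 * Fill in the gpio_chip and gpio_irq_chip callbacks, register the chip
 * with gpiolib and hook up the bank interrupt handler.
 */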
990 static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
991 {
992 struct gpio_irq_chip *irq;
993 static int gpio;
994 const char *label;
995 int irq_base = 0;
996 int ret;
997
998 /*
999 * REVISIT eventually switch from OMAP-specific gpio structs
1000 * over to the generic ones
1001 */
1002 bank->chip.request = omap_gpio_request;
1003 bank->chip.free = omap_gpio_free;
1004 bank->chip.get_direction = omap_gpio_get_direction;
1005 bank->chip.direction_input = omap_gpio_input;
1006 bank->chip.get = omap_gpio_get;
1007 bank->chip.get_multiple = omap_gpio_get_multiple;
1008 bank->chip.direction_output = omap_gpio_output;
1009 bank->chip.set_config = omap_gpio_set_config;
1010 bank->chip.set = omap_gpio_set;
1011 bank->chip.set_multiple = omap_gpio_set_multiple;
1012 if (bank->is_mpuio) {
1013 bank->chip.label = "mpuio";
1014 if (bank->regs->wkup_en)
1015 bank->chip.parent = &omap_mpuio_device.dev;
1016 bank->chip.base = OMAP_MPUIO(0);
1017 } else {
1018 label = devm_kasprintf(bank->chip.parent, GFP_KERNEL, "gpio-%d-%d",
1019 gpio, gpio + bank->width - 1);
1020 if (!label)
1021 return -ENOMEM;
1022 bank->chip.label = label;
1023 bank->chip.base = gpio;
1024 }
1025 bank->chip.ngpio = bank->width;
1026
1027 #ifdef CONFIG_ARCH_OMAP1
1028 /*
1029 * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
1030 * irq_alloc_descs() since a base IRQ offset will no longer be needed.
1031 */
1032 irq_base = devm_irq_alloc_descs(bank->chip.parent,
1033 -1, 0, bank->width, 0);
1034 if (irq_base < 0) {
1035 dev_err(bank->chip.parent, "Couldn't allocate IRQ numbers\n");
1036 return -ENODEV;
1037 }
1038 #endif
1039
1040 /* MPUIO is a bit different, reading IRQ status clears it */
1041 if (bank->is_mpuio && !bank->regs->wkup_en)
1042 irqc->irq_set_wake = NULL;
1043
1044 irq = &bank->chip.irq;
1045 irq->chip = irqc;
1046 irq->handler = handle_bad_irq;
1047 irq->default_type = IRQ_TYPE_NONE;
1048 irq->num_parents = 1;
1049 irq->parents = &bank->irq;
1050 irq->first = irq_base;
1051
1052 ret = gpiochip_add_data(&bank->chip, bank);
1053 if (ret) {
1054 dev_err(bank->chip.parent,
1055 "Could not register gpio chip %d\n", ret);
1056 return ret;
1057 }
1058
1059 ret = devm_request_irq(bank->chip.parent, bank->irq,
1060 omap_gpio_irq_handler,
1061 0, dev_name(bank->chip.parent), bank);
1062 if (ret)
1063 gpiochip_remove(&bank->chip);
1064
1065 if (!bank->is_mpuio)
1066 gpio += bank->width;
1067
1068 return ret;
1069 }
1070
1071 static void omap_gpio_init_context(struct gpio_bank *p)
1072 {
1073 const struct omap_gpio_reg_offs *regs = p->regs;
1074 void __iomem *base = p->base;
1075
1076 p->context.sysconfig = readl_relaxed(base + regs->sysconfig);
1077 p->context.ctrl = readl_relaxed(base + regs->ctrl);
1078 p->context.oe = readl_relaxed(base + regs->direction);
1079 p->context.wake_en = readl_relaxed(base + regs->wkup_en);
1080 p->context.leveldetect0 = readl_relaxed(base + regs->leveldetect0);
1081 p->context.leveldetect1 = readl_relaxed(base + regs->leveldetect1);
1082 p->context.risingdetect = readl_relaxed(base + regs->risingdetect);
1083 p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect);
1084 p->context.irqenable1 = readl_relaxed(base + regs->irqenable);
1085 p->context.irqenable2 = readl_relaxed(base + regs->irqenable2);
1086 p->context.dataout = readl_relaxed(base + regs->dataout);
1087
1088 p->context_valid = true;
1089 }
1090
1091 static void omap_gpio_restore_context(struct gpio_bank *bank)
1092 {
1093 const struct omap_gpio_reg_offs *regs = bank->regs;
1094 void __iomem *base = bank->base;
1095
1096 writel_relaxed(bank->context.sysconfig, base + regs->sysconfig);
1097 writel_relaxed(bank->context.wake_en, base + regs->wkup_en);
1098 writel_relaxed(bank->context.ctrl, base + regs->ctrl);
1099 writel_relaxed(bank->context.leveldetect0, base + regs->leveldetect0);
1100 writel_relaxed(bank->context.leveldetect1, base + regs->leveldetect1);
1101 writel_relaxed(bank->context.risingdetect, base + regs->risingdetect);
1102 writel_relaxed(bank->context.fallingdetect, base + regs->fallingdetect);
1103 writel_relaxed(bank->context.dataout, base + regs->dataout);
1104 writel_relaxed(bank->context.oe, base + regs->direction);
1105
1106 if (bank->dbck_enable_mask) {
1107 writel_relaxed(bank->context.debounce, base + regs->debounce);
1108 writel_relaxed(bank->context.debounce_en,
1109 base + regs->debounce_en);
1110 }
1111
1112 writel_relaxed(bank->context.irqenable1, base + regs->irqenable);
1113 writel_relaxed(bank->context.irqenable2, base + regs->irqenable2);
1114 }
1115
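/*
 * Prepare the bank for idle/off: snapshot the input state, apply the
 * OMAP2420 erratum 1.101 workaround where needed, record the
 * context-loss count and gate the debounce clock.
 */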
1116 static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
1117 {
1118 struct device *dev = bank->chip.parent;
1119 void __iomem *base = bank->base;
1120 u32 mask, nowake;
1121
1122 bank->saved_datain = readl_relaxed(base + bank->regs->datain);
1123
1124 /* Save sysconfig; its runtime value can differ from the init value */
1125 if (bank->loses_context)
1126 bank->context.sysconfig = readl_relaxed(base + bank->regs->sysconfig);
1127
1128 if (!bank->enabled_non_wakeup_gpios)
1129 goto update_gpio_context_count;
1130
1131 /* Check for pending EDGE_FALLING, ignore EDGE_BOTH */
1132 mask = bank->enabled_non_wakeup_gpios & bank->context.fallingdetect;
1133 mask &= ~bank->context.risingdetect;
1134 bank->saved_datain |= mask;
1135
1136 /* Check for pending EDGE_RISING, ignore EDGE_BOTH */
1137 mask = bank->enabled_non_wakeup_gpios & bank->context.risingdetect;
1138 mask &= ~bank->context.fallingdetect;
1139 bank->saved_datain &= ~mask;
1140
1141 if (!may_lose_context)
1142 goto update_gpio_context_count;
1143
1144 /*
1145 * If going to OFF, remove triggering for all wkup domain
1146 * non-wakeup GPIOs. Otherwise spurious IRQs will be
1147 * generated. See OMAP2420 Errata item 1.101.
1148 */
1149 if (!bank->loses_context && bank->enabled_non_wakeup_gpios) {
1150 nowake = bank->enabled_non_wakeup_gpios;
1151 omap_gpio_rmw(base + bank->regs->fallingdetect, nowake, ~nowake);
1152 omap_gpio_rmw(base + bank->regs->risingdetect, nowake, ~nowake);
1153 }
1154
1155 update_gpio_context_count:
1156 if (bank->get_context_loss_count)
1157 bank->context_loss_count =
1158 bank->get_context_loss_count(dev);
1159
1160 omap_gpio_dbck_disable(bank);
1161 }
1162
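/*
 * Bring the bank back from idle/off: re-enable the debounce clock,
 * restore the register context if it was lost, and software-generate
 * interrupts for non-wakeup GPIOs whose state changed while idle.
 */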
1163 static void omap_gpio_unidle(struct gpio_bank *bank)
1164 {
1165 struct device *dev = bank->chip.parent;
1166 u32 l = 0, gen, gen0, gen1;
1167 int c;
1168
1169 /*
1170 * On the first resume during the probe, the context has not
1171 * been initialised and so initialise it now. Also initialise
1172 * the context loss count.
1173 */
1174 if (bank->loses_context && !bank->context_valid) {
1175 omap_gpio_init_context(bank);
1176
1177 if (bank->get_context_loss_count)
1178 bank->context_loss_count =
1179 bank->get_context_loss_count(dev);
1180 }
1181
1182 omap_gpio_dbck_enable(bank);
1183
1184 if (bank->loses_context) {
1185 if (!bank->get_context_loss_count) {
1186 omap_gpio_restore_context(bank);
1187 } else {
1188 c = bank->get_context_loss_count(dev);
1189 if (c != bank->context_loss_count) {
1190 omap_gpio_restore_context(bank);
1191 } else {
1192 return;
1193 }
1194 }
1195 } else {
1196 /* Restore changes done for OMAP2420 errata 1.101 */
1197 writel_relaxed(bank->context.fallingdetect,
1198 bank->base + bank->regs->fallingdetect);
1199 writel_relaxed(bank->context.risingdetect,
1200 bank->base + bank->regs->risingdetect);
1201 }
1202
1203 l = readl_relaxed(bank->base + bank->regs->datain);
1204
1205 /*
1206 * Check if any of the non-wakeup interrupt GPIOs have changed
1207 * state. If so, generate an IRQ by software. This is
1208 * horribly racy, but it's the best we can do to work around
1209 * this silicon bug.
1210 */
1211 l ^= bank->saved_datain;
1212 l &= bank->enabled_non_wakeup_gpios;
1213
1214 /*
1215 * No need to generate IRQs for the rising edge for gpio IRQs
1216 * configured with falling edge only; and vice versa.
1217 */
1218 gen0 = l & bank->context.fallingdetect;
1219 gen0 &= bank->saved_datain;
1220
1221 gen1 = l & bank->context.risingdetect;
1222 gen1 &= ~(bank->saved_datain);
1223
1224 /* FIXME: Consider GPIO IRQs with level detections properly! */
1225 gen = l & (~(bank->context.fallingdetect) &
1226 ~(bank->context.risingdetect));
1227 /* Consider all GPIO IRQs needed to be updated */
1228 gen |= gen0 | gen1;
1229
1230 if (gen) {
1231 u32 old0, old1;
1232
1233 old0 = readl_relaxed(bank->base + bank->regs->leveldetect0);
1234 old1 = readl_relaxed(bank->base + bank->regs->leveldetect1);
1235
1236 if (!bank->regs->irqstatus_raw0) {
1237 writel_relaxed(old0 | gen, bank->base +
1238 bank->regs->leveldetect0);
1239 writel_relaxed(old1 | gen, bank->base +
1240 bank->regs->leveldetect1);
1241 }
1242
1243 if (bank->regs->irqstatus_raw0) {
1244 writel_relaxed(old0 | l, bank->base +
1245 bank->regs->leveldetect0);
1246 writel_relaxed(old1 | l, bank->base +
1247 bank->regs->leveldetect1);
1248 }
1249 writel_relaxed(old0, bank->base + bank->regs->leveldetect0);
1250 writel_relaxed(old1, bank->base + bank->regs->leveldetect1);
1251 }
1252 }
1253
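/*
 * CPU cluster power transitions: idle the bank on CPU_CLUSTER_PM_ENTER
 * (refusing the transition if an interrupt is pending) and unidle it
 * again on exit or on a failed entry.
 */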
1254 static int gpio_omap_cpu_notifier(struct notifier_block *nb,
1255 unsigned long cmd, void *v)
1256 {
1257 struct gpio_bank *bank;
1258 unsigned long flags;
1259 int ret = NOTIFY_OK;
1260 u32 isr, mask;
1261
1262 bank = container_of(nb, struct gpio_bank, nb);
1263
1264 raw_spin_lock_irqsave(&bank->lock, flags);
1265 if (bank->is_suspended)
1266 goto out_unlock;
1267
1268 switch (cmd) {
1269 case CPU_CLUSTER_PM_ENTER:
1270 mask = omap_get_gpio_irqbank_mask(bank);
1271 isr = readl_relaxed(bank->base + bank->regs->irqstatus) & mask;
1272 if (isr) {
1273 ret = NOTIFY_BAD;
1274 break;
1275 }
1276 omap_gpio_idle(bank, true);
1277 break;
1278 case CPU_CLUSTER_PM_ENTER_FAILED:
1279 case CPU_CLUSTER_PM_EXIT:
1280 omap_gpio_unidle(bank);
1281 break;
1282 }
1283
1284 out_unlock:
1285 raw_spin_unlock_irqrestore(&bank->lock, flags);
1286
1287 return ret;
1288 }
1289
1290 static const struct omap_gpio_reg_offs omap2_gpio_regs = {
1291 .revision = OMAP24XX_GPIO_REVISION,
1292 .sysconfig = OMAP24XX_GPIO_SYSCONFIG,
1293 .direction = OMAP24XX_GPIO_OE,
1294 .datain = OMAP24XX_GPIO_DATAIN,
1295 .dataout = OMAP24XX_GPIO_DATAOUT,
1296 .set_dataout = OMAP24XX_GPIO_SETDATAOUT,
1297 .clr_dataout = OMAP24XX_GPIO_CLEARDATAOUT,
1298 .irqstatus = OMAP24XX_GPIO_IRQSTATUS1,
1299 .irqstatus2 = OMAP24XX_GPIO_IRQSTATUS2,
1300 .irqenable = OMAP24XX_GPIO_IRQENABLE1,
1301 .irqenable2 = OMAP24XX_GPIO_IRQENABLE2,
1302 .set_irqenable = OMAP24XX_GPIO_SETIRQENABLE1,
1303 .clr_irqenable = OMAP24XX_GPIO_CLEARIRQENABLE1,
1304 .debounce = OMAP24XX_GPIO_DEBOUNCE_VAL,
1305 .debounce_en = OMAP24XX_GPIO_DEBOUNCE_EN,
1306 .ctrl = OMAP24XX_GPIO_CTRL,
1307 .wkup_en = OMAP24XX_GPIO_WAKE_EN,
1308 .leveldetect0 = OMAP24XX_GPIO_LEVELDETECT0,
1309 .leveldetect1 = OMAP24XX_GPIO_LEVELDETECT1,
1310 .risingdetect = OMAP24XX_GPIO_RISINGDETECT,
1311 .fallingdetect = OMAP24XX_GPIO_FALLINGDETECT,
1312 };
1313
1314 static const struct omap_gpio_reg_offs omap4_gpio_regs = {
1315 .revision = OMAP4_GPIO_REVISION,
1316 .sysconfig = OMAP4_GPIO_SYSCONFIG,
1317 .direction = OMAP4_GPIO_OE,
1318 .datain = OMAP4_GPIO_DATAIN,
1319 .dataout = OMAP4_GPIO_DATAOUT,
1320 .set_dataout = OMAP4_GPIO_SETDATAOUT,
1321 .clr_dataout = OMAP4_GPIO_CLEARDATAOUT,
1322 .irqstatus = OMAP4_GPIO_IRQSTATUS0,
1323 .irqstatus2 = OMAP4_GPIO_IRQSTATUS1,
1324 .irqstatus_raw0 = OMAP4_GPIO_IRQSTATUSRAW0,
1325 .irqstatus_raw1 = OMAP4_GPIO_IRQSTATUSRAW1,
1326 .irqenable = OMAP4_GPIO_IRQSTATUSSET0,
1327 .irqenable2 = OMAP4_GPIO_IRQSTATUSSET1,
1328 .set_irqenable = OMAP4_GPIO_IRQSTATUSSET0,
1329 .clr_irqenable = OMAP4_GPIO_IRQSTATUSCLR0,
1330 .debounce = OMAP4_GPIO_DEBOUNCINGTIME,
1331 .debounce_en = OMAP4_GPIO_DEBOUNCENABLE,
1332 .ctrl = OMAP4_GPIO_CTRL,
1333 .wkup_en = OMAP4_GPIO_IRQWAKEN0,
1334 .leveldetect0 = OMAP4_GPIO_LEVELDETECT0,
1335 .leveldetect1 = OMAP4_GPIO_LEVELDETECT1,
1336 .risingdetect = OMAP4_GPIO_RISINGDETECT,
1337 .fallingdetect = OMAP4_GPIO_FALLINGDETECT,
1338 };
1339
1340 static const struct omap_gpio_platform_data omap2_pdata = {
1341 .regs = &omap2_gpio_regs,
1342 .bank_width = 32,
1343 .dbck_flag = false,
1344 };
1345
1346 static const struct omap_gpio_platform_data omap3_pdata = {
1347 .regs = &omap2_gpio_regs,
1348 .bank_width = 32,
1349 .dbck_flag = true,
1350 };
1351
1352 static const struct omap_gpio_platform_data omap4_pdata = {
1353 .regs = &omap4_gpio_regs,
1354 .bank_width = 32,
1355 .dbck_flag = true,
1356 };
1357
1358 static const struct of_device_id omap_gpio_match[] = {
1359 {
1360 .compatible = "ti,omap4-gpio",
1361 .data = &omap4_pdata,
1362 },
1363 {
1364 .compatible = "ti,omap3-gpio",
1365 .data = &omap3_pdata,
1366 },
1367 {
1368 .compatible = "ti,omap2-gpio",
1369 .data = &omap2_pdata,
1370 },
1371 { },
1372 };
1373 MODULE_DEVICE_TABLE(of, omap_gpio_match);
1374
1375 static int omap_gpio_probe(struct platform_device *pdev)
1376 {
1377 struct device *dev = &pdev->dev;
1378 struct device_node *node = dev->of_node;
1379 const struct of_device_id *match;
1380 const struct omap_gpio_platform_data *pdata;
1381 struct gpio_bank *bank;
1382 struct irq_chip *irqc;
1383 int ret;
1384
1385 match = of_match_device(of_match_ptr(omap_gpio_match), dev);
1386
1387 pdata = match ? match->data : dev_get_platdata(dev);
1388 if (!pdata)
1389 return -EINVAL;
1390
1391 bank = devm_kzalloc(dev, sizeof(*bank), GFP_KERNEL);
1392 if (!bank)
1393 return -ENOMEM;
1394
1395 irqc = devm_kzalloc(dev, sizeof(*irqc), GFP_KERNEL);
1396 if (!irqc)
1397 return -ENOMEM;
1398
1399 irqc->irq_startup = omap_gpio_irq_startup;
1400 irqc->irq_shutdown = omap_gpio_irq_shutdown;
1401 irqc->irq_ack = dummy_irq_chip.irq_ack;
1402 irqc->irq_mask = omap_gpio_mask_irq;
1403 irqc->irq_unmask = omap_gpio_unmask_irq;
1404 irqc->irq_set_type = omap_gpio_irq_type;
1405 irqc->irq_set_wake = omap_gpio_wake_enable;
1406 irqc->irq_bus_lock = omap_gpio_irq_bus_lock;
1407 irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock;
1408 irqc->name = dev_name(&pdev->dev);
1409 irqc->flags = IRQCHIP_MASK_ON_SUSPEND;
1410 irqc->parent_device = dev;
1411
1412 bank->irq = platform_get_irq(pdev, 0);
1413 if (bank->irq <= 0) {
1414 if (!bank->irq)
1415 bank->irq = -ENXIO;
1416 return dev_err_probe(dev, bank->irq, "can't get irq resource\n");
1417 }
1418
1419 bank->chip.parent = dev;
1420 bank->chip.owner = THIS_MODULE;
1421 bank->dbck_flag = pdata->dbck_flag;
1422 bank->stride = pdata->bank_stride;
1423 bank->width = pdata->bank_width;
1424 bank->is_mpuio = pdata->is_mpuio;
1425 bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
1426 bank->regs = pdata->regs;
1427 #ifdef CONFIG_OF_GPIO
1428 bank->chip.of_node = of_node_get(node);
1429 #endif
1430
1431 if (node) {
1432 if (!of_property_read_bool(node, "ti,gpio-always-on"))
1433 bank->loses_context = true;
1434 } else {
1435 bank->loses_context = pdata->loses_context;
1436
1437 if (bank->loses_context)
1438 bank->get_context_loss_count =
1439 pdata->get_context_loss_count;
1440 }
1441
1442 if (bank->regs->set_dataout && bank->regs->clr_dataout)
1443 bank->set_dataout = omap_set_gpio_dataout_reg;
1444 else
1445 bank->set_dataout = omap_set_gpio_dataout_mask;
1446
1447 raw_spin_lock_init(&bank->lock);
1448 raw_spin_lock_init(&bank->wa_lock);
1449
1450 /* Static mapping, never released */
1451 bank->base = devm_platform_ioremap_resource(pdev, 0);
1452 if (IS_ERR(bank->base)) {
1453 return PTR_ERR(bank->base);
1454 }
1455
1456 if (bank->dbck_flag) {
1457 bank->dbck = devm_clk_get(dev, "dbclk");
1458 if (IS_ERR(bank->dbck)) {
1459 dev_err(dev,
1460 "Could not get gpio dbck. Disable debounce\n");
1461 bank->dbck_flag = false;
1462 } else {
1463 clk_prepare(bank->dbck);
1464 }
1465 }
1466
1467 platform_set_drvdata(pdev, bank);
1468
1469 pm_runtime_enable(dev);
1470 pm_runtime_get_sync(dev);
1471
1472 if (bank->is_mpuio)
1473 omap_mpuio_init(bank);
1474
1475 omap_gpio_mod_init(bank);
1476
1477 ret = omap_gpio_chip_init(bank, irqc);
1478 if (ret) {
1479 pm_runtime_put_sync(dev);
1480 pm_runtime_disable(dev);
1481 if (bank->dbck_flag)
1482 clk_unprepare(bank->dbck);
1483 return ret;
1484 }
1485
1486 omap_gpio_show_rev(bank);
1487
1488 bank->nb.notifier_call = gpio_omap_cpu_notifier;
1489 cpu_pm_register_notifier(&bank->nb);
1490
1491 pm_runtime_put(dev);
1492
1493 return 0;
1494 }
1495
1496 static int omap_gpio_remove(struct platform_device *pdev)
1497 {
1498 struct gpio_bank *bank = platform_get_drvdata(pdev);
1499
1500 cpu_pm_unregister_notifier(&bank->nb);
1501 gpiochip_remove(&bank->chip);
1502 pm_runtime_disable(&pdev->dev);
1503 if (bank->dbck_flag)
1504 clk_unprepare(bank->dbck);
1505
1506 return 0;
1507 }
1508
1509 static int __maybe_unused omap_gpio_runtime_suspend(struct device *dev)
1510 {
1511 struct gpio_bank *bank = dev_get_drvdata(dev);
1512 unsigned long flags;
1513
1514 raw_spin_lock_irqsave(&bank->lock, flags);
1515 omap_gpio_idle(bank, true);
1516 bank->is_suspended = true;
1517 raw_spin_unlock_irqrestore(&bank->lock, flags);
1518
1519 return 0;
1520 }
1521
1522 static int __maybe_unused omap_gpio_runtime_resume(struct device *dev)
1523 {
1524 struct gpio_bank *bank = dev_get_drvdata(dev);
1525 unsigned long flags;
1526
1527 raw_spin_lock_irqsave(&bank->lock, flags);
1528 omap_gpio_unidle(bank);
1529 bank->is_suspended = false;
1530 raw_spin_unlock_irqrestore(&bank->lock, flags);
1531
1532 return 0;
1533 }
1534
1535 static int __maybe_unused omap_gpio_suspend(struct device *dev)
1536 {
1537 struct gpio_bank *bank = dev_get_drvdata(dev);
1538
1539 if (bank->is_suspended)
1540 return 0;
1541
1542 bank->needs_resume = 1;
1543
1544 return omap_gpio_runtime_suspend(dev);
1545 }
1546
1547 static int __maybe_unused omap_gpio_resume(struct device *dev)
1548 {
1549 struct gpio_bank *bank = dev_get_drvdata(dev);
1550
1551 if (!bank->needs_resume)
1552 return 0;
1553
1554 bank->needs_resume = 0;
1555
1556 return omap_gpio_runtime_resume(dev);
1557 }
1558
1559 static const struct dev_pm_ops gpio_pm_ops = {
1560 SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
1561 NULL)
1562 SET_LATE_SYSTEM_SLEEP_PM_OPS(omap_gpio_suspend, omap_gpio_resume)
1563 };
1564
1565 static struct platform_driver omap_gpio_driver = {
1566 .probe = omap_gpio_probe,
1567 .remove = omap_gpio_remove,
1568 .driver = {
1569 .name = "omap_gpio",
1570 .pm = &gpio_pm_ops,
1571 .of_match_table = omap_gpio_match,
1572 },
1573 };
1574
1575 /*
1576 * The gpio driver needs to be registered before
1577 * machine_init functions access the gpio APIs.
1578 * Hence omap_gpio_drv_reg() is a postcore_initcall.
1579 */
1580 static int __init omap_gpio_drv_reg(void)
1581 {
1582 return platform_driver_register(&omap_gpio_driver);
1583 }
1584 postcore_initcall(omap_gpio_drv_reg);
1585
1586 static void __exit omap_gpio_exit(void)
1587 {
1588 platform_driver_unregister(&omap_gpio_driver);
1589 }
1590 module_exit(omap_gpio_exit);
1591
1592 MODULE_DESCRIPTION("omap gpio driver");
1593 MODULE_ALIAS("platform:gpio-omap");
1594 MODULE_LICENSE("GPL v2");
1595