• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // regmap based irq_chip
4 //
5 // Copyright 2011 Wolfson Microelectronics plc
6 //
7 // Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
8 
9 #include <linux/device.h>
10 #include <linux/export.h>
11 #include <linux/interrupt.h>
12 #include <linux/irq.h>
13 #include <linux/irqdomain.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/regmap.h>
16 #include <linux/slab.h>
17 
18 #include "internal.h"
19 
/*
 * Runtime state for one regmap-based interrupt controller instance,
 * allocated by regmap_add_irq_chip_fwnode() and torn down by
 * regmap_del_irq_chip().
 */
struct regmap_irq_chip_data {
	struct mutex lock;		/* serialises cached-register updates vs sync */
	struct irq_chip irq_chip;	/* per-instance copy so .name can differ */

	struct regmap *map;
	const struct regmap_irq_chip *chip;	/* static chip description */

	int irq_base;			/* first allocated descriptor, 0 if domain-mapped */
	struct irq_domain *domain;

	int irq;			/* primary (parent) IRQ line */
	int wake_count;			/* pending wake refcount delta, applied on unlock */

	void *status_reg_buf;		/* raw bulk-read buffer, val_bytes sized elements */
	unsigned int *main_status_buf;	/* main (top-level) status register values */
	unsigned int *status_buf;	/* per-register decoded status */
	unsigned int *mask_buf;		/* current mask state, synced on unlock */
	unsigned int *mask_buf_def;	/* union of all IRQ masks per register */
	unsigned int *wake_buf;		/* wake enables, only if chip->wake_base */
	unsigned int *type_buf;		/* pending type register values */
	unsigned int *type_buf_def;	/* type register defaults read at init */

	unsigned int irq_reg_stride;
	unsigned int type_reg_stride;

	bool clear_status:1;		/* read-to-clear status on next sync */
};
47 
48 static inline const
irq_to_regmap_irq(struct regmap_irq_chip_data * data,int irq)49 struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
50 				     int irq)
51 {
52 	return &data->chip->irqs[irq];
53 }
54 
/*
 * irq_bus_lock callback: hold the chip lock across a batch of irq_chip
 * operations so that all cached register changes are written back in one
 * go by regmap_irq_sync_unlock().
 */
static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

	mutex_lock(&d->lock);
}
61 
regmap_irq_update_bits(struct regmap_irq_chip_data * d,unsigned int reg,unsigned int mask,unsigned int val)62 static int regmap_irq_update_bits(struct regmap_irq_chip_data *d,
63 				  unsigned int reg, unsigned int mask,
64 				  unsigned int val)
65 {
66 	if (d->chip->mask_writeonly)
67 		return regmap_write_bits(d->map, reg, mask, val);
68 	else
69 		return regmap_update_bits(d->map, reg, mask, val);
70 }
71 
/*
 * irq_bus_sync_unlock callback: flush all cached IRQ configuration
 * (masks, wake enables, trigger types) out to the hardware and apply any
 * deferred wake-count changes, then drop the chip lock taken in
 * regmap_irq_lock().
 */
static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;
	u32 unmask_offset;
	u32 val;

	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	if (d->clear_status) {
		/*
		 * clear_on_unmask chips clear status by reading it; flush
		 * any stale status before the new mask takes effect.
		 */
		for (i = 0; i < d->chip->num_regs; i++) {
			reg = d->chip->status_base +
				(i * map->reg_stride * d->irq_reg_stride);

			ret = regmap_read(map, reg, &val);
			if (ret)
				dev_err(d->map->dev,
					"Failed to clear the interrupt status bits\n");
		}

		d->clear_status = false;
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware.  We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		if (!d->chip->mask_base)
			continue;

		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->chip->mask_invert) {
			/* 1 in the register means "enabled" on these chips */
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf_def[i], ~d->mask_buf[i]);
		} else if (d->chip->unmask_base) {
			/* set mask with mask_base register */
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret < 0)
				dev_err(d->map->dev,
					"Failed to sync unmasks in %x\n",
					reg);
			unmask_offset = d->chip->unmask_base -
							d->chip->mask_base;
			/* clear mask with unmask_base register */
			ret = regmap_irq_update_bits(d,
					reg + unmask_offset,
					d->mask_buf_def[i],
					d->mask_buf[i]);
		} else {
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf_def[i], d->mask_buf[i]);
		}
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 ~d->wake_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally,
		 * OR if there is masked interrupt which hasn't been Acked,
		 * it'll be ignored in irq handler, then may introduce irq storm
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			/* some chips ack by write 0 */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			if (d->chip->clear_ack) {
				/* ack is done by toggling: write back the idle value */
				if (d->chip->ack_invert && !ret)
					ret = regmap_write(map, reg, UINT_MAX);
				else if (!ret)
					ret = regmap_write(map, reg, 0);
			}
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Don't update the type bits if we're using mask bits for irq type. */
	if (!d->chip->type_in_mask) {
		for (i = 0; i < d->chip->num_type_reg; i++) {
			if (!d->type_buf_def[i])
				continue;
			reg = d->chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);
			if (d->chip->type_invert)
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], ~d->type_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], d->type_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to sync type in %x\n",
					reg);
		}
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}
217 
regmap_irq_enable(struct irq_data * data)218 static void regmap_irq_enable(struct irq_data *data)
219 {
220 	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
221 	struct regmap *map = d->map;
222 	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
223 	unsigned int reg = irq_data->reg_offset / map->reg_stride;
224 	unsigned int mask, type;
225 
226 	type = irq_data->type.type_falling_val | irq_data->type.type_rising_val;
227 
228 	/*
229 	 * The type_in_mask flag means that the underlying hardware uses
230 	 * separate mask bits for rising and falling edge interrupts, but
231 	 * we want to make them into a single virtual interrupt with
232 	 * configurable edge.
233 	 *
234 	 * If the interrupt we're enabling defines the falling or rising
235 	 * masks then instead of using the regular mask bits for this
236 	 * interrupt, use the value previously written to the type buffer
237 	 * at the corresponding offset in regmap_irq_set_type().
238 	 */
239 	if (d->chip->type_in_mask && type)
240 		mask = d->type_buf[reg] & irq_data->mask;
241 	else
242 		mask = irq_data->mask;
243 
244 	if (d->chip->clear_on_unmask)
245 		d->clear_status = true;
246 
247 	d->mask_buf[reg] &= ~mask;
248 }
249 
regmap_irq_disable(struct irq_data * data)250 static void regmap_irq_disable(struct irq_data *data)
251 {
252 	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
253 	struct regmap *map = d->map;
254 	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
255 
256 	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
257 }
258 
regmap_irq_set_type(struct irq_data * data,unsigned int type)259 static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
260 {
261 	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
262 	struct regmap *map = d->map;
263 	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
264 	int reg;
265 	const struct regmap_irq_type *t = &irq_data->type;
266 
267 	if ((t->types_supported & type) != type)
268 		return 0;
269 
270 	reg = t->type_reg_offset / map->reg_stride;
271 
272 	if (t->type_reg_mask)
273 		d->type_buf[reg] &= ~t->type_reg_mask;
274 	else
275 		d->type_buf[reg] &= ~(t->type_falling_val |
276 				      t->type_rising_val |
277 				      t->type_level_low_val |
278 				      t->type_level_high_val);
279 	switch (type) {
280 	case IRQ_TYPE_EDGE_FALLING:
281 		d->type_buf[reg] |= t->type_falling_val;
282 		break;
283 
284 	case IRQ_TYPE_EDGE_RISING:
285 		d->type_buf[reg] |= t->type_rising_val;
286 		break;
287 
288 	case IRQ_TYPE_EDGE_BOTH:
289 		d->type_buf[reg] |= (t->type_falling_val |
290 					t->type_rising_val);
291 		break;
292 
293 	case IRQ_TYPE_LEVEL_HIGH:
294 		d->type_buf[reg] |= t->type_level_high_val;
295 		break;
296 
297 	case IRQ_TYPE_LEVEL_LOW:
298 		d->type_buf[reg] |= t->type_level_low_val;
299 		break;
300 	default:
301 		return -EINVAL;
302 	}
303 	return 0;
304 }
305 
regmap_irq_set_wake(struct irq_data * data,unsigned int on)306 static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
307 {
308 	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
309 	struct regmap *map = d->map;
310 	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
311 
312 	if (on) {
313 		if (d->wake_buf)
314 			d->wake_buf[irq_data->reg_offset / map->reg_stride]
315 				&= ~irq_data->mask;
316 		d->wake_count++;
317 	} else {
318 		if (d->wake_buf)
319 			d->wake_buf[irq_data->reg_offset / map->reg_stride]
320 				|= irq_data->mask;
321 		d->wake_count--;
322 	}
323 
324 	return 0;
325 }
326 
/*
 * Template irq_chip, copied into each chip instance so the name can be
 * customised.  All callbacks only touch cached state; hardware I/O is
 * batched between irq_bus_lock and irq_bus_sync_unlock.
 */
static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_type		= regmap_irq_set_type,
	.irq_set_wake		= regmap_irq_set_wake,
};
335 
read_sub_irq_data(struct regmap_irq_chip_data * data,unsigned int b)336 static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
337 					   unsigned int b)
338 {
339 	const struct regmap_irq_chip *chip = data->chip;
340 	struct regmap *map = data->map;
341 	struct regmap_irq_sub_irq_map *subreg;
342 	int i, ret = 0;
343 
344 	if (!chip->sub_reg_offsets) {
345 		/* Assume linear mapping */
346 		ret = regmap_read(map, chip->status_base +
347 				  (b * map->reg_stride * data->irq_reg_stride),
348 				   &data->status_buf[b]);
349 	} else {
350 		subreg = &chip->sub_reg_offsets[b];
351 		for (i = 0; i < subreg->num_regs; i++) {
352 			unsigned int offset = subreg->offset[i];
353 
354 			ret = regmap_read(map, chip->status_base + offset,
355 					  &data->status_buf[offset]);
356 			if (ret)
357 				break;
358 		}
359 	}
360 	return ret;
361 }
362 
/*
 * Threaded handler for the primary IRQ: read the status registers,
 * mask out disabled sources, acknowledge what fired and dispatch the
 * nested per-source handlers.
 */
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	int ret, i;
	bool handled = false;
	u32 reg;

	if (chip->handle_pre_irq)
		chip->handle_pre_irq(chip->irq_drv_data);

	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
			goto exit;
		}
	}

	/*
	 * Read only registers with active IRQs if the chip has 'main status
	 * register'. Else read in the statuses, using a single bulk read if
	 * possible in order to reduce the I/O overheads.
	 */

	if (chip->num_main_regs) {
		unsigned int max_main_bits;
		unsigned long size;

		size = chip->num_regs * sizeof(unsigned int);

		max_main_bits = (chip->num_main_status_bits) ?
				 chip->num_main_status_bits : chip->num_regs;
		/* Clear the status buf as we don't read all status regs */
		memset(data->status_buf, 0, size);

		/* We could support bulk read for main status registers
		 * but I don't expect to see devices with really many main
		 * status registers so let's only support single reads for the
		 * sake of simplicity. and add bulk reads only if needed
		 */
		for (i = 0; i < chip->num_main_regs; i++) {
			ret = regmap_read(map, chip->main_status +
				  (i * map->reg_stride
				   * data->irq_reg_stride),
				  &data->main_status_buf[i]);
			if (ret) {
				dev_err(map->dev,
					"Failed to read IRQ status %d\n",
					ret);
				goto exit;
			}
		}

		/* Read sub registers with active IRQs */
		for (i = 0; i < chip->num_main_regs; i++) {
			unsigned int b;
			const unsigned long mreg = data->main_status_buf[i];

			for_each_set_bit(b, &mreg, map->format.val_bytes * 8) {
				/* Skip bits beyond the advertised main-status width */
				if (i * map->format.val_bytes * 8 + b >
				    max_main_bits)
					break;
				ret = read_sub_irq_data(data, b);

				if (ret != 0) {
					dev_err(map->dev,
						"Failed to read IRQ status %d\n",
						ret);
					goto exit;
				}
			}

		}
	} else if (!map->use_single_read && map->reg_stride == 1 &&
		   data->irq_reg_stride == 1) {

		/* Bulk read path: one raw buffer, then widen per val_bytes */
		u8 *buf8 = data->status_reg_buf;
		u16 *buf16 = data->status_reg_buf;
		u32 *buf32 = data->status_reg_buf;

		BUG_ON(!data->status_reg_buf);

		ret = regmap_bulk_read(map, chip->status_base,
				       data->status_reg_buf,
				       chip->num_regs);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto exit;
		}

		for (i = 0; i < data->chip->num_regs; i++) {
			switch (map->format.val_bytes) {
			case 1:
				data->status_buf[i] = buf8[i];
				break;
			case 2:
				data->status_buf[i] = buf16[i];
				break;
			case 4:
				data->status_buf[i] = buf32[i];
				break;
			default:
				BUG();
				goto exit;
			}
		}

	} else {
		/* Fallback: one read per status register */
		for (i = 0; i < data->chip->num_regs; i++) {
			ret = regmap_read(map, chip->status_base +
					  (i * map->reg_stride
					   * data->irq_reg_stride),
					  &data->status_buf[i]);

			if (ret != 0) {
				dev_err(map->dev,
					"Failed to read IRQ status: %d\n",
					ret);
				goto exit;
			}
		}
	}

	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowleding the
	 * interrupt.  We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
		data->status_buf[i] &= ~data->mask_buf[i];

		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * data->irq_reg_stride);
			if (chip->ack_invert)
				ret = regmap_write(map, reg,
						~data->status_buf[i]);
			else
				ret = regmap_write(map, reg,
						data->status_buf[i]);
			if (chip->clear_ack) {
				/* toggle-ack chips: restore the idle value */
				if (chip->ack_invert && !ret)
					ret = regmap_write(map, reg, UINT_MAX);
				else if (!ret)
					ret = regmap_write(map, reg, 0);
			}
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Dispatch nested handlers for every pending, unmasked source */
	for (i = 0; i < chip->num_irqs; i++) {
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
			handle_nested_irq(irq_find_mapping(data->domain, i));
			handled = true;
		}
	}

exit:
	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

	if (chip->handle_post_irq)
		chip->handle_post_irq(chip->irq_drv_data);

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
541 
/*
 * irq_domain map callback: associate a newly created virq with this
 * chip instance and configure it for nested-threaded handling.
 */
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
	irq_set_chip(virq, &data->irq_chip);
	/* Handlers run from the primary IRQ's thread, so they must nest */
	irq_set_nested_thread(virq, 1);
	irq_set_parent(virq, data->irq);
	irq_set_noprobe(virq);

	return 0;
}
555 
/* 1:1 hwirq mapping; accepts one- and two-cell firmware IRQ specifiers */
static const struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_onetwocell,
};
560 
561 /**
562  * regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling
563  *
564  * @fwnode: The firmware node where the IRQ domain should be added to.
565  * @map: The regmap for the device.
566  * @irq: The IRQ the device uses to signal interrupts.
567  * @irq_flags: The IRQF_ flags to use for the primary interrupt.
568  * @irq_base: Allocate at specific IRQ number if irq_base > 0.
569  * @chip: Configuration for the interrupt controller.
570  * @data: Runtime data structure for the controller, allocated on success.
571  *
572  * Returns 0 on success or an errno on failure.
573  *
574  * In order for this to be efficient the chip really should use a
575  * register cache.  The chip driver is responsible for restoring the
576  * register values used by the IRQ controller over suspend and resume.
577  */
regmap_add_irq_chip_fwnode(struct fwnode_handle * fwnode,struct regmap * map,int irq,int irq_flags,int irq_base,const struct regmap_irq_chip * chip,struct regmap_irq_chip_data ** data)578 int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
579 			       struct regmap *map, int irq,
580 			       int irq_flags, int irq_base,
581 			       const struct regmap_irq_chip *chip,
582 			       struct regmap_irq_chip_data **data)
583 {
584 	struct regmap_irq_chip_data *d;
585 	int i;
586 	int ret = -ENOMEM;
587 	int num_type_reg;
588 	u32 reg;
589 	u32 unmask_offset;
590 
591 	if (chip->num_regs <= 0)
592 		return -EINVAL;
593 
594 	if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack))
595 		return -EINVAL;
596 
597 	for (i = 0; i < chip->num_irqs; i++) {
598 		if (chip->irqs[i].reg_offset % map->reg_stride)
599 			return -EINVAL;
600 		if (chip->irqs[i].reg_offset / map->reg_stride >=
601 		    chip->num_regs)
602 			return -EINVAL;
603 	}
604 
605 	if (irq_base) {
606 		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
607 		if (irq_base < 0) {
608 			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
609 				 irq_base);
610 			return irq_base;
611 		}
612 	}
613 
614 	d = kzalloc(sizeof(*d), GFP_KERNEL);
615 	if (!d)
616 		return -ENOMEM;
617 
618 	if (chip->num_main_regs) {
619 		d->main_status_buf = kcalloc(chip->num_main_regs,
620 					     sizeof(unsigned int),
621 					     GFP_KERNEL);
622 
623 		if (!d->main_status_buf)
624 			goto err_alloc;
625 	}
626 
627 	d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
628 				GFP_KERNEL);
629 	if (!d->status_buf)
630 		goto err_alloc;
631 
632 	d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
633 			      GFP_KERNEL);
634 	if (!d->mask_buf)
635 		goto err_alloc;
636 
637 	d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
638 				  GFP_KERNEL);
639 	if (!d->mask_buf_def)
640 		goto err_alloc;
641 
642 	if (chip->wake_base) {
643 		d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
644 				      GFP_KERNEL);
645 		if (!d->wake_buf)
646 			goto err_alloc;
647 	}
648 
649 	num_type_reg = chip->type_in_mask ? chip->num_regs : chip->num_type_reg;
650 	if (num_type_reg) {
651 		d->type_buf_def = kcalloc(num_type_reg,
652 					  sizeof(unsigned int), GFP_KERNEL);
653 		if (!d->type_buf_def)
654 			goto err_alloc;
655 
656 		d->type_buf = kcalloc(num_type_reg, sizeof(unsigned int),
657 				      GFP_KERNEL);
658 		if (!d->type_buf)
659 			goto err_alloc;
660 	}
661 
662 	d->irq_chip = regmap_irq_chip;
663 	d->irq_chip.name = chip->name;
664 	d->irq = irq;
665 	d->map = map;
666 	d->chip = chip;
667 	d->irq_base = irq_base;
668 
669 	if (chip->irq_reg_stride)
670 		d->irq_reg_stride = chip->irq_reg_stride;
671 	else
672 		d->irq_reg_stride = 1;
673 
674 	if (chip->type_reg_stride)
675 		d->type_reg_stride = chip->type_reg_stride;
676 	else
677 		d->type_reg_stride = 1;
678 
679 	if (!map->use_single_read && map->reg_stride == 1 &&
680 	    d->irq_reg_stride == 1) {
681 		d->status_reg_buf = kmalloc_array(chip->num_regs,
682 						  map->format.val_bytes,
683 						  GFP_KERNEL);
684 		if (!d->status_reg_buf)
685 			goto err_alloc;
686 	}
687 
688 	mutex_init(&d->lock);
689 
690 	for (i = 0; i < chip->num_irqs; i++)
691 		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
692 			|= chip->irqs[i].mask;
693 
694 	/* Mask all the interrupts by default */
695 	for (i = 0; i < chip->num_regs; i++) {
696 		d->mask_buf[i] = d->mask_buf_def[i];
697 		if (!chip->mask_base)
698 			continue;
699 
700 		reg = chip->mask_base +
701 			(i * map->reg_stride * d->irq_reg_stride);
702 		if (chip->mask_invert)
703 			ret = regmap_irq_update_bits(d, reg,
704 					 d->mask_buf[i], ~d->mask_buf[i]);
705 		else if (d->chip->unmask_base) {
706 			unmask_offset = d->chip->unmask_base -
707 					d->chip->mask_base;
708 			ret = regmap_irq_update_bits(d,
709 					reg + unmask_offset,
710 					d->mask_buf[i],
711 					d->mask_buf[i]);
712 		} else
713 			ret = regmap_irq_update_bits(d, reg,
714 					 d->mask_buf[i], d->mask_buf[i]);
715 		if (ret != 0) {
716 			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
717 				reg, ret);
718 			goto err_alloc;
719 		}
720 
721 		if (!chip->init_ack_masked)
722 			continue;
723 
724 		/* Ack masked but set interrupts */
725 		reg = chip->status_base +
726 			(i * map->reg_stride * d->irq_reg_stride);
727 		ret = regmap_read(map, reg, &d->status_buf[i]);
728 		if (ret != 0) {
729 			dev_err(map->dev, "Failed to read IRQ status: %d\n",
730 				ret);
731 			goto err_alloc;
732 		}
733 
734 		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
735 			reg = chip->ack_base +
736 				(i * map->reg_stride * d->irq_reg_stride);
737 			if (chip->ack_invert)
738 				ret = regmap_write(map, reg,
739 					~(d->status_buf[i] & d->mask_buf[i]));
740 			else
741 				ret = regmap_write(map, reg,
742 					d->status_buf[i] & d->mask_buf[i]);
743 			if (chip->clear_ack) {
744 				if (chip->ack_invert && !ret)
745 					ret = regmap_write(map, reg, UINT_MAX);
746 				else if (!ret)
747 					ret = regmap_write(map, reg, 0);
748 			}
749 			if (ret != 0) {
750 				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
751 					reg, ret);
752 				goto err_alloc;
753 			}
754 		}
755 	}
756 
757 	/* Wake is disabled by default */
758 	if (d->wake_buf) {
759 		for (i = 0; i < chip->num_regs; i++) {
760 			d->wake_buf[i] = d->mask_buf_def[i];
761 			reg = chip->wake_base +
762 				(i * map->reg_stride * d->irq_reg_stride);
763 
764 			if (chip->wake_invert)
765 				ret = regmap_irq_update_bits(d, reg,
766 							 d->mask_buf_def[i],
767 							 0);
768 			else
769 				ret = regmap_irq_update_bits(d, reg,
770 							 d->mask_buf_def[i],
771 							 d->wake_buf[i]);
772 			if (ret != 0) {
773 				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
774 					reg, ret);
775 				goto err_alloc;
776 			}
777 		}
778 	}
779 
780 	if (chip->num_type_reg && !chip->type_in_mask) {
781 		for (i = 0; i < chip->num_type_reg; ++i) {
782 			reg = chip->type_base +
783 				(i * map->reg_stride * d->type_reg_stride);
784 
785 			ret = regmap_read(map, reg, &d->type_buf_def[i]);
786 
787 			if (d->chip->type_invert)
788 				d->type_buf_def[i] = ~d->type_buf_def[i];
789 
790 			if (ret) {
791 				dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n",
792 					reg, ret);
793 				goto err_alloc;
794 			}
795 		}
796 	}
797 
798 	if (irq_base)
799 		d->domain = irq_domain_add_legacy(to_of_node(fwnode),
800 						  chip->num_irqs, irq_base,
801 						  0, &regmap_domain_ops, d);
802 	else
803 		d->domain = irq_domain_add_linear(to_of_node(fwnode),
804 						  chip->num_irqs,
805 						  &regmap_domain_ops, d);
806 	if (!d->domain) {
807 		dev_err(map->dev, "Failed to create IRQ domain\n");
808 		ret = -ENOMEM;
809 		goto err_alloc;
810 	}
811 
812 	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
813 				   irq_flags | IRQF_ONESHOT,
814 				   chip->name, d);
815 	if (ret != 0) {
816 		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
817 			irq, chip->name, ret);
818 		goto err_domain;
819 	}
820 
821 	*data = d;
822 
823 	return 0;
824 
825 err_domain:
826 	/* Should really dispose of the domain but... */
827 err_alloc:
828 	kfree(d->type_buf);
829 	kfree(d->type_buf_def);
830 	kfree(d->wake_buf);
831 	kfree(d->mask_buf_def);
832 	kfree(d->mask_buf);
833 	kfree(d->status_buf);
834 	kfree(d->status_reg_buf);
835 	kfree(d);
836 	return ret;
837 }
838 EXPORT_SYMBOL_GPL(regmap_add_irq_chip_fwnode);
839 
/**
 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
 *
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * This is the same as regmap_add_irq_chip_fwnode, except that the firmware
 * node of the regmap is used.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	return regmap_add_irq_chip_fwnode(dev_fwnode(map->dev), map, irq,
					  irq_flags, irq_base, chip, data);
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
863 
864 /**
865  * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
866  *
867  * @irq: Primary IRQ for the device
868  * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
869  *
870  * This function also disposes of all mapped IRQs on the chip.
871  */
regmap_del_irq_chip(int irq,struct regmap_irq_chip_data * d)872 void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
873 {
874 	unsigned int virq;
875 	int hwirq;
876 
877 	if (!d)
878 		return;
879 
880 	free_irq(irq, d);
881 
882 	/* Dispose all virtual irq from irq domain before removing it */
883 	for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
884 		/* Ignore hwirq if holes in the IRQ list */
885 		if (!d->chip->irqs[hwirq].mask)
886 			continue;
887 
888 		/*
889 		 * Find the virtual irq of hwirq on chip and if it is
890 		 * there then dispose it
891 		 */
892 		virq = irq_find_mapping(d->domain, hwirq);
893 		if (virq)
894 			irq_dispose_mapping(virq);
895 	}
896 
897 	irq_domain_remove(d->domain);
898 	kfree(d->type_buf);
899 	kfree(d->type_buf_def);
900 	kfree(d->wake_buf);
901 	kfree(d->mask_buf_def);
902 	kfree(d->mask_buf);
903 	kfree(d->status_reg_buf);
904 	kfree(d->status_buf);
905 	kfree(d);
906 }
907 EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
908 
/* devres release callback: tear down the chip registered via devm */
static void devm_regmap_irq_chip_release(struct device *dev, void *res)
{
	struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;

	regmap_del_irq_chip(d->irq, d);
}
915 
/* devres match callback: identify the resource holding @data */
static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
	struct regmap_irq_chip_data **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}
927 
928 /**
929  * devm_regmap_add_irq_chip_fwnode() - Resource managed regmap_add_irq_chip_fwnode()
930  *
931  * @dev: The device pointer on which irq_chip belongs to.
932  * @fwnode: The firmware node where the IRQ domain should be added to.
933  * @map: The regmap for the device.
934  * @irq: The IRQ the device uses to signal interrupts
935  * @irq_flags: The IRQF_ flags to use for the primary interrupt.
936  * @irq_base: Allocate at specific IRQ number if irq_base > 0.
937  * @chip: Configuration for the interrupt controller.
938  * @data: Runtime data structure for the controller, allocated on success
939  *
940  * Returns 0 on success or an errno on failure.
941  *
942  * The &regmap_irq_chip_data will be automatically released when the device is
943  * unbound.
944  */
devm_regmap_add_irq_chip_fwnode(struct device * dev,struct fwnode_handle * fwnode,struct regmap * map,int irq,int irq_flags,int irq_base,const struct regmap_irq_chip * chip,struct regmap_irq_chip_data ** data)945 int devm_regmap_add_irq_chip_fwnode(struct device *dev,
946 				    struct fwnode_handle *fwnode,
947 				    struct regmap *map, int irq,
948 				    int irq_flags, int irq_base,
949 				    const struct regmap_irq_chip *chip,
950 				    struct regmap_irq_chip_data **data)
951 {
952 	struct regmap_irq_chip_data **ptr, *d;
953 	int ret;
954 
955 	ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
956 			   GFP_KERNEL);
957 	if (!ptr)
958 		return -ENOMEM;
959 
960 	ret = regmap_add_irq_chip_fwnode(fwnode, map, irq, irq_flags, irq_base,
961 					 chip, &d);
962 	if (ret < 0) {
963 		devres_free(ptr);
964 		return ret;
965 	}
966 
967 	*ptr = d;
968 	devres_add(dev, ptr);
969 	*data = d;
970 	return 0;
971 }
972 EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode);
973 
/**
 * devm_regmap_add_irq_chip() - Resource manager regmap_add_irq_chip()
 *
 * @dev: The device pointer on which irq_chip belongs to.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
 */
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
			     int irq_flags, int irq_base,
			     const struct regmap_irq_chip *chip,
			     struct regmap_irq_chip_data **data)
{
	return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(map->dev), map,
					       irq, irq_flags, irq_base, chip,
					       data);
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
1000 
1001 /**
1002  * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
1003  *
1004  * @dev: Device for which which resource was allocated.
1005  * @irq: Primary IRQ for the device.
1006  * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
1007  *
1008  * A resource managed version of regmap_del_irq_chip().
1009  */
devm_regmap_del_irq_chip(struct device * dev,int irq,struct regmap_irq_chip_data * data)1010 void devm_regmap_del_irq_chip(struct device *dev, int irq,
1011 			      struct regmap_irq_chip_data *data)
1012 {
1013 	int rc;
1014 
1015 	WARN_ON(irq != data->irq);
1016 	rc = devres_release(dev, devm_regmap_irq_chip_release,
1017 			    devm_regmap_irq_chip_match, data);
1018 
1019 	if (rc != 0)
1020 		WARN_ON(rc);
1021 }
1022 EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);
1023 
/**
 * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
 *
 * @data: regmap irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs.
 * Only meaningful for chips registered with a non-zero irq_base; warns
 * otherwise.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
	WARN_ON(!data->irq_base);
	return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
1037 
1038 /**
1039  * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
1040  *
1041  * @data: regmap irq controller to operate on.
1042  * @irq: index of the interrupt requested in the chip IRQs.
1043  *
1044  * Useful for drivers to request their own IRQs.
1045  */
regmap_irq_get_virq(struct regmap_irq_chip_data * data,int irq)1046 int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
1047 {
1048 	/* Handle holes in the IRQ list */
1049 	if (!data->chip->irqs[irq].mask)
1050 		return -EINVAL;
1051 
1052 	return irq_create_mapping(data->domain, irq);
1053 }
1054 EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
1055 
1056 /**
1057  * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
1058  *
1059  * @data: regmap_irq controller to operate on.
1060  *
1061  * Useful for drivers to request their own IRQs and for integration
1062  * with subsystems.  For ease of integration NULL is accepted as a
1063  * domain, allowing devices to just call this even if no domain is
1064  * allocated.
1065  */
regmap_irq_get_domain(struct regmap_irq_chip_data * data)1066 struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
1067 {
1068 	if (data)
1069 		return data->domain;
1070 	else
1071 		return NULL;
1072 }
1073 EXPORT_SYMBOL_GPL(regmap_irq_get_domain);
1074