/*
 * GPIO Greybus driver.
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/mutex.h>

#include "greybus.h"
#include "gbphy.h"

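/*
 * Per-line state cached locally.  The *_pending flags record irqchip
 * changes requested while the irq bus lock is held; they are flushed to
 * the module in gb_gpio_irq_bus_sync_unlock().
 */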
struct gb_gpio_line {
	/* The following has to be an array of line_max entries */
	/* --> make them just a flags field */
	u8			active:    1,
				direction: 1,	/* 0 = output, 1 = input */
				value:     1;	/* 0 = low, 1 = high */
	u16			debounce_usec;

	u8			irq_type;
	bool			irq_type_pending;
	bool			masked;
	bool			masked_pending;
};

struct gb_gpio_controller {
	struct gbphy_device	*gbphy_dev;
	struct gb_connection	*connection;
	u8			line_max;	/* max line number */
	struct gb_gpio_line	*lines;

	struct gpio_chip	chip;
	struct irq_chip		irqc;
	struct irq_chip		*irqchip;
	struct irq_domain	*irqdomain;
	unsigned int		irq_base;
	irq_flow_handler_t	irq_handler;
	unsigned int		irq_default_type;
	struct mutex		irq_lock;
};
#define gpio_chip_to_gb_gpio_controller(chip) \
	container_of(chip, struct gb_gpio_controller, chip)
#define irq_data_to_gpio_chip(d) (d->domain->host_data)

static int gb_gpio_line_count_operation(struct gb_gpio_controller *ggc)
{
	struct gb_gpio_line_count_response response;
	int ret;

	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_LINE_COUNT,
				NULL, 0, &response, sizeof(response));
	if (!ret)
		ggc->line_max = response.count;
	return ret;
}

static int gb_gpio_activate_operation(struct gb_gpio_controller *ggc, u8 which)
{
	struct gb_gpio_activate_request request;
	struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
	int ret;

	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		return ret;

	request.which = which;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_ACTIVATE,
				 &request, sizeof(request), NULL, 0);
	if (ret) {
		gbphy_runtime_put_autosuspend(gbphy_dev);
		return ret;
	}

	ggc->lines[which].active = true;

	return 0;
}

static void gb_gpio_deactivate_operation(struct gb_gpio_controller *ggc,
					u8 which)
{
	struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
	struct device *dev = &gbphy_dev->dev;
	struct gb_gpio_deactivate_request request;
	int ret;

	request.which = which;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DEACTIVATE,
				 &request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(dev, "failed to deactivate gpio %u\n", which);
		goto out_pm_put;
	}

	ggc->lines[which].active = false;

out_pm_put:
	gbphy_runtime_put_autosuspend(gbphy_dev);
}

static int gb_gpio_get_direction_operation(struct gb_gpio_controller *ggc,
					u8 which)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_get_direction_request request;
	struct gb_gpio_get_direction_response response;
	int ret;
	u8 direction;

	request.which = which;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_DIRECTION,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret)
		return ret;

	direction = response.direction;
	if (direction && direction != 1) {
		dev_warn(dev, "gpio %u direction was %u (should be 0 or 1)\n",
			 which, direction);
	}
	ggc->lines[which].direction = direction ? 1 : 0;
	return 0;
}

static int gb_gpio_direction_in_operation(struct gb_gpio_controller *ggc,
					u8 which)
{
	struct gb_gpio_direction_in_request request;
	int ret;

	request.which = which;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_IN,
				&request, sizeof(request), NULL, 0);
	if (!ret)
		ggc->lines[which].direction = 1;
	return ret;
}

static int gb_gpio_direction_out_operation(struct gb_gpio_controller *ggc,
					u8 which, bool value_high)
{
	struct gb_gpio_direction_out_request request;
	int ret;

	request.which = which;
	request.value = value_high ? 1 : 0;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_OUT,
				&request, sizeof(request), NULL, 0);
	if (!ret)
		ggc->lines[which].direction = 0;
	return ret;
}

static int gb_gpio_get_value_operation(struct gb_gpio_controller *ggc,
					u8 which)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_get_value_request request;
	struct gb_gpio_get_value_response response;
	int ret;
	u8 value;

	request.which = which;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_VALUE,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(dev, "failed to get value of gpio %u\n", which);
		return ret;
	}

	value = response.value;
	if (value && value != 1) {
		dev_warn(dev, "gpio %u value was %u (should be 0 or 1)\n",
			 which, value);
	}
	ggc->lines[which].value = value ? 1 : 0;
	return 0;
}

static void gb_gpio_set_value_operation(struct gb_gpio_controller *ggc,
					u8 which, bool value_high)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_set_value_request request;
	int ret;

	if (ggc->lines[which].direction == 1) {
		dev_warn(dev, "refusing to set value of input gpio %u\n",
			 which);
		return;
	}

	request.which = which;
	request.value = value_high ? 1 : 0;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_VALUE,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(dev, "failed to set value of gpio %u\n", which);
		return;
	}

	ggc->lines[which].value = request.value;
}

static int gb_gpio_set_debounce_operation(struct gb_gpio_controller *ggc,
					u8 which, u16 debounce_usec)
{
	struct gb_gpio_set_debounce_request request;
	int ret;

	request.which = which;
	request.usec = cpu_to_le16(debounce_usec);
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_DEBOUNCE,
				&request, sizeof(request), NULL, 0);
	if (!ret)
		ggc->lines[which].debounce_usec = debounce_usec;
	return ret;
}

static void _gb_gpio_irq_mask(struct gb_gpio_controller *ggc, u8 hwirq)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_irq_mask_request request;
	int ret;

	request.which = hwirq;
	ret = gb_operation_sync(ggc->connection,
				GB_GPIO_TYPE_IRQ_MASK,
				&request, sizeof(request), NULL, 0);
	if (ret)
		dev_err(dev, "failed to mask irq: %d\n", ret);
}

static void _gb_gpio_irq_unmask(struct gb_gpio_controller *ggc, u8 hwirq)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_irq_unmask_request request;
	int ret;

	request.which = hwirq;
	ret = gb_operation_sync(ggc->connection,
				GB_GPIO_TYPE_IRQ_UNMASK,
				&request, sizeof(request), NULL, 0);
	if (ret)
		dev_err(dev, "failed to unmask irq: %d\n", ret);
}

static void _gb_gpio_irq_set_type(struct gb_gpio_controller *ggc,
					u8 hwirq, u8 type)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_irq_type_request request;
	int ret;

	request.which = hwirq;
	request.type = type;

	ret = gb_operation_sync(ggc->connection,
				GB_GPIO_TYPE_IRQ_TYPE,
				&request, sizeof(request), NULL, 0);
	if (ret)
		dev_err(dev, "failed to set irq type: %d\n", ret);
}

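/*
 * The irq_chip callbacks below are called with the irq descriptor lock
 * held and must not sleep, so they only record the requested state.
 * The actual (blocking) Greybus operations are issued from
 * gb_gpio_irq_bus_sync_unlock(), bracketed by the irq bus lock.
 */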
static void gb_gpio_irq_mask(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];

	line->masked = true;
	line->masked_pending = true;
}

static void gb_gpio_irq_unmask(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];

	line->masked = false;
	line->masked_pending = true;
}

static int gb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];
	struct device *dev = &ggc->gbphy_dev->dev;
	u8 irq_type;

	switch (type) {
	case IRQ_TYPE_NONE:
		irq_type = GB_GPIO_IRQ_TYPE_NONE;
		break;
	case IRQ_TYPE_EDGE_RISING:
		irq_type = GB_GPIO_IRQ_TYPE_EDGE_RISING;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		irq_type = GB_GPIO_IRQ_TYPE_EDGE_FALLING;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		irq_type = GB_GPIO_IRQ_TYPE_EDGE_BOTH;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		irq_type = GB_GPIO_IRQ_TYPE_LEVEL_LOW;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		irq_type = GB_GPIO_IRQ_TYPE_LEVEL_HIGH;
		break;
	default:
		dev_err(dev, "unsupported irq type: %u\n", type);
		return -EINVAL;
	}

	line->irq_type = irq_type;
	line->irq_type_pending = true;

	return 0;
}

static void gb_gpio_irq_bus_lock(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	mutex_lock(&ggc->irq_lock);
}

static void gb_gpio_irq_bus_sync_unlock(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];

	if (line->irq_type_pending) {
		_gb_gpio_irq_set_type(ggc, d->hwirq, line->irq_type);
		line->irq_type_pending = false;
	}

	if (line->masked_pending) {
		if (line->masked)
			_gb_gpio_irq_mask(ggc, d->hwirq);
		else
			_gb_gpio_irq_unmask(ggc, d->hwirq);
		line->masked_pending = false;
	}

	mutex_unlock(&ggc->irq_lock);
}

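/*
 * Incoming (unsolicited) request handler: the module sends an IRQ_EVENT
 * request whenever one of its lines fires.  Translate the hardware line
 * number into a Linux irq via the irq domain and hand it to the generic
 * irq layer.
 */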
static int gb_gpio_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_gpio_controller *ggc = gb_connection_get_data(connection);
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_message *request;
	struct gb_gpio_irq_event_request *event;
	u8 type = op->type;
	int irq;
	struct irq_desc *desc;

	if (type != GB_GPIO_TYPE_IRQ_EVENT) {
		dev_err(dev, "unsupported unsolicited request: %u\n", type);
		return -EINVAL;
	}

	request = op->request;

	if (request->payload_size < sizeof(*event)) {
		dev_err(dev, "short event received (%zu < %zu)\n",
			request->payload_size, sizeof(*event));
		return -EINVAL;
	}

	event = request->payload;
	if (event->which > ggc->line_max) {
		dev_err(dev, "invalid hw irq: %d\n", event->which);
		return -EINVAL;
	}

	irq = irq_find_mapping(ggc->irqdomain, event->which);
	if (!irq) {
		dev_err(dev, "failed to find IRQ\n");
		return -EINVAL;
	}
	desc = irq_to_desc(irq);
	if (!desc) {
		dev_err(dev, "failed to look up irq\n");
		return -EINVAL;
	}

	local_irq_disable();
	generic_handle_irq_desc(desc);
	local_irq_enable();

	return 0;
}

static int gb_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	return gb_gpio_activate_operation(ggc, (u8)offset);
}

static void gb_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	gb_gpio_deactivate_operation(ggc, (u8)offset);
}

static int gb_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	u8 which;
	int ret;

	which = (u8)offset;
	ret = gb_gpio_get_direction_operation(ggc, which);
	if (ret)
		return ret;

	return ggc->lines[which].direction ? 1 : 0;
}

static int gb_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	return gb_gpio_direction_in_operation(ggc, (u8)offset);
}

static int gb_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
				    int value)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	return gb_gpio_direction_out_operation(ggc, (u8)offset, !!value);
}

static int gb_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	u8 which;
	int ret;

	which = (u8)offset;
	ret = gb_gpio_get_value_operation(ggc, which);
	if (ret)
		return ret;

	return ggc->lines[which].value;
}

static void gb_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	gb_gpio_set_value_operation(ggc, (u8)offset, !!value);
}

static int gb_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
			      unsigned long config)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	u32 debounce;

	if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
		return -ENOTSUPP;

	debounce = pinconf_to_config_argument(config);
	if (debounce > U16_MAX)
		return -EINVAL;

	return gb_gpio_set_debounce_operation(ggc, (u8)offset, (u16)debounce);
}

static int gb_gpio_controller_setup(struct gb_gpio_controller *ggc)
{
	int ret;

	/* Now find out how many lines there are */
	ret = gb_gpio_line_count_operation(ggc);
	if (ret)
		return ret;

	ggc->lines = kcalloc(ggc->line_max + 1, sizeof(*ggc->lines),
			     GFP_KERNEL);
	if (!ggc->lines)
		return -ENOMEM;

	return ret;
}

/**
 * gb_gpio_irq_map() - maps an IRQ into a GB gpio irqchip
 * @domain: the irqdomain used by this irqchip
 * @irq: the global irq number used by this GB gpio irqchip irq
 * @hwirq: the local IRQ/GPIO line offset on this GB gpio
 *
 * This function will set up the mapping for a certain IRQ line on a
 * GB gpio by assigning the GB gpio as chip data, and using the irqchip
 * stored inside the GB gpio.
 */
static int gb_gpio_irq_map(struct irq_domain *domain, unsigned int irq,
			   irq_hw_number_t hwirq)
{
	struct gpio_chip *chip = domain->host_data;
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	irq_set_chip_data(irq, ggc);
	irq_set_chip_and_handler(irq, ggc->irqchip, ggc->irq_handler);
	irq_set_noprobe(irq);
	/*
	 * No set-up of the hardware will happen if IRQ_TYPE_NONE
	 * is passed as default type.
	 */
	if (ggc->irq_default_type != IRQ_TYPE_NONE)
		irq_set_irq_type(irq, ggc->irq_default_type);

	return 0;
}

static void gb_gpio_irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}

static const struct irq_domain_ops gb_gpio_domain_ops = {
	.map	= gb_gpio_irq_map,
	.unmap	= gb_gpio_irq_unmap,
};

/**
 * gb_gpio_irqchip_remove() - removes an irqchip added to a gb_gpio_controller
 * @ggc: the gb_gpio_controller to remove the irqchip from
 *
 * This is called only from gb_gpio_remove()
 */
static void gb_gpio_irqchip_remove(struct gb_gpio_controller *ggc)
{
	unsigned int offset;

	/* Remove all IRQ mappings and delete the domain */
	if (ggc->irqdomain) {
		for (offset = 0; offset < (ggc->line_max + 1); offset++)
			irq_dispose_mapping(irq_find_mapping(ggc->irqdomain,
							     offset));
		irq_domain_remove(ggc->irqdomain);
	}

	if (ggc->irqchip)
		ggc->irqchip = NULL;
}

/**
 * gb_gpio_irqchip_add() - adds an irqchip to a gpio chip
 * @chip: the gpio chip to add the irqchip to
 * @irqchip: the irqchip to add to the adapter
 * @first_irq: if not dynamically assigned, the base (first) IRQ to
 * allocate gpio irqs from
 * @handler: the irq handler to use (often a predefined irq core function)
 * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
 * to have the core avoid setting up any default type in the hardware.
 *
 * This function closely associates a certain irqchip with a certain
 * gpio chip, providing an irq domain to translate the local IRQs to
 * global irqs, and making sure that the gpio chip
 * is passed as chip data to all related functions. Driver callbacks
 * need to use container_of() to get their local state containers back
 * from the gpio chip passed as chip data. An irqdomain will be stored
 * in the gpio chip that shall be used by the driver to handle IRQ number
 * translation. The gpio chip will need to be initialized and registered
 * before calling this function.
 */
static int gb_gpio_irqchip_add(struct gpio_chip *chip,
			 struct irq_chip *irqchip,
			 unsigned int first_irq,
			 irq_flow_handler_t handler,
			 unsigned int type)
{
	struct gb_gpio_controller *ggc;
	unsigned int offset;
	unsigned int irq_base;

	if (!chip || !irqchip)
		return -EINVAL;

	ggc = gpio_chip_to_gb_gpio_controller(chip);

	ggc->irqchip = irqchip;
	ggc->irq_handler = handler;
	ggc->irq_default_type = type;
	ggc->irqdomain = irq_domain_add_simple(NULL,
					ggc->line_max + 1, first_irq,
					&gb_gpio_domain_ops, chip);
	if (!ggc->irqdomain) {
		ggc->irqchip = NULL;
		return -EINVAL;
	}

	/*
	 * Prepare the mapping since the irqchip shall be orthogonal to
	 * any gpio calls. If the first_irq was zero, this is
	 * necessary to allocate descriptors for all IRQs.
	 */
	for (offset = 0; offset < (ggc->line_max + 1); offset++) {
		irq_base = irq_create_mapping(ggc->irqdomain, offset);
		if (offset == 0)
			ggc->irq_base = irq_base;
	}

	return 0;
}

static int gb_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	return irq_find_mapping(ggc->irqdomain, offset);
}

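/*
 * Probe: create the CPort connection and enable it for outgoing traffic
 * only, so that the line count can be queried before unsolicited IRQ
 * events can arrive.  Once the irqchip and gpio_chip are set up, the
 * connection is fully enabled and the irq domain and gpio chip are
 * registered.
 */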
static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
			 const struct gbphy_device_id *id)
{
	struct gb_connection *connection;
	struct gb_gpio_controller *ggc;
	struct gpio_chip *gpio;
	struct irq_chip *irqc;
	int ret;

	ggc = kzalloc(sizeof(*ggc), GFP_KERNEL);
	if (!ggc)
		return -ENOMEM;

	connection = gb_connection_create(gbphy_dev->bundle,
					  le16_to_cpu(gbphy_dev->cport_desc->id),
					  gb_gpio_request_handler);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto exit_ggc_free;
	}

	ggc->connection = connection;
	gb_connection_set_data(connection, ggc);
	ggc->gbphy_dev = gbphy_dev;
	gb_gbphy_set_data(gbphy_dev, ggc);

	ret = gb_connection_enable_tx(connection);
	if (ret)
		goto exit_connection_destroy;

	ret = gb_gpio_controller_setup(ggc);
	if (ret)
		goto exit_connection_disable;

	irqc = &ggc->irqc;
	irqc->irq_mask = gb_gpio_irq_mask;
	irqc->irq_unmask = gb_gpio_irq_unmask;
	irqc->irq_set_type = gb_gpio_irq_set_type;
	irqc->irq_bus_lock = gb_gpio_irq_bus_lock;
	irqc->irq_bus_sync_unlock = gb_gpio_irq_bus_sync_unlock;
	irqc->name = "greybus_gpio";

	mutex_init(&ggc->irq_lock);

	gpio = &ggc->chip;

	gpio->label = "greybus_gpio";
	gpio->parent = &gbphy_dev->dev;
	gpio->owner = THIS_MODULE;

	gpio->request = gb_gpio_request;
	gpio->free = gb_gpio_free;
	gpio->get_direction = gb_gpio_get_direction;
	gpio->direction_input = gb_gpio_direction_input;
	gpio->direction_output = gb_gpio_direction_output;
	gpio->get = gb_gpio_get;
	gpio->set = gb_gpio_set;
	gpio->set_config = gb_gpio_set_config;
	gpio->to_irq = gb_gpio_to_irq;
	gpio->base = -1;		/* Allocate base dynamically */
	gpio->ngpio = ggc->line_max + 1;
	gpio->can_sleep = true;

	ret = gb_connection_enable(connection);
	if (ret)
		goto exit_line_free;

	ret = gb_gpio_irqchip_add(gpio, irqc, 0,
				   handle_level_irq, IRQ_TYPE_NONE);
	if (ret) {
		dev_err(&gbphy_dev->dev, "failed to add irq chip: %d\n", ret);
		goto exit_line_free;
	}

	ret = gpiochip_add(gpio);
	if (ret) {
		dev_err(&gbphy_dev->dev, "failed to add gpio chip: %d\n", ret);
		goto exit_gpio_irqchip_remove;
	}

	gbphy_runtime_put_autosuspend(gbphy_dev);
	return 0;

exit_gpio_irqchip_remove:
	gb_gpio_irqchip_remove(ggc);
exit_line_free:
	kfree(ggc->lines);
exit_connection_disable:
	gb_connection_disable(connection);
exit_connection_destroy:
	gb_connection_destroy(connection);
exit_ggc_free:
	kfree(ggc);
	return ret;
}

static void gb_gpio_remove(struct gbphy_device *gbphy_dev)
{
	struct gb_gpio_controller *ggc = gb_gbphy_get_data(gbphy_dev);
	struct gb_connection *connection = ggc->connection;
	int ret;

	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		gbphy_runtime_get_noresume(gbphy_dev);

	gb_connection_disable_rx(connection);
	gpiochip_remove(&ggc->chip);
	gb_gpio_irqchip_remove(ggc);
	gb_connection_disable(connection);
	gb_connection_destroy(connection);
	kfree(ggc->lines);
	kfree(ggc);
}

static const struct gbphy_device_id gb_gpio_id_table[] = {
	{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_GPIO) },
	{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_gpio_id_table);

static struct gbphy_driver gpio_driver = {
	.name		= "gpio",
	.probe		= gb_gpio_probe,
	.remove		= gb_gpio_remove,
	.id_table	= gb_gpio_id_table,
};

module_gbphy_driver(gpio_driver);
MODULE_LICENSE("GPL v2");