// SPDX-License-Identifier: GPL-2.0+
/*
 * Mellanox hotplug driver
 *
 * Copyright (C) 2016-2020 Mellanox Technologies
 */

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_data/mlxreg.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/string_helpers.h>
#include <linux/regmap.h>
#include <linux/workqueue.h>

/* Offset of event and mask registers from status register. */
#define MLXREG_HOTPLUG_EVENT_OFF	1
#define MLXREG_HOTPLUG_MASK_OFF		2
#define MLXREG_HOTPLUG_AGGR_MASK_OFF	1

/* ASIC good health mask. */
#define MLXREG_HOTPLUG_GOOD_HEALTH_MASK	0x02

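/*
 * Maximum number of the dynamically created hotplug sysfs attributes and
 * the number of consecutive handler runs without a top aggregation
 * assertion after which an extra run over all relevant signals is forced.
 */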
#define MLXREG_HOTPLUG_ATTRS_MAX	24
#define MLXREG_HOTPLUG_NOT_ASSERT	3

/**
 * struct mlxreg_hotplug_priv_data - platform private data:
 * @irq: platform device interrupt number;
 * @dev: basic device;
 * @pdev: platform device;
 * @plat: platform data;
 * @regmap: register map handle;
 * @dwork_irq: delayed work template;
 * @lock: spin lock;
 * @hwmon: hwmon device;
 * @mlxreg_hotplug_attr: sysfs attributes array;
 * @mlxreg_hotplug_dev_attr: sysfs sensor device attribute array;
 * @group: sysfs attribute group;
 * @groups: list of sysfs attribute groups for hwmon registration;
 * @cell: location of top aggregation interrupt register;
 * @mask: top aggregation interrupt common mask;
 * @aggr_cache: last value of aggregation register status;
 * @after_probe: flag indicating probing completion;
 * @not_asserted: number of entries in workqueue with no signal assertion;
 */
struct mlxreg_hotplug_priv_data {
	int irq;
	struct device *dev;
	struct platform_device *pdev;
	struct mlxreg_hotplug_platform_data *plat;
	struct regmap *regmap;
	struct delayed_work dwork_irq;
	spinlock_t lock; /* sync with interrupt */
	struct device *hwmon;
	struct attribute *mlxreg_hotplug_attr[MLXREG_HOTPLUG_ATTRS_MAX + 1];
	struct sensor_device_attribute_2
			mlxreg_hotplug_dev_attr[MLXREG_HOTPLUG_ATTRS_MAX];
	struct attribute_group group;
	const struct attribute_group *groups[2];
	u32 cell;
	u32 mask;
	u32 aggr_cache;
	bool after_probe;
	u8 not_asserted;
};

/* Environment variables array for udev. */
static char *mlxreg_hotplug_udev_envp[] = { NULL, NULL };

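/*
 * Send a udev "change" event with an environment string of the form
 * "<LABEL>=<0|1>" to notify user space about an attach/detach event.
 */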
static int
mlxreg_hotplug_udev_event_send(struct kobject *kobj,
			       struct mlxreg_core_data *data, bool action)
{
	char event_str[MLXREG_CORE_LABEL_MAX_SIZE + 2];
	char label[MLXREG_CORE_LABEL_MAX_SIZE] = { 0 };

	mlxreg_hotplug_udev_envp[0] = event_str;
	string_upper(label, data->label);
	snprintf(event_str, MLXREG_CORE_LABEL_MAX_SIZE, "%s=%d", label, !!action);

	return kobject_uevent_env(kobj, KOBJ_CHANGE, mlxreg_hotplug_udev_envp);
}

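/*
 * Attach the I2C device associated with a hotplug signal: notify user space,
 * then instantiate the configured I2C client on the relevant bus, if any.
 */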
static int mlxreg_hotplug_device_create(struct mlxreg_hotplug_priv_data *priv,
					struct mlxreg_core_data *data)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct i2c_client *client;

	/* Notify user by sending hwmon uevent. */
	mlxreg_hotplug_udev_event_send(&priv->hwmon->kobj, data, true);

	/*
	 * Return if the adapter number is negative. This can happen when a
	 * hotplug event is not associated with a hotplug device.
	 */
	if (data->hpdev.nr < 0)
		return 0;

	pdata = dev_get_platdata(&priv->pdev->dev);
	data->hpdev.adapter = i2c_get_adapter(data->hpdev.nr +
					      pdata->shift_nr);
	if (!data->hpdev.adapter) {
		dev_err(priv->dev, "Failed to get adapter for bus %d\n",
			data->hpdev.nr + pdata->shift_nr);
		return -EFAULT;
	}

	client = i2c_new_client_device(data->hpdev.adapter,
				       data->hpdev.brdinfo);
	if (IS_ERR(client)) {
		dev_err(priv->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
			data->hpdev.brdinfo->type, data->hpdev.nr +
			pdata->shift_nr, data->hpdev.brdinfo->addr);

		i2c_put_adapter(data->hpdev.adapter);
		data->hpdev.adapter = NULL;
		return PTR_ERR(client);
	}

	data->hpdev.client = client;

	return 0;
}

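/*
 * Detach the I2C device associated with a hotplug signal: notify user space,
 * then unregister the I2C client and release the adapter, if attached.
 */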
static void
mlxreg_hotplug_device_destroy(struct mlxreg_hotplug_priv_data *priv,
			      struct mlxreg_core_data *data)
{
	/* Notify user by sending hwmon uevent. */
	mlxreg_hotplug_udev_event_send(&priv->hwmon->kobj, data, false);

	if (data->hpdev.client) {
		i2c_unregister_device(data->hpdev.client);
		data->hpdev.client = NULL;
	}

	if (data->hpdev.adapter) {
		i2c_put_adapter(data->hpdev.adapter);
		data->hpdev.adapter = NULL;
	}
}

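/*
 * Show handler for the dynamically created hotplug attributes: read the
 * related status register and report the masked bit state (inverted for
 * "inversed" items, raw masked value for health items).
 */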
static ssize_t mlxreg_hotplug_attr_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(dev);
	struct mlxreg_core_hotplug_platform_data *pdata;
	int index = to_sensor_dev_attr_2(attr)->index;
	int nr = to_sensor_dev_attr_2(attr)->nr;
	struct mlxreg_core_item *item;
	struct mlxreg_core_data *data;
	u32 regval;
	int ret;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items + nr;
	data = item->data + index;

	ret = regmap_read(priv->regmap, data->reg, &regval);
	if (ret)
		return ret;

	if (item->health) {
		regval &= data->mask;
	} else {
		/* Bit = 0 : functional if item->inversed is true. */
		if (item->inversed)
			regval = !(regval & data->mask);
		else
			regval = !!(regval & data->mask);
	}

	return sprintf(buf, "%u\n", regval);
}

#define PRIV_ATTR(i) priv->mlxreg_hotplug_attr[i]
#define PRIV_DEV_ATTR(i) priv->mlxreg_hotplug_dev_attr[i]

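/*
 * Allocate and initialize the sysfs attributes for all interrupt capable
 * components, honouring the group and per-component capability registers.
 */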
static int mlxreg_hotplug_attr_init(struct mlxreg_hotplug_priv_data *priv)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_core_item *item;
	struct mlxreg_core_data *data;
	unsigned long mask;
	u32 regval;
	int num_attrs = 0, id = 0, i, j, k, ret;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;

	/* Go over all kinds of items - psu, pwr, fan. */
	for (i = 0; i < pdata->counter; i++, item++) {
		if (item->capability) {
			/*
			 * Read group capability register to get actual number
			 * of interrupt capable components and set group mask
			 * accordingly.
			 */
			ret = regmap_read(priv->regmap, item->capability,
					  &regval);
			if (ret)
				return ret;

			item->mask = GENMASK((regval & item->mask) - 1, 0);
		}

		data = item->data;

		/* Go over all unmasked units within item. */
		mask = item->mask;
		k = 0;
		for_each_set_bit(j, &mask, item->count) {
			if (data->capability) {
				/*
				 * Read capability register and skip
				 * non-relevant attributes.
				 */
				ret = regmap_read(priv->regmap,
						  data->capability, &regval);
				if (ret)
					return ret;
				if (!(regval & data->bit)) {
					data++;
					continue;
				}
			}
			PRIV_ATTR(id) = &PRIV_DEV_ATTR(id).dev_attr.attr;
			PRIV_ATTR(id)->name = devm_kasprintf(&priv->pdev->dev,
							     GFP_KERNEL,
							     data->label);

			if (!PRIV_ATTR(id)->name) {
				dev_err(priv->dev, "Memory allocation failed for attr %d.\n",
					id);
				return -ENOMEM;
			}

			PRIV_DEV_ATTR(id).dev_attr.attr.name =
							PRIV_ATTR(id)->name;
			PRIV_DEV_ATTR(id).dev_attr.attr.mode = 0444;
			PRIV_DEV_ATTR(id).dev_attr.show =
						mlxreg_hotplug_attr_show;
			PRIV_DEV_ATTR(id).nr = i;
			PRIV_DEV_ATTR(id).index = k;
			sysfs_attr_init(&PRIV_DEV_ATTR(id).dev_attr.attr);
			data++;
			id++;
			k++;
		}
		num_attrs += k;
	}

	priv->group.attrs = devm_kcalloc(&priv->pdev->dev,
					 num_attrs,
					 sizeof(struct attribute *),
					 GFP_KERNEL);
	if (!priv->group.attrs)
		return -ENOMEM;

	priv->group.attrs = priv->mlxreg_hotplug_attr;
	priv->groups[0] = &priv->group;
	priv->groups[1] = NULL;

	return 0;
}

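/*
 * Handle signal assertions for a single item group: mask the group event,
 * compare the current status with the cached one, create or destroy the
 * devices for the toggled bits, then acknowledge and unmask the event.
 */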
static void
mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
			   struct mlxreg_core_item *item)
{
	struct mlxreg_core_data *data;
	unsigned long asserted;
	u32 regval, bit;
	int ret;

	/*
	 * Validate that the item related to the received signal type is
	 * valid. It should never be invalid, except when some piece of
	 * hardware is broken. In such a situation just produce an error
	 * message and return. The caller must continue to handle the
	 * signals from other devices, if any.
	 */
	if (unlikely(!item)) {
		dev_err(priv->dev, "False signal received.\n");
		return;
	}

	/* Mask event. */
	ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
			   0);
	if (ret)
		goto out;

	/* Read status. */
	ret = regmap_read(priv->regmap, item->reg, &regval);
	if (ret)
		goto out;

	/* Set asserted bits and save last status. */
	regval &= item->mask;
	asserted = item->cache ^ regval;
	item->cache = regval;

	for_each_set_bit(bit, &asserted, 8) {
		data = item->data + bit;
		if (regval & BIT(bit)) {
			if (item->inversed)
				mlxreg_hotplug_device_destroy(priv, data);
			else
				mlxreg_hotplug_device_create(priv, data);
		} else {
			if (item->inversed)
				mlxreg_hotplug_device_create(priv, data);
			else
				mlxreg_hotplug_device_destroy(priv, data);
		}
	}

	/* Acknowledge event. */
	ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_EVENT_OFF,
			   0);
	if (ret)
		goto out;

	/* Unmask event. */
	ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
			   item->mask);

 out:
	if (ret)
		dev_err(priv->dev, "Failed to complete workqueue.\n");
}

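/*
 * Handle ASIC health events: attach the associated device once the ASIC
 * reports good health and detach it if the health indication is lost.
 */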
static void
mlxreg_hotplug_health_work_helper(struct mlxreg_hotplug_priv_data *priv,
				  struct mlxreg_core_item *item)
{
	struct mlxreg_core_data *data = item->data;
	u32 regval;
	int i, ret = 0;

	for (i = 0; i < item->count; i++, data++) {
		/* Mask event. */
		ret = regmap_write(priv->regmap, data->reg +
				   MLXREG_HOTPLUG_MASK_OFF, 0);
		if (ret)
			goto out;

		/* Read status. */
		ret = regmap_read(priv->regmap, data->reg, &regval);
		if (ret)
			goto out;

		regval &= data->mask;

		if (item->cache == regval)
			goto ack_event;

		/*
		 * ASIC health indication is provided through two bits. The
		 * value 0x2 indicates that the ASIC reached the good health
		 * state, 0x0 indicates the bad health or dormant state and
		 * 0x3 indicates the booting state. During ASIC reset it
		 * should pass the following states: dormant -> booting ->
		 * good.
		 */
		if (regval == MLXREG_HOTPLUG_GOOD_HEALTH_MASK) {
			if (!data->attached) {
				/*
				 * ASIC is in steady state. Connect associated
				 * device, if configured.
				 */
				mlxreg_hotplug_device_create(priv, data);
				data->attached = true;
			}
		} else {
			if (data->attached) {
				/*
				 * ASIC health has failed after the ASIC has
				 * been in steady state. Disconnect the
				 * associated device, if it has been connected.
				 */
				mlxreg_hotplug_device_destroy(priv, data);
				data->attached = false;
				data->health_cntr = 0;
			}
		}
		item->cache = regval;
ack_event:
		/* Acknowledge event. */
		ret = regmap_write(priv->regmap, data->reg +
				   MLXREG_HOTPLUG_EVENT_OFF, 0);
		if (ret)
			goto out;

		/* Unmask event. */
		ret = regmap_write(priv->regmap, data->reg +
				   MLXREG_HOTPLUG_MASK_OFF, data->mask);
		if (ret)
			goto out;
	}

 out:
	if (ret)
		dev_err(priv->dev, "Failed to complete workqueue.\n");
}

/*
 * mlxreg_hotplug_work_handler - traverses the device interrupt registers
 * according to the hierarchy schema below:
 *
 *				Aggregation registers (status/mask)
 * PSU registers:		*---*
 * *-----------------*		|   |
 * |status/event/mask|----->    | * |
 * *-----------------*		|   |
 * Power registers:		|   |
 * *-----------------*		|   |
 * |status/event/mask|----->    | * |
 * *-----------------*		|   |
 * FAN registers:		|   |--> CPU
 * *-----------------*		|   |
 * |status/event/mask|----->    | * |
 * *-----------------*		|   |
 * ASIC registers:		|   |
 * *-----------------*		|   |
 * |status/event/mask|----->    | * |
 * *-----------------*		|   |
 *				*---*
 *
 * In case some system changes are detected: FAN in/out, PSU in/out, power
 * cable attached/detached, ASIC health good/bad, the relevant device is
 * created or destroyed.
 */
static void mlxreg_hotplug_work_handler(struct work_struct *work)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_hotplug_priv_data *priv;
	struct mlxreg_core_item *item;
	u32 regval, aggr_asserted;
	unsigned long flags;
	int i, ret;

	priv = container_of(work, struct mlxreg_hotplug_priv_data,
			    dwork_irq.work);
	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;

	/* Mask aggregation event. */
	ret = regmap_write(priv->regmap, pdata->cell +
			   MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);
	if (ret < 0)
		goto out;

	/* Read aggregation status. */
	ret = regmap_read(priv->regmap, pdata->cell, &regval);
	if (ret)
		goto out;

	regval &= pdata->mask;
	aggr_asserted = priv->aggr_cache ^ regval;
	priv->aggr_cache = regval;

	/*
	 * The handler has been invoked, but no assertion has been detected at
	 * the top aggregation status level. Set aggr_asserted to the mask
	 * value to give the handler an extra run over all relevant signals
	 * and recover any missed signal.
	 */
	if (priv->not_asserted == MLXREG_HOTPLUG_NOT_ASSERT) {
		priv->not_asserted = 0;
		aggr_asserted = pdata->mask;
	}
	if (!aggr_asserted)
		goto unmask_event;

	/* Handle topology and health configuration changes. */
	for (i = 0; i < pdata->counter; i++, item++) {
		if (aggr_asserted & item->aggr_mask) {
			if (item->health)
				mlxreg_hotplug_health_work_helper(priv, item);
			else
				mlxreg_hotplug_work_helper(priv, item);
		}
	}

	spin_lock_irqsave(&priv->lock, flags);

	/*
	 * It is possible that some signals have been asserted while the
	 * interrupt was masked by mlxreg_hotplug_work_handler. Such signals
	 * would otherwise be missed, so the delayed work is canceled and the
	 * work task is re-scheduled for immediate execution to handle them.
	 * In the common case this re-run just validates that no new signals
	 * have been received during masking.
	 */
	cancel_delayed_work(&priv->dwork_irq);
	schedule_delayed_work(&priv->dwork_irq, 0);

	spin_unlock_irqrestore(&priv->lock, flags);

	return;

unmask_event:
	priv->not_asserted++;
	/* Unmask aggregation event (no acknowledge needed). */
	ret = regmap_write(priv->regmap, pdata->cell +
			   MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);

 out:
	if (ret)
		dev_err(priv->dev, "Failed to complete workqueue.\n");
}

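/*
 * Set up the hotplug interrupts: clear stale events, apply capability
 * masks, unmask the group and aggregation events and run the work handler
 * once to establish the initial state.
 */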
static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_core_item *item;
	struct mlxreg_core_data *data;
	u32 regval;
	int i, j, ret;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;

	for (i = 0; i < pdata->counter; i++, item++) {
		/* Clear group presence event. */
		ret = regmap_write(priv->regmap, item->reg +
				   MLXREG_HOTPLUG_EVENT_OFF, 0);
		if (ret)
			goto out;

		/*
		 * Verify whether the hardware configuration requires
		 * disabling the interrupt capability for some of the
		 * components.
		 */
		data = item->data;
		for (j = 0; j < item->count; j++, data++) {
			/* Verify if the attribute has a capability register. */
			if (data->capability) {
				/* Read capability register. */
				ret = regmap_read(priv->regmap,
						  data->capability, &regval);
				if (ret)
					goto out;

				if (!(regval & data->bit))
					item->mask &= ~BIT(j);
			}
		}

		/* Set group initial status as mask and unmask group event. */
		if (item->inversed) {
			item->cache = item->mask;
			ret = regmap_write(priv->regmap, item->reg +
					   MLXREG_HOTPLUG_MASK_OFF,
					   item->mask);
			if (ret)
				goto out;
		}
	}

	/* Keep aggregation initial status as zero and unmask events. */
	ret = regmap_write(priv->regmap, pdata->cell +
			   MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);
	if (ret)
		goto out;

	/* Keep low aggregation initial status as zero and unmask events. */
	if (pdata->cell_low) {
		ret = regmap_write(priv->regmap, pdata->cell_low +
				   MLXREG_HOTPLUG_AGGR_MASK_OFF,
				   pdata->mask_low);
		if (ret)
			goto out;
	}

	/* Invoke the work handler to initialize the hotplug device settings. */
	mlxreg_hotplug_work_handler(&priv->dwork_irq.work);

 out:
	if (ret)
		dev_err(priv->dev, "Failed to set interrupts.\n");
	enable_irq(priv->irq);
	return ret;
}

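/*
 * Tear down the hotplug interrupts: disable the IRQ, cancel pending work,
 * mask and clear all events and destroy the attached devices.
 */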
static void mlxreg_hotplug_unset_irq(struct mlxreg_hotplug_priv_data *priv)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_core_item *item;
	struct mlxreg_core_data *data;
	int count, i, j;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;
	disable_irq(priv->irq);
	cancel_delayed_work_sync(&priv->dwork_irq);

	/* Mask low aggregation event, if defined. */
	if (pdata->cell_low)
		regmap_write(priv->regmap, pdata->cell_low +
			     MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);

	/* Mask aggregation event. */
	regmap_write(priv->regmap, pdata->cell + MLXREG_HOTPLUG_AGGR_MASK_OFF,
		     0);

	/* Clear topology configurations. */
	for (i = 0; i < pdata->counter; i++, item++) {
		data = item->data;
		/* Mask group presence event. */
		regmap_write(priv->regmap, data->reg + MLXREG_HOTPLUG_MASK_OFF,
			     0);
		/* Clear group presence event. */
		regmap_write(priv->regmap, data->reg +
			     MLXREG_HOTPLUG_EVENT_OFF, 0);

		/* Remove all the attached devices in the group. */
		count = item->count;
		for (j = 0; j < count; j++, data++)
			mlxreg_hotplug_device_destroy(priv, data);
	}
}

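/* Top half: defer all handling to the delayed work. */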
static irqreturn_t mlxreg_hotplug_irq_handler(int irq, void *dev)
{
	struct mlxreg_hotplug_priv_data *priv;

	priv = (struct mlxreg_hotplug_priv_data *)dev;

	/* Schedule work task for immediate execution. */
	schedule_delayed_work(&priv->dwork_irq, 0);

	return IRQ_HANDLED;
}

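/* Probe: map platform data, request the IRQ and register the hwmon device. */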
static int mlxreg_hotplug_probe(struct platform_device *pdev)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_hotplug_priv_data *priv;
	struct i2c_adapter *deferred_adap;
	int err;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "Failed to get platform data.\n");
		return -EINVAL;
	}

	/* Defer probing if the necessary adapter is not configured yet. */
	deferred_adap = i2c_get_adapter(pdata->deferred_nr);
	if (!deferred_adap)
		return -EPROBE_DEFER;
	i2c_put_adapter(deferred_adap);

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (pdata->irq) {
		priv->irq = pdata->irq;
	} else {
		priv->irq = platform_get_irq(pdev, 0);
		if (priv->irq < 0)
			return priv->irq;
	}

	priv->regmap = pdata->regmap;
	priv->dev = pdev->dev.parent;
	priv->pdev = pdev;

	err = devm_request_irq(&pdev->dev, priv->irq,
			       mlxreg_hotplug_irq_handler, IRQF_TRIGGER_FALLING
			       | IRQF_SHARED, "mlxreg-hotplug", priv);
	if (err) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
		return err;
	}

	disable_irq(priv->irq);
	spin_lock_init(&priv->lock);
	INIT_DELAYED_WORK(&priv->dwork_irq, mlxreg_hotplug_work_handler);
	dev_set_drvdata(&pdev->dev, priv);

	err = mlxreg_hotplug_attr_init(priv);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate attributes: %d\n",
			err);
		return err;
	}

	priv->hwmon = devm_hwmon_device_register_with_groups(&pdev->dev,
					"mlxreg_hotplug", priv, priv->groups);
	if (IS_ERR(priv->hwmon)) {
		dev_err(&pdev->dev, "Failed to register hwmon device %ld\n",
			PTR_ERR(priv->hwmon));
		return PTR_ERR(priv->hwmon);
	}

	/* Perform initial interrupts setup. */
	mlxreg_hotplug_set_irq(priv);
	priv->after_probe = true;

	return 0;
}

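/* Remove: tear down interrupts and release the IRQ. */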
static int mlxreg_hotplug_remove(struct platform_device *pdev)
{
	struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(&pdev->dev);

	/* Clean up the interrupt setup. */
	mlxreg_hotplug_unset_irq(priv);
	devm_free_irq(&pdev->dev, priv->irq, priv);

	return 0;
}

static struct platform_driver mlxreg_hotplug_driver = {
	.driver = {
		.name = "mlxreg-hotplug",
	},
	.probe = mlxreg_hotplug_probe,
	.remove = mlxreg_hotplug_remove,
};

module_platform_driver(mlxreg_hotplug_driver);

MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
MODULE_DESCRIPTION("Mellanox regmap hotplug platform driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:mlxreg-hotplug");