/*
 * drivers/uio/uio_dmem_genirq.c
 *
 * Userspace I/O platform driver with generic IRQ handling code.
 *
 * Copyright (C) 2012 Damian Hobson-Garcia
 *
 * Based on uio_pdrv_genirq.c by Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/uio_driver.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_data/uio_dmem_genirq.h>
#include <linux/stringify.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>

#define DRIVER_NAME "uio_dmem_genirq"
#define DMEM_MAP_ERROR (~0)

struct uio_dmem_genirq_platdata {
	struct uio_info *uioinfo;
	spinlock_t lock;
	unsigned long flags;
	struct platform_device *pdev;
	unsigned int dmem_region_start;
	unsigned int num_dmem_regions;
	void *dmem_region_vaddr[MAX_UIO_MAPS];
	struct mutex alloc_lock;
	unsigned int refcnt;
};
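
/*
 * Example (sketch, not part of the driver): a board file could describe
 * one 4 MiB dynamic region through the platform data declared in
 * <linux/platform_data/uio_dmem_genirq.h>. The platform device must be
 * registered under DRIVER_NAME for the driver to bind; the sizes below
 * feed the dynamic-region loop in probe().
 *
 *	static unsigned int dmem_sizes[] = { 4 * 1024 * 1024 };
 *
 *	static struct uio_dmem_genirq_pdata dmem_pdata = {
 *		.uioinfo = {
 *			.name = "dmem-example",
 *			.version = "0",
 *		},
 *		.dynamic_region_sizes = dmem_sizes,
 *		.num_dynamic_regions = ARRAY_SIZE(dmem_sizes),
 *	};
 */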

static int uio_dmem_genirq_open(struct uio_info *info, struct inode *inode)
{
	struct uio_dmem_genirq_platdata *priv = info->priv;
	struct uio_mem *uiomem;
	int ret = 0;
	int dmem_region = priv->dmem_region_start;

	uiomem = &priv->uioinfo->mem[priv->dmem_region_start];

	mutex_lock(&priv->alloc_lock);
	while (!priv->refcnt && uiomem < &priv->uioinfo->mem[MAX_UIO_MAPS]) {
		void *addr;
		if (!uiomem->size)
			break;

		addr = dma_alloc_coherent(&priv->pdev->dev, uiomem->size,
				(dma_addr_t *)&uiomem->addr, GFP_KERNEL);
		if (!addr)
			uiomem->addr = DMEM_MAP_ERROR;
		priv->dmem_region_vaddr[dmem_region++] = addr;
		++uiomem;
	}
	priv->refcnt++;

	mutex_unlock(&priv->alloc_lock);
	/* Wait until the Runtime PM code has woken up the device */
	pm_runtime_get_sync(&priv->pdev->dev);
	return ret;
}
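
/*
 * Userspace view (sketch): UIO map N is selected by passing N * page_size
 * as the mmap() offset, so a region allocated in open() above is reached
 * like this ("/dev/uio0" and map index 1 are illustrative assumptions):
 *
 *	int fd = open("/dev/uio0", O_RDWR);	// invokes open() above
 *	long ps = sysconf(_SC_PAGESIZE);
 *	void *dmem = mmap(NULL, region_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 1 * ps);	// UIO map 1
 *
 * The DMA address that dma_alloc_coherent() stored in uiomem->addr can be
 * read back from /sys/class/uio/uio0/maps/map1/addr; it reads as
 * DMEM_MAP_ERROR whenever no process holds the device open.
 */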

static int uio_dmem_genirq_release(struct uio_info *info, struct inode *inode)
{
	struct uio_dmem_genirq_platdata *priv = info->priv;
	struct uio_mem *uiomem;
	int dmem_region = priv->dmem_region_start;

	/* Tell the Runtime PM code that the device has become idle */
	pm_runtime_put_sync(&priv->pdev->dev);

	uiomem = &priv->uioinfo->mem[priv->dmem_region_start];

	mutex_lock(&priv->alloc_lock);

	priv->refcnt--;
	while (!priv->refcnt && uiomem < &priv->uioinfo->mem[MAX_UIO_MAPS]) {
		if (!uiomem->size)
			break;
		if (priv->dmem_region_vaddr[dmem_region]) {
			dma_free_coherent(&priv->pdev->dev, uiomem->size,
					priv->dmem_region_vaddr[dmem_region],
					uiomem->addr);
		}
		uiomem->addr = DMEM_MAP_ERROR;
		++dmem_region;
		++uiomem;
	}

	mutex_unlock(&priv->alloc_lock);
	return 0;
}

static irqreturn_t uio_dmem_genirq_handler(int irq, struct uio_info *dev_info)
{
	struct uio_dmem_genirq_platdata *priv = dev_info->priv;

	/* Just disable the interrupt in the interrupt controller, and
	 * remember the state so we can allow user space to enable it later.
	 */

	if (!test_and_set_bit(0, &priv->flags))
		disable_irq_nosync(irq);

	return IRQ_HANDLED;
}

static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
{
	struct uio_dmem_genirq_platdata *priv = dev_info->priv;
	unsigned long flags;

	/* Allow user space to enable and disable the interrupt
	 * in the interrupt controller, but keep track of the
	 * state to prevent per-irq depth damage.
	 *
	 * Serialize this operation to support multiple tasks.
	 */

	spin_lock_irqsave(&priv->lock, flags);
	if (irq_on) {
		if (test_and_clear_bit(0, &priv->flags))
			enable_irq(dev_info->irq);
	} else {
		if (!test_and_set_bit(0, &priv->flags))
			disable_irq_nosync(dev_info->irq);
	}
	/* Unlock unconditionally: the original code leaked the lock when
	 * the interrupt was already disabled. disable_irq_nosync() is used
	 * because disable_irq() may wait for the handler while we hold the
	 * lock the handler path contends on.
	 */
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
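
/*
 * Userspace view (sketch): a write() of a 32-bit integer to the device
 * node lands in the irqcontrol hook above (1 = enable, 0 = disable), and
 * a blocking read() returns the interrupt event count once the handler
 * fires, giving the usual UIO event loop (the "/dev/uio0" descriptor fd
 * is an illustrative assumption):
 *
 *	uint32_t info, enable = 1;
 *	while (read(fd, &info, sizeof(info)) == sizeof(info)) {
 *		// interrupt is now masked: ack/handle the hardware
 *		// here, then re-enable it for the next event
 *		write(fd, &enable, sizeof(enable));
 *	}
 */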

static int uio_dmem_genirq_probe(struct platform_device *pdev)
{
	struct uio_dmem_genirq_pdata *pdata = dev_get_platdata(&pdev->dev);
	struct uio_info *uioinfo = &pdata->uioinfo;
	struct uio_dmem_genirq_platdata *priv;
	struct uio_mem *uiomem;
	int ret = -EINVAL;
	int i;

	if (pdev->dev.of_node) {
		int irq;

		/* alloc uioinfo for one device */
		uioinfo = kzalloc(sizeof(*uioinfo), GFP_KERNEL);
		if (!uioinfo) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "unable to kmalloc\n");
			goto bad2;
		}
		uioinfo->name = pdev->dev.of_node->name;
		uioinfo->version = "devicetree";

		/* Multiple IRQs are not supported */
		irq = platform_get_irq(pdev, 0);
		if (irq == -ENXIO)
			uioinfo->irq = UIO_IRQ_NONE;
		else
			uioinfo->irq = irq;
	}

	if (!uioinfo || !uioinfo->name || !uioinfo->version) {
		dev_err(&pdev->dev, "missing platform_data\n");
		goto bad0;
	}

	if (uioinfo->handler || uioinfo->irqcontrol ||
	    uioinfo->irq_flags & IRQF_SHARED) {
		dev_err(&pdev->dev, "interrupt configuration error\n");
		goto bad0;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "unable to kmalloc\n");
		goto bad0;
	}

	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

	priv->uioinfo = uioinfo;
	spin_lock_init(&priv->lock);
	priv->flags = 0; /* interrupt is enabled to begin with */
	priv->pdev = pdev;
	mutex_init(&priv->alloc_lock);

	if (!uioinfo->irq) {
		ret = platform_get_irq(pdev, 0);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to get IRQ\n");
			goto bad1;
		}
		uioinfo->irq = ret;
	}
	uiomem = &uioinfo->mem[0];

	for (i = 0; i < pdev->num_resources; ++i) {
		struct resource *r = &pdev->resource[i];

		if (r->flags != IORESOURCE_MEM)
			continue;

		if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
			dev_warn(&pdev->dev, "device has more than "
					__stringify(MAX_UIO_MAPS)
					" I/O memory resources.\n");
			break;
		}

		uiomem->memtype = UIO_MEM_PHYS;
		uiomem->addr = r->start;
		uiomem->size = resource_size(r);
		++uiomem;
	}

	priv->dmem_region_start = uiomem - &uioinfo->mem[0];
	priv->num_dmem_regions = pdata->num_dynamic_regions;

	for (i = 0; i < pdata->num_dynamic_regions; ++i) {
		if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
			dev_warn(&pdev->dev, "device has more than "
					__stringify(MAX_UIO_MAPS)
					" dynamic and fixed memory regions.\n");
			break;
		}
		uiomem->memtype = UIO_MEM_PHYS;
		uiomem->addr = DMEM_MAP_ERROR;
		uiomem->size = pdata->dynamic_region_sizes[i];
		++uiomem;
	}

	while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) {
		uiomem->size = 0;
		++uiomem;
	}

	/* This driver requires no hardware specific kernel code to handle
	 * interrupts. Instead, the interrupt handler simply disables the
	 * interrupt in the interrupt controller. User space is responsible
	 * for performing hardware specific acknowledge and re-enabling of
	 * the interrupt in the interrupt controller.
	 *
	 * Interrupt sharing is not supported.
	 */

	uioinfo->handler = uio_dmem_genirq_handler;
	uioinfo->irqcontrol = uio_dmem_genirq_irqcontrol;
	uioinfo->open = uio_dmem_genirq_open;
	uioinfo->release = uio_dmem_genirq_release;
	uioinfo->priv = priv;

	/* Enable Runtime PM for this device:
	 * The device starts in suspended state to allow the hardware to be
	 * turned off by default. The Runtime PM bus code should power on the
	 * hardware and enable clocks at open().
	 */
	pm_runtime_enable(&pdev->dev);

	ret = uio_register_device(&pdev->dev, priv->uioinfo);
	if (ret) {
		dev_err(&pdev->dev, "unable to register uio device\n");
		pm_runtime_disable(&pdev->dev);
		goto bad1;
	}

	platform_set_drvdata(pdev, priv);
	return 0;
 bad1:
	kfree(priv);
 bad0:
	/* kfree uioinfo for OF */
	if (pdev->dev.of_node)
		kfree(uioinfo);
 bad2:
	return ret;
}
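
/*
 * Device tree use (sketch): uio_of_genirq_match below is empty, so a
 * compatible entry ("vendor,my-dmem" is a made-up example) has to be
 * added there first. Even on the of_node path, probe() still takes the
 * dynamic region sizes from platform data, so a DT platform would
 * typically attach that with OF_DEV_AUXDATA while populating the bus:
 *
 *	static struct of_dev_auxdata auxdata_lookup[] = {
 *		OF_DEV_AUXDATA("vendor,my-dmem", 0x43c00000, NULL,
 *			       &dmem_pdata),
 *		{ },
 *	};
 *
 *	of_platform_populate(NULL, of_default_bus_match_table,
 *			     auxdata_lookup, NULL);
 */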

static int uio_dmem_genirq_remove(struct platform_device *pdev)
{
	struct uio_dmem_genirq_platdata *priv = platform_get_drvdata(pdev);

	uio_unregister_device(priv->uioinfo);
	pm_runtime_disable(&pdev->dev);

	priv->uioinfo->handler = NULL;
	priv->uioinfo->irqcontrol = NULL;

	/* kfree uioinfo for OF */
	if (pdev->dev.of_node)
		kfree(priv->uioinfo);

	kfree(priv);
	return 0;
}

static int uio_dmem_genirq_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * In this driver pm_runtime_get_sync() and pm_runtime_put_sync()
	 * are used at open() and release() time. This allows the
	 * Runtime PM code to turn off power to the device while the
	 * device is unused, ie before open() and after release().
	 *
	 * This Runtime PM callback does not need to save or restore
	 * any registers since user space is responsible for hardware
	 * register reinitialization after open().
	 */
	return 0;
}
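
/*
 * Sketch (assumption, not part of this driver): a hardware-specific
 * variant that does gate a clock between release() and open() could do
 * so in these callbacks instead of just returning, e.g.:
 *
 *	static int my_uio_runtime_suspend(struct device *dev)
 *	{
 *		clk_disable_unprepare(my_clk);	// my_clk: hypothetical
 *		return 0;
 *	}
 *
 * with the matching clk_prepare_enable() in ->runtime_resume().
 */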

static const struct dev_pm_ops uio_dmem_genirq_dev_pm_ops = {
	.runtime_suspend = uio_dmem_genirq_runtime_nop,
	.runtime_resume = uio_dmem_genirq_runtime_nop,
};

#ifdef CONFIG_OF
static const struct of_device_id uio_of_genirq_match[] = {
	{ /* empty for now */ },
};
MODULE_DEVICE_TABLE(of, uio_of_genirq_match);
#endif

static struct platform_driver uio_dmem_genirq = {
	.probe = uio_dmem_genirq_probe,
	.remove = uio_dmem_genirq_remove,
	.driver = {
		.name = DRIVER_NAME,
		.pm = &uio_dmem_genirq_dev_pm_ops,
		.of_match_table = of_match_ptr(uio_of_genirq_match),
	},
};

module_platform_driver(uio_dmem_genirq);

MODULE_AUTHOR("Damian Hobson-Garcia");
MODULE_DESCRIPTION("Userspace I/O platform driver with dynamic memory.");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);