/*
 * cdev.c - Application interfacing module for character devices
 *
 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * This file is licensed under GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/poll.h>
#include <linux/kfifo.h>
#include <linux/uaccess.h>
#include <linux/idr.h>
#include "mostcore.h"

static dev_t aim_devno;
static struct class *aim_class;
static struct ida minor_id;
static unsigned int major;
static struct most_aim cdev_aim;

struct aim_channel {
	wait_queue_head_t wq;
	spinlock_t unlink;	/* synchronization lock to unlink channels */
	struct cdev cdev;
	struct device *dev;
	struct mutex io_mutex;
	struct most_interface *iface;
	struct most_channel_config *cfg;
	unsigned int channel_id;
	dev_t devno;
	size_t mbo_offs;
	DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
	int access_ref;
	struct list_head list;
};

#define to_channel(d) container_of(d, struct aim_channel, cdev)
static struct list_head channel_list;
static spinlock_t ch_list_lock;

static inline bool ch_has_mbo(struct aim_channel *c)
{
	return channel_has_mbo(c->iface, c->channel_id, &cdev_aim) > 0;
}

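/*
 * ch_get_mbo - reuse the MBO already queued in the channel's fifo or, if the
 * fifo is empty, request a fresh one from the core and queue it. The MBO
 * stays queued until it has been filled and submitted by aim_write().
 */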
static inline bool ch_get_mbo(struct aim_channel *c, struct mbo **mbo)
{
	if (!kfifo_peek(&c->fifo, mbo)) {
		*mbo = most_get_mbo(c->iface, c->channel_id, &cdev_aim);
		if (*mbo)
			kfifo_in(&c->fifo, mbo, 1);
	}
	return *mbo;
}

static struct aim_channel *get_channel(struct most_interface *iface, int id)
{
	struct aim_channel *c, *tmp;
	unsigned long flags;
	int found_channel = 0;

	spin_lock_irqsave(&ch_list_lock, flags);
	list_for_each_entry_safe(c, tmp, &channel_list, list) {
		if ((c->iface == iface) && (c->channel_id == id)) {
			found_channel = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&ch_list_lock, flags);
	if (!found_channel)
		return NULL;
	return c;
}

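/*
 * stop_channel - return all MBOs still held in the channel's fifo to the
 * core and stop the channel.
 */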
static void stop_channel(struct aim_channel *c)
{
	struct mbo *mbo;

	while (kfifo_out((struct kfifo *)&c->fifo, &mbo, 1))
		most_put_mbo(mbo);
	most_stop_channel(c->iface, c->channel_id, &cdev_aim);
}

static void destroy_cdev(struct aim_channel *c)
{
	unsigned long flags;

	device_destroy(aim_class, c->devno);
	cdev_del(&c->cdev);
	kfifo_free(&c->fifo);
	spin_lock_irqsave(&ch_list_lock, flags);
	list_del(&c->list);
	spin_unlock_irqrestore(&ch_list_lock, flags);
	ida_simple_remove(&minor_id, MINOR(c->devno));
}

/**
 * aim_open - implements the syscall to open the device
 * @inode: inode pointer
 * @filp: file pointer
 *
 * This stores the channel pointer in the private data field of
 * the file structure and activates the channel within the core.
 */
static int aim_open(struct inode *inode, struct file *filp)
{
	struct aim_channel *c;
	int ret;

	c = to_channel(inode->i_cdev);
	filp->private_data = c;

	if (((c->cfg->direction == MOST_CH_RX) &&
	     ((filp->f_flags & O_ACCMODE) != O_RDONLY)) ||
	    ((c->cfg->direction == MOST_CH_TX) &&
	     ((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
		pr_info("WARN: Access flags mismatch\n");
		return -EACCES;
	}

	mutex_lock(&c->io_mutex);
	if (!c->dev) {
		pr_info("WARN: Device is destroyed\n");
		mutex_unlock(&c->io_mutex);
		return -ENODEV;
	}

	if (c->access_ref) {
		pr_info("WARN: Device is busy\n");
		mutex_unlock(&c->io_mutex);
		return -EBUSY;
	}

	c->mbo_offs = 0;
	ret = most_start_channel(c->iface, c->channel_id, &cdev_aim);
	if (!ret)
		c->access_ref = 1;
	mutex_unlock(&c->io_mutex);
	return ret;
}

/**
 * aim_close - implements the syscall to close the device
 * @inode: inode pointer
 * @filp: file pointer
 *
 * This stops the channel within the core.
 */
static int aim_close(struct inode *inode, struct file *filp)
{
	struct aim_channel *c = to_channel(inode->i_cdev);

	mutex_lock(&c->io_mutex);
	spin_lock(&c->unlink);
	c->access_ref = 0;
	spin_unlock(&c->unlink);
	if (c->dev) {
		stop_channel(c);
		mutex_unlock(&c->io_mutex);
	} else {
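		/*
		 * The interface was removed while the file was still open;
		 * release the cdev and channel object that
		 * aim_disconnect_channel() had to leave behind.
		 */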
		destroy_cdev(c);
		mutex_unlock(&c->io_mutex);
		kfree(c);
	}
	return 0;
}

/**
 * aim_write - implements the syscall to write to the device
 * @filp: file pointer
 * @buf: pointer to user buffer
 * @count: number of bytes to write
 * @offset: offset from where to start writing
 */
static ssize_t aim_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *offset)
{
	int ret;
	size_t to_copy, left;
	struct mbo *mbo = NULL;
	struct aim_channel *c = filp->private_data;

	mutex_lock(&c->io_mutex);
	while (c->dev && !ch_get_mbo(c, &mbo)) {
		mutex_unlock(&c->io_mutex);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev))
			return -ERESTARTSYS;
		mutex_lock(&c->io_mutex);
	}

	if (unlikely(!c->dev)) {
		ret = -ENODEV;
		goto unlock;
	}

	to_copy = min(count, c->cfg->buffer_size - c->mbo_offs);
	left = copy_from_user(mbo->virt_address + c->mbo_offs, buf, to_copy);
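	/* copy_from_user() returns the number of bytes it could NOT copy;
	 * treat the write as failed only if nothing was copied at all.
	 */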
	if (left == to_copy) {
		ret = -EFAULT;
		goto unlock;
	}

	c->mbo_offs += to_copy - left;
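	/*
	 * Submit the MBO to the core once the buffer is full; control and
	 * asynchronous messages are handed over after every write() call.
	 */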
	if (c->mbo_offs >= c->cfg->buffer_size ||
	    c->cfg->data_type == MOST_CH_CONTROL ||
	    c->cfg->data_type == MOST_CH_ASYNC) {
		kfifo_skip(&c->fifo);
		mbo->buffer_length = c->mbo_offs;
		c->mbo_offs = 0;
		most_submit_mbo(mbo);
	}

	ret = to_copy - left;
unlock:
	mutex_unlock(&c->io_mutex);
	return ret;
}

/**
 * aim_read - implements the syscall to read from the device
 * @filp: file pointer
 * @buf: pointer to user buffer
 * @count: number of bytes to read
 * @offset: offset from where to start reading
 */
static ssize_t
aim_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
{
	size_t to_copy, not_copied, copied;
	struct mbo *mbo;
	struct aim_channel *c = filp->private_data;

	mutex_lock(&c->io_mutex);
	while (c->dev && !kfifo_peek(&c->fifo, &mbo)) {
		mutex_unlock(&c->io_mutex);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		if (wait_event_interruptible(c->wq,
					     (!kfifo_is_empty(&c->fifo) ||
					      (!c->dev))))
			return -ERESTARTSYS;
		mutex_lock(&c->io_mutex);
	}

	/* make sure we don't read from a device that is gone */
	if (unlikely(!c->dev)) {
		mutex_unlock(&c->io_mutex);
		return -ENODEV;
	}

	to_copy = min_t(size_t,
			count,
			mbo->processed_length - c->mbo_offs);

	not_copied = copy_to_user(buf,
				  mbo->virt_address + c->mbo_offs,
				  to_copy);

	copied = to_copy - not_copied;

	c->mbo_offs += copied;
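	/* return the MBO to the core once its payload is fully consumed */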
	if (c->mbo_offs >= mbo->processed_length) {
		kfifo_skip(&c->fifo);
		most_put_mbo(mbo);
		c->mbo_offs = 0;
	}
	mutex_unlock(&c->io_mutex);
	return copied;
}

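/**
 * aim_poll - implements the syscall to poll the device
 * @filp: file pointer
 * @wait: poll table
 *
 * An rx channel is reported readable while received MBOs are queued in the
 * fifo; a tx channel is reported writable while a transmit MBO is available.
 */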
static unsigned int aim_poll(struct file *filp, poll_table *wait)
{
	struct aim_channel *c = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &c->wq, wait);

	if (c->cfg->direction == MOST_CH_RX) {
		if (!kfifo_is_empty(&c->fifo))
			mask |= POLLIN | POLLRDNORM;
	} else {
		if (!kfifo_is_empty(&c->fifo) || ch_has_mbo(c))
			mask |= POLLOUT | POLLWRNORM;
	}
	return mask;
}

/*
 * Initialization of struct file_operations
 */
static const struct file_operations channel_fops = {
	.owner = THIS_MODULE,
	.read = aim_read,
	.write = aim_write,
	.open = aim_open,
	.release = aim_close,
	.poll = aim_poll,
};

/**
 * aim_disconnect_channel - disconnect a channel
 * @iface: pointer to interface instance
 * @channel_id: channel index
 *
 * This frees allocated memory and removes the cdev that represents this
 * channel in user space.
 */
static int aim_disconnect_channel(struct most_interface *iface, int channel_id)
{
	struct aim_channel *c;

	if (!iface) {
		pr_info("Bad interface pointer\n");
		return -EINVAL;
	}

	c = get_channel(iface, channel_id);
	if (!c)
		return -ENXIO;

	mutex_lock(&c->io_mutex);
	spin_lock(&c->unlink);
	c->dev = NULL;
	spin_unlock(&c->unlink);
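	/*
	 * If user space still holds the device open, only stop the channel
	 * and let aim_close() release the cdev; otherwise tear it down now.
	 */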
	if (c->access_ref) {
		stop_channel(c);
		wake_up_interruptible(&c->wq);
		mutex_unlock(&c->io_mutex);
	} else {
		destroy_cdev(c);
		mutex_unlock(&c->io_mutex);
		kfree(c);
	}
	return 0;
}

/**
 * aim_rx_completion - completion handler for rx channels
 * @mbo: pointer to buffer object that has completed
 *
 * This searches for the channel linked to this MBO and stores it in the local
 * fifo buffer.
 */
static int aim_rx_completion(struct mbo *mbo)
{
	struct aim_channel *c;

	if (!mbo)
		return -EINVAL;

	c = get_channel(mbo->ifp, mbo->hdm_channel_id);
	if (!c)
		return -ENXIO;

	spin_lock(&c->unlink);
	if (!c->access_ref || !c->dev) {
		spin_unlock(&c->unlink);
		return -ENODEV;
	}
	kfifo_in(&c->fifo, &mbo, 1);
	spin_unlock(&c->unlink);
#ifdef DEBUG_MESG
	if (kfifo_is_full(&c->fifo))
		pr_info("WARN: Fifo is full\n");
#endif
	wake_up_interruptible(&c->wq);
	return 0;
}

/**
 * aim_tx_completion - completion handler for tx channels
 * @iface: pointer to interface instance
 * @channel_id: channel index/ID
 *
 * This wakes sleeping processes in the wait-queue.
 */
static int aim_tx_completion(struct most_interface *iface, int channel_id)
{
	struct aim_channel *c;

	if (!iface) {
		pr_info("Bad interface pointer\n");
		return -EINVAL;
	}
	if ((channel_id < 0) || (channel_id >= iface->num_channels)) {
		pr_info("Channel ID out of range\n");
		return -EINVAL;
	}

	c = get_channel(iface, channel_id);
	if (!c)
		return -ENXIO;
	wake_up_interruptible(&c->wq);
	return 0;
}

/**
 * aim_probe - probe function of the driver module
 * @iface: pointer to interface instance
 * @channel_id: channel index/ID
 * @cfg: pointer to actual channel configuration
 * @parent: pointer to kobject (needed for sysfs hook-up)
 * @name: name of the device to be created
 *
 * This allocates a channel object and creates the device node in /dev
 *
 * Returns 0 on success or error code otherwise.
 */
static int aim_probe(struct most_interface *iface, int channel_id,
		     struct most_channel_config *cfg,
		     struct kobject *parent, char *name)
{
	struct aim_channel *c;
	unsigned long cl_flags;
	int retval;
	int current_minor;

	if ((!iface) || (!cfg) || (!parent) || (!name)) {
		pr_info("Probing AIM with bad arguments\n");
		return -EINVAL;
	}
	c = get_channel(iface, channel_id);
	if (c)
		return -EEXIST;

	current_minor = ida_simple_get(&minor_id, 0, 0, GFP_KERNEL);
	if (current_minor < 0)
		return current_minor;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		retval = -ENOMEM;
		goto error_alloc_channel;
	}

	c->devno = MKDEV(major, current_minor);
	cdev_init(&c->cdev, &channel_fops);
	c->cdev.owner = THIS_MODULE;
	cdev_add(&c->cdev, c->devno, 1);
	c->iface = iface;
	c->cfg = cfg;
	c->channel_id = channel_id;
	c->access_ref = 0;
	spin_lock_init(&c->unlink);
	INIT_KFIFO(c->fifo);
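	/* the fifo holds one MBO pointer per configured channel buffer */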
	retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL);
	if (retval) {
		pr_info("failed to alloc channel kfifo\n");
		goto error_alloc_kfifo;
	}
	init_waitqueue_head(&c->wq);
	mutex_init(&c->io_mutex);
	spin_lock_irqsave(&ch_list_lock, cl_flags);
	list_add_tail(&c->list, &channel_list);
	spin_unlock_irqrestore(&ch_list_lock, cl_flags);
	c->dev = device_create(aim_class,
			       NULL,
			       c->devno,
			       NULL,
			       "%s", name);

	if (IS_ERR(c->dev)) {
		retval = PTR_ERR(c->dev);
		pr_info("failed to create new device node %s\n", name);
		goto error_create_device;
	}
	kobject_uevent(&c->dev->kobj, KOBJ_ADD);
	return 0;

error_create_device:
	kfifo_free(&c->fifo);
	spin_lock_irqsave(&ch_list_lock, cl_flags);
	list_del(&c->list);
	spin_unlock_irqrestore(&ch_list_lock, cl_flags);
error_alloc_kfifo:
	cdev_del(&c->cdev);
	kfree(c);
error_alloc_channel:
	ida_simple_remove(&minor_id, current_minor);
	return retval;
}

static struct most_aim cdev_aim = {
	.name = "cdev",
	.probe_channel = aim_probe,
	.disconnect_channel = aim_disconnect_channel,
	.rx_completion = aim_rx_completion,
	.tx_completion = aim_tx_completion,
};

static int __init mod_init(void)
{
	int err;

	pr_info("init()\n");

	INIT_LIST_HEAD(&channel_list);
	spin_lock_init(&ch_list_lock);
	ida_init(&minor_id);

	err = alloc_chrdev_region(&aim_devno, 0, 50, "cdev");
	if (err < 0)
		goto dest_ida;
	major = MAJOR(aim_devno);

	aim_class = class_create(THIS_MODULE, "most_cdev_aim");
	if (IS_ERR(aim_class)) {
		pr_err("no udev support\n");
		err = PTR_ERR(aim_class);
		goto free_cdev;
	}
	err = most_register_aim(&cdev_aim);
	if (err)
		goto dest_class;
	return 0;

dest_class:
	class_destroy(aim_class);
free_cdev:
	unregister_chrdev_region(aim_devno, 50);
dest_ida:
	ida_destroy(&minor_id);
	return err;
}

static void __exit mod_exit(void)
{
	struct aim_channel *c, *tmp;

	pr_info("exit module\n");

	most_deregister_aim(&cdev_aim);

	list_for_each_entry_safe(c, tmp, &channel_list, list) {
		destroy_cdev(c);
		kfree(c);
	}
	class_destroy(aim_class);
	unregister_chrdev_region(aim_devno, 50);
	ida_destroy(&minor_id);
}

module_init(mod_init);
module_exit(mod_exit);
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("character device AIM for mostcore");