/*
 * cdev.c - Application interfacing module for character devices
 *
 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This file is licensed under GPLv2.
 */
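
/*
 * A channel linked to this AIM shows up as a character device under /dev,
 * named after the string handed to aim_probe().  A minimal user space
 * sketch (the node name "most_ch0" is only a placeholder; the real name
 * depends on how the channel was linked via sysfs):
 *
 *	int fd = open("/dev/most_ch0", O_RDONLY);	// rx channel
 *	char buf[1024];
 *	ssize_t len = read(fd, buf, sizeof(buf));	// blocks until data arrives
 *	close(fd);
 *
 * Tx channels are written with write() accordingly; open() enforces
 * O_RDONLY for rx channels and O_WRONLY for tx channels.
 */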

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/poll.h>
#include <linux/kfifo.h>
#include <linux/uaccess.h>
#include <linux/idr.h>
#include "mostcore.h"

static dev_t aim_devno;
static struct class *aim_class;
static struct ida minor_id;
static unsigned int major;
static struct most_aim cdev_aim;

struct aim_channel {
	wait_queue_head_t wq;
	struct cdev cdev;
	struct device *dev;
	struct mutex io_mutex;
	struct most_interface *iface;
	struct most_channel_config *cfg;
	unsigned int channel_id;
	dev_t devno;
	bool keep_mbo;
	unsigned int mbo_offs;
	struct mbo *stacked_mbo;
	DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
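	/* single-open guard: set to -1 in aim_probe(), raised to 0 by the first open() */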
	atomic_t access_ref;
	struct list_head list;
};

#define to_channel(d) container_of(d, struct aim_channel, cdev)
static struct list_head channel_list;
static spinlock_t ch_list_lock;

static struct aim_channel *get_channel(struct most_interface *iface, int id)
{
	struct aim_channel *channel, *tmp;
	unsigned long flags;
	int found_channel = 0;

	spin_lock_irqsave(&ch_list_lock, flags);
	list_for_each_entry_safe(channel, tmp, &channel_list, list) {
		if ((channel->iface == iface) && (channel->channel_id == id)) {
			found_channel = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&ch_list_lock, flags);
	if (!found_channel)
		return NULL;
	return channel;
}

/**
 * aim_open - implements the syscall to open the device
 * @inode: inode pointer
 * @filp: file pointer
 *
 * This stores the channel pointer in the private data field of
 * the file structure and activates the channel within the core.
 */
static int aim_open(struct inode *inode, struct file *filp)
{
	struct aim_channel *channel;
	int ret;

	channel = to_channel(inode->i_cdev);
	filp->private_data = channel;

	if (((channel->cfg->direction == MOST_CH_RX) &&
	     ((filp->f_flags & O_ACCMODE) != O_RDONLY)) ||
	    ((channel->cfg->direction == MOST_CH_TX) &&
	     ((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
		pr_info("WARN: Access flags mismatch\n");
		return -EACCES;
	}
	if (!atomic_inc_and_test(&channel->access_ref)) {
		pr_info("WARN: Device is busy\n");
		atomic_dec(&channel->access_ref);
		return -EBUSY;
	}

	ret = most_start_channel(channel->iface, channel->channel_id,
				 &cdev_aim);
	if (ret)
		atomic_dec(&channel->access_ref);
	return ret;
}

/**
 * aim_close - implements the syscall to close the device
 * @inode: inode pointer
 * @filp: file pointer
 *
 * This stops the channel within the core.
 */
static int aim_close(struct inode *inode, struct file *filp)
{
	int ret;
	struct mbo *mbo;
	struct aim_channel *channel = to_channel(inode->i_cdev);

	mutex_lock(&channel->io_mutex);
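	/* no device: aim_disconnect_channel() already ran and left the full teardown to us */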
	if (!channel->dev) {
		mutex_unlock(&channel->io_mutex);
		atomic_dec(&channel->access_ref);
		device_destroy(aim_class, channel->devno);
		cdev_del(&channel->cdev);
		kfifo_free(&channel->fifo);
		list_del(&channel->list);
		ida_simple_remove(&minor_id, MINOR(channel->devno));
		wake_up_interruptible(&channel->wq);
		kfree(channel);
		return 0;
	}
	mutex_unlock(&channel->io_mutex);

	while (kfifo_out(&channel->fifo, &mbo, 1))
		most_put_mbo(mbo);
	if (channel->keep_mbo)
		most_put_mbo(channel->stacked_mbo);
	ret = most_stop_channel(channel->iface, channel->channel_id, &cdev_aim);
	atomic_dec(&channel->access_ref);
	wake_up_interruptible(&channel->wq);
	return ret;
}

/**
 * aim_write - implements the syscall to write to the device
 * @filp: file pointer
 * @buf: pointer to user buffer
 * @count: number of bytes to write
 * @offset: offset from where to start writing
 */
static ssize_t aim_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *offset)
{
	int ret, err;
	size_t actual_len = 0;
	size_t max_len = 0;
	ssize_t retval;
	struct mbo *mbo;
	struct aim_channel *channel = filp->private_data;

	mutex_lock(&channel->io_mutex);
	if (unlikely(!channel->dev)) {
		mutex_unlock(&channel->io_mutex);
		return -EPIPE;
	}
	mutex_unlock(&channel->io_mutex);

	mbo = most_get_mbo(channel->iface, channel->channel_id, &cdev_aim);

	if (!mbo) {
		if ((filp->f_flags & O_NONBLOCK))
			return -EAGAIN;
		if (wait_event_interruptible(
			    channel->wq,
			    (mbo = most_get_mbo(channel->iface,
						channel->channel_id,
						&cdev_aim)) ||
			    (!channel->dev)))
			return -ERESTARTSYS;
	}

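	/* re-check the device; the wait above may also have ended because of a disconnect */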
	mutex_lock(&channel->io_mutex);
	if (unlikely(!channel->dev)) {
		mutex_unlock(&channel->io_mutex);
		err = -EPIPE;
		goto error;
	}
	mutex_unlock(&channel->io_mutex);

	max_len = channel->cfg->buffer_size;
	actual_len = min(count, max_len);
	mbo->buffer_length = actual_len;

	retval = copy_from_user(mbo->virt_address, buf, mbo->buffer_length);
	if (retval) {
		err = -EIO;
		goto error;
	}

	ret = most_submit_mbo(mbo);
	if (ret) {
		pr_info("submitting MBO to core failed\n");
		err = ret;
		goto error;
	}
	return actual_len - retval;
error:
	if (mbo)
		most_put_mbo(mbo);
	return err;
}

/**
 * aim_read - implements the syscall to read from the device
 * @filp: file pointer
 * @buf: pointer to user buffer
 * @count: number of bytes to read
 * @offset: offset from where to start reading
 */
static ssize_t
aim_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
{
	ssize_t retval;
	size_t not_copied, proc_len;
	struct mbo *mbo;
	struct aim_channel *channel = filp->private_data;

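	/* resume a buffer that the previous read() consumed only partially */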
	if (channel->keep_mbo) {
		mbo = channel->stacked_mbo;
		channel->keep_mbo = false;
		goto start_copy;
	}
	while ((!kfifo_out(&channel->fifo, &mbo, 1)) && (channel->dev)) {
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		if (wait_event_interruptible(channel->wq,
					     (!kfifo_is_empty(&channel->fifo) ||
					      (!channel->dev))))
			return -ERESTARTSYS;
	}

start_copy:
	/* make sure we don't submit to gone devices */
	mutex_lock(&channel->io_mutex);
	if (unlikely(!channel->dev)) {
		mutex_unlock(&channel->io_mutex);
		return -EIO;
	}

	if (count < mbo->processed_length - channel->mbo_offs)
		channel->keep_mbo = true;

	proc_len = min((int)count,
		       (int)(mbo->processed_length - channel->mbo_offs));

	not_copied = copy_to_user(buf,
				  mbo->virt_address + channel->mbo_offs,
				  proc_len);

	retval = not_copied ? proc_len - not_copied : proc_len;

	if (channel->keep_mbo) {
		channel->mbo_offs += retval;
		channel->stacked_mbo = mbo;
	} else {
		most_put_mbo(mbo);
		channel->mbo_offs = 0;
	}
	mutex_unlock(&channel->io_mutex);
	return retval;
}

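/* true if x is an error code (< 0) or zero; used by aim_poll() to test channel_has_mbo() */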
static inline bool __must_check IS_ERR_OR_FALSE(int x)
{
	return x <= 0;
}

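/* poll/select: readable when the rx fifo holds data, writable when the core has a free MBO */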
static unsigned int aim_poll(struct file *filp, poll_table *wait)
{
	struct aim_channel *c = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &c->wq, wait);

	if (c->cfg->direction == MOST_CH_RX) {
		if (!kfifo_is_empty(&c->fifo))
			mask |= POLLIN | POLLRDNORM;
	} else {
		if (!IS_ERR_OR_FALSE(channel_has_mbo(c->iface, c->channel_id)))
			mask |= POLLOUT | POLLWRNORM;
	}
	return mask;
}

/*
 * Initialization of struct file_operations
 */
static const struct file_operations channel_fops = {
	.owner = THIS_MODULE,
	.read = aim_read,
	.write = aim_write,
	.open = aim_open,
	.release = aim_close,
	.poll = aim_poll,
};

/**
 * aim_disconnect_channel - disconnect a channel
 * @iface: pointer to interface instance
 * @channel_id: channel index
 *
 * This frees allocated memory and removes the cdev that represents this
 * channel in user space.
 */
static int aim_disconnect_channel(struct most_interface *iface, int channel_id)
{
	struct aim_channel *channel;
	unsigned long flags;

	if (!iface) {
		pr_info("Bad interface pointer\n");
		return -EINVAL;
	}

	channel = get_channel(iface, channel_id);
	if (!channel)
		return -ENXIO;

	mutex_lock(&channel->io_mutex);
	channel->dev = NULL;
	mutex_unlock(&channel->io_mutex);

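	/*
	 * a non-zero access_ref means the device node is not open (see
	 * aim_open()): tear everything down right away; otherwise only wake
	 * up blocked readers/writers and let aim_close() do the cleanup
	 */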
	if (atomic_read(&channel->access_ref)) {
		device_destroy(aim_class, channel->devno);
		cdev_del(&channel->cdev);
		kfifo_free(&channel->fifo);
		ida_simple_remove(&minor_id, MINOR(channel->devno));
		spin_lock_irqsave(&ch_list_lock, flags);
		list_del(&channel->list);
		spin_unlock_irqrestore(&ch_list_lock, flags);
		kfree(channel);
	} else {
		wake_up_interruptible(&channel->wq);
	}
	return 0;
}

/**
 * aim_rx_completion - completion handler for rx channels
 * @mbo: pointer to buffer object that has completed
 *
 * This looks up the channel the MBO belongs to, queues the MBO in that
 * channel's local fifo and wakes any reader waiting for data.
 */
static int aim_rx_completion(struct mbo *mbo)
{
	struct aim_channel *channel;

	if (!mbo)
		return -EINVAL;

	channel = get_channel(mbo->ifp, mbo->hdm_channel_id);
	if (!channel)
		return -ENXIO;

	kfifo_in(&channel->fifo, &mbo, 1);
#ifdef DEBUG_MESG
	if (kfifo_is_full(&channel->fifo))
		pr_info("WARN: Fifo is full\n");
#endif
	wake_up_interruptible(&channel->wq);
	return 0;
}

/**
 * aim_tx_completion - completion handler for tx channels
 * @iface: pointer to interface instance
 * @channel_id: channel index/ID
 *
 * This wakes sleeping processes in the wait-queue.
 */
static int aim_tx_completion(struct most_interface *iface, int channel_id)
{
	struct aim_channel *channel;

	if (!iface) {
		pr_info("Bad interface pointer\n");
		return -EINVAL;
	}
	if ((channel_id < 0) || (channel_id >= iface->num_channels)) {
		pr_info("Channel ID out of range\n");
		return -EINVAL;
	}

	channel = get_channel(iface, channel_id);
	if (!channel)
		return -ENXIO;
	wake_up_interruptible(&channel->wq);
	return 0;
}

/**
 * aim_probe - probe function of the driver module
 * @iface: pointer to interface instance
 * @channel_id: channel index/ID
 * @cfg: pointer to actual channel configuration
 * @parent: pointer to kobject (needed for sysfs hook-up)
 * @name: name of the device to be created
 *
 * This allocates a channel object and creates the device node in /dev
 *
 * Returns 0 on success or error code otherwise.
 */
static int aim_probe(struct most_interface *iface, int channel_id,
		     struct most_channel_config *cfg,
		     struct kobject *parent, char *name)
{
	struct aim_channel *channel;
	unsigned long cl_flags;
	int retval;
	int current_minor;

	if ((!iface) || (!cfg) || (!parent) || (!name)) {
		pr_info("Probing AIM with bad arguments\n");
		return -EINVAL;
	}
	channel = get_channel(iface, channel_id);
	if (channel)
		return -EEXIST;

	current_minor = ida_simple_get(&minor_id, 0, 0, GFP_KERNEL);
	if (current_minor < 0)
		return current_minor;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel) {
		retval = -ENOMEM;
		goto error_alloc_channel;
	}

	channel->devno = MKDEV(major, current_minor);
	cdev_init(&channel->cdev, &channel_fops);
	channel->cdev.owner = THIS_MODULE;
	cdev_add(&channel->cdev, channel->devno, 1);
	channel->iface = iface;
	channel->cfg = cfg;
	channel->channel_id = channel_id;
	channel->mbo_offs = 0;
	atomic_set(&channel->access_ref, -1);
	INIT_KFIFO(channel->fifo);
	retval = kfifo_alloc(&channel->fifo, cfg->num_buffers, GFP_KERNEL);
	if (retval) {
		pr_info("failed to alloc channel kfifo\n");
		goto error_alloc_kfifo;
	}
	init_waitqueue_head(&channel->wq);
	mutex_init(&channel->io_mutex);
	spin_lock_irqsave(&ch_list_lock, cl_flags);
	list_add_tail(&channel->list, &channel_list);
	spin_unlock_irqrestore(&ch_list_lock, cl_flags);
	channel->dev = device_create(aim_class,
				     NULL,
				     channel->devno,
				     NULL,
				     "%s", name);

	if (IS_ERR(channel->dev)) {
		retval = PTR_ERR(channel->dev);
		pr_info("failed to create new device node %s\n", name);
		goto error_create_device;
	}
	kobject_uevent(&channel->dev->kobj, KOBJ_ADD);
	return 0;

error_create_device:
	kfifo_free(&channel->fifo);
	list_del(&channel->list);
error_alloc_kfifo:
	cdev_del(&channel->cdev);
	kfree(channel);
error_alloc_channel:
	ida_simple_remove(&minor_id, current_minor);
	return retval;
}

static struct most_aim cdev_aim = {
	.name = "cdev",
	.probe_channel = aim_probe,
	.disconnect_channel = aim_disconnect_channel,
	.rx_completion = aim_rx_completion,
	.tx_completion = aim_tx_completion,
};

static int __init mod_init(void)
{
	pr_info("init()\n");

	INIT_LIST_HEAD(&channel_list);
	spin_lock_init(&ch_list_lock);
	ida_init(&minor_id);

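	/* reserve a block of 50 minor numbers for channel device nodes */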
	if (alloc_chrdev_region(&aim_devno, 0, 50, "cdev") < 0)
		return -EIO;
	major = MAJOR(aim_devno);

	aim_class = class_create(THIS_MODULE, "most_cdev_aim");
	if (IS_ERR(aim_class)) {
		pr_err("no udev support\n");
		goto free_cdev;
	}

	if (most_register_aim(&cdev_aim))
		goto dest_class;
	return 0;

dest_class:
	class_destroy(aim_class);
free_cdev:
	unregister_chrdev_region(aim_devno, 50);
	return -EIO;
}

static void __exit mod_exit(void)
{
	struct aim_channel *channel, *tmp;

	pr_info("exit module\n");

	most_deregister_aim(&cdev_aim);

	list_for_each_entry_safe(channel, tmp, &channel_list, list) {
		device_destroy(aim_class, channel->devno);
		cdev_del(&channel->cdev);
		kfifo_free(&channel->fifo);
		list_del(&channel->list);
		ida_simple_remove(&minor_id, MINOR(channel->devno));
		kfree(channel);
	}
	class_destroy(aim_class);
	unregister_chrdev_region(aim_devno, 50);
	ida_destroy(&minor_id);
}

module_init(mod_init);
module_exit(mod_exit);
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("character device AIM for mostcore");