/*
 * VMEbus User access driver
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by:
 *   Tom Armistead and Ajit Prem
 *     Copyright 2004 Motorola Inc.
 *
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/types.h>

#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/vme.h>

#include "vme_user.h"

static DEFINE_MUTEX(vme_user_mutex);
static const char driver_name[] = "vme_user";

static int bus[VME_USER_BUS_MAX];
static unsigned int bus_num;

/* Currently Documentation/devices.txt defines the following for VME:
 *
 * 221 char	VME bus
 *		  0 = /dev/bus/vme/m0		First master image
 *		  1 = /dev/bus/vme/m1		Second master image
 *		  2 = /dev/bus/vme/m2		Third master image
 *		  3 = /dev/bus/vme/m3		Fourth master image
 *		  4 = /dev/bus/vme/s0		First slave image
 *		  5 = /dev/bus/vme/s1		Second slave image
 *		  6 = /dev/bus/vme/s2		Third slave image
 *		  7 = /dev/bus/vme/s3		Fourth slave image
 *		  8 = /dev/bus/vme/ctl		Control
 *
 *		It is expected that all VME bus drivers will use the
 *		same interface.  For interface documentation see
 *		http://www.vmelinux.org/.
 *
 * However the VME driver at http://www.vmelinux.org/ is rather old and doesn't
 * even support the tsi148 chipset (which has 8 master and 8 slave windows).
 * We'll run with this for now as far as possible, however it probably makes
 * sense to get rid of the old mappings and just do everything dynamically.
 *
 * So for now, we'll restrict the driver to providing 4 masters and 4 slaves as
 * defined above and try to support at least some of the interface from
 * http://www.vmelinux.org/. As an alternative, a driver providing a saner
 * interface can be written later.
 *
 * The vmelinux.org driver never supported slave images; the devices reserved
 * for slaves were repurposed to support all 8 master images on the UniverseII!
 * We shall support 4 masters and 4 slaves with this driver.
 */
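
/*
 * For illustration, a minimal userspace sketch of how these nodes are meant
 * to be used (the device path matches the table above; O_RDWR and the error
 * handling are illustrative assumptions, not part of this driver):
 *
 *	int fd = open("/dev/bus/vme/m0", O_RDWR);	// first master image
 *	if (fd < 0)
 *		err(1, "open m0");
 *	// configure the window with VME_SET_MASTER, then read()/write()
 */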
#define VME_MAJOR	221	/* VME Major Device Number */
#define VME_DEVS	9	/* Number of dev entries */

#define MASTER_MINOR	0
#define MASTER_MAX	3
#define SLAVE_MINOR	4
#define SLAVE_MAX	7
#define CONTROL_MINOR	8

#define PCI_BUF_SIZE  0x20000	/* Size of one slave image buffer */

/*
 * Structure to handle image related parameters.
 */
struct image_desc {
	void *kern_buf;	/* Buffer address in kernel space */
	dma_addr_t pci_buf;	/* Buffer address in PCI address space */
	unsigned long long size_buf;	/* Buffer size */
	struct mutex mutex;	/* Mutex for locking image */
	struct device *device;	/* Sysfs device */
	struct vme_resource *resource;	/* VME resource */
	int users;		/* Number of current users */
};
static struct image_desc image[VME_DEVS];

struct driver_stats {
	unsigned long reads;
	unsigned long writes;
	unsigned long ioctls;
	unsigned long irqs;
	unsigned long berrs;
	unsigned long dmaerrors;
	unsigned long timeouts;
	unsigned long external;
};
static struct driver_stats statistics;

static struct cdev *vme_user_cdev;		/* Character device */
static struct class *vme_user_sysfs_class;	/* Sysfs class */
static struct vme_dev *vme_user_bridge;		/* Pointer to user device */


static const int type[VME_DEVS] = {	MASTER_MINOR,	MASTER_MINOR,
					MASTER_MINOR,	MASTER_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					CONTROL_MINOR
				};


static int vme_user_open(struct inode *, struct file *);
static int vme_user_release(struct inode *, struct file *);
static ssize_t vme_user_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t vme_user_write(struct file *, const char __user *, size_t,
	loff_t *);
static loff_t vme_user_llseek(struct file *, loff_t, int);
static long vme_user_unlocked_ioctl(struct file *, unsigned int, unsigned long);

static int vme_user_match(struct vme_dev *);
static int vme_user_probe(struct vme_dev *);
static int vme_user_remove(struct vme_dev *);

static const struct file_operations vme_user_fops = {
	.open = vme_user_open,
	.release = vme_user_release,
	.read = vme_user_read,
	.write = vme_user_write,
	.llseek = vme_user_llseek,
	.unlocked_ioctl = vme_user_unlocked_ioctl,
	.compat_ioctl = vme_user_unlocked_ioctl,
};


/*
 * Reset all the statistic counters
 */
static void reset_counters(void)
{
	statistics.reads = 0;
	statistics.writes = 0;
	statistics.ioctls = 0;
	statistics.irqs = 0;
	statistics.berrs = 0;
	statistics.dmaerrors = 0;
	statistics.timeouts = 0;
}

static int vme_user_open(struct inode *inode, struct file *file)
{
	int err;
	unsigned int minor = MINOR(inode->i_rdev);

	mutex_lock(&image[minor].mutex);
	/* Only allow the device to be opened if its resource is allocated */
	if (minor < CONTROL_MINOR && image[minor].resource == NULL) {
		pr_err("No resources allocated for device\n");
		err = -EINVAL;
		goto err_res;
	}

	/* Increment user count */
	image[minor].users++;

	mutex_unlock(&image[minor].mutex);

	return 0;

err_res:
	mutex_unlock(&image[minor].mutex);

	return err;
}

static int vme_user_release(struct inode *inode, struct file *file)
{
	unsigned int minor = MINOR(inode->i_rdev);

	mutex_lock(&image[minor].mutex);

	/* Decrement user count */
	image[minor].users--;

	mutex_unlock(&image[minor].mutex);

	return 0;
}

/*
 * We allocate a buffer per window during init for small transfers. Small
 * transfers will go VME -> buffer -> user space. Larger (bigger than the
 * buffer) transfers will lock the user space buffer into memory and then
 * transfer the data directly into the user space buffers.
 */
static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
	loff_t *ppos)
{
	ssize_t retval;
	ssize_t copied = 0;

	if (count <= image[minor].size_buf) {
		/* We copy to kernel buffer */
		copied = vme_master_read(image[minor].resource,
			image[minor].kern_buf, count, *ppos);
		if (copied < 0)
			return (int)copied;

		retval = __copy_to_user(buf, image[minor].kern_buf,
			(unsigned long)copied);
		if (retval != 0) {
			copied = (copied - retval);
			pr_info("User copy failed\n");
			return -EINVAL;
		}

	} else {
		/* XXX Need to write this */
		pr_info("Currently don't support large transfers\n");
		/* Map in pages from userspace */

		/* Call vme_master_read to do the transfer */
		return -EINVAL;
	}

	return copied;
}
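
/*
 * A hedged sketch of the read path above from userspace: any read() no
 * larger than the per-window buffer is bounced VME -> kern_buf -> user
 * memory, while larger reads currently fail with -EINVAL (fd and the 256
 * byte count are illustrative assumptions):
 *
 *	char data[256];
 *	ssize_t n = read(fd, data, sizeof(data));	// small: bounced via kern_buf
 *	if (n < 0)
 *		perror("read");		// e.g. EINVAL for large transfers
 */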

/*
 * We allocate a buffer per window during init for small transfers. Small
 * transfers will go user space -> buffer -> VME. Larger (bigger than the
 * buffer) transfers will lock the user space buffer into memory and then
 * transfer the data directly from the user space buffers out to VME.
 */
static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
	size_t count, loff_t *ppos)
{
	ssize_t retval;
	ssize_t copied = 0;

	if (count <= image[minor].size_buf) {
		retval = __copy_from_user(image[minor].kern_buf, buf,
			(unsigned long)count);
		if (retval != 0)
			/* Partial copy: only write the bytes we received */
			copied = count - retval;
		else
			copied = count;

		copied = vme_master_write(image[minor].resource,
			image[minor].kern_buf, copied, *ppos);
	} else {
		/* XXX Need to write this */
		pr_info("Currently don't support large transfers\n");
		/* Map in pages from userspace */

		/* Call vme_master_write to do the transfer */
		return -EINVAL;
	}

	return copied;
}
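
/*
 * The write path mirrors the read sketch after resource_to_user(): a write()
 * of at most the window buffer size is bounced user memory -> kern_buf -> VME
 * (fd and the buffer contents are illustrative assumptions):
 *
 *	const unsigned char cmd[4] = { 0xde, 0xad, 0xbe, 0xef };
 *	if (write(fd, cmd, sizeof(cmd)) != sizeof(cmd))
 *		perror("write");
 */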

static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
	size_t count, loff_t *ppos)
{
	void *image_ptr;
	ssize_t retval;

	image_ptr = image[minor].kern_buf + *ppos;

	retval = __copy_to_user(buf, image_ptr, (unsigned long)count);
	if (retval != 0) {
		retval = (count - retval);
		pr_warn("Partial copy to userspace\n");
	} else {
		retval = count;
	}

	/* Return number of bytes successfully read */
	return retval;
}

static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
	size_t count, loff_t *ppos)
{
	void *image_ptr;
	size_t retval;

	image_ptr = image[minor].kern_buf + *ppos;

	retval = __copy_from_user(image_ptr, buf, (unsigned long)count);
	if (retval != 0) {
		retval = (count - retval);
		pr_warn("Partial copy from userspace\n");
	} else {
		retval = count;
	}

	/* Return number of bytes successfully written */
	return retval;
}

static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
			loff_t *ppos)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);
	ssize_t retval;
	size_t image_size;
	size_t okcount;

	if (minor == CONTROL_MINOR)
		return 0;

	mutex_lock(&image[minor].mutex);

	/* XXX Do we *really* want this helper - we can use vme_*_get ? */
	image_size = vme_get_size(image[minor].resource);

	/* Ensure we are starting at a valid location */
	if ((*ppos < 0) || (*ppos > (image_size - 1))) {
		mutex_unlock(&image[minor].mutex);
		return 0;
	}

	/* Ensure not reading past end of the image */
	if (*ppos + count > image_size)
		okcount = image_size - *ppos;
	else
		okcount = count;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_to_user(minor, buf, okcount, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_to_user(minor, buf, okcount, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	mutex_unlock(&image[minor].mutex);
	if (retval > 0)
		*ppos += retval;

	return retval;
}

static ssize_t vme_user_write(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);
	ssize_t retval;
	size_t image_size;
	size_t okcount;

	if (minor == CONTROL_MINOR)
		return 0;

	mutex_lock(&image[minor].mutex);

	image_size = vme_get_size(image[minor].resource);

	/* Ensure we are starting at a valid location */
	if ((*ppos < 0) || (*ppos > (image_size - 1))) {
		mutex_unlock(&image[minor].mutex);
		return 0;
	}

	/* Ensure not writing past end of the image */
	if (*ppos + count > image_size)
		okcount = image_size - *ppos;
	else
		okcount = count;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_from_user(minor, buf, okcount, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_from_user(minor, buf, okcount, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	mutex_unlock(&image[minor].mutex);

	if (retval > 0)
		*ppos += retval;

	return retval;
}

static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);
	size_t image_size;
	loff_t res;

	if (minor == CONTROL_MINOR)
		return -EINVAL;

	mutex_lock(&image[minor].mutex);
	image_size = vme_get_size(image[minor].resource);
	res = fixed_size_llseek(file, off, whence, image_size);
	mutex_unlock(&image[minor].mutex);

	return res;
}

/*
 * The ioctls provided by the old VME access method (the one at vmelinux.org)
 * are most certainly wrong as they effectively push the register layout
 * through to user space. Given that the VME core can handle multiple bridges
 * with different register layouts, this is most certainly not the way to go.
 *
 * We aren't using the structures defined in the Motorola driver either - these
 * are also quite low level, however we should use the definitions that
 * already exist.
 */
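
/*
 * A minimal userspace sketch of the resulting ioctl interface (struct
 * vme_master, VME_SET_MASTER and the VME_* flags come from vme_user.h and
 * vme.h; the window address and size are made-up assumptions):
 *
 *	struct vme_master master = {
 *		.enable = 1,
 *		.vme_addr = 0x10000,
 *		.size = 0x10000,
 *		.aspace = VME_A32,
 *		.cycle = VME_SCT,
 *		.dwidth = VME_D32,
 *	};
 *	if (ioctl(fd, VME_SET_MASTER, &master) < 0)
 *		perror("VME_SET_MASTER");
 */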
static int vme_user_ioctl(struct inode *inode, struct file *file,
	unsigned int cmd, unsigned long arg)
{
	struct vme_master master;
	struct vme_slave slave;
	struct vme_irq_id irq_req;
	unsigned long copied;
	unsigned int minor = MINOR(inode->i_rdev);
	int retval;
	dma_addr_t pci_addr;
	void __user *argp = (void __user *)arg;

	statistics.ioctls++;

	switch (type[minor]) {
	case CONTROL_MINOR:
		switch (cmd) {
		case VME_IRQ_GEN:
			copied = copy_from_user(&irq_req, argp,
						sizeof(struct vme_irq_id));
			if (copied != 0) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			return vme_irq_generate(vme_user_bridge,
						  irq_req.level,
						  irq_req.statid);
		}
		break;
	case MASTER_MINOR:
		switch (cmd) {
		case VME_GET_MASTER:
			memset(&master, 0, sizeof(struct vme_master));

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			retval = vme_master_get(image[minor].resource,
				&master.enable, &master.vme_addr,
				&master.size, &master.aspace,
				&master.cycle, &master.dwidth);

			copied = copy_to_user(argp, &master,
				sizeof(struct vme_master));
			if (copied != 0) {
				pr_warn("Partial copy to userspace\n");
				return -EFAULT;
			}

			return retval;

		case VME_SET_MASTER:

			copied = copy_from_user(&master, argp, sizeof(master));
			if (copied != 0) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			return vme_master_set(image[minor].resource,
				master.enable, master.vme_addr, master.size,
				master.aspace, master.cycle, master.dwidth);
		}
		break;
	case SLAVE_MINOR:
		switch (cmd) {
		case VME_GET_SLAVE:
			memset(&slave, 0, sizeof(struct vme_slave));

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			retval = vme_slave_get(image[minor].resource,
				&slave.enable, &slave.vme_addr,
				&slave.size, &pci_addr, &slave.aspace,
				&slave.cycle);

			copied = copy_to_user(argp, &slave,
				sizeof(struct vme_slave));
			if (copied != 0) {
				pr_warn("Partial copy to userspace\n");
				return -EFAULT;
			}

			return retval;

		case VME_SET_SLAVE:

			copied = copy_from_user(&slave, argp, sizeof(slave));
			if (copied != 0) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			return vme_slave_set(image[minor].resource,
				slave.enable, slave.vme_addr, slave.size,
				image[minor].pci_buf, slave.aspace,
				slave.cycle);
		}
		break;
	}

	return -EINVAL;
}

static long
vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret;

	mutex_lock(&vme_user_mutex);
	ret = vme_user_ioctl(file_inode(file), file, cmd, arg);
	mutex_unlock(&vme_user_mutex);

	return ret;
}


/*
 * Free a previously allocated buffer
 */
static void buf_unalloc(int num)
{
	if (image[num].kern_buf) {
#ifdef VME_DEBUG
		pr_debug("UniverseII: Releasing buffer at %pad\n",
			 &image[num].pci_buf);
#endif

		vme_free_consistent(image[num].resource, image[num].size_buf,
			image[num].kern_buf, image[num].pci_buf);

		image[num].kern_buf = NULL;
		image[num].pci_buf = 0;
		image[num].size_buf = 0;

#ifdef VME_DEBUG
	} else {
		pr_debug("UniverseII: Buffer not allocated\n");
#endif
	}
}

static struct vme_driver vme_user_driver = {
	.name = driver_name,
	.match = vme_user_match,
	.probe = vme_user_probe,
	.remove = vme_user_remove,
};


static int __init vme_user_init(void)
{
	int retval = 0;

	pr_info("VME User Space Access Driver\n");

	if (bus_num == 0) {
		pr_err("No cards, skipping registration\n");
		retval = -ENODEV;
		goto err_nocard;
	}

	/* Let's start by supporting one bus, we can support more than one
	 * in future revisions if that ever becomes necessary.
	 */
	if (bus_num > VME_USER_BUS_MAX) {
		pr_err("Driver only able to handle %d buses\n",
		       VME_USER_BUS_MAX);
		bus_num = VME_USER_BUS_MAX;
	}

	/*
	 * Here we just register the maximum number of devices we can and
	 * leave vme_user_match() to allow only 1 to go through to probe().
	 * This way, if we later want to allow multiple user access devices,
	 * we just change the code in vme_user_match().
	 */
	retval = vme_register_driver(&vme_user_driver, VME_MAX_SLOTS);
	if (retval != 0)
		goto err_reg;

	return retval;

err_reg:
err_nocard:
	return retval;
}

static int vme_user_match(struct vme_dev *vdev)
{
	int i;

	int cur_bus = vme_bus_num(vdev);
	int cur_slot = vme_slot_num(vdev);

	for (i = 0; i < bus_num; i++)
		if ((cur_bus == bus[i]) && (cur_slot == vdev->num))
			return 1;

	return 0;
}

/*
 * In this simple access driver, the old behaviour is being preserved as much
 * as practical. We will therefore reserve the buffers and request the images
 * here so that we don't have to do it later.
 */
static int vme_user_probe(struct vme_dev *vdev)
{
	int i, err;
	char *name;

	/* Save pointer to the bridge device */
	if (vme_user_bridge != NULL) {
		dev_err(&vdev->dev, "Driver can only be loaded for 1 device\n");
		err = -EINVAL;
		goto err_dev;
	}
	vme_user_bridge = vdev;

	/* Initialise descriptors */
	for (i = 0; i < VME_DEVS; i++) {
		image[i].kern_buf = NULL;
		image[i].pci_buf = 0;
		mutex_init(&image[i].mutex);
		image[i].device = NULL;
		image[i].resource = NULL;
		image[i].users = 0;
	}

	/* Initialise statistics counters */
	reset_counters();

	/* Assign major and minor numbers for the driver */
	err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS,
		driver_name);
	if (err) {
		dev_warn(&vdev->dev, "Error getting Major Number %d for driver.\n",
			 VME_MAJOR);
		goto err_region;
	}

	/* Register the driver as a char device */
	vme_user_cdev = cdev_alloc();
	if (!vme_user_cdev) {
		err = -ENOMEM;
		goto err_char;
	}
	vme_user_cdev->ops = &vme_user_fops;
	vme_user_cdev->owner = THIS_MODULE;
	err = cdev_add(vme_user_cdev, MKDEV(VME_MAJOR, 0), VME_DEVS);
	if (err) {
		dev_warn(&vdev->dev, "cdev_add failed\n");
		goto err_char;
	}

	/* Request slave resources and allocate buffers (128kB wide) */
	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		/* For ca91cx42 bridge there are only two slave windows
		 * supporting A16 addressing, so we request A24 supported
		 * by all windows.
		 */
		image[i].resource = vme_slave_request(vme_user_bridge,
			VME_A24, VME_SCT);
		if (image[i].resource == NULL) {
			dev_warn(&vdev->dev,
				 "Unable to allocate slave resource\n");
			err = -ENOMEM;
			goto err_slave;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = vme_alloc_consistent(image[i].resource,
			image[i].size_buf, &image[i].pci_buf);
		if (image[i].kern_buf == NULL) {
			dev_warn(&vdev->dev,
				 "Unable to allocate memory for buffer\n");
			image[i].pci_buf = 0;
			vme_slave_free(image[i].resource);
			err = -ENOMEM;
			goto err_slave;
		}
	}

	/*
	 * Request master resources and allocate buffers for small
	 * reads and writes
	 */
	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		image[i].resource = vme_master_request(vme_user_bridge,
			VME_A32, VME_SCT, VME_D32);
		if (image[i].resource == NULL) {
			dev_warn(&vdev->dev,
				 "Unable to allocate master resource\n");
			err = -ENOMEM;
			goto err_master;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL);
		if (image[i].kern_buf == NULL) {
			err = -ENOMEM;
			vme_master_free(image[i].resource);
			goto err_master;
		}
	}

	/* Create sysfs entries - on udev systems this creates the dev files */
	vme_user_sysfs_class = class_create(THIS_MODULE, driver_name);
	if (IS_ERR(vme_user_sysfs_class)) {
		dev_err(&vdev->dev, "Error creating vme_user class.\n");
		err = PTR_ERR(vme_user_sysfs_class);
		goto err_class;
	}

	/* Add sysfs Entries */
	for (i = 0; i < VME_DEVS; i++) {
		int num;

		switch (type[i]) {
		case MASTER_MINOR:
			name = "bus/vme/m%d";
			break;
		case CONTROL_MINOR:
			name = "bus/vme/ctl";
			break;
		case SLAVE_MINOR:
			name = "bus/vme/s%d";
			break;
		default:
			err = -EINVAL;
			goto err_sysfs;
		}

		num = (type[i] == SLAVE_MINOR) ? i - (MASTER_MAX + 1) : i;
		image[i].device = device_create(vme_user_sysfs_class, NULL,
					MKDEV(VME_MAJOR, i), NULL, name, num);
		if (IS_ERR(image[i].device)) {
			dev_info(&vdev->dev, "Error creating sysfs device\n");
			err = PTR_ERR(image[i].device);
			goto err_sysfs;
		}
	}

	return 0;

err_sysfs:
	while (i > 0) {
		i--;
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	}
	class_destroy(vme_user_sysfs_class);

	/* Ensure counter set correctly to unalloc all master windows */
	i = MASTER_MAX + 1;
err_master:
	while (i > MASTER_MINOR) {
		i--;
		kfree(image[i].kern_buf);
		vme_master_free(image[i].resource);
	}

	/*
	 * Ensure counter set correctly to unalloc all slave windows and
	 * buffers
	 */
	i = SLAVE_MAX + 1;
err_slave:
	while (i > SLAVE_MINOR) {
		i--;
		buf_unalloc(i);
		vme_slave_free(image[i].resource);
	}
err_class:
	cdev_del(vme_user_cdev);
err_char:
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
err_region:
err_dev:
	return err;
}

static int vme_user_remove(struct vme_dev *dev)
{
	int i;

	/* Remove sysfs Entries */
	for (i = 0; i < VME_DEVS; i++) {
		mutex_destroy(&image[i].mutex);
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	}
	class_destroy(vme_user_sysfs_class);

	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		kfree(image[i].kern_buf);
		vme_master_free(image[i].resource);
	}

	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		vme_slave_set(image[i].resource, 0, 0, 0, 0, VME_A32, 0);
		buf_unalloc(i);
		vme_slave_free(image[i].resource);
	}

	/* Unregister device driver */
	cdev_del(vme_user_cdev);

	/* Unregister the major and minor device numbers */
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);

	return 0;
}

static void __exit vme_user_exit(void)
{
	vme_unregister_driver(&vme_user_driver);
}


MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected");
module_param_array(bus, int, &bus_num, 0);
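
/*
 * Example of loading the module with the bus parameter (assuming one bridge
 * registered as bus 0):
 *
 *	modprobe vme_user bus=0
 */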

MODULE_DESCRIPTION("VME User Space Access Driver");
MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
MODULE_LICENSE("GPL");

module_init(vme_user_init);
module_exit(vme_user_exit);