/*
 * VME Bridge Framework
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/syscalls.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vme.h>

#include "vme_bridge.h"

/* Bitmask and list of registered buses both protected by common mutex */
static unsigned int vme_bus_numbers;
static LIST_HEAD(vme_bus_list);
static DEFINE_MUTEX(vme_buses_lock);

static void __exit vme_exit(void);
static int __init vme_init(void);

static struct vme_dev *dev_to_vme_dev(struct device *dev)
{
	return container_of(dev, struct vme_dev, dev);
}

/*
 * Find the bridge that the resource is associated with.
 */
static struct vme_bridge *find_bridge(struct vme_resource *resource)
{
	/* Get list to search */
	switch (resource->type) {
	case VME_MASTER:
		return list_entry(resource->entry, struct vme_master_resource,
			list)->parent;
	case VME_SLAVE:
		return list_entry(resource->entry, struct vme_slave_resource,
			list)->parent;
	case VME_DMA:
		return list_entry(resource->entry, struct vme_dma_resource,
			list)->parent;
	case VME_LM:
		return list_entry(resource->entry, struct vme_lm_resource,
			list)->parent;
	default:
		printk(KERN_ERR "Unknown resource type\n");
		return NULL;
	}
}

/*
 * Allocate a contiguous block of memory for use by the driver. This is used to
 * create the buffers for the slave windows.
 */
void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
	dma_addr_t *dma)
{
	struct vme_bridge *bridge;

	if (resource == NULL) {
		printk(KERN_ERR "No resource\n");
		return NULL;
	}

	bridge = find_bridge(resource);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find bridge\n");
		return NULL;
	}

	if (bridge->parent == NULL) {
		printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
		return NULL;
	}

	if (bridge->alloc_consistent == NULL) {
		printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
		       bridge->name);
		return NULL;
	}

	return bridge->alloc_consistent(bridge->parent, size, dma);
}
EXPORT_SYMBOL(vme_alloc_consistent);

/*
 * Free previously allocated contiguous block of memory.
 */
void vme_free_consistent(struct vme_resource *resource, size_t size,
	void *vaddr, dma_addr_t dma)
{
	struct vme_bridge *bridge;

	if (resource == NULL) {
		printk(KERN_ERR "No resource\n");
		return;
	}

	bridge = find_bridge(resource);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find bridge\n");
		return;
	}

	if (bridge->parent == NULL) {
		printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
		return;
	}

	if (bridge->free_consistent == NULL) {
		printk(KERN_ERR "free_consistent not supported by bridge %s\n",
		       bridge->name);
		return;
	}

	bridge->free_consistent(bridge->parent, size, vaddr, dma);
}
EXPORT_SYMBOL(vme_free_consistent);
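
/*
 * Example (illustrative sketch, not part of the framework): a slave window
 * buffer is allocated against the window's resource and must be freed with
 * the same size. The resource, size and error handling here are assumptions
 * made for the example.
 */
#if 0
static int example_slave_buffer(struct vme_resource *res)
{
	dma_addr_t buf_base;
	void *buf;

	buf = vme_alloc_consistent(res, 0x10000, &buf_base);
	if (!buf)
		return -ENOMEM;

	/* ... program the window with vme_slave_set(), use the buffer ... */

	vme_free_consistent(res, 0x10000, buf, buf_base);
	return 0;
}
#endif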

size_t vme_get_size(struct vme_resource *resource)
{
	int enabled, retval;
	unsigned long long base, size;
	dma_addr_t buf_base;
	u32 aspace, cycle, dwidth;

	switch (resource->type) {
	case VME_MASTER:
		retval = vme_master_get(resource, &enabled, &base, &size,
			&aspace, &cycle, &dwidth);
		if (retval)
			return 0;

		return size;
	case VME_SLAVE:
		retval = vme_slave_get(resource, &enabled, &base, &size,
			&buf_base, &aspace, &cycle);
		if (retval)
			return 0;

		return size;
	case VME_DMA:
		return 0;
	default:
		printk(KERN_ERR "Unknown resource type\n");
		return 0;
	}
}
EXPORT_SYMBOL(vme_get_size);

int vme_check_window(u32 aspace, unsigned long long vme_base,
		     unsigned long long size)
{
	int retval = 0;

	switch (aspace) {
	case VME_A16:
		if (((vme_base + size) > VME_A16_MAX) ||
				(vme_base > VME_A16_MAX))
			retval = -EFAULT;
		break;
	case VME_A24:
		if (((vme_base + size) > VME_A24_MAX) ||
				(vme_base > VME_A24_MAX))
			retval = -EFAULT;
		break;
	case VME_A32:
		if (((vme_base + size) > VME_A32_MAX) ||
				(vme_base > VME_A32_MAX))
			retval = -EFAULT;
		break;
	case VME_A64:
		if ((size != 0) && (vme_base > U64_MAX + 1 - size))
			retval = -EFAULT;
		break;
	case VME_CRCSR:
		if (((vme_base + size) > VME_CRCSR_MAX) ||
				(vme_base > VME_CRCSR_MAX))
			retval = -EFAULT;
		break;
	case VME_USER1:
	case VME_USER2:
	case VME_USER3:
	case VME_USER4:
		/* User Defined */
		break;
	default:
		printk(KERN_ERR "Invalid address space\n");
		retval = -EINVAL;
		break;
	}

	return retval;
}
EXPORT_SYMBOL(vme_check_window);
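
/*
 * Example (illustrative): a window that runs past the top of an address
 * space is rejected, e.g. with VME_A16_MAX at 64KiB a 0x1000-byte window
 * based at 0xF800 overflows A16 space:
 */
#if 0
	err = vme_check_window(VME_A16, 0xF800, 0x1000);	/* -EFAULT */
#endif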

static u32 vme_get_aspace(int am)
{
	switch (am) {
	case 0x29:
	case 0x2D:
		return VME_A16;
	case 0x38:
	case 0x39:
	case 0x3A:
	case 0x3B:
	case 0x3C:
	case 0x3D:
	case 0x3E:
	case 0x3F:
		return VME_A24;
	case 0x8:
	case 0x9:
	case 0xA:
	case 0xB:
	case 0xC:
	case 0xD:
	case 0xE:
	case 0xF:
		return VME_A32;
	case 0x0:
	case 0x1:
	case 0x3:
		return VME_A64;
	}

	return 0;
}

/*
 * Request a slave image with specific attributes, return some unique
 * identifier.
 */
struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
	u32 cycle)
{
	struct vme_bridge *bridge;
	struct list_head *slave_pos = NULL;
	struct vme_slave_resource *allocated_image = NULL;
	struct vme_slave_resource *slave_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through slave resources */
	list_for_each(slave_pos, &bridge->slave_resources) {
		slave_image = list_entry(slave_pos,
			struct vme_slave_resource, list);

		if (slave_image == NULL) {
			printk(KERN_ERR "Registered NULL Slave resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		mutex_lock(&slave_image->mtx);
		if (((slave_image->address_attr & address) == address) &&
			((slave_image->cycle_attr & cycle) == cycle) &&
			(slave_image->locked == 0)) {

			slave_image->locked = 1;
			mutex_unlock(&slave_image->mtx);
			allocated_image = slave_image;
			break;
		}
		mutex_unlock(&slave_image->mtx);
	}

	/* No free image */
	if (allocated_image == NULL)
		goto err_image;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_SLAVE;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&slave_image->mtx);
	slave_image->locked = 0;
	mutex_unlock(&slave_image->mtx);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_slave_request);

int vme_slave_set(struct vme_resource *resource, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t buf_base, u32 aspace, u32 cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_slave_resource *image;
	int retval;

	if (resource->type != VME_SLAVE) {
		printk(KERN_ERR "Not a slave resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_slave_resource, list);

	if (bridge->slave_set == NULL) {
		printk(KERN_ERR "Function not supported\n");
		return -ENOSYS;
	}

	if (!(((image->address_attr & aspace) == aspace) &&
		((image->cycle_attr & cycle) == cycle))) {
		printk(KERN_ERR "Invalid attributes\n");
		return -EINVAL;
	}

	retval = vme_check_window(aspace, vme_base, size);
	if (retval)
		return retval;

	return bridge->slave_set(image, enabled, vme_base, size, buf_base,
		aspace, cycle);
}
EXPORT_SYMBOL(vme_slave_set);

int vme_slave_get(struct vme_resource *resource, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_slave_resource *image;

	if (resource->type != VME_SLAVE) {
		printk(KERN_ERR "Not a slave resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_slave_resource, list);

	if (bridge->slave_get == NULL) {
		printk(KERN_ERR "vme_slave_get not supported\n");
		return -EINVAL;
	}

	return bridge->slave_get(image, enabled, vme_base, size, buf_base,
		aspace, cycle);
}
EXPORT_SYMBOL(vme_slave_get);

void vme_slave_free(struct vme_resource *resource)
{
	struct vme_slave_resource *slave_image;

	if (resource->type != VME_SLAVE) {
		printk(KERN_ERR "Not a slave resource\n");
		return;
	}

	slave_image = list_entry(resource->entry, struct vme_slave_resource,
		list);
	if (slave_image == NULL) {
		printk(KERN_ERR "Can't find slave resource\n");
		return;
	}

	/* Unlock image */
	mutex_lock(&slave_image->mtx);
	if (slave_image->locked == 0)
		printk(KERN_ERR "Image is already free\n");

	slave_image->locked = 0;
	mutex_unlock(&slave_image->mtx);

	/* Free up resource memory */
	kfree(resource);
}
EXPORT_SYMBOL(vme_slave_free);
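
/*
 * Example (illustrative sketch): the expected slave window lifecycle, as a
 * hypothetical driver might use it from probe(). The sizes, base address
 * and attribute combinations are assumptions, not framework requirements.
 */
#if 0
static int example_slave_window(struct vme_dev *vdev)
{
	struct vme_resource *res;
	dma_addr_t buf_base;
	void *buf;
	int err;

	res = vme_slave_request(vdev, VME_A24, VME_SCT | VME_USER | VME_DATA);
	if (!res)
		return -ENODEV;

	buf = vme_alloc_consistent(res, 0x10000, &buf_base);
	if (!buf) {
		vme_slave_free(res);
		return -ENOMEM;
	}

	err = vme_slave_set(res, 1, 0x100000, 0x10000, buf_base,
		VME_A24, VME_SCT | VME_USER | VME_DATA);
	if (err) {
		vme_free_consistent(res, 0x10000, buf, buf_base);
		vme_slave_free(res);
		return err;
	}

	/* ... window is now visible on the bus; tear down in reverse ... */
	return 0;
}
#endif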

/*
 * Request a master image with specific attributes, return some unique
 * identifier.
 */
struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
	u32 cycle, u32 dwidth)
{
	struct vme_bridge *bridge;
	struct list_head *master_pos = NULL;
	struct vme_master_resource *allocated_image = NULL;
	struct vme_master_resource *master_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through master resources */
	list_for_each(master_pos, &bridge->master_resources) {
		master_image = list_entry(master_pos,
			struct vme_master_resource, list);

		if (master_image == NULL) {
			printk(KERN_WARNING "Registered NULL master resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		spin_lock(&master_image->lock);
		if (((master_image->address_attr & address) == address) &&
			((master_image->cycle_attr & cycle) == cycle) &&
			((master_image->width_attr & dwidth) == dwidth) &&
			(master_image->locked == 0)) {

			master_image->locked = 1;
			spin_unlock(&master_image->lock);
			allocated_image = master_image;
			break;
		}
		spin_unlock(&master_image->lock);
	}

	/* Check to see if we found a resource */
	if (allocated_image == NULL) {
		printk(KERN_ERR "Can't find a suitable resource\n");
		goto err_image;
	}

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_ERR "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_MASTER;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	spin_lock(&master_image->lock);
	master_image->locked = 0;
	spin_unlock(&master_image->lock);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_master_request);

int vme_master_set(struct vme_resource *resource, int enabled,
	unsigned long long vme_base, unsigned long long size, u32 aspace,
	u32 cycle, u32 dwidth)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	int retval;

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	if (bridge->master_set == NULL) {
		printk(KERN_WARNING "vme_master_set not supported\n");
		return -EINVAL;
	}

	if (!(((image->address_attr & aspace) == aspace) &&
		((image->cycle_attr & cycle) == cycle) &&
		((image->width_attr & dwidth) == dwidth))) {
		printk(KERN_WARNING "Invalid attributes\n");
		return -EINVAL;
	}

	retval = vme_check_window(aspace, vme_base, size);
	if (retval)
		return retval;

	return bridge->master_set(image, enabled, vme_base, size, aspace,
		cycle, dwidth);
}
EXPORT_SYMBOL(vme_master_set);

int vme_master_get(struct vme_resource *resource, int *enabled,
	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
	u32 *cycle, u32 *dwidth)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	if (bridge->master_get == NULL) {
		printk(KERN_WARNING "%s not supported\n", __func__);
		return -EINVAL;
	}

	return bridge->master_get(image, enabled, vme_base, size, aspace,
		cycle, dwidth);
}
EXPORT_SYMBOL(vme_master_get);

/*
 * Read data out of VME space into a buffer.
 */
ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
	loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	size_t length;

	if (bridge->master_read == NULL) {
		printk(KERN_WARNING "Reading from resource not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	length = vme_get_size(resource);

	if (offset > length) {
		printk(KERN_WARNING "Invalid Offset\n");
		return -EFAULT;
	}

	if ((offset + count) > length)
		count = length - offset;

	return bridge->master_read(image, buf, count, offset);
}
EXPORT_SYMBOL(vme_master_read);

/*
 * Write data out to VME space from a buffer.
 */
ssize_t vme_master_write(struct vme_resource *resource, void *buf,
	size_t count, loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	size_t length;

	if (bridge->master_write == NULL) {
		printk(KERN_WARNING "Writing to resource not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	length = vme_get_size(resource);

	if (offset > length) {
		printk(KERN_WARNING "Invalid Offset\n");
		return -EFAULT;
	}

	if ((offset + count) > length)
		count = length - offset;

	return bridge->master_write(image, buf, count, offset);
}
EXPORT_SYMBOL(vme_master_write);
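
/*
 * Example (illustrative sketch): reading a device register through a master
 * window. The window base, size and register offset are assumptions made
 * for the sake of the example.
 */
#if 0
static int example_master_read(struct vme_dev *vdev, u32 *val)
{
	struct vme_resource *res;
	ssize_t n;
	int err;

	res = vme_master_request(vdev, VME_A32, VME_SCT | VME_USER | VME_DATA,
		VME_D32);
	if (!res)
		return -ENODEV;

	err = vme_master_set(res, 1, 0x80000000, 0x10000, VME_A32,
		VME_SCT | VME_USER | VME_DATA, VME_D32);
	if (err)
		goto out;

	n = vme_master_read(res, val, sizeof(*val), 0x100);
	err = (n == sizeof(*val)) ? 0 : -EIO;
out:
	vme_master_free(res);
	return err;
}
#endif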

/*
 * Perform RMW cycle to provided location.
 */
unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
	unsigned int compare, unsigned int swap, loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;

	if (bridge->master_rmw == NULL) {
		printk(KERN_WARNING "RMW transfers not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	return bridge->master_rmw(image, mask, compare, swap, offset);
}
EXPORT_SYMBOL(vme_master_rmw);

int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
{
	struct vme_master_resource *image;
	phys_addr_t phys_addr;
	unsigned long vma_size;

	if (resource->type != VME_MASTER) {
		pr_err("Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);
	phys_addr = image->bus_resource.start + (vma->vm_pgoff << PAGE_SHIFT);
	vma_size = vma->vm_end - vma->vm_start;

	if (phys_addr + vma_size > image->bus_resource.end + 1) {
		pr_err("Map size cannot exceed the window size\n");
		return -EFAULT;
	}

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return vm_iomap_memory(vma, phys_addr, vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(vme_master_mmap);

void vme_master_free(struct vme_resource *resource)
{
	struct vme_master_resource *master_image;

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return;
	}

	master_image = list_entry(resource->entry, struct vme_master_resource,
		list);
	if (master_image == NULL) {
		printk(KERN_ERR "Can't find master resource\n");
		return;
	}

	/* Unlock image */
	spin_lock(&master_image->lock);
	if (master_image->locked == 0)
		printk(KERN_ERR "Image is already free\n");

	master_image->locked = 0;
	spin_unlock(&master_image->lock);

	/* Free up resource memory */
	kfree(resource);
}
EXPORT_SYMBOL(vme_master_free);

/*
 * Request a DMA controller with specific attributes, return some unique
 * identifier.
 */
struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
{
	struct vme_bridge *bridge;
	struct list_head *dma_pos = NULL;
	struct vme_dma_resource *allocated_ctrlr = NULL;
	struct vme_dma_resource *dma_ctrlr = NULL;
	struct vme_resource *resource = NULL;

	/* XXX Not checking resource attributes */
	printk(KERN_ERR "No VME resource Attribute tests done\n");

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through DMA resources */
	list_for_each(dma_pos, &bridge->dma_resources) {
		dma_ctrlr = list_entry(dma_pos,
			struct vme_dma_resource, list);

		if (dma_ctrlr == NULL) {
			printk(KERN_ERR "Registered NULL DMA resource\n");
			continue;
		}

		/* Find an unlocked and compatible controller */
		mutex_lock(&dma_ctrlr->mtx);
		if (((dma_ctrlr->route_attr & route) == route) &&
			(dma_ctrlr->locked == 0)) {

			dma_ctrlr->locked = 1;
			mutex_unlock(&dma_ctrlr->mtx);
			allocated_ctrlr = dma_ctrlr;
			break;
		}
		mutex_unlock(&dma_ctrlr->mtx);
	}

	/* Check to see if we found a resource */
	if (allocated_ctrlr == NULL)
		goto err_ctrlr;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_DMA;
	resource->entry = &allocated_ctrlr->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&dma_ctrlr->mtx);
	dma_ctrlr->locked = 0;
	mutex_unlock(&dma_ctrlr->mtx);
err_ctrlr:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_request);

/*
 * Start new list
 */
struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
{
	struct vme_dma_resource *ctrlr;
	struct vme_dma_list *dma_list;

	if (resource->type != VME_DMA) {
		printk(KERN_ERR "Not a DMA resource\n");
		return NULL;
	}

	ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);

	dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
	if (dma_list == NULL) {
		printk(KERN_ERR "Unable to allocate memory for new dma list\n");
		return NULL;
	}
	INIT_LIST_HEAD(&dma_list->entries);
	dma_list->parent = ctrlr;
	mutex_init(&dma_list->mtx);

	return dma_list;
}
EXPORT_SYMBOL(vme_new_dma_list);

/*
 * Create "Pattern" type attributes
 */
struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
{
	struct vme_dma_attr *attributes;
	struct vme_dma_pattern *pattern_attr;

	attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
	if (attributes == NULL) {
		printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
		goto err_attr;
	}

	pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
	if (pattern_attr == NULL) {
		printk(KERN_ERR "Unable to allocate memory for pattern attributes\n");
		goto err_pat;
	}

	attributes->type = VME_DMA_PATTERN;
	attributes->private = (void *)pattern_attr;

	pattern_attr->pattern = pattern;
	pattern_attr->type = type;

	return attributes;

err_pat:
	kfree(attributes);
err_attr:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_pattern_attribute);

/*
 * Create "PCI" type attributes
 */
struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
{
	struct vme_dma_attr *attributes;
	struct vme_dma_pci *pci_attr;

	/* XXX Run some sanity checks here */

	attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
	if (attributes == NULL) {
		printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
		goto err_attr;
	}

	pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
	if (pci_attr == NULL) {
		printk(KERN_ERR "Unable to allocate memory for pci attributes\n");
		goto err_pci;
	}

	attributes->type = VME_DMA_PCI;
	attributes->private = (void *)pci_attr;

	pci_attr->address = address;

	return attributes;

err_pci:
	kfree(attributes);
err_attr:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_pci_attribute);

/*
 * Create "VME" type attributes
 */
struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
	u32 aspace, u32 cycle, u32 dwidth)
{
	struct vme_dma_attr *attributes;
	struct vme_dma_vme *vme_attr;

	attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
	if (attributes == NULL) {
		printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
		goto err_attr;
	}

	vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
	if (vme_attr == NULL) {
		printk(KERN_ERR "Unable to allocate memory for vme attributes\n");
		goto err_vme;
	}

	attributes->type = VME_DMA_VME;
	attributes->private = (void *)vme_attr;

	vme_attr->address = address;
	vme_attr->aspace = aspace;
	vme_attr->cycle = cycle;
	vme_attr->dwidth = dwidth;

	return attributes;

err_vme:
	kfree(attributes);
err_attr:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_vme_attribute);

/*
 * Free attribute
 */
void vme_dma_free_attribute(struct vme_dma_attr *attributes)
{
	kfree(attributes->private);
	kfree(attributes);
}
EXPORT_SYMBOL(vme_dma_free_attribute);

int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
	struct vme_dma_attr *dest, size_t count)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_add == NULL) {
		printk(KERN_WARNING "Link List DMA generation not supported\n");
		return -EINVAL;
	}

	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List already submitted\n");
		return -EINVAL;
	}

	retval = bridge->dma_list_add(list, src, dest, count);

	mutex_unlock(&list->mtx);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_add);

int vme_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_exec == NULL) {
		printk(KERN_ERR "Link List DMA execution not supported\n");
		return -EINVAL;
	}

	mutex_lock(&list->mtx);

	retval = bridge->dma_list_exec(list);

	mutex_unlock(&list->mtx);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_exec);

int vme_dma_list_free(struct vme_dma_list *list)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_empty == NULL) {
		printk(KERN_WARNING "Emptying of Link Lists not supported\n");
		return -EINVAL;
	}

	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List in use\n");
		return -EINVAL;
	}

	/*
	 * Empty out all of the entries from the dma list. We need to go to the
	 * low level driver as dma entries are driver specific.
	 */
	retval = bridge->dma_list_empty(list);
	if (retval) {
		printk(KERN_ERR "Unable to empty link-list entries\n");
		mutex_unlock(&list->mtx);
		return retval;
	}
	mutex_unlock(&list->mtx);
	kfree(list);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_free);

int vme_dma_free(struct vme_resource *resource)
{
	struct vme_dma_resource *ctrlr;

	if (resource->type != VME_DMA) {
		printk(KERN_ERR "Not a DMA resource\n");
		return -EINVAL;
	}

	ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);

	if (!mutex_trylock(&ctrlr->mtx)) {
		printk(KERN_ERR "Resource busy, can't free\n");
		return -EBUSY;
	}

	if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
		printk(KERN_WARNING "Resource still processing transfers\n");
		mutex_unlock(&ctrlr->mtx);
		return -EBUSY;
	}

	ctrlr->locked = 0;

	mutex_unlock(&ctrlr->mtx);

	kfree(resource);

	return 0;
}
EXPORT_SYMBOL(vme_dma_free);
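
/*
 * Example (illustrative sketch): a single linked-list DMA transfer from PCI
 * memory to the VME bus. Addresses and attributes are hypothetical, and the
 * NULL checks on the list and attributes are abbreviated for brevity.
 */
#if 0
static int example_dma_copy(struct vme_dev *vdev, dma_addr_t src_addr,
	size_t len)
{
	struct vme_resource *res;
	struct vme_dma_list *list;
	struct vme_dma_attr *src, *dest;
	int err;

	res = vme_dma_request(vdev, VME_DMA_MEM_TO_VME);
	if (!res)
		return -ENODEV;

	list = vme_new_dma_list(res);
	src = vme_dma_pci_attribute(src_addr);
	dest = vme_dma_vme_attribute(0x100000, VME_A32,
		VME_SCT | VME_USER | VME_DATA, VME_D32);

	err = vme_dma_list_add(list, src, dest, len);
	if (!err)
		err = vme_dma_list_exec(list);

	vme_dma_free_attribute(dest);
	vme_dma_free_attribute(src);
	vme_dma_list_free(list);
	vme_dma_free(res);
	return err;
}
#endif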

void vme_bus_error_handler(struct vme_bridge *bridge,
			   unsigned long long address, int am)
{
	struct list_head *handler_pos = NULL;
	struct vme_error_handler *handler;
	int handler_triggered = 0;
	u32 aspace = vme_get_aspace(am);

	list_for_each(handler_pos, &bridge->vme_error_handlers) {
		handler = list_entry(handler_pos, struct vme_error_handler,
				     list);
		if ((aspace == handler->aspace) &&
		    (address >= handler->start) &&
		    (address < handler->end)) {
			if (!handler->num_errors)
				handler->first_error = address;
			if (handler->num_errors != UINT_MAX)
				handler->num_errors++;
			handler_triggered = 1;
		}
	}

	if (!handler_triggered)
		dev_err(bridge->parent,
			"Unhandled VME access error at address 0x%llx\n",
			address);
}
EXPORT_SYMBOL(vme_bus_error_handler);

struct vme_error_handler *vme_register_error_handler(
	struct vme_bridge *bridge, u32 aspace,
	unsigned long long address, size_t len)
{
	struct vme_error_handler *handler;

	handler = kmalloc(sizeof(*handler), GFP_KERNEL);
	if (!handler)
		return NULL;

	handler->aspace = aspace;
	handler->start = address;
	handler->end = address + len;
	handler->num_errors = 0;
	handler->first_error = 0;
	list_add_tail(&handler->list, &bridge->vme_error_handlers);

	return handler;
}
EXPORT_SYMBOL(vme_register_error_handler);

void vme_unregister_error_handler(struct vme_error_handler *handler)
{
	list_del(&handler->list);
	kfree(handler);
}
EXPORT_SYMBOL(vme_unregister_error_handler);

void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
{
	void (*call)(int, int, void *);
	void *priv_data;

	call = bridge->irq[level - 1].callback[statid].func;
	priv_data = bridge->irq[level - 1].callback[statid].priv_data;

	if (call != NULL)
		call(level, statid, priv_data);
	else
		printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n",
		       level, statid);
}
EXPORT_SYMBOL(vme_irq_handler);

int vme_irq_request(struct vme_dev *vdev, int level, int statid,
	void (*callback)(int, int, void *),
	void *priv_data)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return -EINVAL;
	}

	if (bridge->irq_set == NULL) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return -EINVAL;
	}

	mutex_lock(&bridge->irq_mtx);

	if (bridge->irq[level - 1].callback[statid].func) {
		mutex_unlock(&bridge->irq_mtx);
		printk(KERN_WARNING "VME Interrupt already taken\n");
		return -EBUSY;
	}

	bridge->irq[level - 1].count++;
	bridge->irq[level - 1].callback[statid].priv_data = priv_data;
	bridge->irq[level - 1].callback[statid].func = callback;

	/* Enable IRQ level */
	bridge->irq_set(bridge, level, 1, 1);

	mutex_unlock(&bridge->irq_mtx);

	return 0;
}
EXPORT_SYMBOL(vme_irq_request);

void vme_irq_free(struct vme_dev *vdev, int level, int statid)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return;
	}

	if (bridge->irq_set == NULL) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return;
	}

	mutex_lock(&bridge->irq_mtx);

	bridge->irq[level - 1].count--;

	/* Disable IRQ level if no more interrupts attached at this level */
	if (bridge->irq[level - 1].count == 0)
		bridge->irq_set(bridge, level, 0, 1);

	bridge->irq[level - 1].callback[statid].func = NULL;
	bridge->irq[level - 1].callback[statid].priv_data = NULL;

	mutex_unlock(&bridge->irq_mtx);
}
EXPORT_SYMBOL(vme_irq_free);
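
/*
 * Example (illustrative sketch): binding a handler to IRQ level 3, vector
 * 0x20. The level and vector are hypothetical; the callback is invoked with
 * the level and status/ID it was registered for.
 */
#if 0
static void example_isr(int level, int statid, void *priv)
{
	/* acknowledge the device, wake a waiter, etc. */
}

static int example_irq_setup(struct vme_dev *vdev)
{
	int err;

	err = vme_irq_request(vdev, 3, 0x20, example_isr, NULL);
	if (err)
		return err;

	/* ... later, on teardown ... */
	vme_irq_free(vdev, 3, 0x20);
	return 0;
}
#endif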

int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_WARNING "Invalid interrupt level\n");
		return -EINVAL;
	}

	if (bridge->irq_generate == NULL) {
		printk(KERN_WARNING "Interrupt generation not supported\n");
		return -EINVAL;
	}

	return bridge->irq_generate(bridge, level, statid);
}
EXPORT_SYMBOL(vme_irq_generate);

/*
 * Request the location monitor, return resource or NULL
 */
struct vme_resource *vme_lm_request(struct vme_dev *vdev)
{
	struct vme_bridge *bridge;
	struct list_head *lm_pos = NULL;
	struct vme_lm_resource *allocated_lm = NULL;
	struct vme_lm_resource *lm = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through location monitor resources */
	list_for_each(lm_pos, &bridge->lm_resources) {
		lm = list_entry(lm_pos,
			struct vme_lm_resource, list);

		if (lm == NULL) {
			printk(KERN_ERR "Registered NULL Location Monitor resource\n");
			continue;
		}

		/* Find an unlocked controller */
		mutex_lock(&lm->mtx);
		if (lm->locked == 0) {
			lm->locked = 1;
			mutex_unlock(&lm->mtx);
			allocated_lm = lm;
			break;
		}
		mutex_unlock(&lm->mtx);
	}

	/* Check to see if we found a resource */
	if (allocated_lm == NULL)
		goto err_lm;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_ERR "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_LM;
	resource->entry = &allocated_lm->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&lm->mtx);
	lm->locked = 0;
	mutex_unlock(&lm->mtx);
err_lm:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_lm_request);

int vme_lm_count(struct vme_resource *resource)
{
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	return lm->monitors;
}
EXPORT_SYMBOL(vme_lm_count);

int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
	u32 aspace, u32 cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (bridge->lm_set == NULL) {
		printk(KERN_ERR "vme_lm_set not supported\n");
		return -EINVAL;
	}

	return bridge->lm_set(lm, lm_base, aspace, cycle);
}
EXPORT_SYMBOL(vme_lm_set);

int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
	u32 *aspace, u32 *cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (bridge->lm_get == NULL) {
		printk(KERN_ERR "vme_lm_get not supported\n");
		return -EINVAL;
	}

	return bridge->lm_get(lm, lm_base, aspace, cycle);
}
EXPORT_SYMBOL(vme_lm_get);

int vme_lm_attach(struct vme_resource *resource, int monitor,
	void (*callback)(int))
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (bridge->lm_attach == NULL) {
		printk(KERN_ERR "vme_lm_attach not supported\n");
		return -EINVAL;
	}

	return bridge->lm_attach(lm, monitor, callback);
}
EXPORT_SYMBOL(vme_lm_attach);

int vme_lm_detach(struct vme_resource *resource, int monitor)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (bridge->lm_detach == NULL) {
		printk(KERN_ERR "vme_lm_detach not supported\n");
		return -EINVAL;
	}

	return bridge->lm_detach(lm, monitor);
}
EXPORT_SYMBOL(vme_lm_detach);

void vme_lm_free(struct vme_resource *resource)
{
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	mutex_lock(&lm->mtx);

	/* XXX
	 * Check to see that there aren't any callbacks still attached, if
	 * there are we should probably be detaching them!
	 */

	lm->locked = 0;

	mutex_unlock(&lm->mtx);

	kfree(resource);
}
EXPORT_SYMBOL(vme_lm_free);
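
/*
 * Example (illustrative sketch): arming a location monitor at a hypothetical
 * A16 address and attaching a callback to its first monitor slot.
 */
#if 0
static void example_lm_callback(int monitor)
{
	/* a VME master touched the monitored location */
}

static int example_lm_setup(struct vme_dev *vdev)
{
	struct vme_resource *res;
	int err;

	res = vme_lm_request(vdev);
	if (!res)
		return -ENODEV;

	err = vme_lm_set(res, 0x8000, VME_A16, VME_SCT | VME_USER | VME_DATA);
	if (!err)
		err = vme_lm_attach(res, 0, example_lm_callback);

	if (err)
		vme_lm_free(res);
	return err;
}
#endif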

int vme_slot_num(struct vme_dev *vdev)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

	if (bridge->slot_get == NULL) {
		printk(KERN_WARNING "vme_slot_num not supported\n");
		return -EINVAL;
	}

	return bridge->slot_get(bridge);
}
EXPORT_SYMBOL(vme_slot_num);

int vme_bus_num(struct vme_dev *vdev)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		pr_err("Can't find VME bus\n");
		return -EINVAL;
	}

	return bridge->num;
}
EXPORT_SYMBOL(vme_bus_num);

/* - Bridge Registration --------------------------------------------------- */

static void vme_dev_release(struct device *dev)
{
	kfree(dev_to_vme_dev(dev));
}

int vme_register_bridge(struct vme_bridge *bridge)
{
	int i;
	int ret = -1;

	mutex_lock(&vme_buses_lock);
	for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
		if ((vme_bus_numbers & (1 << i)) == 0) {
			vme_bus_numbers |= (1 << i);
			bridge->num = i;
			INIT_LIST_HEAD(&bridge->devices);
			list_add_tail(&bridge->bus_list, &vme_bus_list);
			ret = 0;
			break;
		}
	}
	mutex_unlock(&vme_buses_lock);

	return ret;
}
EXPORT_SYMBOL(vme_register_bridge);

void vme_unregister_bridge(struct vme_bridge *bridge)
{
	struct vme_dev *vdev;
	struct vme_dev *tmp;

	mutex_lock(&vme_buses_lock);
	vme_bus_numbers &= ~(1 << bridge->num);
	list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) {
		list_del(&vdev->drv_list);
		list_del(&vdev->bridge_list);
		device_unregister(&vdev->dev);
	}
	list_del(&bridge->bus_list);
	mutex_unlock(&vme_buses_lock);
}
EXPORT_SYMBOL(vme_unregister_bridge);

/* - Driver Registration --------------------------------------------------- */

static int __vme_register_driver_bus(struct vme_driver *drv,
	struct vme_bridge *bridge, unsigned int ndevs)
{
	int err;
	unsigned int i;
	struct vme_dev *vdev;
	struct vme_dev *tmp;

	for (i = 0; i < ndevs; i++) {
		vdev = kzalloc(sizeof(struct vme_dev), GFP_KERNEL);
		if (!vdev) {
			err = -ENOMEM;
			goto err_devalloc;
		}
		vdev->num = i;
		vdev->bridge = bridge;
		vdev->dev.platform_data = drv;
		vdev->dev.release = vme_dev_release;
		vdev->dev.parent = bridge->parent;
		vdev->dev.bus = &vme_bus_type;
		dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
			vdev->num);

		err = device_register(&vdev->dev);
		if (err)
			goto err_reg;

		if (vdev->dev.platform_data) {
			list_add_tail(&vdev->drv_list, &drv->devices);
			list_add_tail(&vdev->bridge_list, &bridge->devices);
		} else
			device_unregister(&vdev->dev);
	}
	return 0;

err_reg:
	/* put_device() drops the last reference; release() frees vdev */
	put_device(&vdev->dev);
err_devalloc:
	list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
		list_del(&vdev->drv_list);
		list_del(&vdev->bridge_list);
		device_unregister(&vdev->dev);
	}
	return err;
}

static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
{
	struct vme_bridge *bridge;
	int err = 0;

	mutex_lock(&vme_buses_lock);
	list_for_each_entry(bridge, &vme_bus_list, bus_list) {
		/*
		 * This cannot cause trouble as we already have vme_buses_lock
		 * and if the bridge is removed, it will have to go through
		 * vme_unregister_bridge() to do it (which calls remove() on
		 * the bridge which in turn tries to acquire vme_buses_lock and
		 * will have to wait).
		 */
		err = __vme_register_driver_bus(drv, bridge, ndevs);
		if (err)
			break;
	}
	mutex_unlock(&vme_buses_lock);
	return err;
}

int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
{
	int err;

	drv->driver.name = drv->name;
	drv->driver.bus = &vme_bus_type;
	INIT_LIST_HEAD(&drv->devices);

	err = driver_register(&drv->driver);
	if (err)
		return err;

	err = __vme_register_driver(drv, ndevs);
	if (err)
		driver_unregister(&drv->driver);

	return err;
}
EXPORT_SYMBOL(vme_register_driver);

void vme_unregister_driver(struct vme_driver *drv)
{
	struct vme_dev *dev, *dev_tmp;

	mutex_lock(&vme_buses_lock);
	list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) {
		list_del(&dev->drv_list);
		list_del(&dev->bridge_list);
		device_unregister(&dev->dev);
	}
	mutex_unlock(&vme_buses_lock);

	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(vme_unregister_driver);
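
/*
 * Example (illustrative sketch): a minimal client driver. The names and the
 * single-device count are hypothetical; a match() that accepts every device
 * is the simplest possible policy.
 */
#if 0
static int example_match(struct vme_dev *vdev)
{
	return 1;
}

static int example_probe(struct vme_dev *vdev)
{
	dev_info(&vdev->dev, "probed on bus %d\n", vme_bus_num(vdev));
	return 0;
}

static struct vme_driver example_driver = {
	.name = "example",
	.match = example_match,
	.probe = example_probe,
};

static int __init example_init(void)
{
	/* create one device per registered bridge */
	return vme_register_driver(&example_driver, 1);
}
#endif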

/* - Bus Registration ------------------------------------------------------ */

static int vme_bus_match(struct device *dev, struct device_driver *drv)
{
	struct vme_driver *vme_drv;

	vme_drv = container_of(drv, struct vme_driver, driver);

	if (dev->platform_data == vme_drv) {
		struct vme_dev *vdev = dev_to_vme_dev(dev);

		if (vme_drv->match && vme_drv->match(vdev))
			return 1;

		dev->platform_data = NULL;
	}
	return 0;
}

static int vme_bus_probe(struct device *dev)
{
	int retval = -ENODEV;
	struct vme_driver *driver;
	struct vme_dev *vdev = dev_to_vme_dev(dev);

	driver = dev->platform_data;

	if (driver->probe != NULL)
		retval = driver->probe(vdev);

	return retval;
}

static int vme_bus_remove(struct device *dev)
{
	int retval = -ENODEV;
	struct vme_driver *driver;
	struct vme_dev *vdev = dev_to_vme_dev(dev);

	driver = dev->platform_data;

	if (driver->remove != NULL)
		retval = driver->remove(vdev);

	return retval;
}

struct bus_type vme_bus_type = {
	.name = "vme",
	.match = vme_bus_match,
	.probe = vme_bus_probe,
	.remove = vme_bus_remove,
};
EXPORT_SYMBOL(vme_bus_type);

static int __init vme_init(void)
{
	return bus_register(&vme_bus_type);
}

static void __exit vme_exit(void)
{
	bus_unregister(&vme_bus_type);
}

subsys_initcall(vme_init);
module_exit(vme_exit);