/*
 * Support for the Tundra Universe I/II VME-PCI Bridge Chips
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 *
 * Derived from ca91c042.c by Michael Wyrick
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/vme.h>

#include "../vme_bridge.h"
#include "vme_ca91cx42.h"

static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
static void ca91cx42_remove(struct pci_dev *);

/* Module parameters */
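/* geoid: when non-zero, overrides the slot number otherwise read back from
 * the VCSR_BS register in ca91cx42_slot_get(), e.g.
 * "modprobe vme_ca91cx42 geoid=4" forces slot 4.
 */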
static int geoid;

static const char driver_name[] = "vme_ca91cx42";

static const struct pci_device_id ca91cx42_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
	{ },
};

static struct pci_driver ca91cx42_driver = {
	.name = driver_name,
	.id_table = ca91cx42_ids,
	.probe = ca91cx42_probe,
	.remove = ca91cx42_remove,
};

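/*
 * Interrupt sub-handlers. Each one services a single interrupt source
 * flagged in LINT_STAT and returns the bit(s) it handled, so that the
 * top-level handler can acknowledge exactly what was serviced.
 */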
static u32 ca91cx42_DMA_irqhandler(struct ca91cx42_driver *bridge)
{
	wake_up(&bridge->dma_queue);

	return CA91CX42_LINT_DMA;
}

static u32 ca91cx42_LM_irqhandler(struct ca91cx42_driver *bridge, u32 stat)
{
	int i;
	u32 serviced = 0;

	for (i = 0; i < 4; i++) {
		if (stat & CA91CX42_LINT_LM[i]) {
			/* We only enable interrupts if the callback is set */
			bridge->lm_callback[i](i);
			serviced |= CA91CX42_LINT_LM[i];
		}
	}

	return serviced;
}

/* XXX This needs to be split into 4 queues */
static u32 ca91cx42_MB_irqhandler(struct ca91cx42_driver *bridge, int mbox_mask)
{
	wake_up(&bridge->mbox_queue);

	return CA91CX42_LINT_MBOX;
}

static u32 ca91cx42_IACK_irqhandler(struct ca91cx42_driver *bridge)
{
	wake_up(&bridge->iack_queue);

	return CA91CX42_LINT_SW_IACK;
}

static u32 ca91cx42_VERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
{
	int val;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	val = ioread32(bridge->base + DGCS);

	if (!(val & 0x00000800)) {
		dev_err(ca91cx42_bridge->parent, "ca91cx42_VERR_irqhandler DMA "
			"Read Error DGCS=%08X\n", val);
	}

	return CA91CX42_LINT_VERR;
}

static u32 ca91cx42_LERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
{
	int val;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	val = ioread32(bridge->base + DGCS);

	if (!(val & 0x00000800))
		dev_err(ca91cx42_bridge->parent, "ca91cx42_LERR_irqhandler DMA "
			"Read Error DGCS=%08X\n", val);

	return CA91CX42_LINT_LERR;
}

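/*
 * Service pending VME bus interrupts, highest level (7) first. The low
 * byte of the per-level V_STATID register holds the 8-bit status/ID
 * vector supplied by the interrupter during the IACK cycle.
 */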
static u32 ca91cx42_VIRQ_irqhandler(struct vme_bridge *ca91cx42_bridge,
	int stat)
{
	int vec, i, serviced = 0;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	for (i = 7; i > 0; i--) {
		if (stat & (1 << i)) {
			vec = ioread32(bridge->base +
				CA91CX42_V_STATID[i]) & 0xff;

			vme_irq_handler(ca91cx42_bridge, i, vec);

			serviced |= (1 << i);
		}
	}

	return serviced;
}

static irqreturn_t ca91cx42_irqhandler(int irq, void *ptr)
{
	u32 stat, enable, serviced = 0;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	ca91cx42_bridge = ptr;

	bridge = ca91cx42_bridge->driver_priv;

	enable = ioread32(bridge->base + LINT_EN);
	stat = ioread32(bridge->base + LINT_STAT);

	/* Only look at unmasked interrupts */
	stat &= enable;

	if (unlikely(!stat))
		return IRQ_NONE;

	if (stat & CA91CX42_LINT_DMA)
		serviced |= ca91cx42_DMA_irqhandler(bridge);
	if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
			CA91CX42_LINT_LM3))
		serviced |= ca91cx42_LM_irqhandler(bridge, stat);
	if (stat & CA91CX42_LINT_MBOX)
		serviced |= ca91cx42_MB_irqhandler(bridge, stat);
	if (stat & CA91CX42_LINT_SW_IACK)
		serviced |= ca91cx42_IACK_irqhandler(bridge);
	if (stat & CA91CX42_LINT_VERR)
		serviced |= ca91cx42_VERR_irqhandler(ca91cx42_bridge);
	if (stat & CA91CX42_LINT_LERR)
		serviced |= ca91cx42_LERR_irqhandler(ca91cx42_bridge);
	if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
			CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
			CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
			CA91CX42_LINT_VIRQ7))
		serviced |= ca91cx42_VIRQ_irqhandler(ca91cx42_bridge, stat);

	/* Clear serviced interrupts */
	iowrite32(serviced, bridge->base + LINT_STAT);

	return IRQ_HANDLED;
}

static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge)
{
	int result, tmp;
	struct pci_dev *pdev;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Need pdev */
	pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);

	/* Initialise list for VME bus errors */
	INIT_LIST_HEAD(&ca91cx42_bridge->vme_errors);

	mutex_init(&ca91cx42_bridge->irq_mtx);

	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
			driver_name, ca91cx42_bridge);
	if (result) {
		dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
		       pdev->irq);
		return result;
	}

	/* Ensure all interrupts are mapped to PCI Interrupt 0 */
	iowrite32(0, bridge->base + LINT_MAP0);
	iowrite32(0, bridge->base + LINT_MAP1);
	iowrite32(0, bridge->base + LINT_MAP2);

	/* Enable DMA, mailbox & LM Interrupts */
	tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
		CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
		CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;

	iowrite32(tmp, bridge->base + LINT_EN);

	return 0;
}

static void ca91cx42_irq_exit(struct vme_bridge *ca91cx42_bridge,
	struct pci_dev *pdev)
{
	struct ca91cx42_driver *bridge = ca91cx42_bridge->driver_priv;

	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	/* dev_id must match the one passed to request_irq() */
	free_irq(pdev->irq, ca91cx42_bridge);
}

static int ca91cx42_iack_received(struct ca91cx42_driver *bridge, int level)
{
	u32 tmp;

	tmp = ioread32(bridge->base + LINT_STAT);

	if (tmp & (1 << level))
		return 0;
	else
		return 1;
}

/*
 * Set up a VME interrupt
 */
static void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level,
	int state, int sync)
{
	struct pci_dev *pdev;
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Enable IRQ level */
	tmp = ioread32(bridge->base + LINT_EN);

	if (state == 0)
		tmp &= ~CA91CX42_LINT_VIRQ[level];
	else
		tmp |= CA91CX42_LINT_VIRQ[level];

	iowrite32(tmp, bridge->base + LINT_EN);

	if ((state == 0) && (sync != 0)) {
		pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
			dev);

		synchronize_irq(pdev->irq);
	}
}

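/*
 * Assert an interrupt on the VME bus at the requested level with the
 * supplied status/ID vector and block until the corresponding interrupt
 * acknowledge cycle has completed.
 */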
static int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
	int statid)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Universe can only generate even vectors */
	if (statid & 1)
		return -EINVAL;

	mutex_lock(&bridge->vme_int);

	tmp = ioread32(bridge->base + VINT_EN);

	/* Set Status/ID */
	iowrite32(statid << 24, bridge->base + STATID);

	/* Assert VMEbus IRQ */
	tmp = tmp | (1 << (level + 24));
	iowrite32(tmp, bridge->base + VINT_EN);

	/* Wait for IACK */
	wait_event_interruptible(bridge->iack_queue,
				 ca91cx42_iack_received(bridge, level));

	/* Return interrupt to low state */
	tmp = ioread32(bridge->base + VINT_EN);
	tmp = tmp & ~(1 << (level + 24));
	iowrite32(tmp, bridge->base + VINT_EN);

	mutex_unlock(&bridge->vme_int);

	return 0;
}

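/*
 * Configure one of the VME slave windows (VSIx): program the VME base and
 * bound addresses and the VME-to-PCI translation offset, then the address
 * space and cycle type qualifiers. Windows 0 and 4 have a 4 KB granularity,
 * the remaining windows 64 KB.
 */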
static int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t pci_base, u32 aspace, u32 cycle)
{
	unsigned int i, addr = 0, granularity;
	unsigned int temp_ctl = 0;
	unsigned int vme_bound, pci_offset;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	ca91cx42_bridge = image->parent;

	bridge = ca91cx42_bridge->driver_priv;

	i = image->number;

	switch (aspace) {
	case VME_A16:
		addr |= CA91CX42_VSI_CTL_VAS_A16;
		break;
	case VME_A24:
		addr |= CA91CX42_VSI_CTL_VAS_A24;
		break;
	case VME_A32:
		addr |= CA91CX42_VSI_CTL_VAS_A32;
		break;
	case VME_USER1:
		addr |= CA91CX42_VSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		addr |= CA91CX42_VSI_CTL_VAS_USER2;
		break;
	case VME_A64:
	case VME_CRCSR:
	case VME_USER3:
	case VME_USER4:
	default:
		dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
		return -EINVAL;
	}

	/*
	 * Bound address is a valid address for the window, adjust
	 * accordingly
	 */
	vme_bound = vme_base + size;
	pci_offset = pci_base - vme_base;

	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	if (vme_base & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid VME base "
			"alignment\n");
		return -EINVAL;
	}
	if (vme_bound & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid VME bound "
			"alignment\n");
		return -EINVAL;
	}
	if (pci_offset & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid PCI Offset "
			"alignment\n");
		return -EINVAL;
	}

	/* Disable while we are mucking around */
	temp_ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
	temp_ctl &= ~CA91CX42_VSI_CTL_EN;
	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	/* Setup mapping */
	iowrite32(vme_base, bridge->base + CA91CX42_VSI_BS[i]);
	iowrite32(vme_bound, bridge->base + CA91CX42_VSI_BD[i]);
	iowrite32(pci_offset, bridge->base + CA91CX42_VSI_TO[i]);

	/* Setup address space */
	temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
	temp_ctl |= addr;

	/* Setup cycle types */
	temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
	if (cycle & VME_USER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
	if (cycle & VME_DATA)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_VSI_CTL_EN;

	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	return 0;
}

static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
{
	unsigned int i, granularity = 0, ctl = 0;
	unsigned long long vme_bound, pci_offset;
	struct ca91cx42_driver *bridge;

	bridge = image->parent->driver_priv;

	i = image->number;

	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/* Read Registers */
	ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);

	*vme_base = ioread32(bridge->base + CA91CX42_VSI_BS[i]);
	vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
	pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);

	*pci_base = (dma_addr_t)*vme_base + pci_offset;
	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;

	if (ctl & CA91CX42_VSI_CTL_EN)
		*enabled = 1;

	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
		*aspace = VME_A16;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
		*aspace = VME_A24;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
		*aspace = VME_A32;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
		*aspace = VME_USER1;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
		*aspace = VME_USER2;

	if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
		*cycle |= VME_SUPER;
	if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
		*cycle |= VME_USER;
	if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
		*cycle |= VME_PROG;
	if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
		*cycle |= VME_DATA;

	return 0;
}

/*
 * Allocate and map PCI Resource
 */
static int ca91cx42_alloc_resource(struct vme_master_resource *image,
	unsigned long long size)
{
	unsigned long long existing_size;
	int retval = 0;
	struct pci_dev *pdev;
	struct vme_bridge *ca91cx42_bridge;

	ca91cx42_bridge = image->parent;

	/* Find pci_dev container of dev */
	if (ca91cx42_bridge->parent == NULL) {
		dev_err(ca91cx42_bridge->parent, "Dev entry NULL\n");
		return -EINVAL;
	}
	pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);

	existing_size = (unsigned long long)(image->bus_resource.end -
		image->bus_resource.start);

	/* If the existing size is OK, return */
	if (existing_size == (size - 1))
		return 0;

	if (existing_size != 0) {
		iounmap(image->kern_base);
		image->kern_base = NULL;
		kfree(image->bus_resource.name);
		release_resource(&image->bus_resource);
		memset(&image->bus_resource, 0, sizeof(struct resource));
	}

	if (image->bus_resource.name == NULL) {
		image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
		if (image->bus_resource.name == NULL) {
			dev_err(ca91cx42_bridge->parent, "Unable to allocate "
				"memory for resource name\n");
			retval = -ENOMEM;
			goto err_name;
		}
	}

	sprintf((char *)image->bus_resource.name, "%s.%d",
		ca91cx42_bridge->name, image->number);

	image->bus_resource.start = 0;
	image->bus_resource.end = (unsigned long)size;
	image->bus_resource.flags = IORESOURCE_MEM;

	retval = pci_bus_alloc_resource(pdev->bus,
		&image->bus_resource, size, size, PCIBIOS_MIN_MEM,
		0, NULL, NULL);
	if (retval) {
		dev_err(ca91cx42_bridge->parent, "Failed to allocate mem "
			"resource for window %d size 0x%lx start 0x%lx\n",
			image->number, (unsigned long)size,
			(unsigned long)image->bus_resource.start);
		goto err_resource;
	}

	image->kern_base = ioremap_nocache(
		image->bus_resource.start, size);
	if (image->kern_base == NULL) {
		dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n");
		retval = -ENOMEM;
		goto err_remap;
	}

	return 0;

err_remap:
	release_resource(&image->bus_resource);
err_resource:
	kfree(image->bus_resource.name);
	memset(&image->bus_resource, 0, sizeof(struct resource));
err_name:
	return retval;
}

/*
 * Free and unmap PCI Resource
 */
static void ca91cx42_free_resource(struct vme_master_resource *image)
{
	iounmap(image->kern_base);
	image->kern_base = NULL;
	release_resource(&image->bus_resource);
	kfree(image->bus_resource.name);
	memset(&image->bus_resource, 0, sizeof(struct resource));
}

static int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size, u32 aspace,
	u32 cycle, u32 dwidth)
{
	int retval = 0;
	unsigned int i, granularity = 0;
	unsigned int temp_ctl = 0;
	unsigned long long pci_bound, vme_offset, pci_base;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	ca91cx42_bridge = image->parent;

	bridge = ca91cx42_bridge->driver_priv;

	i = image->number;

	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/* Verify input data */
	if (vme_base & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
			"alignment\n");
		retval = -EINVAL;
		goto err_window;
	}
	if (size & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
			"alignment\n");
		retval = -EINVAL;
		goto err_window;
	}

	spin_lock(&image->lock);

	/*
	 * Let's allocate the resource here rather than further up the stack as
	 * it avoids pushing loads of bus dependent stuff up the stack
	 */
	retval = ca91cx42_alloc_resource(image, size);
	if (retval) {
		spin_unlock(&image->lock);
		dev_err(ca91cx42_bridge->parent, "Unable to allocate memory "
			"for resource\n");
		retval = -ENOMEM;
		goto err_res;
	}

	pci_base = (unsigned long long)image->bus_resource.start;

	/*
	 * Bound address is a valid address for the window, adjust
	 * according to window granularity.
	 */
	pci_bound = pci_base + size;
	vme_offset = vme_base - pci_base;

	/* Disable while we are mucking around */
	temp_ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
	temp_ctl &= ~CA91CX42_LSI_CTL_EN;
	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	/* Setup cycle types */
	temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
	if (cycle & VME_BLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
	if (cycle & VME_MBLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;

	/* Setup data width */
	temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
	switch (dwidth) {
	case VME_D8:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
		break;
	case VME_D16:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
		break;
	case VME_D32:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
		break;
	case VME_D64:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
		break;
	default:
		spin_unlock(&image->lock);
		dev_err(ca91cx42_bridge->parent, "Invalid data width\n");
		retval = -EINVAL;
		goto err_dwidth;
	}

	/* Setup address space */
	temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
	switch (aspace) {
	case VME_A16:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
		break;
	case VME_A24:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
		break;
	case VME_A32:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
		break;
	case VME_CRCSR:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
		break;
	case VME_USER1:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
		break;
	case VME_A64:
	case VME_USER3:
	case VME_USER4:
	default:
		spin_unlock(&image->lock);
		dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
		retval = -EINVAL;
		goto err_aspace;
	}

	temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;

	/* Setup mapping */
	iowrite32(pci_base, bridge->base + CA91CX42_LSI_BS[i]);
	iowrite32(pci_bound, bridge->base + CA91CX42_LSI_BD[i]);
	iowrite32(vme_offset, bridge->base + CA91CX42_LSI_TO[i]);

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_LSI_CTL_EN;

	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	spin_unlock(&image->lock);
	return 0;

err_aspace:
err_dwidth:
	ca91cx42_free_resource(image);
err_res:
err_window:
	return retval;
}

static int __ca91cx42_master_get(struct vme_master_resource *image,
	int *enabled, unsigned long long *vme_base, unsigned long long *size,
	u32 *aspace, u32 *cycle, u32 *dwidth)
{
	unsigned int i, ctl;
	unsigned long long pci_base, pci_bound, vme_offset;
	struct ca91cx42_driver *bridge;

	bridge = image->parent->driver_priv;

	i = image->number;

	ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);

	pci_base = ioread32(bridge->base + CA91CX42_LSI_BS[i]);
	vme_offset = ioread32(bridge->base + CA91CX42_LSI_TO[i]);
	pci_bound = ioread32(bridge->base + CA91CX42_LSI_BD[i]);

	*vme_base = pci_base + vme_offset;
	*size = (unsigned long long)(pci_bound - pci_base);

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;
	*dwidth = 0;

	if (ctl & CA91CX42_LSI_CTL_EN)
		*enabled = 1;

	/* Setup address space */
	switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
	case CA91CX42_LSI_CTL_VAS_A16:
		*aspace = VME_A16;
		break;
	case CA91CX42_LSI_CTL_VAS_A24:
		*aspace = VME_A24;
		break;
	case CA91CX42_LSI_CTL_VAS_A32:
		*aspace = VME_A32;
		break;
	case CA91CX42_LSI_CTL_VAS_CRCSR:
		*aspace = VME_CRCSR;
		break;
	case CA91CX42_LSI_CTL_VAS_USER1:
		*aspace = VME_USER1;
		break;
	case CA91CX42_LSI_CTL_VAS_USER2:
		*aspace = VME_USER2;
		break;
	}

	/* XXX Not sure how to check for MBLT */
	/* Setup cycle types */
	if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
		*cycle |= VME_BLT;
	else
		*cycle |= VME_SCT;

	if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
		*cycle |= VME_SUPER;
	else
		*cycle |= VME_USER;

	if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
		*cycle |= VME_PROG;
	else
		*cycle |= VME_DATA;

	/* Setup data width */
	switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
	case CA91CX42_LSI_CTL_VDW_D8:
		*dwidth = VME_D8;
		break;
	case CA91CX42_LSI_CTL_VDW_D16:
		*dwidth = VME_D16;
		break;
	case CA91CX42_LSI_CTL_VDW_D32:
		*dwidth = VME_D32;
		break;
	case CA91CX42_LSI_CTL_VDW_D64:
		*dwidth = VME_D64;
		break;
	}

	return 0;
}

static int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
	u32 *cycle, u32 *dwidth)
{
	int retval;

	spin_lock(&image->lock);

	retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
		cycle, dwidth);

	spin_unlock(&image->lock);

	return retval;
}

static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
	void *buf, size_t count, loff_t offset)
{
	ssize_t retval;
	void __iomem *addr = image->kern_base + offset;
	unsigned int done = 0;
	unsigned int count32;

	if (count == 0)
		return 0;

	spin_lock(&image->lock);

	/* The following code handles VME address alignment in order to
	 * assure the maximal data width cycle. We cannot use memcpy_xxx
	 * here directly because it may cut the data transfer into 8-bit
	 * cycles, making a D16 cycle impossible.
	 * On the other hand, the bridge itself assures that the maximal
	 * configured data cycle is used and splits it automatically for
	 * non-aligned addresses.
	 */
	if ((uintptr_t)addr & 0x1) {
		*(u8 *)buf = ioread8(addr);
		done += 1;
		if (done == count)
			goto out;
	}
	if ((uintptr_t)(addr + done) & 0x2) {
		if ((count - done) < 2) {
			*(u8 *)(buf + done) = ioread8(addr + done);
			done += 1;
			goto out;
		} else {
			*(u16 *)(buf + done) = ioread16(addr + done);
			done += 2;
		}
	}

	count32 = (count - done) & ~0x3;
	if (count32 > 0) {
		memcpy_fromio(buf + done, addr + done, count32);
		done += count32;
	}

	if ((count - done) & 0x2) {
		*(u16 *)(buf + done) = ioread16(addr + done);
		done += 2;
	}
	if ((count - done) & 0x1) {
		*(u8 *)(buf + done) = ioread8(addr + done);
		done += 1;
	}
out:
	retval = count;
	spin_unlock(&image->lock);

	return retval;
}

static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
	void *buf, size_t count, loff_t offset)
{
	ssize_t retval;
	void __iomem *addr = image->kern_base + offset;
	unsigned int done = 0;
	unsigned int count32;

	if (count == 0)
		return 0;

	spin_lock(&image->lock);

	/* Here we apply the same strategy as in master_read() in order
	 * to assure a D16 cycle when required.
	 */
	if ((uintptr_t)addr & 0x1) {
		iowrite8(*(u8 *)buf, addr);
		done += 1;
		if (done == count)
			goto out;
	}
	if ((uintptr_t)(addr + done) & 0x2) {
		if ((count - done) < 2) {
			iowrite8(*(u8 *)(buf + done), addr + done);
			done += 1;
			goto out;
		} else {
			iowrite16(*(u16 *)(buf + done), addr + done);
			done += 2;
		}
	}

	count32 = (count - done) & ~0x3;
	if (count32 > 0) {
		memcpy_toio(addr + done, buf + done, count32);
		done += count32;
	}

	if ((count - done) & 0x2) {
		iowrite16(*(u16 *)(buf + done), addr + done);
		done += 2;
	}
	if ((count - done) & 0x1) {
		iowrite8(*(u8 *)(buf + done), addr + done);
		done += 1;
	}
out:
	retval = count;

	spin_unlock(&image->lock);

	return retval;
}

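/*
 * Read-modify-write cycle. The Universe drives RMW through its SCYC
 * registers: configure the enable mask, compare and swap values plus the
 * PCI address, then trigger the cycle with an ordinary read through the
 * master window. Only one RMW can be in flight at a time, hence the
 * vme_rmw mutex.
 */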
static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
	unsigned int mask, unsigned int compare, unsigned int swap,
	loff_t offset)
{
	u32 result;
	uintptr_t pci_addr;
	int i;
	struct ca91cx42_driver *bridge;
	struct device *dev;

	bridge = image->parent->driver_priv;
	dev = image->parent->parent;

	/* Find the PCI address that maps to the desired VME address */
	i = image->number;

	/* Locking as we can only do one of these at a time */
	mutex_lock(&bridge->vme_rmw);

	/* Lock image */
	spin_lock(&image->lock);

	pci_addr = (uintptr_t)image->kern_base + offset;

	/* Address must be 4-byte aligned */
	if (pci_addr & 0x3) {
		dev_err(dev, "RMW Address not 4-byte aligned\n");
		result = -EINVAL;
		goto out;
	}

	/* Ensure RMW Disabled whilst configuring */
	iowrite32(0, bridge->base + SCYC_CTL);

	/* Configure registers */
	iowrite32(mask, bridge->base + SCYC_EN);
	iowrite32(compare, bridge->base + SCYC_CMP);
	iowrite32(swap, bridge->base + SCYC_SWP);
	iowrite32(pci_addr, bridge->base + SCYC_ADDR);

	/* Enable RMW */
	iowrite32(CA91CX42_SCYC_CTL_CYC_RMW, bridge->base + SCYC_CTL);

	/* Kick process off with a read to the required address. */
	result = ioread32(image->kern_base + offset);

	/* Disable RMW */
	iowrite32(0, bridge->base + SCYC_CTL);

out:
	spin_unlock(&image->lock);

	mutex_unlock(&bridge->vme_rmw);

	return result;
}

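/*
 * Append a transfer to a DMA chain. Each list entry wraps a hardware
 * descriptor: the VME address space, cycle type and data width are encoded
 * into DCTL, and descriptors are linked through their DCPP (next pointer)
 * fields using bus addresses.
 */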
static int ca91cx42_dma_list_add(struct vme_dma_list *list,
	struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
{
	struct ca91cx42_dma_entry *entry, *prev;
	struct vme_dma_pci *pci_attr;
	struct vme_dma_vme *vme_attr;
	dma_addr_t desc_ptr;
	int retval = 0;
	struct device *dev;

	dev = list->parent->parent->parent;

	/* XXX descriptor must be aligned on 64-bit boundaries */
	entry = kmalloc(sizeof(struct ca91cx42_dma_entry), GFP_KERNEL);
	if (entry == NULL) {
		dev_err(dev, "Failed to allocate memory for dma resource "
			"structure\n");
		retval = -ENOMEM;
		goto err_mem;
	}

	/* Test descriptor alignment */
	if ((unsigned long)&entry->descriptor & CA91CX42_DCPP_M) {
		dev_err(dev, "Descriptor not aligned to 16 byte boundary as "
			"required: %p\n", &entry->descriptor);
		retval = -EINVAL;
		goto err_align;
	}

	memset(&entry->descriptor, 0, sizeof(struct ca91cx42_dma_descriptor));

	if (dest->type == VME_DMA_VME) {
		entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
		vme_attr = dest->private;
		pci_attr = src->private;
	} else {
		vme_attr = src->private;
		pci_attr = dest->private;
	}

	/* Check that we can fulfill the required attributes */
	if ((vme_attr->aspace & ~(VME_A16 | VME_A24 | VME_A32 | VME_USER1 |
		VME_USER2)) != 0) {

		dev_err(dev, "Unsupported address space\n");
		retval = -EINVAL;
		goto err_aspace;
	}

	if ((vme_attr->cycle & ~(VME_SCT | VME_BLT | VME_SUPER | VME_USER |
		VME_PROG | VME_DATA)) != 0) {

		dev_err(dev, "Unsupported cycle type\n");
		retval = -EINVAL;
		goto err_cycle;
	}

	/* Check to see if we can fulfill source and destination */
	if (!(((src->type == VME_DMA_PCI) && (dest->type == VME_DMA_VME)) ||
		((src->type == VME_DMA_VME) && (dest->type == VME_DMA_PCI)))) {

		dev_err(dev, "Cannot perform transfer with this "
			"source-destination combination\n");
		retval = -EINVAL;
		goto err_direct;
	}

	/* Setup cycle types */
	if (vme_attr->cycle & VME_BLT)
		entry->descriptor.dctl |= CA91CX42_DCTL_VCT_BLT;

	/* Setup data width */
	switch (vme_attr->dwidth) {
	case VME_D8:
		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D8;
		break;
	case VME_D16:
		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D16;
		break;
	case VME_D32:
		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D32;
		break;
	case VME_D64:
		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D64;
		break;
	default:
		dev_err(dev, "Invalid data width\n");
		retval = -EINVAL;
		goto err_dwidth;
	}

	/* Setup address space */
	switch (vme_attr->aspace) {
	case VME_A16:
		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A16;
		break;
	case VME_A24:
		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A24;
		break;
	case VME_A32:
		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A32;
		break;
	case VME_USER1:
		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER1;
		break;
	case VME_USER2:
		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER2;
		break;
	default:
		dev_err(dev, "Invalid address space\n");
		retval = -EINVAL;
		goto err_aspace;
	}

	if (vme_attr->cycle & VME_SUPER)
		entry->descriptor.dctl |= CA91CX42_DCTL_SUPER_SUPR;
	if (vme_attr->cycle & VME_PROG)
		entry->descriptor.dctl |= CA91CX42_DCTL_PGM_PGM;

	entry->descriptor.dtbc = count;
	entry->descriptor.dla = pci_attr->address;
	entry->descriptor.dva = vme_attr->address;
	entry->descriptor.dcpp = CA91CX42_DCPP_NULL;

	/* Add to list */
	list_add_tail(&entry->list, &list->entries);

	/* Fill out previous descriptor's "Next Address" */
	if (entry->list.prev != &list->entries) {
		prev = list_entry(entry->list.prev, struct ca91cx42_dma_entry,
			list);
		/* We need the bus address for the pointer */
		desc_ptr = virt_to_bus(&entry->descriptor);
		prev->descriptor.dcpp = desc_ptr & ~CA91CX42_DCPP_M;
	}

	return 0;

err_dwidth:
err_cycle:
err_aspace:
err_direct:
err_align:
	kfree(entry);
err_mem:
	return retval;
}

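/*
 * wait_event() helper: returns non-zero once the DGCS "active" bit clears,
 * i.e. the DMA engine has finished the current transfer.
 */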
static int ca91cx42_dma_busy(struct vme_bridge *ca91cx42_bridge)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	tmp = ioread32(bridge->base + DGCS);

	if (tmp & CA91CX42_DGCS_ACT)
		return 0;
	else
		return 1;
}

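/*
 * Execute a previously built DMA chain: point DCPP at the bus address of
 * the first descriptor, configure the DGCS termination and interrupt bits,
 * set GO and sleep on dma_queue until the DMA interrupt signals completion.
 */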
static int ca91cx42_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_dma_resource *ctrlr;
	struct ca91cx42_dma_entry *entry;
	int retval = 0;
	dma_addr_t bus_addr;
	u32 val;
	struct device *dev;
	struct ca91cx42_driver *bridge;

	ctrlr = list->parent;

	bridge = ctrlr->parent->driver_priv;
	dev = ctrlr->parent->parent;

	mutex_lock(&ctrlr->mtx);

	if (!(list_empty(&ctrlr->running))) {
		/*
		 * XXX We have an active DMA transfer and currently haven't
		 *     sorted out the mechanism for "pending" DMA transfers.
		 *     Return busy.
		 */
		/* Need to add to pending here */
		mutex_unlock(&ctrlr->mtx);
		return -EBUSY;
	} else {
		list_add(&list->list, &ctrlr->running);
	}

	/* Get first bus address and write into registers */
	entry = list_first_entry(&list->entries, struct ca91cx42_dma_entry,
		list);

	bus_addr = virt_to_bus(&entry->descriptor);

	mutex_unlock(&ctrlr->mtx);

	iowrite32(0, bridge->base + DTBC);
	iowrite32(bus_addr & ~CA91CX42_DCPP_M, bridge->base + DCPP);

	/* Start the operation */
	val = ioread32(bridge->base + DGCS);

	/* XXX Could set VMEbus On and Off Counters here */
	val &= (CA91CX42_DGCS_VON_M | CA91CX42_DGCS_VOFF_M);

	val |= (CA91CX42_DGCS_CHAIN | CA91CX42_DGCS_STOP | CA91CX42_DGCS_HALT |
		CA91CX42_DGCS_DONE | CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
		CA91CX42_DGCS_PERR);

	iowrite32(val, bridge->base + DGCS);

	val |= CA91CX42_DGCS_GO;

	iowrite32(val, bridge->base + DGCS);

	wait_event_interruptible(bridge->dma_queue,
		ca91cx42_dma_busy(ctrlr->parent));

	/*
	 * Read status register, this register is valid until we kick off a
	 * new transfer.
	 */
	val = ioread32(bridge->base + DGCS);

	if (val & (CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
		CA91CX42_DGCS_PERR)) {

		dev_err(dev, "ca91c042: DMA Error. DGCS=%08X\n", val);
		val = ioread32(bridge->base + DCTL);
	}

	/* Remove list from running list */
	mutex_lock(&ctrlr->mtx);
	list_del(&list->list);
	mutex_unlock(&ctrlr->mtx);

	return retval;
}

static int ca91cx42_dma_list_empty(struct vme_dma_list *list)
{
	struct list_head *pos, *temp;
	struct ca91cx42_dma_entry *entry;

	/* detach and free each entry */
	list_for_each_safe(pos, temp, &list->entries) {
		list_del(pos);
		entry = list_entry(pos, struct ca91cx42_dma_entry, list);
		kfree(entry);
	}

	return 0;
}

/*
 * All 4 location monitors reside at the same base - this is therefore a
 * system wide configuration.
 *
 * This does not enable the location monitor - that should be done when the
 * first callback is attached and disabled when the last callback is removed.
 */
static int ca91cx42_lm_set(struct vme_lm_resource *lm,
	unsigned long long lm_base, u32 aspace, u32 cycle)
{
	u32 temp_base, lm_ctl = 0;
	int i;
	struct ca91cx42_driver *bridge;
	struct device *dev;

	bridge = lm->parent->driver_priv;
	dev = lm->parent->parent;

	/* Check the alignment of the location monitor */
	temp_base = (u32)lm_base;
	if (temp_base & 0xffff) {
		dev_err(dev, "Location monitor must be aligned to 64KB "
			"boundary\n");
		return -EINVAL;
	}

	mutex_lock(&lm->mtx);

	/* If we already have a callback attached, we can't move it! */
	for (i = 0; i < lm->monitors; i++) {
		if (bridge->lm_callback[i] != NULL) {
			mutex_unlock(&lm->mtx);
			dev_err(dev, "Location monitor callback attached, "
				"can't reset\n");
			return -EBUSY;
		}
	}

	switch (aspace) {
	case VME_A16:
		lm_ctl |= CA91CX42_LM_CTL_AS_A16;
		break;
	case VME_A24:
		lm_ctl |= CA91CX42_LM_CTL_AS_A24;
		break;
	case VME_A32:
		lm_ctl |= CA91CX42_LM_CTL_AS_A32;
		break;
	default:
		mutex_unlock(&lm->mtx);
		dev_err(dev, "Invalid address space\n");
		return -EINVAL;
	}

	if (cycle & VME_SUPER)
		lm_ctl |= CA91CX42_LM_CTL_SUPR;
	if (cycle & VME_USER)
		lm_ctl |= CA91CX42_LM_CTL_NPRIV;
	if (cycle & VME_PROG)
		lm_ctl |= CA91CX42_LM_CTL_PGM;
	if (cycle & VME_DATA)
		lm_ctl |= CA91CX42_LM_CTL_DATA;

	iowrite32(lm_base, bridge->base + LM_BS);
	iowrite32(lm_ctl, bridge->base + LM_CTL);

	mutex_unlock(&lm->mtx);

	return 0;
}

/* Get the configuration of the location monitor and return whether it is
 * enabled or disabled.
 */
static int ca91cx42_lm_get(struct vme_lm_resource *lm,
	unsigned long long *lm_base, u32 *aspace, u32 *cycle)
{
	u32 lm_ctl, enabled = 0;
	struct ca91cx42_driver *bridge;

	bridge = lm->parent->driver_priv;

	mutex_lock(&lm->mtx);

	*lm_base = (unsigned long long)ioread32(bridge->base + LM_BS);
	lm_ctl = ioread32(bridge->base + LM_CTL);

	if (lm_ctl & CA91CX42_LM_CTL_EN)
		enabled = 1;

	if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A16)
		*aspace = VME_A16;
	if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A24)
		*aspace = VME_A24;
	if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A32)
		*aspace = VME_A32;

	*cycle = 0;
	if (lm_ctl & CA91CX42_LM_CTL_SUPR)
		*cycle |= VME_SUPER;
	if (lm_ctl & CA91CX42_LM_CTL_NPRIV)
		*cycle |= VME_USER;
	if (lm_ctl & CA91CX42_LM_CTL_PGM)
		*cycle |= VME_PROG;
	if (lm_ctl & CA91CX42_LM_CTL_DATA)
		*cycle |= VME_DATA;

	mutex_unlock(&lm->mtx);

	return enabled;
}

/*
 * Attach a callback to a specific location monitor.
 *
 * The callback will be passed the number of the monitor that triggered.
 */
static int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
	void (*callback)(int))
{
	u32 lm_ctl, tmp;
	struct ca91cx42_driver *bridge;
	struct device *dev;

	bridge = lm->parent->driver_priv;
	dev = lm->parent->parent;

	mutex_lock(&lm->mtx);

	/* Ensure that the location monitor is configured - need PGM or DATA */
	lm_ctl = ioread32(bridge->base + LM_CTL);
	if ((lm_ctl & (CA91CX42_LM_CTL_PGM | CA91CX42_LM_CTL_DATA)) == 0) {
		mutex_unlock(&lm->mtx);
		dev_err(dev, "Location monitor not properly configured\n");
		return -EINVAL;
	}

	/* Check that a callback isn't already attached */
	if (bridge->lm_callback[monitor] != NULL) {
		mutex_unlock(&lm->mtx);
		dev_err(dev, "Existing callback attached\n");
		return -EBUSY;
	}

	/* Attach callback */
	bridge->lm_callback[monitor] = callback;

	/* Enable Location Monitor interrupt */
	tmp = ioread32(bridge->base + LINT_EN);
	tmp |= CA91CX42_LINT_LM[monitor];
	iowrite32(tmp, bridge->base + LINT_EN);

	/* Ensure that the global Location Monitor Enable is set */
	if ((lm_ctl & CA91CX42_LM_CTL_EN) == 0) {
		lm_ctl |= CA91CX42_LM_CTL_EN;
		iowrite32(lm_ctl, bridge->base + LM_CTL);
	}

	mutex_unlock(&lm->mtx);

	return 0;
}

/*
 * Detach a callback function from a specific location monitor.
 */
static int ca91cx42_lm_detach(struct vme_lm_resource *lm, int monitor)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = lm->parent->driver_priv;

	mutex_lock(&lm->mtx);

	/* Disable Location Monitor and ensure previous interrupts are clear */
	tmp = ioread32(bridge->base + LINT_EN);
	tmp &= ~CA91CX42_LINT_LM[monitor];
	iowrite32(tmp, bridge->base + LINT_EN);

	iowrite32(CA91CX42_LINT_LM[monitor],
		 bridge->base + LINT_STAT);

	/* Detach callback */
	bridge->lm_callback[monitor] = NULL;

	/* If all location monitors disabled, disable global Location Monitor */
	if ((tmp & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
			CA91CX42_LINT_LM3)) == 0) {
		tmp = ioread32(bridge->base + LM_CTL);
		tmp &= ~CA91CX42_LM_CTL_EN;
		iowrite32(tmp, bridge->base + LM_CTL);
	}

	mutex_unlock(&lm->mtx);

	return 0;
}

static int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge)
{
	u32 slot = 0;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	if (!geoid) {
		slot = ioread32(bridge->base + VCSR_BS);
		slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);
	} else
		slot = geoid;

	return (int)slot;
}

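/*
 * Allocate and free coherent DMA buffers on behalf of the VME core. The
 * parent device is the bridge's PCI function, so we recover the pci_dev
 * from the embedded struct device and use the PCI consistent-DMA helpers.
 */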
static void *ca91cx42_alloc_consistent(struct device *parent, size_t size,
	dma_addr_t *dma)
{
	struct pci_dev *pdev;

	/* Find pci_dev container of dev */
	pdev = container_of(parent, struct pci_dev, dev);

	return pci_alloc_consistent(pdev, size, dma);
}

static void ca91cx42_free_consistent(struct device *parent, size_t size,
	void *vaddr, dma_addr_t dma)
{
	struct pci_dev *pdev;

	/* Find pci_dev container of dev */
	pdev = container_of(parent, struct pci_dev, dev);

	pci_free_consistent(pdev, size, vaddr, dma);
}

/*
 * Configure CR/CSR space
 *
 * Access to the CR/CSR can be configured at power-up. The location of the
 * CR/CSR registers in the CR/CSR address space is determined by the board's
 * Auto-ID or geographic address. This function ensures that the window is
 * enabled at an offset consistent with the board's geographic address.
 */
static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
	struct pci_dev *pdev)
{
	unsigned int crcsr_addr;
	int tmp, slot;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	slot = ca91cx42_slot_get(ca91cx42_bridge);

	/* Write CSR Base Address if slot ID is supplied as a module param */
	if (geoid)
		iowrite32(geoid << 27, bridge->base + VCSR_BS);

	dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
	if (slot == 0) {
		dev_err(&pdev->dev, "Slot number is unset, not configuring "
			"CR/CSR space\n");
		return -EINVAL;
	}

	/* Allocate mem for CR/CSR image */
	bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
		&bridge->crcsr_bus);
	if (bridge->crcsr_kernel == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
			"image\n");
		return -ENOMEM;
	}

	memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);

	crcsr_addr = slot * (512 * 1024);
	iowrite32(bridge->crcsr_bus - crcsr_addr, bridge->base + VCSR_TO);

	tmp = ioread32(bridge->base + VCSR_CTL);
	tmp |= CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, bridge->base + VCSR_CTL);

	return 0;
}

static void ca91cx42_crcsr_exit(struct vme_bridge *ca91cx42_bridge,
	struct pci_dev *pdev)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Turn off CR/CSR space */
	tmp = ioread32(bridge->base + VCSR_CTL);
	tmp &= ~CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, bridge->base + VCSR_CTL);

	/* Clear the translation offset and free the image buffer */
	iowrite32(0, bridge->base + VCSR_TO);

	pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
		bridge->crcsr_bus);
}

static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int retval, i;
	u32 data;
	struct list_head *pos = NULL, *n;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *ca91cx42_device;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;

	/* We want to support more than one of each bridge so we need to
	 * dynamically allocate the bridge structure
	 */
	ca91cx42_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);

	if (ca91cx42_bridge == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for device "
			"structure\n");
		retval = -ENOMEM;
		goto err_struct;
	}

	ca91cx42_device = kzalloc(sizeof(struct ca91cx42_driver), GFP_KERNEL);

	if (ca91cx42_device == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for device "
			"structure\n");
		retval = -ENOMEM;
		goto err_driver;
	}

	ca91cx42_bridge->driver_priv = ca91cx42_device;

	/* Enable the device */
	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "Unable to enable device\n");
		goto err_enable;
	}

	/* Map Registers */
	retval = pci_request_regions(pdev, driver_name);
	if (retval) {
		dev_err(&pdev->dev, "Unable to reserve resources\n");
		goto err_resource;
	}

	/* map registers in BAR 0 */
	ca91cx42_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
		4096);
	if (!ca91cx42_device->base) {
		dev_err(&pdev->dev, "Unable to remap CRG region\n");
		retval = -EIO;
		goto err_remap;
	}

	/* Check to see if the mapping worked out */
	data = ioread32(ca91cx42_device->base + CA91CX42_PCI_ID) & 0x0000FFFF;
	if (data != PCI_VENDOR_ID_TUNDRA) {
		dev_err(&pdev->dev, "PCI_ID check failed\n");
		retval = -EIO;
		goto err_test;
	}

	/* Initialize wait queues & mutual exclusion flags */
	init_waitqueue_head(&ca91cx42_device->dma_queue);
	init_waitqueue_head(&ca91cx42_device->iack_queue);
	mutex_init(&ca91cx42_device->vme_int);
	mutex_init(&ca91cx42_device->vme_rmw);

	ca91cx42_bridge->parent = &pdev->dev;
	strcpy(ca91cx42_bridge->name, driver_name);

	/* Setup IRQ */
	retval = ca91cx42_irq_init(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Initialization failed.\n");
		goto err_irq;
	}

	/* Add master windows to list */
	INIT_LIST_HEAD(&ca91cx42_bridge->master_resources);
	for (i = 0; i < CA91C142_MAX_MASTER; i++) {
		master_image = kmalloc(sizeof(struct vme_master_resource),
			GFP_KERNEL);
		if (master_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"master resource structure\n");
			retval = -ENOMEM;
			goto err_master;
		}
		master_image->parent = ca91cx42_bridge;
		spin_lock_init(&master_image->lock);
		master_image->locked = 0;
		master_image->number = i;
		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
			VME_CRCSR | VME_USER1 | VME_USER2;
		master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
		memset(&master_image->bus_resource, 0,
			sizeof(struct resource));
		master_image->kern_base = NULL;
		list_add_tail(&master_image->list,
			&ca91cx42_bridge->master_resources);
	}

	/* Add slave windows to list */
	INIT_LIST_HEAD(&ca91cx42_bridge->slave_resources);
	for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
		slave_image = kmalloc(sizeof(struct vme_slave_resource),
			GFP_KERNEL);
		if (slave_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"slave resource structure\n");
			retval = -ENOMEM;
			goto err_slave;
		}
		slave_image->parent = ca91cx42_bridge;
		mutex_init(&slave_image->mtx);
		slave_image->locked = 0;
		slave_image->number = i;
		slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
			VME_USER2;

		/* Only windows 0 and 4 support A16 */
		if (i == 0 || i == 4)
			slave_image->address_attr |= VME_A16;

		slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		list_add_tail(&slave_image->list,
			&ca91cx42_bridge->slave_resources);
	}

	/* Add dma engines to list */
	INIT_LIST_HEAD(&ca91cx42_bridge->dma_resources);
	for (i = 0; i < CA91C142_MAX_DMA; i++) {
		dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
			GFP_KERNEL);
		if (dma_ctrlr == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"dma resource structure\n");
			retval = -ENOMEM;
			goto err_dma;
		}
		dma_ctrlr->parent = ca91cx42_bridge;
		mutex_init(&dma_ctrlr->mtx);
		dma_ctrlr->locked = 0;
		dma_ctrlr->number = i;
		dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
			VME_DMA_MEM_TO_VME;
		INIT_LIST_HEAD(&dma_ctrlr->pending);
		INIT_LIST_HEAD(&dma_ctrlr->running);
		list_add_tail(&dma_ctrlr->list,
			&ca91cx42_bridge->dma_resources);
	}

	/* Add location monitor to list */
	INIT_LIST_HEAD(&ca91cx42_bridge->lm_resources);
	lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
	if (lm == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for "
			"location monitor resource structure\n");
		retval = -ENOMEM;
		goto err_lm;
	}
	lm->parent = ca91cx42_bridge;
	mutex_init(&lm->mtx);
	lm->locked = 0;
	lm->number = 1;
	lm->monitors = 4;
	list_add_tail(&lm->list, &ca91cx42_bridge->lm_resources);

	ca91cx42_bridge->slave_get = ca91cx42_slave_get;
	ca91cx42_bridge->slave_set = ca91cx42_slave_set;
	ca91cx42_bridge->master_get = ca91cx42_master_get;
	ca91cx42_bridge->master_set = ca91cx42_master_set;
	ca91cx42_bridge->master_read = ca91cx42_master_read;
	ca91cx42_bridge->master_write = ca91cx42_master_write;
	ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
	ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
	ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
	ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
	ca91cx42_bridge->irq_set = ca91cx42_irq_set;
	ca91cx42_bridge->irq_generate = ca91cx42_irq_generate;
	ca91cx42_bridge->lm_set = ca91cx42_lm_set;
	ca91cx42_bridge->lm_get = ca91cx42_lm_get;
	ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
	ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
	ca91cx42_bridge->slot_get = ca91cx42_slot_get;
	ca91cx42_bridge->alloc_consistent = ca91cx42_alloc_consistent;
	ca91cx42_bridge->free_consistent = ca91cx42_free_consistent;

	data = ioread32(ca91cx42_device->base + MISC_CTL);
	dev_info(&pdev->dev, "Board is%s the VME system controller\n",
		(data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
	dev_info(&pdev->dev, "Slot ID is %d\n",
		ca91cx42_slot_get(ca91cx42_bridge));

	if (ca91cx42_crcsr_init(ca91cx42_bridge, pdev))
		dev_err(&pdev->dev, "CR/CSR configuration failed.\n");

	/* Need to save ca91cx42_bridge pointer locally in link list for use in
	 * ca91cx42_remove()
	 */
	retval = vme_register_bridge(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Registration failed.\n");
		goto err_reg;
	}

	pci_set_drvdata(pdev, ca91cx42_bridge);

	return 0;

err_reg:
	ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
err_lm:
	/* resources are stored in link list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->lm_resources) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}
err_dma:
	/* resources are stored in link list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->dma_resources) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}
err_slave:
	/* resources are stored in link list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->slave_resources) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}
err_master:
	/* resources are stored in link list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->master_resources) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	ca91cx42_irq_exit(ca91cx42_bridge, pdev);
err_irq:
err_test:
	iounmap(ca91cx42_device->base);
err_remap:
	pci_release_regions(pdev);
err_resource:
	pci_disable_device(pdev);
err_enable:
	kfree(ca91cx42_device);
err_driver:
	kfree(ca91cx42_bridge);
err_struct:
	return retval;
}

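/*
 * Tear down in the reverse order of probe: mask interrupts, disable all
 * master (LSIx) and slave (VSIx) windows, unregister the bridge from the
 * VME core and free the resource lists before releasing the PCI device.
 */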
static void ca91cx42_remove(struct pci_dev *pdev)
{
	struct list_head *pos = NULL, *n;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;
	struct ca91cx42_driver *bridge;
	struct vme_bridge *ca91cx42_bridge = pci_get_drvdata(pdev);

	bridge = ca91cx42_bridge->driver_priv;

	/* Turn off Ints */
	iowrite32(0, bridge->base + LINT_EN);

	/* Turn off the windows */
	iowrite32(0x00800000, bridge->base + LSI0_CTL);
	iowrite32(0x00800000, bridge->base + LSI1_CTL);
	iowrite32(0x00800000, bridge->base + LSI2_CTL);
	iowrite32(0x00800000, bridge->base + LSI3_CTL);
	iowrite32(0x00800000, bridge->base + LSI4_CTL);
	iowrite32(0x00800000, bridge->base + LSI5_CTL);
	iowrite32(0x00800000, bridge->base + LSI6_CTL);
	iowrite32(0x00800000, bridge->base + LSI7_CTL);
	iowrite32(0x00F00000, bridge->base + VSI0_CTL);
	iowrite32(0x00F00000, bridge->base + VSI1_CTL);
	iowrite32(0x00F00000, bridge->base + VSI2_CTL);
	iowrite32(0x00F00000, bridge->base + VSI3_CTL);
	iowrite32(0x00F00000, bridge->base + VSI4_CTL);
	iowrite32(0x00F00000, bridge->base + VSI5_CTL);
	iowrite32(0x00F00000, bridge->base + VSI6_CTL);
	iowrite32(0x00F00000, bridge->base + VSI7_CTL);

	vme_unregister_bridge(ca91cx42_bridge);

	ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);

	/* resources are stored in link list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->lm_resources) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}

	/* resources are stored in link list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->dma_resources) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}

	/* resources are stored in link list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->slave_resources) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}

	/* resources are stored in link list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->master_resources) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	ca91cx42_irq_exit(ca91cx42_bridge, pdev);

	iounmap(bridge->base);

	pci_release_regions(pdev);

	pci_disable_device(pdev);

	/* Free the driver-private structure allocated in probe */
	kfree(bridge);
	kfree(ca91cx42_bridge);
}

module_pci_driver(ca91cx42_driver);

MODULE_PARM_DESC(geoid, "Override geographical addressing");
module_param(geoid, int, 0);

MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
MODULE_LICENSE("GPL");