1 /*
2  * Support for the Tundra TSI148 VME-PCI Bridge Chip
3  *
4  * Author: Martyn Welch <martyn.welch@ge.com>
5  * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
6  *
7  * Based on work by Tom Armistead and Ajit Prem
8  * Copyright 2004 Motorola Inc.
9  *
10  * This program is free software; you can redistribute  it and/or modify it
11  * under  the terms of  the GNU General  Public License as published by the
12  * Free Software Foundation;  either version 2 of the  License, or (at your
13  * option) any later version.
14  */
15 
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/mm.h>
19 #include <linux/types.h>
20 #include <linux/errno.h>
21 #include <linux/proc_fs.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/interrupt.h>
26 #include <linux/spinlock.h>
27 #include <linux/sched.h>
28 #include <linux/slab.h>
29 #include <linux/time.h>
30 #include <linux/io.h>
31 #include <linux/uaccess.h>
32 #include <linux/byteorder/generic.h>
33 #include <linux/vme.h>
34 
35 #include "../vme_bridge.h"
36 #include "vme_tsi148.h"
37 
38 static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
39 static void tsi148_remove(struct pci_dev *);
40 
41 
42 /* Module parameter */
43 static bool err_chk;
44 static int geoid;
45 
46 static const char driver_name[] = "vme_tsi148";
47 
48 static const struct pci_device_id tsi148_ids[] = {
49 	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
50 	{ },
51 };
52 
53 MODULE_DEVICE_TABLE(pci, tsi148_ids);
54 
55 static struct pci_driver tsi148_driver = {
56 	.name = driver_name,
57 	.id_table = tsi148_ids,
58 	.probe = tsi148_probe,
59 	.remove = tsi148_remove,
60 };
61 
62 static void reg_join(unsigned int high, unsigned int low,
63 	unsigned long long *variable)
64 {
65 	*variable = (unsigned long long)high << 32;
66 	*variable |= (unsigned long long)low;
67 }
68 
69 static void reg_split(unsigned long long variable, unsigned int *high,
70 	unsigned int *low)
71 {
72 	*low = (unsigned int)variable & 0xFFFFFFFF;
73 	*high = (unsigned int)(variable >> 32);
74 }
75 
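/*
 * reg_join() and reg_split() convert between a 64-bit quantity and the
 * high/low 32-bit register pair used throughout the TSI148.  For example
 * (arbitrary value, shown for documentation only):
 *
 *	reg_join(0x00000001, 0x80000000, &addr);   addr == 0x180000000ULL
 *	reg_split(addr, &hi, &lo);                 hi == 0x1, lo == 0x80000000
 */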
76 /*
77  * Wakes up DMA queue.
78  */
79 static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
80 	int channel_mask)
81 {
82 	u32 serviced = 0;
83 
84 	if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
85 		wake_up(&bridge->dma_queue[0]);
86 		serviced |= TSI148_LCSR_INTC_DMA0C;
87 	}
88 	if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
89 		wake_up(&bridge->dma_queue[1]);
90 		serviced |= TSI148_LCSR_INTC_DMA1C;
91 	}
92 
93 	return serviced;
94 }
95 
96 /*
97  * Wake up location monitor queue
98  */
99 static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
100 {
101 	int i;
102 	u32 serviced = 0;
103 
104 	for (i = 0; i < 4; i++) {
105 		if (stat & TSI148_LCSR_INTS_LMS[i]) {
106 			/* We only enable interrupts if the callback is set */
107 			bridge->lm_callback[i](bridge->lm_data[i]);
108 			serviced |= TSI148_LCSR_INTC_LMC[i];
109 		}
110 	}
111 
112 	return serviced;
113 }
114 
115 /*
116  * Wake up mail box queue.
117  *
118  * XXX This functionality is not exposed through the API.
119  */
120 static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
121 {
122 	int i;
123 	u32 val;
124 	u32 serviced = 0;
125 	struct tsi148_driver *bridge;
126 
127 	bridge = tsi148_bridge->driver_priv;
128 
129 	for (i = 0; i < 4; i++) {
130 		if (stat & TSI148_LCSR_INTS_MBS[i]) {
131 			val = ioread32be(bridge->base +	TSI148_GCSR_MBOX[i]);
132 			dev_err(tsi148_bridge->parent, "VME Mailbox %d received"
133 				": 0x%x\n", i, val);
134 			serviced |= TSI148_LCSR_INTC_MBC[i];
135 		}
136 	}
137 
138 	return serviced;
139 }
140 
141 /*
142  * Display error & status message when PERR (PCI) exception interrupt occurs.
143  */
144 static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
145 {
146 	struct tsi148_driver *bridge;
147 
148 	bridge = tsi148_bridge->driver_priv;
149 
150 	dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, "
151 		"attributes: %08x\n",
152 		ioread32be(bridge->base + TSI148_LCSR_EDPAU),
153 		ioread32be(bridge->base + TSI148_LCSR_EDPAL),
154 		ioread32be(bridge->base + TSI148_LCSR_EDPAT));
155 
156 	dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split "
157 		"completion reg: %08x\n",
158 		ioread32be(bridge->base + TSI148_LCSR_EDPXA),
159 		ioread32be(bridge->base + TSI148_LCSR_EDPXS));
160 
161 	iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
162 
163 	return TSI148_LCSR_INTC_PERRC;
164 }
165 
166 /*
167  * Save address and status when VME error interrupt occurs.
168  */
169 static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
170 {
171 	unsigned int error_addr_high, error_addr_low;
172 	unsigned long long error_addr;
173 	u32 error_attrib;
174 	int error_am;
175 	struct tsi148_driver *bridge;
176 
177 	bridge = tsi148_bridge->driver_priv;
178 
179 	error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
180 	error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
181 	error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
182 	error_am = (error_attrib & TSI148_LCSR_VEAT_AM_M) >> 8;
183 
184 	reg_join(error_addr_high, error_addr_low, &error_addr);
185 
186 	/* Check for exception register overflow (we have lost error data) */
187 	if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
188 		dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow "
189 			"Occurred\n");
190 	}
191 
192 	if (err_chk)
193 		vme_bus_error_handler(tsi148_bridge, error_addr, error_am);
194 	else
195 		dev_err(tsi148_bridge->parent,
196 			"VME Bus Error at address: 0x%llx, attributes: %08x\n",
197 			error_addr, error_attrib);
198 
199 	/* Clear Status */
200 	iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
201 
202 	return TSI148_LCSR_INTC_VERRC;
203 }
204 
205 /*
206  * Wake up IACK queue.
207  */
208 static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
209 {
210 	wake_up(&bridge->iack_queue);
211 
212 	return TSI148_LCSR_INTC_IACKC;
213 }
214 
215 /*
216  * Calling VME bus interrupt callback if provided.
217  */
218 static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
219 	u32 stat)
220 {
221 	int vec, i, serviced = 0;
222 	struct tsi148_driver *bridge;
223 
224 	bridge = tsi148_bridge->driver_priv;
225 
226 	for (i = 7; i > 0; i--) {
227 		if (stat & (1 << i)) {
228 			/*
229 			 * Note: Even though the registers are defined as
230 			 * 32-bits in the spec, we only want to issue 8-bit
231 			 * IACK cycles on the bus, read from offset 3.
232 			 */
233 			vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
234 
235 			vme_irq_handler(tsi148_bridge, i, vec);
236 
237 			serviced |= (1 << i);
238 		}
239 	}
240 
241 	return serviced;
242 }
243 
244 /*
245  * Top level interrupt handler.  Clears appropriate interrupt status bits and
246  * then calls appropriate sub handler(s).
247  */
248 static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
249 {
250 	u32 stat, enable, serviced = 0;
251 	struct vme_bridge *tsi148_bridge;
252 	struct tsi148_driver *bridge;
253 
254 	tsi148_bridge = ptr;
255 
256 	bridge = tsi148_bridge->driver_priv;
257 
258 	/* Determine which interrupts are unmasked and set */
259 	enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
260 	stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
261 
262 	/* Only look at unmasked interrupts */
263 	stat &= enable;
264 
265 	if (unlikely(!stat))
266 		return IRQ_NONE;
267 
268 	/* Call subhandlers as appropriate */
269 	/* DMA irqs */
270 	if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
271 		serviced |= tsi148_DMA_irqhandler(bridge, stat);
272 
273 	/* Location monitor irqs */
274 	if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
275 			TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
276 		serviced |= tsi148_LM_irqhandler(bridge, stat);
277 
278 	/* Mail box irqs */
279 	if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
280 			TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
281 		serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);
282 
283 	/* PCI bus error */
284 	if (stat & TSI148_LCSR_INTS_PERRS)
285 		serviced |= tsi148_PERR_irqhandler(tsi148_bridge);
286 
287 	/* VME bus error */
288 	if (stat & TSI148_LCSR_INTS_VERRS)
289 		serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
290 
291 	/* IACK irq */
292 	if (stat & TSI148_LCSR_INTS_IACKS)
293 		serviced |= tsi148_IACK_irqhandler(bridge);
294 
295 	/* VME bus irqs */
296 	if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
297 			TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
298 			TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
299 			TSI148_LCSR_INTS_IRQ1S))
300 		serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
301 
302 	/* Clear serviced interrupts */
303 	iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
304 
305 	return IRQ_HANDLED;
306 }
307 
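/*
 * A client driver never calls the handlers above directly; VME interrupts
 * dispatched by tsi148_VIRQ_irqhandler() reach it through a callback
 * registered with the VME core.  Hedged sketch (names taken from
 * <linux/vme.h>, level and Status/ID values hypothetical):
 *
 *	static void my_isr(int level, int statid, void *priv);
 *
 *	err = vme_irq_request(vdev, 3, 0xaa, my_isr, NULL);
 */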
308 static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
309 {
310 	int result;
311 	unsigned int tmp;
312 	struct pci_dev *pdev;
313 	struct tsi148_driver *bridge;
314 
315 	pdev = to_pci_dev(tsi148_bridge->parent);
316 
317 	bridge = tsi148_bridge->driver_priv;
318 
319 	result = request_irq(pdev->irq,
320 			     tsi148_irqhandler,
321 			     IRQF_SHARED,
322 			     driver_name, tsi148_bridge);
323 	if (result) {
324 		dev_err(tsi148_bridge->parent, "Can't get assigned pci irq "
325 			"vector %02X\n", pdev->irq);
326 		return result;
327 	}
328 
329 	/* Enable and unmask interrupts */
330 	tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
331 		TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
332 		TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
333 		TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
334 		TSI148_LCSR_INTEO_IACKEO;
335 
336 	/* This leaves the following interrupts masked.
337 	 * TSI148_LCSR_INTEO_VIEEO
338 	 * TSI148_LCSR_INTEO_SYSFLEO
339 	 * TSI148_LCSR_INTEO_ACFLEO
340 	 */
341 
342 	/* Don't enable Location Monitor interrupts here - they will be
343 	 * enabled when the location monitors are properly configured and
344 	 * a callback has been attached.
345 	 * TSI148_LCSR_INTEO_LM0EO
346 	 * TSI148_LCSR_INTEO_LM1EO
347 	 * TSI148_LCSR_INTEO_LM2EO
348 	 * TSI148_LCSR_INTEO_LM3EO
349 	 */
350 
351 	/* Don't enable VME interrupts until we add a handler, else the board
352 	 * will respond to it and we don't want that unless it knows how to
353 	 * properly deal with it.
354 	 * TSI148_LCSR_INTEO_IRQ7EO
355 	 * TSI148_LCSR_INTEO_IRQ6EO
356 	 * TSI148_LCSR_INTEO_IRQ5EO
357 	 * TSI148_LCSR_INTEO_IRQ4EO
358 	 * TSI148_LCSR_INTEO_IRQ3EO
359 	 * TSI148_LCSR_INTEO_IRQ2EO
360 	 * TSI148_LCSR_INTEO_IRQ1EO
361 	 */
362 
363 	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
364 	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
365 
366 	return 0;
367 }
368 
369 static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
370 	struct pci_dev *pdev)
371 {
372 	struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
373 
374 	/* Turn off interrupts */
375 	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
376 	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
377 
378 	/* Clear all interrupts */
379 	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
380 
381 	/* Detach interrupt handler */
382 	free_irq(pdev->irq, tsi148_bridge);
383 }
384 
385 /*
386  * Check to see if an IACK has been received, return true (1) or false (0).
387  */
388 static int tsi148_iack_received(struct tsi148_driver *bridge)
389 {
390 	u32 tmp;
391 
392 	tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
393 
394 	if (tmp & TSI148_LCSR_VICR_IRQS)
395 		return 0;
396 	else
397 		return 1;
398 }
399 
400 /*
401  * Configure VME interrupt
402  */
403 static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
404 	int state, int sync)
405 {
406 	struct pci_dev *pdev;
407 	u32 tmp;
408 	struct tsi148_driver *bridge;
409 
410 	bridge = tsi148_bridge->driver_priv;
411 
412 	/* We need to do the ordering differently for enabling and disabling */
413 	if (state == 0) {
414 		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
415 		tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
416 		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
417 
418 		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
419 		tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
420 		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
421 
422 		if (sync != 0) {
423 			pdev = to_pci_dev(tsi148_bridge->parent);
424 			synchronize_irq(pdev->irq);
425 		}
426 	} else {
427 		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
428 		tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
429 		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
430 
431 		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
432 		tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
433 		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
434 	}
435 }
436 
437 /*
438  * Generate a VME bus interrupt at the requested level & vector. Wait for
439  * interrupt to be acked.
440  */
441 static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
442 	int statid)
443 {
444 	u32 tmp;
445 	struct tsi148_driver *bridge;
446 
447 	bridge = tsi148_bridge->driver_priv;
448 
449 	mutex_lock(&bridge->vme_int);
450 
451 	/* Read VICR register */
452 	tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
453 
454 	/* Set Status/ID */
455 	tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
456 		(statid & TSI148_LCSR_VICR_STID_M);
457 	iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
458 
459 	/* Assert VMEbus IRQ */
460 	tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
461 	iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
462 
463 	/* XXX Consider implementing a timeout? */
464 	wait_event_interruptible(bridge->iack_queue,
465 		tsi148_iack_received(bridge));
466 
467 	mutex_unlock(&bridge->vme_int);
468 
469 	return 0;
470 }
471 
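/*
 * Hedged usage sketch for the interrupt generation path above, via the VME
 * core (level and Status/ID values are hypothetical):
 *
 *	err = vme_irq_generate(vdev, 4, 0x55);
 *
 * The call blocks in tsi148_irq_generate() until the IACK is observed.
 */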
472 /*
473  * Initialize a slave window with the requested attributes.
474  */
475 static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
476 	unsigned long long vme_base, unsigned long long size,
477 	dma_addr_t pci_base, u32 aspace, u32 cycle)
478 {
479 	unsigned int i, addr = 0, granularity = 0;
480 	unsigned int temp_ctl = 0;
481 	unsigned int vme_base_low, vme_base_high;
482 	unsigned int vme_bound_low, vme_bound_high;
483 	unsigned int pci_offset_low, pci_offset_high;
484 	unsigned long long vme_bound, pci_offset;
485 	struct vme_bridge *tsi148_bridge;
486 	struct tsi148_driver *bridge;
487 
488 	tsi148_bridge = image->parent;
489 	bridge = tsi148_bridge->driver_priv;
490 
491 	i = image->number;
492 
493 	switch (aspace) {
494 	case VME_A16:
495 		granularity = 0x10;
496 		addr |= TSI148_LCSR_ITAT_AS_A16;
497 		break;
498 	case VME_A24:
499 		granularity = 0x1000;
500 		addr |= TSI148_LCSR_ITAT_AS_A24;
501 		break;
502 	case VME_A32:
503 		granularity = 0x10000;
504 		addr |= TSI148_LCSR_ITAT_AS_A32;
505 		break;
506 	case VME_A64:
507 		granularity = 0x10000;
508 		addr |= TSI148_LCSR_ITAT_AS_A64;
509 		break;
510 	default:
511 		dev_err(tsi148_bridge->parent, "Invalid address space\n");
512 		return -EINVAL;
513 		break;
514 	}
515 
516 	/* Convert 64-bit variables to 2x 32-bit variables */
517 	reg_split(vme_base, &vme_base_high, &vme_base_low);
518 
519 	/*
520 	 * Bound address is a valid address for the window, adjust
521 	 * accordingly
522 	 */
523 	vme_bound = vme_base + size - granularity;
524 	reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
525 	pci_offset = (unsigned long long)pci_base - vme_base;
526 	reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
527 
528 	if (vme_base_low & (granularity - 1)) {
529 		dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
530 		return -EINVAL;
531 	}
532 	if (vme_bound_low & (granularity - 1)) {
533 		dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
534 		return -EINVAL;
535 	}
536 	if (pci_offset_low & (granularity - 1)) {
537 		dev_err(tsi148_bridge->parent, "Invalid PCI Offset "
538 			"alignment\n");
539 		return -EINVAL;
540 	}
541 
542 	/*  Disable while we are mucking around */
543 	temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
544 		TSI148_LCSR_OFFSET_ITAT);
545 	temp_ctl &= ~TSI148_LCSR_ITAT_EN;
546 	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
547 		TSI148_LCSR_OFFSET_ITAT);
548 
549 	/* Setup mapping */
550 	iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
551 		TSI148_LCSR_OFFSET_ITSAU);
552 	iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
553 		TSI148_LCSR_OFFSET_ITSAL);
554 	iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
555 		TSI148_LCSR_OFFSET_ITEAU);
556 	iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
557 		TSI148_LCSR_OFFSET_ITEAL);
558 	iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
559 		TSI148_LCSR_OFFSET_ITOFU);
560 	iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
561 		TSI148_LCSR_OFFSET_ITOFL);
562 
563 	/* Setup 2eSST speeds */
564 	temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
565 	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
566 	case VME_2eSST160:
567 		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
568 		break;
569 	case VME_2eSST267:
570 		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
571 		break;
572 	case VME_2eSST320:
573 		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
574 		break;
575 	}
576 
577 	/* Setup cycle types */
578 	temp_ctl &= ~(0x1F << 7);
579 	if (cycle & VME_BLT)
580 		temp_ctl |= TSI148_LCSR_ITAT_BLT;
581 	if (cycle & VME_MBLT)
582 		temp_ctl |= TSI148_LCSR_ITAT_MBLT;
583 	if (cycle & VME_2eVME)
584 		temp_ctl |= TSI148_LCSR_ITAT_2eVME;
585 	if (cycle & VME_2eSST)
586 		temp_ctl |= TSI148_LCSR_ITAT_2eSST;
587 	if (cycle & VME_2eSSTB)
588 		temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
589 
590 	/* Setup address space */
591 	temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
592 	temp_ctl |= addr;
593 
594 	temp_ctl &= ~0xF;
595 	if (cycle & VME_SUPER)
596 		temp_ctl |= TSI148_LCSR_ITAT_SUPR;
597 	if (cycle & VME_USER)
598 		temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
599 	if (cycle & VME_PROG)
600 		temp_ctl |= TSI148_LCSR_ITAT_PGM;
601 	if (cycle & VME_DATA)
602 		temp_ctl |= TSI148_LCSR_ITAT_DATA;
603 
604 	/* Write ctl reg without enable */
605 	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
606 		TSI148_LCSR_OFFSET_ITAT);
607 
608 	if (enabled)
609 		temp_ctl |= TSI148_LCSR_ITAT_EN;
610 
611 	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
612 		TSI148_LCSR_OFFSET_ITAT);
613 
614 	return 0;
615 }
616 
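/*
 * Illustrative slave window setup through the VME core (hedged sketch,
 * buffer and numbers hypothetical).  The base, size and PCI offset must all
 * be aligned to the granularity of the address space selected above (0x10
 * for A16, 0x1000 for A24, 0x10000 for A32/A64):
 *
 *	err = vme_slave_set(resource, 1, 0x400000, 0x10000, buf_dma,
 *			    VME_A24, VME_SCT | VME_USER | VME_DATA);
 */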
617 /*
618  * Get slave window configuration.
619  */
620 static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
621 	unsigned long long *vme_base, unsigned long long *size,
622 	dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
623 {
624 	unsigned int i, granularity = 0, ctl = 0;
625 	unsigned int vme_base_low, vme_base_high;
626 	unsigned int vme_bound_low, vme_bound_high;
627 	unsigned int pci_offset_low, pci_offset_high;
628 	unsigned long long vme_bound, pci_offset;
629 	struct tsi148_driver *bridge;
630 
631 	bridge = image->parent->driver_priv;
632 
633 	i = image->number;
634 
635 	/* Read registers */
636 	ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
637 		TSI148_LCSR_OFFSET_ITAT);
638 
639 	vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
640 		TSI148_LCSR_OFFSET_ITSAU);
641 	vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
642 		TSI148_LCSR_OFFSET_ITSAL);
643 	vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
644 		TSI148_LCSR_OFFSET_ITEAU);
645 	vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
646 		TSI148_LCSR_OFFSET_ITEAL);
647 	pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
648 		TSI148_LCSR_OFFSET_ITOFU);
649 	pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
650 		TSI148_LCSR_OFFSET_ITOFL);
651 
652 	/* Convert 64-bit variables to 2x 32-bit variables */
653 	reg_join(vme_base_high, vme_base_low, vme_base);
654 	reg_join(vme_bound_high, vme_bound_low, &vme_bound);
655 	reg_join(pci_offset_high, pci_offset_low, &pci_offset);
656 
657 	*pci_base = (dma_addr_t)(*vme_base + pci_offset);
658 
659 	*enabled = 0;
660 	*aspace = 0;
661 	*cycle = 0;
662 
663 	if (ctl & TSI148_LCSR_ITAT_EN)
664 		*enabled = 1;
665 
666 	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
667 		granularity = 0x10;
668 		*aspace |= VME_A16;
669 	}
670 	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
671 		granularity = 0x1000;
672 		*aspace |= VME_A24;
673 	}
674 	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
675 		granularity = 0x10000;
676 		*aspace |= VME_A32;
677 	}
678 	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
679 		granularity = 0x10000;
680 		*aspace |= VME_A64;
681 	}
682 
683 	/* Need granularity before we set the size */
684 	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);
685 
686 
687 	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
688 		*cycle |= VME_2eSST160;
689 	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
690 		*cycle |= VME_2eSST267;
691 	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
692 		*cycle |= VME_2eSST320;
693 
694 	if (ctl & TSI148_LCSR_ITAT_BLT)
695 		*cycle |= VME_BLT;
696 	if (ctl & TSI148_LCSR_ITAT_MBLT)
697 		*cycle |= VME_MBLT;
698 	if (ctl & TSI148_LCSR_ITAT_2eVME)
699 		*cycle |= VME_2eVME;
700 	if (ctl & TSI148_LCSR_ITAT_2eSST)
701 		*cycle |= VME_2eSST;
702 	if (ctl & TSI148_LCSR_ITAT_2eSSTB)
703 		*cycle |= VME_2eSSTB;
704 
705 	if (ctl & TSI148_LCSR_ITAT_SUPR)
706 		*cycle |= VME_SUPER;
707 	if (ctl & TSI148_LCSR_ITAT_NPRIV)
708 		*cycle |= VME_USER;
709 	if (ctl & TSI148_LCSR_ITAT_PGM)
710 		*cycle |= VME_PROG;
711 	if (ctl & TSI148_LCSR_ITAT_DATA)
712 		*cycle |= VME_DATA;
713 
714 	return 0;
715 }
716 
717 /*
718  * Allocate and map PCI Resource
719  */
720 static int tsi148_alloc_resource(struct vme_master_resource *image,
721 	unsigned long long size)
722 {
723 	unsigned long long existing_size;
724 	int retval = 0;
725 	struct pci_dev *pdev;
726 	struct vme_bridge *tsi148_bridge;
727 
728 	tsi148_bridge = image->parent;
729 
730 	pdev = to_pci_dev(tsi148_bridge->parent);
731 
732 	existing_size = (unsigned long long)(image->bus_resource.end -
733 		image->bus_resource.start);
734 
735 	/* If the existing size is OK, return */
736 	if ((size != 0) && (existing_size == (size - 1)))
737 		return 0;
738 
739 	if (existing_size != 0) {
740 		iounmap(image->kern_base);
741 		image->kern_base = NULL;
742 		kfree(image->bus_resource.name);
743 		release_resource(&image->bus_resource);
744 		memset(&image->bus_resource, 0, sizeof(struct resource));
745 	}
746 
747 	/* Exit here if size is zero */
748 	if (size == 0)
749 		return 0;
750 
751 	if (image->bus_resource.name == NULL) {
752 		image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
753 		if (image->bus_resource.name == NULL) {
754 			dev_err(tsi148_bridge->parent, "Unable to allocate "
755 				"memory for resource name\n");
756 			retval = -ENOMEM;
757 			goto err_name;
758 		}
759 	}
760 
761 	sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
762 		image->number);
763 
764 	image->bus_resource.start = 0;
765 	image->bus_resource.end = (unsigned long)size;
766 	image->bus_resource.flags = IORESOURCE_MEM;
767 
768 	retval = pci_bus_alloc_resource(pdev->bus,
769 		&image->bus_resource, size, 0x10000, PCIBIOS_MIN_MEM,
770 		0, NULL, NULL);
771 	if (retval) {
772 		dev_err(tsi148_bridge->parent, "Failed to allocate mem "
773 			"resource for window %d size 0x%lx start 0x%lx\n",
774 			image->number, (unsigned long)size,
775 			(unsigned long)image->bus_resource.start);
776 		goto err_resource;
777 	}
778 
779 	image->kern_base = ioremap_nocache(
780 		image->bus_resource.start, size);
781 	if (image->kern_base == NULL) {
782 		dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
783 		retval = -ENOMEM;
784 		goto err_remap;
785 	}
786 
787 	return 0;
788 
789 err_remap:
790 	release_resource(&image->bus_resource);
791 err_resource:
792 	kfree(image->bus_resource.name);
793 	memset(&image->bus_resource, 0, sizeof(struct resource));
794 err_name:
795 	return retval;
796 }
797 
798 /*
799  * Free and unmap PCI Resource
800  */
801 static void tsi148_free_resource(struct vme_master_resource *image)
802 {
803 	iounmap(image->kern_base);
804 	image->kern_base = NULL;
805 	release_resource(&image->bus_resource);
806 	kfree(image->bus_resource.name);
807 	memset(&image->bus_resource, 0, sizeof(struct resource));
808 }
809 
810 /*
811  * Set the attributes of an outbound window.
812  */
813 static int tsi148_master_set(struct vme_master_resource *image, int enabled,
814 	unsigned long long vme_base, unsigned long long size, u32 aspace,
815 	u32 cycle, u32 dwidth)
816 {
817 	int retval = 0;
818 	unsigned int i;
819 	unsigned int temp_ctl = 0;
820 	unsigned int pci_base_low, pci_base_high;
821 	unsigned int pci_bound_low, pci_bound_high;
822 	unsigned int vme_offset_low, vme_offset_high;
823 	unsigned long long pci_bound, vme_offset, pci_base;
824 	struct vme_bridge *tsi148_bridge;
825 	struct tsi148_driver *bridge;
826 	struct pci_bus_region region;
827 	struct pci_dev *pdev;
828 
829 	tsi148_bridge = image->parent;
830 
831 	bridge = tsi148_bridge->driver_priv;
832 
833 	pdev = to_pci_dev(tsi148_bridge->parent);
834 
835 	/* Verify input data */
836 	if (vme_base & 0xFFFF) {
837 		dev_err(tsi148_bridge->parent, "Invalid VME Window "
838 			"alignment\n");
839 		retval = -EINVAL;
840 		goto err_window;
841 	}
842 
843 	if ((size == 0) && (enabled != 0)) {
844 		dev_err(tsi148_bridge->parent, "Size must be non-zero for "
845 			"enabled windows\n");
846 		retval = -EINVAL;
847 		goto err_window;
848 	}
849 
850 	spin_lock(&image->lock);
851 
852 	/* Let's allocate the resource here rather than further up the stack as
853 	 * it avoids pushing loads of bus dependent stuff up the stack. If size
854 	 * is zero, any existing resource will be freed.
855 	 */
856 	retval = tsi148_alloc_resource(image, size);
857 	if (retval) {
858 		spin_unlock(&image->lock);
859 		dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
860 			"resource\n");
861 		goto err_res;
862 	}
863 
864 	if (size == 0) {
865 		pci_base = 0;
866 		pci_bound = 0;
867 		vme_offset = 0;
868 	} else {
869 		pcibios_resource_to_bus(pdev->bus, &region,
870 					&image->bus_resource);
871 		pci_base = region.start;
872 
873 		/*
874 		 * Bound address is a valid address for the window, adjust
875 		 * according to window granularity.
876 		 */
877 		pci_bound = pci_base + (size - 0x10000);
878 		vme_offset = vme_base - pci_base;
879 	}
880 
881 	/* Convert 64-bit variables to 2x 32-bit variables */
882 	reg_split(pci_base, &pci_base_high, &pci_base_low);
883 	reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
884 	reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
885 
886 	if (pci_base_low & 0xFFFF) {
887 		spin_unlock(&image->lock);
888 		dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
889 		retval = -EINVAL;
890 		goto err_gran;
891 	}
892 	if (pci_bound_low & 0xFFFF) {
893 		spin_unlock(&image->lock);
894 		dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
895 		retval = -EINVAL;
896 		goto err_gran;
897 	}
898 	if (vme_offset_low & 0xFFFF) {
899 		spin_unlock(&image->lock);
900 		dev_err(tsi148_bridge->parent, "Invalid VME Offset "
901 			"alignment\n");
902 		retval = -EINVAL;
903 		goto err_gran;
904 	}
905 
906 	i = image->number;
907 
908 	/* Disable while we are mucking around */
909 	temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
910 		TSI148_LCSR_OFFSET_OTAT);
911 	temp_ctl &= ~TSI148_LCSR_OTAT_EN;
912 	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
913 		TSI148_LCSR_OFFSET_OTAT);
914 
915 	/* Setup 2eSST speeds */
916 	temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
917 	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
918 	case VME_2eSST160:
919 		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
920 		break;
921 	case VME_2eSST267:
922 		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
923 		break;
924 	case VME_2eSST320:
925 		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
926 		break;
927 	}
928 
929 	/* Setup cycle types */
930 	if (cycle & VME_BLT) {
931 		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
932 		temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
933 	}
934 	if (cycle & VME_MBLT) {
935 		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
936 		temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
937 	}
938 	if (cycle & VME_2eVME) {
939 		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
940 		temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
941 	}
942 	if (cycle & VME_2eSST) {
943 		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
944 		temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
945 	}
946 	if (cycle & VME_2eSSTB) {
947 		dev_warn(tsi148_bridge->parent, "Currently not setting "
948 			"Broadcast Select Registers\n");
949 		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
950 		temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
951 	}
952 
953 	/* Setup data width */
954 	temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
955 	switch (dwidth) {
956 	case VME_D16:
957 		temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
958 		break;
959 	case VME_D32:
960 		temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
961 		break;
962 	default:
963 		spin_unlock(&image->lock);
964 		dev_err(tsi148_bridge->parent, "Invalid data width\n");
965 		retval = -EINVAL;
966 		goto err_dwidth;
967 	}
968 
969 	/* Setup address space */
970 	temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
971 	switch (aspace) {
972 	case VME_A16:
973 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
974 		break;
975 	case VME_A24:
976 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
977 		break;
978 	case VME_A32:
979 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
980 		break;
981 	case VME_A64:
982 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
983 		break;
984 	case VME_CRCSR:
985 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
986 		break;
987 	case VME_USER1:
988 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
989 		break;
990 	case VME_USER2:
991 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
992 		break;
993 	case VME_USER3:
994 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
995 		break;
996 	case VME_USER4:
997 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
998 		break;
999 	default:
1000 		spin_unlock(&image->lock);
1001 		dev_err(tsi148_bridge->parent, "Invalid address space\n");
1002 		retval = -EINVAL;
1003 		goto err_aspace;
1004 		break;
1005 	}
1006 
1007 	temp_ctl &= ~(3<<4);
1008 	if (cycle & VME_SUPER)
1009 		temp_ctl |= TSI148_LCSR_OTAT_SUP;
1010 	if (cycle & VME_PROG)
1011 		temp_ctl |= TSI148_LCSR_OTAT_PGM;
1012 
1013 	/* Setup mapping */
1014 	iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
1015 		TSI148_LCSR_OFFSET_OTSAU);
1016 	iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
1017 		TSI148_LCSR_OFFSET_OTSAL);
1018 	iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
1019 		TSI148_LCSR_OFFSET_OTEAU);
1020 	iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
1021 		TSI148_LCSR_OFFSET_OTEAL);
1022 	iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
1023 		TSI148_LCSR_OFFSET_OTOFU);
1024 	iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
1025 		TSI148_LCSR_OFFSET_OTOFL);
1026 
1027 	/* Write ctl reg without enable */
1028 	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1029 		TSI148_LCSR_OFFSET_OTAT);
1030 
1031 	if (enabled)
1032 		temp_ctl |= TSI148_LCSR_OTAT_EN;
1033 
1034 	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1035 		TSI148_LCSR_OFFSET_OTAT);
1036 
1037 	spin_unlock(&image->lock);
1038 	return 0;
1039 
1040 err_aspace:
1041 err_dwidth:
1042 err_gran:
1043 	tsi148_free_resource(image);
1044 err_res:
1045 err_window:
1046 	return retval;
1047 
1048 }
1049 
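/*
 * Illustrative outbound (master) window setup through the VME core (hedged
 * sketch, numbers hypothetical).  Note the 64KiB (0x10000) alignment this
 * function requires for the VME base and the PCI resource it allocates:
 *
 *	resource = vme_master_request(vdev, VME_A32, VME_SCT, VME_D32);
 *	err = vme_master_set(resource, 1, 0x20000000, 0x10000,
 *			     VME_A32, VME_SCT, VME_D32);
 */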
1050 /*
1051  * Get the attributes of an outbound window.
1052  *
1053  * XXX Not parsing prefetch information.
1054  */
1055 static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
1056 	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1057 	u32 *cycle, u32 *dwidth)
1058 {
1059 	unsigned int i, ctl;
1060 	unsigned int pci_base_low, pci_base_high;
1061 	unsigned int pci_bound_low, pci_bound_high;
1062 	unsigned int vme_offset_low, vme_offset_high;
1063 
1064 	unsigned long long pci_base, pci_bound, vme_offset;
1065 	struct tsi148_driver *bridge;
1066 
1067 	bridge = image->parent->driver_priv;
1068 
1069 	i = image->number;
1070 
1071 	ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1072 		TSI148_LCSR_OFFSET_OTAT);
1073 
1074 	pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1075 		TSI148_LCSR_OFFSET_OTSAU);
1076 	pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1077 		TSI148_LCSR_OFFSET_OTSAL);
1078 	pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1079 		TSI148_LCSR_OFFSET_OTEAU);
1080 	pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1081 		TSI148_LCSR_OFFSET_OTEAL);
1082 	vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1083 		TSI148_LCSR_OFFSET_OTOFU);
1084 	vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1085 		TSI148_LCSR_OFFSET_OTOFL);
1086 
1087 	/* Convert 64-bit variables to 2x 32-bit variables */
1088 	reg_join(pci_base_high, pci_base_low, &pci_base);
1089 	reg_join(pci_bound_high, pci_bound_low, &pci_bound);
1090 	reg_join(vme_offset_high, vme_offset_low, &vme_offset);
1091 
1092 	*vme_base = pci_base + vme_offset;
1093 	*size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
1094 
1095 	*enabled = 0;
1096 	*aspace = 0;
1097 	*cycle = 0;
1098 	*dwidth = 0;
1099 
1100 	if (ctl & TSI148_LCSR_OTAT_EN)
1101 		*enabled = 1;
1102 
1103 	/* Setup address space */
1104 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
1105 		*aspace |= VME_A16;
1106 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
1107 		*aspace |= VME_A24;
1108 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
1109 		*aspace |= VME_A32;
1110 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
1111 		*aspace |= VME_A64;
1112 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
1113 		*aspace |= VME_CRCSR;
1114 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
1115 		*aspace |= VME_USER1;
1116 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
1117 		*aspace |= VME_USER2;
1118 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
1119 		*aspace |= VME_USER3;
1120 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
1121 		*aspace |= VME_USER4;
1122 
1123 	/* Setup 2eSST speeds */
1124 	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
1125 		*cycle |= VME_2eSST160;
1126 	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
1127 		*cycle |= VME_2eSST267;
1128 	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
1129 		*cycle |= VME_2eSST320;
1130 
1131 	/* Setup cycle types */
1132 	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
1133 		*cycle |= VME_SCT;
1134 	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
1135 		*cycle |= VME_BLT;
1136 	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
1137 		*cycle |= VME_MBLT;
1138 	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
1139 		*cycle |= VME_2eVME;
1140 	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
1141 		*cycle |= VME_2eSST;
1142 	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
1143 		*cycle |= VME_2eSSTB;
1144 
1145 	if (ctl & TSI148_LCSR_OTAT_SUP)
1146 		*cycle |= VME_SUPER;
1147 	else
1148 		*cycle |= VME_USER;
1149 
1150 	if (ctl & TSI148_LCSR_OTAT_PGM)
1151 		*cycle |= VME_PROG;
1152 	else
1153 		*cycle |= VME_DATA;
1154 
1155 	/* Setup data width */
1156 	if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
1157 		*dwidth = VME_D16;
1158 	if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
1159 		*dwidth = VME_D32;
1160 
1161 	return 0;
1162 }
1163 
1164 
1165 static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
1166 	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1167 	u32 *cycle, u32 *dwidth)
1168 {
1169 	int retval;
1170 
1171 	spin_lock(&image->lock);
1172 
1173 	retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
1174 		cycle, dwidth);
1175 
1176 	spin_unlock(&image->lock);
1177 
1178 	return retval;
1179 }
1180 
1181 static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1182 	size_t count, loff_t offset)
1183 {
1184 	int retval, enabled;
1185 	unsigned long long vme_base, size;
1186 	u32 aspace, cycle, dwidth;
1187 	struct vme_error_handler *handler = NULL;
1188 	struct vme_bridge *tsi148_bridge;
1189 	void __iomem *addr = image->kern_base + offset;
1190 	unsigned int done = 0;
1191 	unsigned int count32;
1192 
1193 	tsi148_bridge = image->parent;
1194 
1195 	spin_lock(&image->lock);
1196 
1197 	if (err_chk) {
1198 		__tsi148_master_get(image, &enabled, &vme_base, &size, &aspace,
1199 				    &cycle, &dwidth);
1200 		handler = vme_register_error_handler(tsi148_bridge, aspace,
1201 						     vme_base + offset, count);
1202 		if (!handler) {
1203 			spin_unlock(&image->lock);
1204 			return -ENOMEM;
1205 		}
1206 	}
1207 
1208 	/* The following code handles VME address alignment. We cannot use
1209 	 * memcpy_xxx here because it may cut data transfers into 8-bit
1210 	 * cycles when D16 or D32 cycles are required on the VME bus.
1211 	 * On the other hand, the bridge itself assures that the maximum data
1212 	 * cycle configured for the transfer is used and splits it
1213 	 * automatically for non-aligned addresses, so we don't want the
1214 	 * overhead of needlessly forcing small transfers for the entire cycle.
1215 	 */
1216 	if ((uintptr_t)addr & 0x1) {
1217 		*(u8 *)buf = ioread8(addr);
1218 		done += 1;
1219 		if (done == count)
1220 			goto out;
1221 	}
1222 	if ((uintptr_t)(addr + done) & 0x2) {
1223 		if ((count - done) < 2) {
1224 			*(u8 *)(buf + done) = ioread8(addr + done);
1225 			done += 1;
1226 			goto out;
1227 		} else {
1228 			*(u16 *)(buf + done) = ioread16(addr + done);
1229 			done += 2;
1230 		}
1231 	}
1232 
1233 	count32 = (count - done) & ~0x3;
1234 	while (done < count32) {
1235 		*(u32 *)(buf + done) = ioread32(addr + done);
1236 		done += 4;
1237 	}
1238 
1239 	if ((count - done) & 0x2) {
1240 		*(u16 *)(buf + done) = ioread16(addr + done);
1241 		done += 2;
1242 	}
1243 	if ((count - done) & 0x1) {
1244 		*(u8 *)(buf + done) = ioread8(addr + done);
1245 		done += 1;
1246 	}
1247 
1248 out:
1249 	retval = count;
1250 
1251 	if (err_chk) {
1252 		if (handler->num_errors) {
1253 			dev_err(image->parent->parent,
1254 				"First VME read error detected an at address 0x%llx\n",
1255 				handler->first_error);
1256 			retval = handler->first_error - (vme_base + offset);
1257 		}
1258 		vme_unregister_error_handler(handler);
1259 	}
1260 
1261 	spin_unlock(&image->lock);
1262 
1263 	return retval;
1264 }
1265 
1266 
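/*
 * Worked example of the alignment strategy used by tsi148_master_read()
 * above and tsi148_master_write() below: a 10-byte transfer whose start
 * address sits one byte past a 32-bit boundary is issued as accesses of
 * 1 + 2 + 4 + 2 + 1 bytes, so the widest configured cycle carries the bulk
 * of the data while the unaligned edges are still handled correctly.
 */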
1267 static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1268 	size_t count, loff_t offset)
1269 {
1270 	int retval = 0, enabled;
1271 	unsigned long long vme_base, size;
1272 	u32 aspace, cycle, dwidth;
1273 	void __iomem *addr = image->kern_base + offset;
1274 	unsigned int done = 0;
1275 	unsigned int count32;
1276 
1277 	struct vme_error_handler *handler = NULL;
1278 	struct vme_bridge *tsi148_bridge;
1279 	struct tsi148_driver *bridge;
1280 
1281 	tsi148_bridge = image->parent;
1282 
1283 	bridge = tsi148_bridge->driver_priv;
1284 
1285 	spin_lock(&image->lock);
1286 
1287 	if (err_chk) {
1288 		__tsi148_master_get(image, &enabled, &vme_base, &size, &aspace,
1289 				    &cycle, &dwidth);
1290 		handler = vme_register_error_handler(tsi148_bridge, aspace,
1291 						     vme_base + offset, count);
1292 		if (!handler) {
1293 			spin_unlock(&image->lock);
1294 			return -ENOMEM;
1295 		}
1296 	}
1297 
1298 	/* Here we apply the same strategy we use in the master_read
1299 	 * function in order to ensure the correct cycles.
1300 	 */
1301 	if ((uintptr_t)addr & 0x1) {
1302 		iowrite8(*(u8 *)buf, addr);
1303 		done += 1;
1304 		if (done == count)
1305 			goto out;
1306 	}
1307 	if ((uintptr_t)(addr + done) & 0x2) {
1308 		if ((count - done) < 2) {
1309 			iowrite8(*(u8 *)(buf + done), addr + done);
1310 			done += 1;
1311 			goto out;
1312 		} else {
1313 			iowrite16(*(u16 *)(buf + done), addr + done);
1314 			done += 2;
1315 		}
1316 	}
1317 
1318 	count32 = (count - done) & ~0x3;
1319 	while (done < count32) {
1320 		iowrite32(*(u32 *)(buf + done), addr + done);
1321 		done += 4;
1322 	}
1323 
1324 	if ((count - done) & 0x2) {
1325 		iowrite16(*(u16 *)(buf + done), addr + done);
1326 		done += 2;
1327 	}
1328 	if ((count - done) & 0x1) {
1329 		iowrite8(*(u8 *)(buf + done), addr + done);
1330 		done += 1;
1331 	}
1332 
1333 out:
1334 	retval = count;
1335 
1336 	/*
1337 	 * Writes are posted. We need to do a read on the VME bus to flush out
1338 	 * all of the writes before we check for errors. We can't guarantee
1339 	 * that reading the data we have just written is safe. It is believed
1340 	 * that there isn't any read/write re-ordering, so we can read any
1341 	 * location in VME space, so let's read the Device ID from the tsi148's
1342 	 * own registers as mapped into CR/CSR space.
1343 	 *
1344 	 * We check for saved errors in the written address range/space.
1345 	 */
1346 
1347 	if (err_chk) {
1348 		ioread16(bridge->flush_image->kern_base + 0x7F000);
1349 
1350 		if (handler->num_errors) {
1351 			dev_warn(tsi148_bridge->parent,
1352 				 "First VME write error detected an at address 0x%llx\n",
1353 				 handler->first_error);
1354 			retval = handler->first_error - (vme_base + offset);
1355 		}
1356 		vme_unregister_error_handler(handler);
1357 	}
1358 
1359 	spin_unlock(&image->lock);
1360 
1361 	return retval;
1362 }
1363 
1364 /*
1365  * Perform an RMW cycle on the VME bus.
1366  *
1367  * Requires a previously configured master window, returns final value.
1368  */
1369 static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
1370 	unsigned int mask, unsigned int compare, unsigned int swap,
1371 	loff_t offset)
1372 {
1373 	unsigned long long pci_addr;
1374 	unsigned int pci_addr_high, pci_addr_low;
1375 	u32 tmp, result;
1376 	int i;
1377 	struct tsi148_driver *bridge;
1378 
1379 	bridge = image->parent->driver_priv;
1380 
1381 	/* Find the PCI address that maps to the desired VME address */
1382 	i = image->number;
1383 
1384 	/* Locking as we can only do one of these at a time */
1385 	mutex_lock(&bridge->vme_rmw);
1386 
1387 	/* Lock image */
1388 	spin_lock(&image->lock);
1389 
1390 	pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1391 		TSI148_LCSR_OFFSET_OTSAU);
1392 	pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1393 		TSI148_LCSR_OFFSET_OTSAL);
1394 
1395 	reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1396 	reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1397 
1398 	/* Configure registers */
1399 	iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
1400 	iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
1401 	iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
1402 	iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
1403 	iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
1404 
1405 	/* Enable RMW */
1406 	tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1407 	tmp |= TSI148_LCSR_VMCTRL_RMWEN;
1408 	iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1409 
1410 	/* Kick process off with a read to the required address. */
1411 	result = ioread32be(image->kern_base + offset);
1412 
1413 	/* Disable RMW */
1414 	tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1415 	tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
1416 	iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1417 
1418 	spin_unlock(&image->lock);
1419 
1420 	mutex_unlock(&bridge->vme_rmw);
1421 
1422 	return result;
1423 }
1424 
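/*
 * Hedged usage sketch for the RMW cycle above, via the VME core (values
 * hypothetical): roughly a compare-and-swap on the bits enabled in the
 * mask, here setting bit 0 of the 32-bit word at offset 0 of a master
 * window only if it is currently clear.  The return value is the data read
 * from the bus during the cycle:
 *
 *	result = vme_master_rmw(resource, 0x1, 0x0, 0x1, 0);
 */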
1425 static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
1426 	u32 aspace, u32 cycle, u32 dwidth)
1427 {
1428 	u32 val;
1429 
1430 	val = be32_to_cpu(*attr);
1431 
1432 	/* Setup 2eSST speeds */
1433 	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1434 	case VME_2eSST160:
1435 		val |= TSI148_LCSR_DSAT_2eSSTM_160;
1436 		break;
1437 	case VME_2eSST267:
1438 		val |= TSI148_LCSR_DSAT_2eSSTM_267;
1439 		break;
1440 	case VME_2eSST320:
1441 		val |= TSI148_LCSR_DSAT_2eSSTM_320;
1442 		break;
1443 	}
1444 
1445 	/* Setup cycle types */
1446 	if (cycle & VME_SCT)
1447 		val |= TSI148_LCSR_DSAT_TM_SCT;
1448 
1449 	if (cycle & VME_BLT)
1450 		val |= TSI148_LCSR_DSAT_TM_BLT;
1451 
1452 	if (cycle & VME_MBLT)
1453 		val |= TSI148_LCSR_DSAT_TM_MBLT;
1454 
1455 	if (cycle & VME_2eVME)
1456 		val |= TSI148_LCSR_DSAT_TM_2eVME;
1457 
1458 	if (cycle & VME_2eSST)
1459 		val |= TSI148_LCSR_DSAT_TM_2eSST;
1460 
1461 	if (cycle & VME_2eSSTB) {
1462 		dev_err(dev, "Currently not setting Broadcast Select "
1463 			"Registers\n");
1464 		val |= TSI148_LCSR_DSAT_TM_2eSSTB;
1465 	}
1466 
1467 	/* Setup data width */
1468 	switch (dwidth) {
1469 	case VME_D16:
1470 		val |= TSI148_LCSR_DSAT_DBW_16;
1471 		break;
1472 	case VME_D32:
1473 		val |= TSI148_LCSR_DSAT_DBW_32;
1474 		break;
1475 	default:
1476 		dev_err(dev, "Invalid data width\n");
1477 		return -EINVAL;
1478 	}
1479 
1480 	/* Setup address space */
1481 	switch (aspace) {
1482 	case VME_A16:
1483 		val |= TSI148_LCSR_DSAT_AMODE_A16;
1484 		break;
1485 	case VME_A24:
1486 		val |= TSI148_LCSR_DSAT_AMODE_A24;
1487 		break;
1488 	case VME_A32:
1489 		val |= TSI148_LCSR_DSAT_AMODE_A32;
1490 		break;
1491 	case VME_A64:
1492 		val |= TSI148_LCSR_DSAT_AMODE_A64;
1493 		break;
1494 	case VME_CRCSR:
1495 		val |= TSI148_LCSR_DSAT_AMODE_CRCSR;
1496 		break;
1497 	case VME_USER1:
1498 		val |= TSI148_LCSR_DSAT_AMODE_USER1;
1499 		break;
1500 	case VME_USER2:
1501 		val |= TSI148_LCSR_DSAT_AMODE_USER2;
1502 		break;
1503 	case VME_USER3:
1504 		val |= TSI148_LCSR_DSAT_AMODE_USER3;
1505 		break;
1506 	case VME_USER4:
1507 		val |= TSI148_LCSR_DSAT_AMODE_USER4;
1508 		break;
1509 	default:
1510 		dev_err(dev, "Invalid address space\n");
1511 		return -EINVAL;
1512 		break;
1513 	}
1514 
1515 	if (cycle & VME_SUPER)
1516 		val |= TSI148_LCSR_DSAT_SUP;
1517 	if (cycle & VME_PROG)
1518 		val |= TSI148_LCSR_DSAT_PGM;
1519 
1520 	*attr = cpu_to_be32(val);
1521 
1522 	return 0;
1523 }
1524 
1525 static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
1526 	u32 aspace, u32 cycle, u32 dwidth)
1527 {
1528 	u32 val;
1529 
1530 	val = be32_to_cpu(*attr);
1531 
1532 	/* Setup 2eSST speeds */
1533 	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1534 	case VME_2eSST160:
1535 		val |= TSI148_LCSR_DDAT_2eSSTM_160;
1536 		break;
1537 	case VME_2eSST267:
1538 		val |= TSI148_LCSR_DDAT_2eSSTM_267;
1539 		break;
1540 	case VME_2eSST320:
1541 		val |= TSI148_LCSR_DDAT_2eSSTM_320;
1542 		break;
1543 	}
1544 
1545 	/* Setup cycle types */
1546 	if (cycle & VME_SCT)
1547 		val |= TSI148_LCSR_DDAT_TM_SCT;
1548 
1549 	if (cycle & VME_BLT)
1550 		val |= TSI148_LCSR_DDAT_TM_BLT;
1551 
1552 	if (cycle & VME_MBLT)
1553 		val |= TSI148_LCSR_DDAT_TM_MBLT;
1554 
1555 	if (cycle & VME_2eVME)
1556 		val |= TSI148_LCSR_DDAT_TM_2eVME;
1557 
1558 	if (cycle & VME_2eSST)
1559 		val |= TSI148_LCSR_DDAT_TM_2eSST;
1560 
1561 	if (cycle & VME_2eSSTB) {
1562 		dev_err(dev, "Currently not setting Broadcast Select "
1563 			"Registers\n");
1564 		val |= TSI148_LCSR_DDAT_TM_2eSSTB;
1565 	}
1566 
1567 	/* Setup data width */
1568 	switch (dwidth) {
1569 	case VME_D16:
1570 		val |= TSI148_LCSR_DDAT_DBW_16;
1571 		break;
1572 	case VME_D32:
1573 		val |= TSI148_LCSR_DDAT_DBW_32;
1574 		break;
1575 	default:
1576 		dev_err(dev, "Invalid data width\n");
1577 		return -EINVAL;
1578 	}
1579 
1580 	/* Setup address space */
1581 	switch (aspace) {
1582 	case VME_A16:
1583 		val |= TSI148_LCSR_DDAT_AMODE_A16;
1584 		break;
1585 	case VME_A24:
1586 		val |= TSI148_LCSR_DDAT_AMODE_A24;
1587 		break;
1588 	case VME_A32:
1589 		val |= TSI148_LCSR_DDAT_AMODE_A32;
1590 		break;
1591 	case VME_A64:
1592 		val |= TSI148_LCSR_DDAT_AMODE_A64;
1593 		break;
1594 	case VME_CRCSR:
1595 		val |= TSI148_LCSR_DDAT_AMODE_CRCSR;
1596 		break;
1597 	case VME_USER1:
1598 		val |= TSI148_LCSR_DDAT_AMODE_USER1;
1599 		break;
1600 	case VME_USER2:
1601 		val |= TSI148_LCSR_DDAT_AMODE_USER2;
1602 		break;
1603 	case VME_USER3:
1604 		val |= TSI148_LCSR_DDAT_AMODE_USER3;
1605 		break;
1606 	case VME_USER4:
1607 		val |= TSI148_LCSR_DDAT_AMODE_USER4;
1608 		break;
1609 	default:
1610 		dev_err(dev, "Invalid address space\n");
1611 		return -EINVAL;
1612 		break;
1613 	}
1614 
1615 	if (cycle & VME_SUPER)
1616 		val |= TSI148_LCSR_DDAT_SUP;
1617 	if (cycle & VME_PROG)
1618 		val |= TSI148_LCSR_DDAT_PGM;
1619 
1620 	*attr = cpu_to_be32(val);
1621 
1622 	return 0;
1623 }
1624 
1625 /*
1626  * Add a link list descriptor to the list
1627  *
1628  * Note: DMA engine expects the DMA descriptor to be big endian.
1629  */
1630 static int tsi148_dma_list_add(struct vme_dma_list *list,
1631 	struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
1632 {
1633 	struct tsi148_dma_entry *entry, *prev;
1634 	u32 address_high, address_low, val;
1635 	struct vme_dma_pattern *pattern_attr;
1636 	struct vme_dma_pci *pci_attr;
1637 	struct vme_dma_vme *vme_attr;
1638 	int retval = 0;
1639 	struct vme_bridge *tsi148_bridge;
1640 
1641 	tsi148_bridge = list->parent->parent;
1642 
1643 	/* Descriptor must be aligned on 64-bit boundaries */
1644 	entry = kmalloc(sizeof(struct tsi148_dma_entry), GFP_KERNEL);
1645 	if (entry == NULL) {
1646 		dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
1647 			"dma resource structure\n");
1648 		retval = -ENOMEM;
1649 		goto err_mem;
1650 	}
1651 
1652 	/* Test descriptor alignment */
1653 	if ((unsigned long)&entry->descriptor & 0x7) {
1654 		dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
1655 			"byte boundary as required: %p\n",
1656 			&entry->descriptor);
1657 		retval = -EINVAL;
1658 		goto err_align;
1659 	}
1660 
1661 	/* Given we are going to fill out the structure, we probably don't
1662 	 * need to zero it, but better safe than sorry for now.
1663 	 */
1664 	memset(&entry->descriptor, 0, sizeof(struct tsi148_dma_descriptor));
1665 
1666 	/* Fill out source part */
1667 	switch (src->type) {
1668 	case VME_DMA_PATTERN:
1669 		pattern_attr = src->private;
1670 
1671 		entry->descriptor.dsal = cpu_to_be32(pattern_attr->pattern);
1672 
1673 		val = TSI148_LCSR_DSAT_TYP_PAT;
1674 
1675 		/* Default behaviour is 32 bit pattern */
1676 		if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
1677 			val |= TSI148_LCSR_DSAT_PSZ;
1678 
1679 		/* It seems that the default behaviour is to increment */
1680 		if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
1681 			val |= TSI148_LCSR_DSAT_NIN;
1682 		entry->descriptor.dsat = cpu_to_be32(val);
1683 		break;
1684 	case VME_DMA_PCI:
1685 		pci_attr = src->private;
1686 
1687 		reg_split((unsigned long long)pci_attr->address, &address_high,
1688 			&address_low);
1689 		entry->descriptor.dsau = cpu_to_be32(address_high);
1690 		entry->descriptor.dsal = cpu_to_be32(address_low);
1691 		entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI);
1692 		break;
1693 	case VME_DMA_VME:
1694 		vme_attr = src->private;
1695 
1696 		reg_split((unsigned long long)vme_attr->address, &address_high,
1697 			&address_low);
1698 		entry->descriptor.dsau = cpu_to_be32(address_high);
1699 		entry->descriptor.dsal = cpu_to_be32(address_low);
1700 		entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME);
1701 
1702 		retval = tsi148_dma_set_vme_src_attributes(
1703 			tsi148_bridge->parent, &entry->descriptor.dsat,
1704 			vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1705 		if (retval < 0)
1706 			goto err_source;
1707 		break;
1708 	default:
1709 		dev_err(tsi148_bridge->parent, "Invalid source type\n");
1710 		retval = -EINVAL;
1711 		goto err_source;
1712 		break;
1713 	}
1714 
1715 	/* Assume last link - this will be over-written by adding another */
1716 	entry->descriptor.dnlau = cpu_to_be32(0);
1717 	entry->descriptor.dnlal = cpu_to_be32(TSI148_LCSR_DNLAL_LLA);
1718 
1719 	/* Fill out destination part */
1720 	switch (dest->type) {
1721 	case VME_DMA_PCI:
1722 		pci_attr = dest->private;
1723 
1724 		reg_split((unsigned long long)pci_attr->address, &address_high,
1725 			&address_low);
1726 		entry->descriptor.ddau = cpu_to_be32(address_high);
1727 		entry->descriptor.ddal = cpu_to_be32(address_low);
1728 		entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI);
1729 		break;
1730 	case VME_DMA_VME:
1731 		vme_attr = dest->private;
1732 
1733 		reg_split((unsigned long long)vme_attr->address, &address_high,
1734 			&address_low);
1735 		entry->descriptor.ddau = cpu_to_be32(address_high);
1736 		entry->descriptor.ddal = cpu_to_be32(address_low);
1737 		entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME);
1738 
1739 		retval = tsi148_dma_set_vme_dest_attributes(
1740 			tsi148_bridge->parent, &entry->descriptor.ddat,
1741 			vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1742 		if (retval < 0)
1743 			goto err_dest;
1744 		break;
1745 	default:
1746 		dev_err(tsi148_bridge->parent, "Invalid destination type\n");
1747 		retval = -EINVAL;
1748 		goto err_dest;
1749 		break;
1750 	}
1751 
1752 	/* Fill out count */
1753 	entry->descriptor.dcnt = cpu_to_be32((u32)count);
1754 
1755 	/* Add to list */
1756 	list_add_tail(&entry->list, &list->entries);
1757 
1758 	entry->dma_handle = dma_map_single(tsi148_bridge->parent,
1759 		&entry->descriptor,
1760 		sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1761 	if (dma_mapping_error(tsi148_bridge->parent, entry->dma_handle)) {
1762 		dev_err(tsi148_bridge->parent, "DMA mapping error\n");
1763 		retval = -EINVAL;
1764 		goto err_dma;
1765 	}
1766 
1767 	/* Fill out the previous descriptor's "Next Address" */
1768 	if (entry->list.prev != &list->entries) {
1769 		reg_split((unsigned long long)entry->dma_handle, &address_high,
1770 			&address_low);
1771 		prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
1772 				  list);
1773 		prev->descriptor.dnlau = cpu_to_be32(address_high);
1774 		prev->descriptor.dnlal = cpu_to_be32(address_low);
1775 
1776 	}
1777 
1778 	return 0;
1779 
1780 err_dma:
1781 err_dest:
1782 err_source:
1783 err_align:
1784 	kfree(entry);
1785 err_mem:
1786 	return retval;
1787 }
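
/*
 * For reference, consumers normally drive the descriptor building above
 * through the VME core API rather than calling tsi148_dma_list_add()
 * directly. A minimal sketch, assuming the vme_* helpers exported by
 * drivers/vme/vme.c (exact signatures may differ between kernel versions;
 * dma_resource, buf_dma_handle and the addresses below are illustrative):
 *
 *	struct vme_dma_list *dlist = vme_new_dma_list(dma_resource);
 *	struct vme_dma_attr *src, *dest;
 *
 *	src = vme_dma_vme_attribute(0x20000000, VME_A32, VME_SCT, VME_D32);
 *	dest = vme_dma_pci_attribute(buf_dma_handle);
 *	vme_dma_list_add(dlist, src, dest, 0x1000);
 *	vme_dma_list_exec(dlist);
 *	vme_dma_list_free(dlist);
 *
 * Each vme_dma_list_add() call ends up here as one tsi148_dma_entry in the
 * hardware linked list; vme_dma_list_exec() hands the chain to
 * tsi148_dma_list_exec() below.
 */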
1788 
1789 /*
1790  * Check if the provided DMA channel is busy; returns non-zero when idle.
1791  */
1792 static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
1793 {
1794 	u32 tmp;
1795 	struct tsi148_driver *bridge;
1796 
1797 	bridge = tsi148_bridge->driver_priv;
1798 
1799 	tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1800 		TSI148_LCSR_OFFSET_DSTA);
1801 
1802 	if (tmp & TSI148_LCSR_DSTA_BSY)
1803 		return 0;
1804 	else
1805 		return 1;
1806 
1807 }
1808 
1809 /*
1810  * Execute a previously generated linked list
1811  *
1812  * XXX Need to provide control register configuration.
1813  */
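/*
 * Note: execution is synchronous. The first descriptor's bus address is
 * written to DNLAU/DNLAL, the DGO bit is set, and the caller sleeps on
 * dma_queue[channel] until the DMA interrupt handler wakes it; a pending
 * signal aborts the transfer via DCTL_ABT and returns -EINTR.
 */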
1814 static int tsi148_dma_list_exec(struct vme_dma_list *list)
1815 {
1816 	struct vme_dma_resource *ctrlr;
1817 	int channel, retval;
1818 	struct tsi148_dma_entry *entry;
1819 	u32 bus_addr_high, bus_addr_low;
1820 	u32 val, dctlreg = 0;
1821 	struct vme_bridge *tsi148_bridge;
1822 	struct tsi148_driver *bridge;
1823 
1824 	ctrlr = list->parent;
1825 
1826 	tsi148_bridge = ctrlr->parent;
1827 
1828 	bridge = tsi148_bridge->driver_priv;
1829 
1830 	mutex_lock(&ctrlr->mtx);
1831 
1832 	channel = ctrlr->number;
1833 
1834 	if (!list_empty(&ctrlr->running)) {
1835 		/*
1836 		 * XXX We have an active DMA transfer and currently haven't
1837 		 *     sorted out the mechanism for "pending" DMA transfers.
1838 		 *     Return busy.
1839 		 */
1840 		/* Need to add to pending here */
1841 		mutex_unlock(&ctrlr->mtx);
1842 		return -EBUSY;
1843 	} else {
1844 		list_add(&list->list, &ctrlr->running);
1845 	}
1846 
1847 	/* Get first bus address and write into registers */
1848 	entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
1849 		list);
1850 
1851 	mutex_unlock(&ctrlr->mtx);
1852 
1853 	reg_split(entry->dma_handle, &bus_addr_high, &bus_addr_low);
1854 
1855 	iowrite32be(bus_addr_high, bridge->base +
1856 		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
1857 	iowrite32be(bus_addr_low, bridge->base +
1858 		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
1859 
1860 	dctlreg = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1861 		TSI148_LCSR_OFFSET_DCTL);
1862 
1863 	/* Start the operation */
1864 	iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
1865 		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1866 
1867 	retval = wait_event_interruptible(bridge->dma_queue[channel],
1868 		tsi148_dma_busy(ctrlr->parent, channel));
1869 
1870 	if (retval) {
1871 		iowrite32be(dctlreg | TSI148_LCSR_DCTL_ABT, bridge->base +
1872 			TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1873 		/* Wait for the operation to abort */
1874 		wait_event(bridge->dma_queue[channel],
1875 			   tsi148_dma_busy(ctrlr->parent, channel));
1876 		retval = -EINTR;
1877 		goto exit;
1878 	}
1879 
1880 	/*
1881 	 * Read the status register; it remains valid until we kick off a
1882 	 * new transfer.
1883 	 */
1884 	val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1885 		TSI148_LCSR_OFFSET_DSTA);
1886 
1887 	if (val & TSI148_LCSR_DSTA_VBE) {
1888 		dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
1889 		retval = -EIO;
1890 	}
1891 
1892 exit:
1893 	/* Remove list from running list */
1894 	mutex_lock(&ctrlr->mtx);
1895 	list_del(&list->list);
1896 	mutex_unlock(&ctrlr->mtx);
1897 
1898 	return retval;
1899 }
1900 
1901 /*
1902  * Clean up a previously generated linked list
1903  *
1904  * This is kept separate; don't assume that the chain can't be reused.
1905  */
1906 static int tsi148_dma_list_empty(struct vme_dma_list *list)
1907 {
1908 	struct list_head *pos, *temp;
1909 	struct tsi148_dma_entry *entry;
1910 
1911 	struct vme_bridge *tsi148_bridge = list->parent->parent;
1912 
1913 	/* detach and free each entry */
1914 	list_for_each_safe(pos, temp, &list->entries) {
1915 		list_del(pos);
1916 		entry = list_entry(pos, struct tsi148_dma_entry, list);
1917 
1918 		dma_unmap_single(tsi148_bridge->parent, entry->dma_handle,
1919 			sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1920 		kfree(entry);
1921 	}
1922 
1923 	return 0;
1924 }
1925 
1926 /*
1927  * All 4 location monitors reside at the same base - this is therefore a
1928  * system-wide configuration.
1929  *
1930  * This does not enable the location monitor - that should be done when the
1931  * first callback is attached and disabled when the last callback is removed.
1932  */
1933 static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
1934 	u32 aspace, u32 cycle)
1935 {
1936 	u32 lm_base_high, lm_base_low, lm_ctl = 0;
1937 	int i;
1938 	struct vme_bridge *tsi148_bridge;
1939 	struct tsi148_driver *bridge;
1940 
1941 	tsi148_bridge = lm->parent;
1942 
1943 	bridge = tsi148_bridge->driver_priv;
1944 
1945 	mutex_lock(&lm->mtx);
1946 
1947 	/* If we already have a callback attached, we can't move it! */
1948 	for (i = 0; i < lm->monitors; i++) {
1949 		if (bridge->lm_callback[i] != NULL) {
1950 			mutex_unlock(&lm->mtx);
1951 			dev_err(tsi148_bridge->parent, "Location monitor "
1952 				"callback attached, can't reset\n");
1953 			return -EBUSY;
1954 		}
1955 	}
1956 
1957 	switch (aspace) {
1958 	case VME_A16:
1959 		lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
1960 		break;
1961 	case VME_A24:
1962 		lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
1963 		break;
1964 	case VME_A32:
1965 		lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
1966 		break;
1967 	case VME_A64:
1968 		lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
1969 		break;
1970 	default:
1971 		mutex_unlock(&lm->mtx);
1972 		dev_err(tsi148_bridge->parent, "Invalid address space\n");
1973 		return -EINVAL;
1974 		break;
1975 	}
1976 
1977 	if (cycle & VME_SUPER)
1978 		lm_ctl |= TSI148_LCSR_LMAT_SUPR;
1979 	if (cycle & VME_USER)
1980 		lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
1981 	if (cycle & VME_PROG)
1982 		lm_ctl |= TSI148_LCSR_LMAT_PGM;
1983 	if (cycle & VME_DATA)
1984 		lm_ctl |= TSI148_LCSR_LMAT_DATA;
1985 
1986 	reg_split(lm_base, &lm_base_high, &lm_base_low);
1987 
1988 	iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
1989 	iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
1990 	iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
1991 
1992 	mutex_unlock(&lm->mtx);
1993 
1994 	return 0;
1995 }
1996 
1997 /* Get the configuration of the location monitor and return whether it is
1998  * enabled (1) or disabled (0).
1999  */
2000 static int tsi148_lm_get(struct vme_lm_resource *lm,
2001 	unsigned long long *lm_base, u32 *aspace, u32 *cycle)
2002 {
2003 	u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
2004 	struct tsi148_driver *bridge;
2005 
2006 	bridge = lm->parent->driver_priv;
2007 
2008 	mutex_lock(&lm->mtx);
2009 
2010 	lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
2011 	lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
2012 	lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2013 
2014 	reg_join(lm_base_high, lm_base_low, lm_base);
2015 
2016 	if (lm_ctl & TSI148_LCSR_LMAT_EN)
2017 		enabled = 1;
2018 
2019 	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
2020 		*aspace |= VME_A16;
2021 
2022 	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
2023 		*aspace |= VME_A24;
2024 
2025 	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
2026 		*aspace |= VME_A32;
2027 
2028 	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
2029 		*aspace |= VME_A64;
2030 
2031 
2032 	if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
2033 		*cycle |= VME_SUPER;
2034 	if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
2035 		*cycle |= VME_USER;
2036 	if (lm_ctl & TSI148_LCSR_LMAT_PGM)
2037 		*cycle |= VME_PROG;
2038 	if (lm_ctl & TSI148_LCSR_LMAT_DATA)
2039 		*cycle |= VME_DATA;
2040 
2041 	mutex_unlock(&lm->mtx);
2042 
2043 	return enabled;
2044 }
2045 
2046 /*
2047  * Attach a callback to a specific location monitor.
2048  *
2049  * The callback will be passed the data pointer supplied when it was attached.
2050  */
2051 static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
2052 	void (*callback)(void *), void *data)
2053 {
2054 	u32 lm_ctl, tmp;
2055 	struct vme_bridge *tsi148_bridge;
2056 	struct tsi148_driver *bridge;
2057 
2058 	tsi148_bridge = lm->parent;
2059 
2060 	bridge = tsi148_bridge->driver_priv;
2061 
2062 	mutex_lock(&lm->mtx);
2063 
2064 	/* Ensure that the location monitor is configured - need PGM or DATA */
2065 	lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2066 	if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
2067 		mutex_unlock(&lm->mtx);
2068 		dev_err(tsi148_bridge->parent, "Location monitor not properly "
2069 			"configured\n");
2070 		return -EINVAL;
2071 	}
2072 
2073 	/* Check that a callback isn't already attached */
2074 	if (bridge->lm_callback[monitor] != NULL) {
2075 		mutex_unlock(&lm->mtx);
2076 		dev_err(tsi148_bridge->parent, "Existing callback attached\n");
2077 		return -EBUSY;
2078 	}
2079 
2080 	/* Attach callback */
2081 	bridge->lm_callback[monitor] = callback;
2082 	bridge->lm_data[monitor] = data;
2083 
2084 	/* Enable Location Monitor interrupt */
2085 	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2086 	tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
2087 	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
2088 
2089 	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2090 	tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
2091 	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2092 
2093 	/* Ensure that the global Location Monitor Enable bit is set */
2094 	if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
2095 		lm_ctl |= TSI148_LCSR_LMAT_EN;
2096 		iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
2097 	}
2098 
2099 	mutex_unlock(&lm->mtx);
2100 
2101 	return 0;
2102 }
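
/*
 * For reference, a location monitor consumer typically goes through the VME
 * core rather than these bridge hooks. A rough sketch, assuming the vme_lm_*
 * helpers in drivers/vme/vme.c (exact signatures may differ between kernel
 * versions; vme_dev, my_data and the A32 base below are illustrative):
 *
 *	static void my_lm_handler(void *data)
 *	{
 *		(called from the LM interrupt path with the data pointer)
 *	}
 *
 *	struct vme_resource *lm = vme_lm_request(vme_dev);
 *
 *	vme_lm_set(lm, 0x60000000, VME_A32, VME_USER | VME_DATA);
 *	vme_lm_attach(lm, 0, my_lm_handler, my_data);
 *	...
 *	vme_lm_detach(lm, 0);
 *	vme_lm_free(lm);
 */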
2103 
2104 /*
2105  * Detach a callback function from a specific location monitor.
2106  */
2107 static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
2108 {
2109 	u32 lm_en, tmp;
2110 	struct tsi148_driver *bridge;
2111 
2112 	bridge = lm->parent->driver_priv;
2113 
2114 	mutex_lock(&lm->mtx);
2115 
2116 	/* Disable Location Monitor and ensure previous interrupts are clear */
2117 	lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2118 	lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
2119 	iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
2120 
2121 	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2122 	tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
2123 	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2124 
2125 	iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
2126 		 bridge->base + TSI148_LCSR_INTC);
2127 
2128 	/* Detach callback */
2129 	bridge->lm_callback[monitor] = NULL;
2130 	bridge->lm_data[monitor] = NULL;
2131 
2132 	/* If no location monitors remain enabled, disable the global enable */
2133 	if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
2134 			TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
2135 		tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2136 		tmp &= ~TSI148_LCSR_LMAT_EN;
2137 		iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
2138 	}
2139 
2140 	mutex_unlock(&lm->mtx);
2141 
2142 	return 0;
2143 }
2144 
2145 /*
2146  * Determine Geographical Addressing
2147  */
2148 static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
2149 {
2150 	u32 slot = 0;
2151 	struct tsi148_driver *bridge;
2152 
2153 	bridge = tsi148_bridge->driver_priv;
2154 
2155 	if (!geoid) {
2156 		slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
2157 		slot = slot & TSI148_LCSR_VSTAT_GA_M;
2158 	} else
2159 		slot = geoid;
2160 
2161 	return (int)slot;
2162 }
2163 
2164 static void *tsi148_alloc_consistent(struct device *parent, size_t size,
2165 	dma_addr_t *dma)
2166 {
2167 	struct pci_dev *pdev;
2168 
2169 	/* Find pci_dev container of dev */
2170 	pdev = to_pci_dev(parent);
2171 
2172 	return pci_alloc_consistent(pdev, size, dma);
2173 }
2174 
2175 static void tsi148_free_consistent(struct device *parent, size_t size,
2176 	void *vaddr, dma_addr_t dma)
2177 {
2178 	struct pci_dev *pdev;
2179 
2180 	/* Find pci_dev container of dev */
2181 	pdev = to_pci_dev(parent);
2182 
2183 	pci_free_consistent(pdev, size, vaddr, dma);
2184 }
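
/*
 * Both helpers above simply recover the pci_dev behind the bridge's struct
 * device and use the legacy PCI coherent-DMA wrappers, which are equivalent
 * to dma_alloc_coherent()/dma_free_coherent() on &pdev->dev.
 */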
2185 
2186 /*
2187  * Configure CR/CSR space
2188  *
2189  * Access to the CR/CSR can be configured at power-up. The location of the
2190  * CR/CSR registers in the CR/CSR address space is determined by the board's
2191  * Auto-ID or Geographic address. This function ensures that the window is
2192  * enabled at an offset consistent with the board's geographic address.
2193  *
2194  * Each board has a 512kB window, with the highest 4kB used for the board's
2195  * registers. This leaves a fixed-length 508kB window which must be mapped
2196  * onto PCI memory.
2197  */
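/*
 * A worked example of the offset handling below: CR/CSR space gives each
 * slot a 512kB (0x80000) chunk, and the slot number is held in bits 7:3 of
 * the CBAR register (hence the >>3 and <<3 below). A board in slot 3 would
 * therefore expose its CR/CSR image at VME address 3 * 0x80000 = 0x180000,
 * which is also the base used for the err_chk flush window further down.
 */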
2198 static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
2199 	struct pci_dev *pdev)
2200 {
2201 	u32 cbar, crat, vstat;
2202 	u32 crcsr_bus_high, crcsr_bus_low;
2203 	int retval;
2204 	struct tsi148_driver *bridge;
2205 
2206 	bridge = tsi148_bridge->driver_priv;
2207 
2208 	/* Allocate mem for CR/CSR image */
2209 	bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
2210 						     &bridge->crcsr_bus);
2211 	if (bridge->crcsr_kernel == NULL) {
2212 		dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
2213 			"CR/CSR image\n");
2214 		return -ENOMEM;
2215 	}
2216 
2217 	reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
2218 
2219 	iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
2220 	iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
2221 
2222 	/* Ensure that the CR/CSR is configured at the correct offset */
2223 	cbar = ioread32be(bridge->base + TSI148_CBAR);
2224 	cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
2225 
2226 	vstat = tsi148_slot_get(tsi148_bridge);
2227 
2228 	if (cbar != vstat) {
2229 		cbar = vstat;
2230 		dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
2231 		iowrite32be(cbar<<3, bridge->base + TSI148_CBAR);
2232 	}
2233 	dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);
2234 
2235 	crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2236 	if (crat & TSI148_LCSR_CRAT_EN)
2237 		dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
2238 	else {
2239 		dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
2240 		iowrite32be(crat | TSI148_LCSR_CRAT_EN,
2241 			bridge->base + TSI148_LCSR_CRAT);
2242 	}
2243 
2244 	/* If we want flushed, error-checked writes, set up a window
2245 	 * over the CR/CSR registers. We read from here to safely flush
2246 	 * through VME writes.
2247 	 */
2248 	if (err_chk) {
2249 		retval = tsi148_master_set(bridge->flush_image, 1,
2250 			(vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
2251 			VME_D16);
2252 		if (retval)
2253 			dev_err(tsi148_bridge->parent, "Configuring flush image"
2254 				" failed\n");
2255 	}
2256 
2257 	return 0;
2258 
2259 }
2260 
2261 static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
2262 	struct pci_dev *pdev)
2263 {
2264 	u32 crat;
2265 	struct tsi148_driver *bridge;
2266 
2267 	bridge = tsi148_bridge->driver_priv;
2268 
2269 	/* Turn off CR/CSR space */
2270 	crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2271 	iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
2272 		bridge->base + TSI148_LCSR_CRAT);
2273 
2274 	/* Free image */
2275 	iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
2276 	iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
2277 
2278 	pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
2279 		bridge->crcsr_bus);
2280 }
2281 
2282 static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2283 {
2284 	int retval, i, master_num;
2285 	u32 data;
2286 	struct list_head *pos = NULL, *n;
2287 	struct vme_bridge *tsi148_bridge;
2288 	struct tsi148_driver *tsi148_device;
2289 	struct vme_master_resource *master_image;
2290 	struct vme_slave_resource *slave_image;
2291 	struct vme_dma_resource *dma_ctrlr;
2292 	struct vme_lm_resource *lm;
2293 
2294 	/* If we want to support more than one of each bridge, we need to
2295 	 * dynamically generate this structure so we get one per device.
2296 	 */
2297 	tsi148_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
2298 	if (tsi148_bridge == NULL) {
2299 		dev_err(&pdev->dev, "Failed to allocate memory for device "
2300 			"structure\n");
2301 		retval = -ENOMEM;
2302 		goto err_struct;
2303 	}
2304 	vme_init_bridge(tsi148_bridge);
2305 
2306 	tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
2307 	if (tsi148_device == NULL) {
2308 		dev_err(&pdev->dev, "Failed to allocate memory for device "
2309 			"structure\n");
2310 		retval = -ENOMEM;
2311 		goto err_driver;
2312 	}
2313 
2314 	tsi148_bridge->driver_priv = tsi148_device;
2315 
2316 	/* Enable the device */
2317 	retval = pci_enable_device(pdev);
2318 	if (retval) {
2319 		dev_err(&pdev->dev, "Unable to enable device\n");
2320 		goto err_enable;
2321 	}
2322 
2323 	/* Map Registers */
2324 	retval = pci_request_regions(pdev, driver_name);
2325 	if (retval) {
2326 		dev_err(&pdev->dev, "Unable to reserve resources\n");
2327 		goto err_resource;
2328 	}
2329 
2330 	/* map registers in BAR 0 */
2331 	tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
2332 		4096);
2333 	if (!tsi148_device->base) {
2334 		dev_err(&pdev->dev, "Unable to remap CRG region\n");
2335 		retval = -EIO;
2336 		goto err_remap;
2337 	}
2338 
2339 	/* Check to see if the mapping worked out */
2340 	data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
2341 	if (data != PCI_VENDOR_ID_TUNDRA) {
2342 		dev_err(&pdev->dev, "CRG region check failed\n");
2343 		retval = -EIO;
2344 		goto err_test;
2345 	}
2346 
2347 	/* Initialize wait queues & mutual exclusion flags */
2348 	init_waitqueue_head(&tsi148_device->dma_queue[0]);
2349 	init_waitqueue_head(&tsi148_device->dma_queue[1]);
2350 	init_waitqueue_head(&tsi148_device->iack_queue);
2351 	mutex_init(&tsi148_device->vme_int);
2352 	mutex_init(&tsi148_device->vme_rmw);
2353 
2354 	tsi148_bridge->parent = &pdev->dev;
2355 	strcpy(tsi148_bridge->name, driver_name);
2356 
2357 	/* Setup IRQ */
2358 	retval = tsi148_irq_init(tsi148_bridge);
2359 	if (retval != 0) {
2360 		dev_err(&pdev->dev, "Chip Initialization failed.\n");
2361 		goto err_irq;
2362 	}
2363 
2364 	/* If we are going to flush writes, we need to read from the VME bus.
2365 	 * We need to do this safely, thus we read the device's own CR/CSR
2366 	 * register. To do this we must set up a window in CR/CSR space and
2367 	 * hence have one less master window resource available.
2368 	 */
2369 	master_num = TSI148_MAX_MASTER;
2370 	if (err_chk) {
2371 		master_num--;
2372 
2373 		tsi148_device->flush_image =
2374 			kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
2375 		if (tsi148_device->flush_image == NULL) {
2376 			dev_err(&pdev->dev, "Failed to allocate memory for "
2377 			"flush resource structure\n");
2378 			retval = -ENOMEM;
2379 			goto err_master;
2380 		}
2381 		tsi148_device->flush_image->parent = tsi148_bridge;
2382 		spin_lock_init(&tsi148_device->flush_image->lock);
2383 		tsi148_device->flush_image->locked = 1;
2384 		tsi148_device->flush_image->number = master_num;
2385 		memset(&tsi148_device->flush_image->bus_resource, 0,
2386 			sizeof(struct resource));
2387 		tsi148_device->flush_image->kern_base  = NULL;
2388 	}
2389 
2390 	/* Add master windows to list */
2391 	for (i = 0; i < master_num; i++) {
2392 		master_image = kmalloc(sizeof(struct vme_master_resource),
2393 			GFP_KERNEL);
2394 		if (master_image == NULL) {
2395 			dev_err(&pdev->dev, "Failed to allocate memory for "
2396 			"master resource structure\n");
2397 			retval = -ENOMEM;
2398 			goto err_master;
2399 		}
2400 		master_image->parent = tsi148_bridge;
2401 		spin_lock_init(&master_image->lock);
2402 		master_image->locked = 0;
2403 		master_image->number = i;
2404 		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2405 			VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
2406 			VME_USER3 | VME_USER4;
2407 		master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2408 			VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2409 			VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2410 			VME_PROG | VME_DATA;
2411 		master_image->width_attr = VME_D16 | VME_D32;
2412 		memset(&master_image->bus_resource, 0,
2413 			sizeof(struct resource));
2414 		master_image->kern_base  = NULL;
2415 		list_add_tail(&master_image->list,
2416 			&tsi148_bridge->master_resources);
2417 	}
2418 
2419 	/* Add slave windows to list */
2420 	for (i = 0; i < TSI148_MAX_SLAVE; i++) {
2421 		slave_image = kmalloc(sizeof(struct vme_slave_resource),
2422 			GFP_KERNEL);
2423 		if (slave_image == NULL) {
2424 			dev_err(&pdev->dev, "Failed to allocate memory for "
2425 			"slave resource structure\n");
2426 			retval = -ENOMEM;
2427 			goto err_slave;
2428 		}
2429 		slave_image->parent = tsi148_bridge;
2430 		mutex_init(&slave_image->mtx);
2431 		slave_image->locked = 0;
2432 		slave_image->number = i;
2433 		slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2434 			VME_A64;
2435 		slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2436 			VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2437 			VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2438 			VME_PROG | VME_DATA;
2439 		list_add_tail(&slave_image->list,
2440 			&tsi148_bridge->slave_resources);
2441 	}
2442 
2443 	/* Add dma engines to list */
2444 	for (i = 0; i < TSI148_MAX_DMA; i++) {
2445 		dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
2446 			GFP_KERNEL);
2447 		if (dma_ctrlr == NULL) {
2448 			dev_err(&pdev->dev, "Failed to allocate memory for "
2449 			"dma resource structure\n");
2450 			retval = -ENOMEM;
2451 			goto err_dma;
2452 		}
2453 		dma_ctrlr->parent = tsi148_bridge;
2454 		mutex_init(&dma_ctrlr->mtx);
2455 		dma_ctrlr->locked = 0;
2456 		dma_ctrlr->number = i;
2457 		dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
2458 			VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
2459 			VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
2460 			VME_DMA_PATTERN_TO_MEM;
2461 		INIT_LIST_HEAD(&dma_ctrlr->pending);
2462 		INIT_LIST_HEAD(&dma_ctrlr->running);
2463 		list_add_tail(&dma_ctrlr->list,
2464 			&tsi148_bridge->dma_resources);
2465 	}
2466 
2467 	/* Add location monitor to list */
2468 	lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
2469 	if (lm == NULL) {
2470 		dev_err(&pdev->dev, "Failed to allocate memory for "
2471 		"location monitor resource structure\n");
2472 		retval = -ENOMEM;
2473 		goto err_lm;
2474 	}
2475 	lm->parent = tsi148_bridge;
2476 	mutex_init(&lm->mtx);
2477 	lm->locked = 0;
2478 	lm->number = 1;
2479 	lm->monitors = 4;
2480 	list_add_tail(&lm->list, &tsi148_bridge->lm_resources);
2481 
2482 	tsi148_bridge->slave_get = tsi148_slave_get;
2483 	tsi148_bridge->slave_set = tsi148_slave_set;
2484 	tsi148_bridge->master_get = tsi148_master_get;
2485 	tsi148_bridge->master_set = tsi148_master_set;
2486 	tsi148_bridge->master_read = tsi148_master_read;
2487 	tsi148_bridge->master_write = tsi148_master_write;
2488 	tsi148_bridge->master_rmw = tsi148_master_rmw;
2489 	tsi148_bridge->dma_list_add = tsi148_dma_list_add;
2490 	tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
2491 	tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
2492 	tsi148_bridge->irq_set = tsi148_irq_set;
2493 	tsi148_bridge->irq_generate = tsi148_irq_generate;
2494 	tsi148_bridge->lm_set = tsi148_lm_set;
2495 	tsi148_bridge->lm_get = tsi148_lm_get;
2496 	tsi148_bridge->lm_attach = tsi148_lm_attach;
2497 	tsi148_bridge->lm_detach = tsi148_lm_detach;
2498 	tsi148_bridge->slot_get = tsi148_slot_get;
2499 	tsi148_bridge->alloc_consistent = tsi148_alloc_consistent;
2500 	tsi148_bridge->free_consistent = tsi148_free_consistent;
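
	/* These hooks make up the bridge operations used by the VME core once
	 * vme_register_bridge() is called below; VME device drivers reach them
	 * through the vme_* API rather than calling the tsi148_* functions
	 * directly.
	 */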
2501 
2502 	data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2503 	dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2504 		(data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
2505 	if (!geoid)
2506 		dev_info(&pdev->dev, "VME geographical address is %d\n",
2507 			data & TSI148_LCSR_VSTAT_GA_M);
2508 	else
2509 		dev_info(&pdev->dev, "VME geographical address is set to %d\n",
2510 			geoid);
2511 
2512 	dev_info(&pdev->dev, "VME write flush and error check is %s\n",
2513 		err_chk ? "enabled" : "disabled");
2514 
2515 	retval = tsi148_crcsr_init(tsi148_bridge, pdev);
2516 	if (retval) {
2517 		dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2518 		goto err_crcsr;
2519 	}
2520 
2521 	retval = vme_register_bridge(tsi148_bridge);
2522 	if (retval != 0) {
2523 		dev_err(&pdev->dev, "Chip Registration failed.\n");
2524 		goto err_reg;
2525 	}
2526 
2527 	pci_set_drvdata(pdev, tsi148_bridge);
2528 
2529 	/* Clear VME bus "board fail", and "power-up reset" lines */
2530 	data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2531 	data &= ~TSI148_LCSR_VSTAT_BRDFL;
2532 	data |= TSI148_LCSR_VSTAT_CPURST;
2533 	iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
2534 
2535 	return 0;
2536 
2537 err_reg:
2538 	tsi148_crcsr_exit(tsi148_bridge, pdev);
2539 err_crcsr:
2540 err_lm:
2541 	/* resources are stored in a linked list */
2542 	list_for_each_safe(pos, n, &tsi148_bridge->lm_resources) {
2543 		lm = list_entry(pos, struct vme_lm_resource, list);
2544 		list_del(pos);
2545 		kfree(lm);
2546 	}
2547 err_dma:
2548 	/* resources are stored in a linked list */
2549 	list_for_each_safe(pos, n, &tsi148_bridge->dma_resources) {
2550 		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2551 		list_del(pos);
2552 		kfree(dma_ctrlr);
2553 	}
2554 err_slave:
2555 	/* resources are stored in a linked list */
2556 	list_for_each_safe(pos, n, &tsi148_bridge->slave_resources) {
2557 		slave_image = list_entry(pos, struct vme_slave_resource, list);
2558 		list_del(pos);
2559 		kfree(slave_image);
2560 	}
2561 err_master:
2562 	/* resources are stored in a linked list */
2563 	list_for_each_safe(pos, n, &tsi148_bridge->master_resources) {
2564 		master_image = list_entry(pos, struct vme_master_resource,
2565 			list);
2566 		list_del(pos);
2567 		kfree(master_image);
2568 	}
2569 
2570 	tsi148_irq_exit(tsi148_bridge, pdev);
2571 err_irq:
2572 err_test:
2573 	iounmap(tsi148_device->base);
2574 err_remap:
2575 	pci_release_regions(pdev);
2576 err_resource:
2577 	pci_disable_device(pdev);
2578 err_enable:
2579 	kfree(tsi148_device);
2580 err_driver:
2581 	kfree(tsi148_bridge);
2582 err_struct:
2583 	return retval;
2584 
2585 }
2586 
2587 static void tsi148_remove(struct pci_dev *pdev)
2588 {
2589 	struct list_head *pos = NULL;
2590 	struct list_head *tmplist;
2591 	struct vme_master_resource *master_image;
2592 	struct vme_slave_resource *slave_image;
2593 	struct vme_dma_resource *dma_ctrlr;
2594 	int i;
2595 	struct tsi148_driver *bridge;
2596 	struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
2597 
2598 	bridge = tsi148_bridge->driver_priv;
2599 
2600 
2601 	dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
2602 
2603 	/*
2604 	 *  Shutdown all inbound and outbound windows.
2605 	 */
2606 	for (i = 0; i < 8; i++) {
2607 		iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
2608 			TSI148_LCSR_OFFSET_ITAT);
2609 		iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
2610 			TSI148_LCSR_OFFSET_OTAT);
2611 	}
2612 
2613 	/*
2614 	 *  Shutdown Location monitor.
2615 	 */
2616 	iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
2617 
2618 	/*
2619 	 *  Shutdown CRG map.
2620 	 */
2621 	iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);
2622 
2623 	/*
2624 	 *  Clear error status.
2625 	 */
2626 	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
2627 	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
2628 	iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);
2629 
2630 	/*
2631 	 *  Remove VIRQ interrupt (if any)
2632 	 */
2633 	if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
2634 		iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);
2635 
2636 	/*
2637 	 *  Map all Interrupts to PCI INTA
2638 	 */
2639 	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
2640 	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
2641 
2642 	tsi148_irq_exit(tsi148_bridge, pdev);
2643 
2644 	vme_unregister_bridge(tsi148_bridge);
2645 
2646 	tsi148_crcsr_exit(tsi148_bridge, pdev);
2647 
2648 	/* resources are stored in a linked list */
2649 	list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
2650 		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2651 		list_del(pos);
2652 		kfree(dma_ctrlr);
2653 	}
2654 
2655 	/* resources are stored in a linked list */
2656 	list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
2657 		slave_image = list_entry(pos, struct vme_slave_resource, list);
2658 		list_del(pos);
2659 		kfree(slave_image);
2660 	}
2661 
2662 	/* resources are stored in a linked list */
2663 	list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
2664 		master_image = list_entry(pos, struct vme_master_resource,
2665 			list);
2666 		list_del(pos);
2667 		kfree(master_image);
2668 	}
2669 
2670 	iounmap(bridge->base);
2671 
2672 	pci_release_regions(pdev);
2673 
2674 	pci_disable_device(pdev);
2675 
2676 	kfree(tsi148_bridge->driver_priv);
2677 
2678 	kfree(tsi148_bridge);
2679 }
2680 
2681 module_pci_driver(tsi148_driver);
2682 
2683 MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
2684 module_param(err_chk, bool, 0);
2685 
2686 MODULE_PARM_DESC(geoid, "Override geographical addressing");
2687 module_param(geoid, int, 0);
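
/*
 * Example (illustrative): loading the module with
 *
 *	modprobe vme_tsi148 err_chk=1 geoid=5
 *
 * enables the flushed, error-checked VME access path and forces the
 * geographical address to 5 instead of reading it from the VSTAT register.
 */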
2688 
2689 MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
2690 MODULE_LICENSE("GPL");
2691