1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Support for the Tundra TSI148 VME-PCI Bridge Chip
4  *
5  * Author: Martyn Welch <martyn.welch@ge.com>
6  * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
7  *
8  * Based on work by Tom Armistead and Ajit Prem
9  * Copyright 2004 Motorola Inc.
10  */
11 
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/mm.h>
15 #include <linux/types.h>
16 #include <linux/errno.h>
17 #include <linux/proc_fs.h>
18 #include <linux/pci.h>
19 #include <linux/poll.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/interrupt.h>
22 #include <linux/spinlock.h>
23 #include <linux/sched.h>
24 #include <linux/slab.h>
25 #include <linux/time.h>
26 #include <linux/io.h>
27 #include <linux/uaccess.h>
28 #include <linux/byteorder/generic.h>
29 #include <linux/vme.h>
30 
31 #include "../vme_bridge.h"
32 #include "vme_tsi148.h"
33 
34 static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
35 static void tsi148_remove(struct pci_dev *);
36 
37 
38 /* Module parameter */
39 static bool err_chk;
40 static int geoid;
41 
42 static const char driver_name[] = "vme_tsi148";
43 
44 static const struct pci_device_id tsi148_ids[] = {
45 	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
46 	{ },
47 };
48 
49 MODULE_DEVICE_TABLE(pci, tsi148_ids);
50 
51 static struct pci_driver tsi148_driver = {
52 	.name = driver_name,
53 	.id_table = tsi148_ids,
54 	.probe = tsi148_probe,
55 	.remove = tsi148_remove,
56 };
57 
58 static void reg_join(unsigned int high, unsigned int low,
59 	unsigned long long *variable)
60 {
61 	*variable = (unsigned long long)high << 32;
62 	*variable |= (unsigned long long)low;
63 }
64 
65 static void reg_split(unsigned long long variable, unsigned int *high,
66 	unsigned int *low)
67 {
68 	*low = (unsigned int)variable & 0xFFFFFFFF;
69 	*high = (unsigned int)(variable >> 32);
70 }
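/*
 * For illustration (a minimal sketch, values are arbitrary): reg_split() and
 * reg_join() are exact inverses, used throughout this file to move 64-bit
 * VME/PCI addresses in and out of the chip's paired 32-bit registers:
 *
 *	unsigned int hi, lo;
 *	unsigned long long addr = 0x0000000180010000ULL;
 *
 *	reg_split(addr, &hi, &lo);	// hi = 0x00000001, lo = 0x80010000
 *	reg_join(hi, lo, &addr);	// addr = 0x0000000180010000ULL again
 */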
71 
72 /*
73  * Wakes up DMA queue.
74  */
75 static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
76 	int channel_mask)
77 {
78 	u32 serviced = 0;
79 
80 	if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
81 		wake_up(&bridge->dma_queue[0]);
82 		serviced |= TSI148_LCSR_INTC_DMA0C;
83 	}
84 	if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
85 		wake_up(&bridge->dma_queue[1]);
86 		serviced |= TSI148_LCSR_INTC_DMA1C;
87 	}
88 
89 	return serviced;
90 }
91 
92 /*
93  * Wake up location monitor queue
94  */
95 static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
96 {
97 	int i;
98 	u32 serviced = 0;
99 
100 	for (i = 0; i < 4; i++) {
101 		if (stat & TSI148_LCSR_INTS_LMS[i]) {
102 			/* We only enable interrupts if the callback is set */
103 			bridge->lm_callback[i](bridge->lm_data[i]);
104 			serviced |= TSI148_LCSR_INTC_LMC[i];
105 		}
106 	}
107 
108 	return serviced;
109 }
110 
111 /*
112  * Wake up mail box queue.
113  *
114  * XXX This functionality is not exposed up through the API.
115  */
116 static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
117 {
118 	int i;
119 	u32 val;
120 	u32 serviced = 0;
121 	struct tsi148_driver *bridge;
122 
123 	bridge = tsi148_bridge->driver_priv;
124 
125 	for (i = 0; i < 4; i++) {
126 		if (stat & TSI148_LCSR_INTS_MBS[i]) {
127 			val = ioread32be(bridge->base +	TSI148_GCSR_MBOX[i]);
128 			dev_err(tsi148_bridge->parent, "VME Mailbox %d received"
129 				": 0x%x\n", i, val);
130 			serviced |= TSI148_LCSR_INTC_MBC[i];
131 		}
132 	}
133 
134 	return serviced;
135 }
136 
137 /*
138  * Display error & status message when PERR (PCI) exception interrupt occurs.
139  */
140 static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
141 {
142 	struct tsi148_driver *bridge;
143 
144 	bridge = tsi148_bridge->driver_priv;
145 
146 	dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, "
147 		"attributes: %08x\n",
148 		ioread32be(bridge->base + TSI148_LCSR_EDPAU),
149 		ioread32be(bridge->base + TSI148_LCSR_EDPAL),
150 		ioread32be(bridge->base + TSI148_LCSR_EDPAT));
151 
152 	dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split "
153 		"completion reg: %08x\n",
154 		ioread32be(bridge->base + TSI148_LCSR_EDPXA),
155 		ioread32be(bridge->base + TSI148_LCSR_EDPXS));
156 
157 	iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
158 
159 	return TSI148_LCSR_INTC_PERRC;
160 }
161 
162 /*
163  * Save address and status when VME error interrupt occurs.
164  */
165 static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
166 {
167 	unsigned int error_addr_high, error_addr_low;
168 	unsigned long long error_addr;
169 	u32 error_attrib;
170 	int error_am;
171 	struct tsi148_driver *bridge;
172 
173 	bridge = tsi148_bridge->driver_priv;
174 
175 	error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
176 	error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
177 	error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
178 	error_am = (error_attrib & TSI148_LCSR_VEAT_AM_M) >> 8;
179 
180 	reg_join(error_addr_high, error_addr_low, &error_addr);
181 
182 	/* Check for exception register overflow (we have lost error data) */
183 	if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
184 		dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow "
185 			"Occurred\n");
186 	}
187 
188 	if (err_chk)
189 		vme_bus_error_handler(tsi148_bridge, error_addr, error_am);
190 	else
191 		dev_err(tsi148_bridge->parent,
192 			"VME Bus Error at address: 0x%llx, attributes: %08x\n",
193 			error_addr, error_attrib);
194 
195 	/* Clear Status */
196 	iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
197 
198 	return TSI148_LCSR_INTC_VERRC;
199 }
200 
201 /*
202  * Wake up IACK queue.
203  */
204 static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
205 {
206 	wake_up(&bridge->iack_queue);
207 
208 	return TSI148_LCSR_INTC_IACKC;
209 }
210 
211 /*
212  * Call the VME bus interrupt callback if one is provided.
213  */
214 static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
215 	u32 stat)
216 {
217 	int vec, i, serviced = 0;
218 	struct tsi148_driver *bridge;
219 
220 	bridge = tsi148_bridge->driver_priv;
221 
222 	for (i = 7; i > 0; i--) {
223 		if (stat & (1 << i)) {
224 			/*
225 			 * Note: Even though the registers are defined as
226 			 * 32-bits in the spec, we only want to issue 8-bit
227 			 * IACK cycles on the bus, read from offset 3.
228 			 */
229 			vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
230 
231 			vme_irq_handler(tsi148_bridge, i, vec);
232 
233 			serviced |= (1 << i);
234 		}
235 	}
236 
237 	return serviced;
238 }
239 
240 /*
241  * Top level interrupt handler.  Clears appropriate interrupt status bits and
242  * then calls appropriate sub handler(s).
243  */
244 static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
245 {
246 	u32 stat, enable, serviced = 0;
247 	struct vme_bridge *tsi148_bridge;
248 	struct tsi148_driver *bridge;
249 
250 	tsi148_bridge = ptr;
251 
252 	bridge = tsi148_bridge->driver_priv;
253 
254 	/* Determine which interrupts are unmasked and set */
255 	enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
256 	stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
257 
258 	/* Only look at unmasked interrupts */
259 	stat &= enable;
260 
261 	if (unlikely(!stat))
262 		return IRQ_NONE;
263 
264 	/* Call subhandlers as appropriate */
265 	/* DMA irqs */
266 	if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
267 		serviced |= tsi148_DMA_irqhandler(bridge, stat);
268 
269 	/* Location monitor irqs */
270 	if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
271 			TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
272 		serviced |= tsi148_LM_irqhandler(bridge, stat);
273 
274 	/* Mail box irqs */
275 	if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
276 			TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
277 		serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);
278 
279 	/* PCI bus error */
280 	if (stat & TSI148_LCSR_INTS_PERRS)
281 		serviced |= tsi148_PERR_irqhandler(tsi148_bridge);
282 
283 	/* VME bus error */
284 	if (stat & TSI148_LCSR_INTS_VERRS)
285 		serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
286 
287 	/* IACK irq */
288 	if (stat & TSI148_LCSR_INTS_IACKS)
289 		serviced |= tsi148_IACK_irqhandler(bridge);
290 
291 	/* VME bus irqs */
292 	if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
293 			TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
294 			TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
295 			TSI148_LCSR_INTS_IRQ1S))
296 		serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
297 
298 	/* Clear serviced interrupts */
299 	iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
300 
301 	return IRQ_HANDLED;
302 }
303 
304 static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
305 {
306 	int result;
307 	unsigned int tmp;
308 	struct pci_dev *pdev;
309 	struct tsi148_driver *bridge;
310 
311 	pdev = to_pci_dev(tsi148_bridge->parent);
312 
313 	bridge = tsi148_bridge->driver_priv;
314 
315 	result = request_irq(pdev->irq,
316 			     tsi148_irqhandler,
317 			     IRQF_SHARED,
318 			     driver_name, tsi148_bridge);
319 	if (result) {
320 		dev_err(tsi148_bridge->parent, "Can't get assigned pci irq "
321 			"vector %02X\n", pdev->irq);
322 		return result;
323 	}
324 
325 	/* Enable and unmask interrupts */
326 	tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
327 		TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
328 		TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
329 		TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
330 		TSI148_LCSR_INTEO_IACKEO;
331 
332 	/* This leaves the following interrupts masked.
333 	 * TSI148_LCSR_INTEO_VIEEO
334 	 * TSI148_LCSR_INTEO_SYSFLEO
335 	 * TSI148_LCSR_INTEO_ACFLEO
336 	 */
337 
338 	/* Don't enable Location Monitor interrupts here - they will be
339 	 * enabled when the location monitors are properly configured and
340 	 * a callback has been attached.
341 	 * TSI148_LCSR_INTEO_LM0EO
342 	 * TSI148_LCSR_INTEO_LM1EO
343 	 * TSI148_LCSR_INTEO_LM2EO
344 	 * TSI148_LCSR_INTEO_LM3EO
345 	 */
346 
347 	/* Don't enable VME interrupts until a handler has been added, else the
348 	 * bridge would acknowledge interrupts that nothing yet knows how to
349 	 * handle properly.
350 	 * TSI148_LCSR_INTEO_IRQ7EO
351 	 * TSI148_LCSR_INTEO_IRQ6EO
352 	 * TSI148_LCSR_INTEO_IRQ5EO
353 	 * TSI148_LCSR_INTEO_IRQ4EO
354 	 * TSI148_LCSR_INTEO_IRQ3EO
355 	 * TSI148_LCSR_INTEO_IRQ2EO
356 	 * TSI148_LCSR_INTEO_IRQ1EO
357 	 */
358 
359 	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
360 	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
361 
362 	return 0;
363 }
364 
365 static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
366 	struct pci_dev *pdev)
367 {
368 	struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
369 
370 	/* Turn off interrupts */
371 	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
372 	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
373 
374 	/* Clear all interrupts */
375 	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
376 
377 	/* Detach interrupt handler */
378 	free_irq(pdev->irq, tsi148_bridge);
379 }
380 
381 /*
382  * Check to see if an IACK has been received, return true (1) or false (0).
383  */
384 static int tsi148_iack_received(struct tsi148_driver *bridge)
385 {
386 	u32 tmp;
387 
388 	tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
389 
390 	if (tmp & TSI148_LCSR_VICR_IRQS)
391 		return 0;
392 	else
393 		return 1;
394 }
395 
396 /*
397  * Configure VME interrupt
398  */
399 static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
400 	int state, int sync)
401 {
402 	struct pci_dev *pdev;
403 	u32 tmp;
404 	struct tsi148_driver *bridge;
405 
406 	bridge = tsi148_bridge->driver_priv;
407 
408 	/* We need to do the ordering differently for enabling and disabling */
409 	if (state == 0) {
410 		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
411 		tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
412 		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
413 
414 		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
415 		tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
416 		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
417 
418 		if (sync != 0) {
419 			pdev = to_pci_dev(tsi148_bridge->parent);
420 			synchronize_irq(pdev->irq);
421 		}
422 	} else {
423 		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
424 		tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
425 		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
426 
427 		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
428 		tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
429 		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
430 	}
431 }
432 
433 /*
434  * Generate a VME bus interrupt at the requested level & vector. Wait for
435  * interrupt to be acked.
436  */
437 static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
438 	int statid)
439 {
440 	u32 tmp;
441 	struct tsi148_driver *bridge;
442 
443 	bridge = tsi148_bridge->driver_priv;
444 
445 	mutex_lock(&bridge->vme_int);
446 
447 	/* Read VICR register */
448 	tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
449 
450 	/* Set Status/ID */
451 	tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
452 		(statid & TSI148_LCSR_VICR_STID_M);
453 	iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
454 
455 	/* Assert VMEbus IRQ */
456 	tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
457 	iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
458 
459 	/* XXX Consider implementing a timeout? */
460 	wait_event_interruptible(bridge->iack_queue,
461 		tsi148_iack_received(bridge));
462 
463 	mutex_unlock(&bridge->vme_int);
464 
465 	return 0;
466 }
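/*
 * Summary of the interrupt-generation handshake implemented above (restating
 * this file's own code, no additional driver logic): the Status/ID byte is
 * written into VICR, the requested IRQ level is asserted, and the caller then
 * sleeps on iack_queue. When another VME master runs the IACK cycle, the IACK
 * interrupt fires, tsi148_IACK_irqhandler() wakes the queue, and
 * tsi148_iack_received() confirms the IRQS bit in VICR has cleared.
 */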
467 
468 /*
469  * Initialize a slave window with the requested attributes.
470  */
471 static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
472 	unsigned long long vme_base, unsigned long long size,
473 	dma_addr_t pci_base, u32 aspace, u32 cycle)
474 {
475 	unsigned int i, addr = 0, granularity = 0;
476 	unsigned int temp_ctl = 0;
477 	unsigned int vme_base_low, vme_base_high;
478 	unsigned int vme_bound_low, vme_bound_high;
479 	unsigned int pci_offset_low, pci_offset_high;
480 	unsigned long long vme_bound, pci_offset;
481 	struct vme_bridge *tsi148_bridge;
482 	struct tsi148_driver *bridge;
483 
484 	tsi148_bridge = image->parent;
485 	bridge = tsi148_bridge->driver_priv;
486 
487 	i = image->number;
488 
489 	switch (aspace) {
490 	case VME_A16:
491 		granularity = 0x10;
492 		addr |= TSI148_LCSR_ITAT_AS_A16;
493 		break;
494 	case VME_A24:
495 		granularity = 0x1000;
496 		addr |= TSI148_LCSR_ITAT_AS_A24;
497 		break;
498 	case VME_A32:
499 		granularity = 0x10000;
500 		addr |= TSI148_LCSR_ITAT_AS_A32;
501 		break;
502 	case VME_A64:
503 		granularity = 0x10000;
504 		addr |= TSI148_LCSR_ITAT_AS_A64;
505 		break;
506 	default:
507 		dev_err(tsi148_bridge->parent, "Invalid address space\n");
508 		return -EINVAL;
509 		break;
510 	}
511 
512 	/* Convert 64-bit variables to 2x 32-bit variables */
513 	reg_split(vme_base, &vme_base_high, &vme_base_low);
514 
515 	/*
516 	 * Bound address is a valid address for the window, adjust
517 	 * accordingly
518 	 */
519 	vme_bound = vme_base + size - granularity;
520 	reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
521 	pci_offset = (unsigned long long)pci_base - vme_base;
522 	reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
523 
524 	if (vme_base_low & (granularity - 1)) {
525 		dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
526 		return -EINVAL;
527 	}
528 	if (vme_bound_low & (granularity - 1)) {
529 		dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
530 		return -EINVAL;
531 	}
532 	if (pci_offset_low & (granularity - 1)) {
533 		dev_err(tsi148_bridge->parent, "Invalid PCI Offset "
534 			"alignment\n");
535 		return -EINVAL;
536 	}
537 
538 	/*  Disable while we are mucking around */
539 	temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
540 		TSI148_LCSR_OFFSET_ITAT);
541 	temp_ctl &= ~TSI148_LCSR_ITAT_EN;
542 	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
543 		TSI148_LCSR_OFFSET_ITAT);
544 
545 	/* Setup mapping */
546 	iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
547 		TSI148_LCSR_OFFSET_ITSAU);
548 	iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
549 		TSI148_LCSR_OFFSET_ITSAL);
550 	iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
551 		TSI148_LCSR_OFFSET_ITEAU);
552 	iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
553 		TSI148_LCSR_OFFSET_ITEAL);
554 	iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
555 		TSI148_LCSR_OFFSET_ITOFU);
556 	iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
557 		TSI148_LCSR_OFFSET_ITOFL);
558 
559 	/* Setup 2eSST speeds */
560 	temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
561 	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
562 	case VME_2eSST160:
563 		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
564 		break;
565 	case VME_2eSST267:
566 		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
567 		break;
568 	case VME_2eSST320:
569 		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
570 		break;
571 	}
572 
573 	/* Setup cycle types */
574 	temp_ctl &= ~(0x1F << 7);
575 	if (cycle & VME_BLT)
576 		temp_ctl |= TSI148_LCSR_ITAT_BLT;
577 	if (cycle & VME_MBLT)
578 		temp_ctl |= TSI148_LCSR_ITAT_MBLT;
579 	if (cycle & VME_2eVME)
580 		temp_ctl |= TSI148_LCSR_ITAT_2eVME;
581 	if (cycle & VME_2eSST)
582 		temp_ctl |= TSI148_LCSR_ITAT_2eSST;
583 	if (cycle & VME_2eSSTB)
584 		temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
585 
586 	/* Setup address space */
587 	temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
588 	temp_ctl |= addr;
589 
590 	temp_ctl &= ~0xF;
591 	if (cycle & VME_SUPER)
592 		temp_ctl |= TSI148_LCSR_ITAT_SUPR;
593 	if (cycle & VME_USER)
594 		temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
595 	if (cycle & VME_PROG)
596 		temp_ctl |= TSI148_LCSR_ITAT_PGM;
597 	if (cycle & VME_DATA)
598 		temp_ctl |= TSI148_LCSR_ITAT_DATA;
599 
600 	/* Write ctl reg without enable */
601 	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
602 		TSI148_LCSR_OFFSET_ITAT);
603 
604 	if (enabled)
605 		temp_ctl |= TSI148_LCSR_ITAT_EN;
606 
607 	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
608 		TSI148_LCSR_OFFSET_ITAT);
609 
610 	return 0;
611 }
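/*
 * Worked example of the bound arithmetic above (values are arbitrary): for an
 * A24 slave window the granularity is 0x1000, so vme_base = 0x100000 with
 * size = 0x80000 programs vme_bound = 0x100000 + 0x80000 - 0x1000 = 0x17F000.
 * The bound register therefore holds the last valid granularity-aligned
 * address inside the window, not the first address beyond it, and base, bound
 * and PCI offset must all be granularity-aligned or -EINVAL is returned.
 */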
612 
613 /*
614  * Get slave window configuration.
615  */
616 static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
617 	unsigned long long *vme_base, unsigned long long *size,
618 	dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
619 {
620 	unsigned int i, granularity = 0, ctl = 0;
621 	unsigned int vme_base_low, vme_base_high;
622 	unsigned int vme_bound_low, vme_bound_high;
623 	unsigned int pci_offset_low, pci_offset_high;
624 	unsigned long long vme_bound, pci_offset;
625 	struct tsi148_driver *bridge;
626 
627 	bridge = image->parent->driver_priv;
628 
629 	i = image->number;
630 
631 	/* Read registers */
632 	ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
633 		TSI148_LCSR_OFFSET_ITAT);
634 
635 	vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
636 		TSI148_LCSR_OFFSET_ITSAU);
637 	vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
638 		TSI148_LCSR_OFFSET_ITSAL);
639 	vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
640 		TSI148_LCSR_OFFSET_ITEAU);
641 	vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
642 		TSI148_LCSR_OFFSET_ITEAL);
643 	pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
644 		TSI148_LCSR_OFFSET_ITOFU);
645 	pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
646 		TSI148_LCSR_OFFSET_ITOFL);
647 
648 	/* Convert 64-bit variables to 2x 32-bit variables */
649 	reg_join(vme_base_high, vme_base_low, vme_base);
650 	reg_join(vme_bound_high, vme_bound_low, &vme_bound);
651 	reg_join(pci_offset_high, pci_offset_low, &pci_offset);
652 
653 	*pci_base = (dma_addr_t)(*vme_base + pci_offset);
654 
655 	*enabled = 0;
656 	*aspace = 0;
657 	*cycle = 0;
658 
659 	if (ctl & TSI148_LCSR_ITAT_EN)
660 		*enabled = 1;
661 
662 	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
663 		granularity = 0x10;
664 		*aspace |= VME_A16;
665 	}
666 	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
667 		granularity = 0x1000;
668 		*aspace |= VME_A24;
669 	}
670 	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
671 		granularity = 0x10000;
672 		*aspace |= VME_A32;
673 	}
674 	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
675 		granularity = 0x10000;
676 		*aspace |= VME_A64;
677 	}
678 
679 	/* Need granularity before we set the size */
680 	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);
681 
682 
683 	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
684 		*cycle |= VME_2eSST160;
685 	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
686 		*cycle |= VME_2eSST267;
687 	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
688 		*cycle |= VME_2eSST320;
689 
690 	if (ctl & TSI148_LCSR_ITAT_BLT)
691 		*cycle |= VME_BLT;
692 	if (ctl & TSI148_LCSR_ITAT_MBLT)
693 		*cycle |= VME_MBLT;
694 	if (ctl & TSI148_LCSR_ITAT_2eVME)
695 		*cycle |= VME_2eVME;
696 	if (ctl & TSI148_LCSR_ITAT_2eSST)
697 		*cycle |= VME_2eSST;
698 	if (ctl & TSI148_LCSR_ITAT_2eSSTB)
699 		*cycle |= VME_2eSSTB;
700 
701 	if (ctl & TSI148_LCSR_ITAT_SUPR)
702 		*cycle |= VME_SUPER;
703 	if (ctl & TSI148_LCSR_ITAT_NPRIV)
704 		*cycle |= VME_USER;
705 	if (ctl & TSI148_LCSR_ITAT_PGM)
706 		*cycle |= VME_PROG;
707 	if (ctl & TSI148_LCSR_ITAT_DATA)
708 		*cycle |= VME_DATA;
709 
710 	return 0;
711 }
712 
713 /*
714  * Allocate and map PCI Resource
715  */
716 static int tsi148_alloc_resource(struct vme_master_resource *image,
717 	unsigned long long size)
718 {
719 	unsigned long long existing_size;
720 	int retval = 0;
721 	struct pci_dev *pdev;
722 	struct vme_bridge *tsi148_bridge;
723 
724 	tsi148_bridge = image->parent;
725 
726 	pdev = to_pci_dev(tsi148_bridge->parent);
727 
728 	existing_size = (unsigned long long)(image->bus_resource.end -
729 		image->bus_resource.start);
730 
731 	/* If the existing size is OK, return */
732 	if ((size != 0) && (existing_size == (size - 1)))
733 		return 0;
734 
735 	if (existing_size != 0) {
736 		iounmap(image->kern_base);
737 		image->kern_base = NULL;
738 		kfree(image->bus_resource.name);
739 		release_resource(&image->bus_resource);
740 		memset(&image->bus_resource, 0, sizeof(image->bus_resource));
741 	}
742 
743 	/* Exit here if size is zero */
744 	if (size == 0)
745 		return 0;
746 
747 	if (!image->bus_resource.name) {
748 		image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
749 		if (!image->bus_resource.name) {
750 			retval = -ENOMEM;
751 			goto err_name;
752 		}
753 	}
754 
755 	sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
756 		image->number);
757 
758 	image->bus_resource.start = 0;
759 	image->bus_resource.end = (unsigned long)size;
760 	image->bus_resource.flags = IORESOURCE_MEM;
761 
762 	retval = pci_bus_alloc_resource(pdev->bus,
763 		&image->bus_resource, size, 0x10000, PCIBIOS_MIN_MEM,
764 		0, NULL, NULL);
765 	if (retval) {
766 		dev_err(tsi148_bridge->parent, "Failed to allocate mem "
767 			"resource for window %d size 0x%lx start 0x%lx\n",
768 			image->number, (unsigned long)size,
769 			(unsigned long)image->bus_resource.start);
770 		goto err_resource;
771 	}
772 
773 	image->kern_base = ioremap_nocache(
774 		image->bus_resource.start, size);
775 	if (!image->kern_base) {
776 		dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
777 		retval = -ENOMEM;
778 		goto err_remap;
779 	}
780 
781 	return 0;
782 
783 err_remap:
784 	release_resource(&image->bus_resource);
785 err_resource:
786 	kfree(image->bus_resource.name);
787 	memset(&image->bus_resource, 0, sizeof(image->bus_resource));
788 err_name:
789 	return retval;
790 }
791 
792 /*
793  * Free and unmap PCI Resource
794  */
795 static void tsi148_free_resource(struct vme_master_resource *image)
796 {
797 	iounmap(image->kern_base);
798 	image->kern_base = NULL;
799 	release_resource(&image->bus_resource);
800 	kfree(image->bus_resource.name);
801 	memset(&image->bus_resource, 0, sizeof(image->bus_resource));
802 }
803 
804 /*
805  * Set the attributes of an outbound window.
806  */
807 static int tsi148_master_set(struct vme_master_resource *image, int enabled,
808 	unsigned long long vme_base, unsigned long long size, u32 aspace,
809 	u32 cycle, u32 dwidth)
810 {
811 	int retval = 0;
812 	unsigned int i;
813 	unsigned int temp_ctl = 0;
814 	unsigned int pci_base_low, pci_base_high;
815 	unsigned int pci_bound_low, pci_bound_high;
816 	unsigned int vme_offset_low, vme_offset_high;
817 	unsigned long long pci_bound, vme_offset, pci_base;
818 	struct vme_bridge *tsi148_bridge;
819 	struct tsi148_driver *bridge;
820 	struct pci_bus_region region;
821 	struct pci_dev *pdev;
822 
823 	tsi148_bridge = image->parent;
824 
825 	bridge = tsi148_bridge->driver_priv;
826 
827 	pdev = to_pci_dev(tsi148_bridge->parent);
828 
829 	/* Verify input data */
830 	if (vme_base & 0xFFFF) {
831 		dev_err(tsi148_bridge->parent, "Invalid VME Window "
832 			"alignment\n");
833 		retval = -EINVAL;
834 		goto err_window;
835 	}
836 
837 	if ((size == 0) && (enabled != 0)) {
838 		dev_err(tsi148_bridge->parent, "Size must be non-zero for "
839 			"enabled windows\n");
840 		retval = -EINVAL;
841 		goto err_window;
842 	}
843 
844 	spin_lock(&image->lock);
845 
846 	/* Let's allocate the resource here rather than further up the stack as
847 	 * it avoids pushing loads of bus dependent stuff up the stack. If size
848 	 * is zero, any existing resource will be freed.
849 	 */
850 	retval = tsi148_alloc_resource(image, size);
851 	if (retval) {
852 		spin_unlock(&image->lock);
853 		dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
854 			"resource\n");
855 		goto err_res;
856 	}
857 
858 	if (size == 0) {
859 		pci_base = 0;
860 		pci_bound = 0;
861 		vme_offset = 0;
862 	} else {
863 		pcibios_resource_to_bus(pdev->bus, &region,
864 					&image->bus_resource);
865 		pci_base = region.start;
866 
867 		/*
868 		 * Bound address is a valid address for the window, adjust
869 		 * according to window granularity.
870 		 */
871 		pci_bound = pci_base + (size - 0x10000);
872 		vme_offset = vme_base - pci_base;
873 	}
874 
875 	/* Convert 64-bit variables to 2x 32-bit variables */
876 	reg_split(pci_base, &pci_base_high, &pci_base_low);
877 	reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
878 	reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
879 
880 	if (pci_base_low & 0xFFFF) {
881 		spin_unlock(&image->lock);
882 		dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
883 		retval = -EINVAL;
884 		goto err_gran;
885 	}
886 	if (pci_bound_low & 0xFFFF) {
887 		spin_unlock(&image->lock);
888 		dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
889 		retval = -EINVAL;
890 		goto err_gran;
891 	}
892 	if (vme_offset_low & 0xFFFF) {
893 		spin_unlock(&image->lock);
894 		dev_err(tsi148_bridge->parent, "Invalid VME Offset "
895 			"alignment\n");
896 		retval = -EINVAL;
897 		goto err_gran;
898 	}
899 
900 	i = image->number;
901 
902 	/* Disable while we are mucking around */
903 	temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
904 		TSI148_LCSR_OFFSET_OTAT);
905 	temp_ctl &= ~TSI148_LCSR_OTAT_EN;
906 	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
907 		TSI148_LCSR_OFFSET_OTAT);
908 
909 	/* Setup 2eSST speeds */
910 	temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
911 	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
912 	case VME_2eSST160:
913 		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
914 		break;
915 	case VME_2eSST267:
916 		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
917 		break;
918 	case VME_2eSST320:
919 		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
920 		break;
921 	}
922 
923 	/* Setup cycle types */
924 	if (cycle & VME_BLT) {
925 		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
926 		temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
927 	}
928 	if (cycle & VME_MBLT) {
929 		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
930 		temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
931 	}
932 	if (cycle & VME_2eVME) {
933 		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
934 		temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
935 	}
936 	if (cycle & VME_2eSST) {
937 		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
938 		temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
939 	}
940 	if (cycle & VME_2eSSTB) {
941 		dev_warn(tsi148_bridge->parent, "Currently not setting "
942 			"Broadcast Select Registers\n");
943 		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
944 		temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
945 	}
946 
947 	/* Setup data width */
948 	temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
949 	switch (dwidth) {
950 	case VME_D16:
951 		temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
952 		break;
953 	case VME_D32:
954 		temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
955 		break;
956 	default:
957 		spin_unlock(&image->lock);
958 		dev_err(tsi148_bridge->parent, "Invalid data width\n");
959 		retval = -EINVAL;
960 		goto err_dwidth;
961 	}
962 
963 	/* Setup address space */
964 	temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
965 	switch (aspace) {
966 	case VME_A16:
967 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
968 		break;
969 	case VME_A24:
970 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
971 		break;
972 	case VME_A32:
973 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
974 		break;
975 	case VME_A64:
976 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
977 		break;
978 	case VME_CRCSR:
979 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
980 		break;
981 	case VME_USER1:
982 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
983 		break;
984 	case VME_USER2:
985 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
986 		break;
987 	case VME_USER3:
988 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
989 		break;
990 	case VME_USER4:
991 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
992 		break;
993 	default:
994 		spin_unlock(&image->lock);
995 		dev_err(tsi148_bridge->parent, "Invalid address space\n");
996 		retval = -EINVAL;
997 		goto err_aspace;
998 		break;
999 	}
1000 
1001 	temp_ctl &= ~(3<<4);
1002 	if (cycle & VME_SUPER)
1003 		temp_ctl |= TSI148_LCSR_OTAT_SUP;
1004 	if (cycle & VME_PROG)
1005 		temp_ctl |= TSI148_LCSR_OTAT_PGM;
1006 
1007 	/* Setup mapping */
1008 	iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
1009 		TSI148_LCSR_OFFSET_OTSAU);
1010 	iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
1011 		TSI148_LCSR_OFFSET_OTSAL);
1012 	iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
1013 		TSI148_LCSR_OFFSET_OTEAU);
1014 	iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
1015 		TSI148_LCSR_OFFSET_OTEAL);
1016 	iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
1017 		TSI148_LCSR_OFFSET_OTOFU);
1018 	iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
1019 		TSI148_LCSR_OFFSET_OTOFL);
1020 
1021 	/* Write ctl reg without enable */
1022 	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1023 		TSI148_LCSR_OFFSET_OTAT);
1024 
1025 	if (enabled)
1026 		temp_ctl |= TSI148_LCSR_OTAT_EN;
1027 
1028 	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1029 		TSI148_LCSR_OFFSET_OTAT);
1030 
1031 	spin_unlock(&image->lock);
1032 	return 0;
1033 
1034 err_aspace:
1035 err_dwidth:
1036 err_gran:
1037 	tsi148_free_resource(image);
1038 err_res:
1039 err_window:
1040 	return retval;
1041 
1042 }
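/*
 * Worked example of the outbound window arithmetic above (values are
 * arbitrary): outbound windows use a fixed 64 KB granularity. If the
 * allocated PCI region starts at 0x80000000 with size = 0x20000 and
 * vme_base = 0xC0000000, then pci_bound = 0x80000000 + (0x20000 - 0x10000)
 * = 0x80010000 and vme_offset = 0xC0000000 - 0x80000000 = 0x40000000, the
 * value the bridge adds to a PCI address to form the outgoing VME address.
 * __tsi148_master_get() below reverses this: size = (pci_bound - pci_base)
 * + 0x10000 and vme_base = pci_base + vme_offset.
 */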
1043 
1044 /*
1045  * Get the attributes of an outbound window.
1046  *
1047  * XXX Not parsing prefetch information.
1048  */
1049 static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
1050 	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1051 	u32 *cycle, u32 *dwidth)
1052 {
1053 	unsigned int i, ctl;
1054 	unsigned int pci_base_low, pci_base_high;
1055 	unsigned int pci_bound_low, pci_bound_high;
1056 	unsigned int vme_offset_low, vme_offset_high;
1057 
1058 	unsigned long long pci_base, pci_bound, vme_offset;
1059 	struct tsi148_driver *bridge;
1060 
1061 	bridge = image->parent->driver_priv;
1062 
1063 	i = image->number;
1064 
1065 	ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1066 		TSI148_LCSR_OFFSET_OTAT);
1067 
1068 	pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1069 		TSI148_LCSR_OFFSET_OTSAU);
1070 	pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1071 		TSI148_LCSR_OFFSET_OTSAL);
1072 	pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1073 		TSI148_LCSR_OFFSET_OTEAU);
1074 	pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1075 		TSI148_LCSR_OFFSET_OTEAL);
1076 	vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1077 		TSI148_LCSR_OFFSET_OTOFU);
1078 	vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1079 		TSI148_LCSR_OFFSET_OTOFL);
1080 
1081 	/* Convert 64-bit variables to 2x 32-bit variables */
1082 	reg_join(pci_base_high, pci_base_low, &pci_base);
1083 	reg_join(pci_bound_high, pci_bound_low, &pci_bound);
1084 	reg_join(vme_offset_high, vme_offset_low, &vme_offset);
1085 
1086 	*vme_base = pci_base + vme_offset;
1087 	*size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
1088 
1089 	*enabled = 0;
1090 	*aspace = 0;
1091 	*cycle = 0;
1092 	*dwidth = 0;
1093 
1094 	if (ctl & TSI148_LCSR_OTAT_EN)
1095 		*enabled = 1;
1096 
1097 	/* Setup address space */
1098 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
1099 		*aspace |= VME_A16;
1100 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
1101 		*aspace |= VME_A24;
1102 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
1103 		*aspace |= VME_A32;
1104 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
1105 		*aspace |= VME_A64;
1106 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
1107 		*aspace |= VME_CRCSR;
1108 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
1109 		*aspace |= VME_USER1;
1110 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
1111 		*aspace |= VME_USER2;
1112 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
1113 		*aspace |= VME_USER3;
1114 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
1115 		*aspace |= VME_USER4;
1116 
1117 	/* Setup 2eSST speeds */
1118 	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
1119 		*cycle |= VME_2eSST160;
1120 	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
1121 		*cycle |= VME_2eSST267;
1122 	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
1123 		*cycle |= VME_2eSST320;
1124 
1125 	/* Setup cycle types */
1126 	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
1127 		*cycle |= VME_SCT;
1128 	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
1129 		*cycle |= VME_BLT;
1130 	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
1131 		*cycle |= VME_MBLT;
1132 	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
1133 		*cycle |= VME_2eVME;
1134 	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
1135 		*cycle |= VME_2eSST;
1136 	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
1137 		*cycle |= VME_2eSSTB;
1138 
1139 	if (ctl & TSI148_LCSR_OTAT_SUP)
1140 		*cycle |= VME_SUPER;
1141 	else
1142 		*cycle |= VME_USER;
1143 
1144 	if (ctl & TSI148_LCSR_OTAT_PGM)
1145 		*cycle |= VME_PROG;
1146 	else
1147 		*cycle |= VME_DATA;
1148 
1149 	/* Setup data width */
1150 	if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
1151 		*dwidth = VME_D16;
1152 	if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
1153 		*dwidth = VME_D32;
1154 
1155 	return 0;
1156 }
1157 
1158 
1159 static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
1160 	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1161 	u32 *cycle, u32 *dwidth)
1162 {
1163 	int retval;
1164 
1165 	spin_lock(&image->lock);
1166 
1167 	retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
1168 		cycle, dwidth);
1169 
1170 	spin_unlock(&image->lock);
1171 
1172 	return retval;
1173 }
1174 
1175 static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1176 	size_t count, loff_t offset)
1177 {
1178 	int retval, enabled;
1179 	unsigned long long vme_base, size;
1180 	u32 aspace, cycle, dwidth;
1181 	struct vme_error_handler *handler = NULL;
1182 	struct vme_bridge *tsi148_bridge;
1183 	void __iomem *addr = image->kern_base + offset;
1184 	unsigned int done = 0;
1185 	unsigned int count32;
1186 
1187 	tsi148_bridge = image->parent;
1188 
1189 	spin_lock(&image->lock);
1190 
1191 	if (err_chk) {
1192 		__tsi148_master_get(image, &enabled, &vme_base, &size, &aspace,
1193 				    &cycle, &dwidth);
1194 		handler = vme_register_error_handler(tsi148_bridge, aspace,
1195 						     vme_base + offset, count);
1196 		if (!handler) {
1197 			spin_unlock(&image->lock);
1198 			return -ENOMEM;
1199 		}
1200 	}
1201 
1202 	/* The following code handles VME address alignment. We cannot use
1203 	 * memcpy_xxx here because it may cut data transfers into 8-bit
1204 	 * cycles when D16 or D32 cycles are required on the VME bus.
1205 	 * On the other hand, the bridge itself assures that the maximum data
1206 	 * cycle configured for the transfer is used and splits it
1207 	 * automatically for non-aligned addresses, so we don't want the
1208 	 * overhead of needlessly forcing small transfers for the entire cycle.
1209 	 */
1210 	if ((uintptr_t)addr & 0x1) {
1211 		*(u8 *)buf = ioread8(addr);
1212 		done += 1;
1213 		if (done == count)
1214 			goto out;
1215 	}
1216 	if ((uintptr_t)(addr + done) & 0x2) {
1217 		if ((count - done) < 2) {
1218 			*(u8 *)(buf + done) = ioread8(addr + done);
1219 			done += 1;
1220 			goto out;
1221 		} else {
1222 			*(u16 *)(buf + done) = ioread16(addr + done);
1223 			done += 2;
1224 		}
1225 	}
1226 
1227 	count32 = (count - done) & ~0x3;
1228 	while (done < count32) {
1229 		*(u32 *)(buf + done) = ioread32(addr + done);
1230 		done += 4;
1231 	}
1232 
1233 	if ((count - done) & 0x2) {
1234 		*(u16 *)(buf + done) = ioread16(addr + done);
1235 		done += 2;
1236 	}
1237 	if ((count - done) & 0x1) {
1238 		*(u8 *)(buf + done) = ioread8(addr + done);
1239 		done += 1;
1240 	}
1241 
1242 out:
1243 	retval = count;
1244 
1245 	if (err_chk) {
1246 		if (handler->num_errors) {
1247 			dev_err(image->parent->parent,
1248 				"First VME read error detected at address 0x%llx\n",
1249 				handler->first_error);
1250 			retval = handler->first_error - (vme_base + offset);
1251 		}
1252 		vme_unregister_error_handler(handler);
1253 	}
1254 
1255 	spin_unlock(&image->lock);
1256 
1257 	return retval;
1258 }
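/*
 * Illustrative trace of the alignment handling above (addresses are
 * arbitrary): a 10-byte read from a mapping address ending in ...1 is issued
 * as 1 + 2 + 4 + 2 + 1 bytes: an 8-bit cycle to reach 16-bit alignment, a
 * 16-bit cycle to reach 32-bit alignment, as many aligned 32-bit cycles as
 * fit within the count, then 16-bit and 8-bit cycles for the tail. The VME
 * bus therefore sees the widest cycles the alignment and count allow.
 */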
1259 
1260 
1261 static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1262 	size_t count, loff_t offset)
1263 {
1264 	int retval = 0, enabled;
1265 	unsigned long long vme_base, size;
1266 	u32 aspace, cycle, dwidth;
1267 	void __iomem *addr = image->kern_base + offset;
1268 	unsigned int done = 0;
1269 	unsigned int count32;
1270 
1271 	struct vme_error_handler *handler = NULL;
1272 	struct vme_bridge *tsi148_bridge;
1273 	struct tsi148_driver *bridge;
1274 
1275 	tsi148_bridge = image->parent;
1276 
1277 	bridge = tsi148_bridge->driver_priv;
1278 
1279 	spin_lock(&image->lock);
1280 
1281 	if (err_chk) {
1282 		__tsi148_master_get(image, &enabled, &vme_base, &size, &aspace,
1283 				    &cycle, &dwidth);
1284 		handler = vme_register_error_handler(tsi148_bridge, aspace,
1285 						     vme_base + offset, count);
1286 		if (!handler) {
1287 			spin_unlock(&image->lock);
1288 			return -ENOMEM;
1289 		}
1290 	}
1291 
1292 	/* Here we apply the same strategy as in the master_read function
1293 	 * in order to ensure the correct cycles are used.
1294 	 */
1295 	if ((uintptr_t)addr & 0x1) {
1296 		iowrite8(*(u8 *)buf, addr);
1297 		done += 1;
1298 		if (done == count)
1299 			goto out;
1300 	}
1301 	if ((uintptr_t)(addr + done) & 0x2) {
1302 		if ((count - done) < 2) {
1303 			iowrite8(*(u8 *)(buf + done), addr + done);
1304 			done += 1;
1305 			goto out;
1306 		} else {
1307 			iowrite16(*(u16 *)(buf + done), addr + done);
1308 			done += 2;
1309 		}
1310 	}
1311 
1312 	count32 = (count - done) & ~0x3;
1313 	while (done < count32) {
1314 		iowrite32(*(u32 *)(buf + done), addr + done);
1315 		done += 4;
1316 	}
1317 
1318 	if ((count - done) & 0x2) {
1319 		iowrite16(*(u16 *)(buf + done), addr + done);
1320 		done += 2;
1321 	}
1322 	if ((count - done) & 0x1) {
1323 		iowrite8(*(u8 *)(buf + done), addr + done);
1324 		done += 1;
1325 	}
1326 
1327 out:
1328 	retval = count;
1329 
1330 	/*
1331 	 * Writes are posted. We need to do a read on the VME bus to flush out
1332 	 * all of the writes before we check for errors. We can't guarantee
1333 	 * that reading the data we have just written is safe. It is believed
1334 	 * that there isn't any read, write re-ordering, so we can read any
1335 	 * location in VME space, so let's read the Device ID from the tsi148's
1336 	 * own registers as mapped into CR/CSR space.
1337 	 *
1338 	 * We check for saved errors in the written address range/space.
1339 	 */
1340 
1341 	if (err_chk) {
1342 		ioread16(bridge->flush_image->kern_base + 0x7F000);
1343 
1344 		if (handler->num_errors) {
1345 			dev_warn(tsi148_bridge->parent,
1346 				 "First VME write error detected at address 0x%llx\n",
1347 				 handler->first_error);
1348 			retval = handler->first_error - (vme_base + offset);
1349 		}
1350 		vme_unregister_error_handler(handler);
1351 	}
1352 
1353 	spin_unlock(&image->lock);
1354 
1355 	return retval;
1356 }
1357 
1358 /*
1359  * Perform an RMW cycle on the VME bus.
1360  *
1361  * Requires a previously configured master window, returns final value.
1362  */
1363 static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
1364 	unsigned int mask, unsigned int compare, unsigned int swap,
1365 	loff_t offset)
1366 {
1367 	unsigned long long pci_addr;
1368 	unsigned int pci_addr_high, pci_addr_low;
1369 	u32 tmp, result;
1370 	int i;
1371 	struct tsi148_driver *bridge;
1372 
1373 	bridge = image->parent->driver_priv;
1374 
1375 	/* Find the PCI address that maps to the desired VME address */
1376 	i = image->number;
1377 
1378 	/* Locking as we can only do one of these at a time */
1379 	mutex_lock(&bridge->vme_rmw);
1380 
1381 	/* Lock image */
1382 	spin_lock(&image->lock);
1383 
1384 	pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1385 		TSI148_LCSR_OFFSET_OTSAU);
1386 	pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1387 		TSI148_LCSR_OFFSET_OTSAL);
1388 
1389 	reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1390 	reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1391 
1392 	/* Configure registers */
1393 	iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
1394 	iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
1395 	iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
1396 	iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
1397 	iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
1398 
1399 	/* Enable RMW */
1400 	tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1401 	tmp |= TSI148_LCSR_VMCTRL_RMWEN;
1402 	iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1403 
1404 	/* Kick process off with a read to the required address. */
1405 	result = ioread32be(image->kern_base + offset);
1406 
1407 	/* Disable RMW */
1408 	tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1409 	tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
1410 	iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1411 
1412 	spin_unlock(&image->lock);
1413 
1414 	mutex_unlock(&bridge->vme_rmw);
1415 
1416 	return result;
1417 }
1418 
1419 static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
1420 	u32 aspace, u32 cycle, u32 dwidth)
1421 {
1422 	u32 val;
1423 
1424 	val = be32_to_cpu(*attr);
1425 
1426 	/* Setup 2eSST speeds */
1427 	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1428 	case VME_2eSST160:
1429 		val |= TSI148_LCSR_DSAT_2eSSTM_160;
1430 		break;
1431 	case VME_2eSST267:
1432 		val |= TSI148_LCSR_DSAT_2eSSTM_267;
1433 		break;
1434 	case VME_2eSST320:
1435 		val |= TSI148_LCSR_DSAT_2eSSTM_320;
1436 		break;
1437 	}
1438 
1439 	/* Setup cycle types */
1440 	if (cycle & VME_SCT)
1441 		val |= TSI148_LCSR_DSAT_TM_SCT;
1442 
1443 	if (cycle & VME_BLT)
1444 		val |= TSI148_LCSR_DSAT_TM_BLT;
1445 
1446 	if (cycle & VME_MBLT)
1447 		val |= TSI148_LCSR_DSAT_TM_MBLT;
1448 
1449 	if (cycle & VME_2eVME)
1450 		val |= TSI148_LCSR_DSAT_TM_2eVME;
1451 
1452 	if (cycle & VME_2eSST)
1453 		val |= TSI148_LCSR_DSAT_TM_2eSST;
1454 
1455 	if (cycle & VME_2eSSTB) {
1456 		dev_err(dev, "Currently not setting Broadcast Select "
1457 			"Registers\n");
1458 		val |= TSI148_LCSR_DSAT_TM_2eSSTB;
1459 	}
1460 
1461 	/* Setup data width */
1462 	switch (dwidth) {
1463 	case VME_D16:
1464 		val |= TSI148_LCSR_DSAT_DBW_16;
1465 		break;
1466 	case VME_D32:
1467 		val |= TSI148_LCSR_DSAT_DBW_32;
1468 		break;
1469 	default:
1470 		dev_err(dev, "Invalid data width\n");
1471 		return -EINVAL;
1472 	}
1473 
1474 	/* Setup address space */
1475 	switch (aspace) {
1476 	case VME_A16:
1477 		val |= TSI148_LCSR_DSAT_AMODE_A16;
1478 		break;
1479 	case VME_A24:
1480 		val |= TSI148_LCSR_DSAT_AMODE_A24;
1481 		break;
1482 	case VME_A32:
1483 		val |= TSI148_LCSR_DSAT_AMODE_A32;
1484 		break;
1485 	case VME_A64:
1486 		val |= TSI148_LCSR_DSAT_AMODE_A64;
1487 		break;
1488 	case VME_CRCSR:
1489 		val |= TSI148_LCSR_DSAT_AMODE_CRCSR;
1490 		break;
1491 	case VME_USER1:
1492 		val |= TSI148_LCSR_DSAT_AMODE_USER1;
1493 		break;
1494 	case VME_USER2:
1495 		val |= TSI148_LCSR_DSAT_AMODE_USER2;
1496 		break;
1497 	case VME_USER3:
1498 		val |= TSI148_LCSR_DSAT_AMODE_USER3;
1499 		break;
1500 	case VME_USER4:
1501 		val |= TSI148_LCSR_DSAT_AMODE_USER4;
1502 		break;
1503 	default:
1504 		dev_err(dev, "Invalid address space\n");
1505 		return -EINVAL;
1506 		break;
1507 	}
1508 
1509 	if (cycle & VME_SUPER)
1510 		val |= TSI148_LCSR_DSAT_SUP;
1511 	if (cycle & VME_PROG)
1512 		val |= TSI148_LCSR_DSAT_PGM;
1513 
1514 	*attr = cpu_to_be32(val);
1515 
1516 	return 0;
1517 }
1518 
1519 static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
1520 	u32 aspace, u32 cycle, u32 dwidth)
1521 {
1522 	u32 val;
1523 
1524 	val = be32_to_cpu(*attr);
1525 
1526 	/* Setup 2eSST speeds */
1527 	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1528 	case VME_2eSST160:
1529 		val |= TSI148_LCSR_DDAT_2eSSTM_160;
1530 		break;
1531 	case VME_2eSST267:
1532 		val |= TSI148_LCSR_DDAT_2eSSTM_267;
1533 		break;
1534 	case VME_2eSST320:
1535 		val |= TSI148_LCSR_DDAT_2eSSTM_320;
1536 		break;
1537 	}
1538 
1539 	/* Setup cycle types */
1540 	if (cycle & VME_SCT)
1541 		val |= TSI148_LCSR_DDAT_TM_SCT;
1542 
1543 	if (cycle & VME_BLT)
1544 		val |= TSI148_LCSR_DDAT_TM_BLT;
1545 
1546 	if (cycle & VME_MBLT)
1547 		val |= TSI148_LCSR_DDAT_TM_MBLT;
1548 
1549 	if (cycle & VME_2eVME)
1550 		val |= TSI148_LCSR_DDAT_TM_2eVME;
1551 
1552 	if (cycle & VME_2eSST)
1553 		val |= TSI148_LCSR_DDAT_TM_2eSST;
1554 
1555 	if (cycle & VME_2eSSTB) {
1556 		dev_err(dev, "Currently not setting Broadcast Select "
1557 			"Registers\n");
1558 		val |= TSI148_LCSR_DDAT_TM_2eSSTB;
1559 	}
1560 
1561 	/* Setup data width */
1562 	switch (dwidth) {
1563 	case VME_D16:
1564 		val |= TSI148_LCSR_DDAT_DBW_16;
1565 		break;
1566 	case VME_D32:
1567 		val |= TSI148_LCSR_DDAT_DBW_32;
1568 		break;
1569 	default:
1570 		dev_err(dev, "Invalid data width\n");
1571 		return -EINVAL;
1572 	}
1573 
1574 	/* Setup address space */
1575 	switch (aspace) {
1576 	case VME_A16:
1577 		val |= TSI148_LCSR_DDAT_AMODE_A16;
1578 		break;
1579 	case VME_A24:
1580 		val |= TSI148_LCSR_DDAT_AMODE_A24;
1581 		break;
1582 	case VME_A32:
1583 		val |= TSI148_LCSR_DDAT_AMODE_A32;
1584 		break;
1585 	case VME_A64:
1586 		val |= TSI148_LCSR_DDAT_AMODE_A64;
1587 		break;
1588 	case VME_CRCSR:
1589 		val |= TSI148_LCSR_DDAT_AMODE_CRCSR;
1590 		break;
1591 	case VME_USER1:
1592 		val |= TSI148_LCSR_DDAT_AMODE_USER1;
1593 		break;
1594 	case VME_USER2:
1595 		val |= TSI148_LCSR_DDAT_AMODE_USER2;
1596 		break;
1597 	case VME_USER3:
1598 		val |= TSI148_LCSR_DDAT_AMODE_USER3;
1599 		break;
1600 	case VME_USER4:
1601 		val |= TSI148_LCSR_DDAT_AMODE_USER4;
1602 		break;
1603 	default:
1604 		dev_err(dev, "Invalid address space\n");
1605 		return -EINVAL;
1606 		break;
1607 	}
1608 
1609 	if (cycle & VME_SUPER)
1610 		val |= TSI148_LCSR_DDAT_SUP;
1611 	if (cycle & VME_PROG)
1612 		val |= TSI148_LCSR_DDAT_PGM;
1613 
1614 	*attr = cpu_to_be32(val);
1615 
1616 	return 0;
1617 }
1618 
1619 /*
1620  * Add a link list descriptor to the list
1621  *
1622  * Note: DMA engine expects the DMA descriptor to be big endian.
1623  */
1624 static int tsi148_dma_list_add(struct vme_dma_list *list,
1625 	struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
1626 {
1627 	struct tsi148_dma_entry *entry, *prev;
1628 	u32 address_high, address_low, val;
1629 	struct vme_dma_pattern *pattern_attr;
1630 	struct vme_dma_pci *pci_attr;
1631 	struct vme_dma_vme *vme_attr;
1632 	int retval = 0;
1633 	struct vme_bridge *tsi148_bridge;
1634 
1635 	tsi148_bridge = list->parent->parent;
1636 
1637 	/* Descriptor must be aligned on 64-bit boundaries */
1638 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1639 	if (!entry) {
1640 		retval = -ENOMEM;
1641 		goto err_mem;
1642 	}
1643 
1644 	/* Test descriptor alignment */
1645 	if ((unsigned long)&entry->descriptor & 0x7) {
1646 		dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
1647 			"byte boundary as required: %p\n",
1648 			&entry->descriptor);
1649 		retval = -EINVAL;
1650 		goto err_align;
1651 	}
1652 
1653 	/* Given we are going to fill out the structure, we probably don't
1654 	 * need to zero it, but better safe than sorry for now.
1655 	 */
1656 	memset(&entry->descriptor, 0, sizeof(entry->descriptor));
1657 
1658 	/* Fill out source part */
1659 	switch (src->type) {
1660 	case VME_DMA_PATTERN:
1661 		pattern_attr = src->private;
1662 
1663 		entry->descriptor.dsal = cpu_to_be32(pattern_attr->pattern);
1664 
1665 		val = TSI148_LCSR_DSAT_TYP_PAT;
1666 
1667 		/* Default behaviour is 32 bit pattern */
1668 		if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
1669 			val |= TSI148_LCSR_DSAT_PSZ;
1670 
1671 		/* It seems that the default behaviour is to increment */
1672 		if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
1673 			val |= TSI148_LCSR_DSAT_NIN;
1674 		entry->descriptor.dsat = cpu_to_be32(val);
1675 		break;
1676 	case VME_DMA_PCI:
1677 		pci_attr = src->private;
1678 
1679 		reg_split((unsigned long long)pci_attr->address, &address_high,
1680 			&address_low);
1681 		entry->descriptor.dsau = cpu_to_be32(address_high);
1682 		entry->descriptor.dsal = cpu_to_be32(address_low);
1683 		entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI);
1684 		break;
1685 	case VME_DMA_VME:
1686 		vme_attr = src->private;
1687 
1688 		reg_split((unsigned long long)vme_attr->address, &address_high,
1689 			&address_low);
1690 		entry->descriptor.dsau = cpu_to_be32(address_high);
1691 		entry->descriptor.dsal = cpu_to_be32(address_low);
1692 		entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME);
1693 
1694 		retval = tsi148_dma_set_vme_src_attributes(
1695 			tsi148_bridge->parent, &entry->descriptor.dsat,
1696 			vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1697 		if (retval < 0)
1698 			goto err_source;
1699 		break;
1700 	default:
1701 		dev_err(tsi148_bridge->parent, "Invalid source type\n");
1702 		retval = -EINVAL;
1703 		goto err_source;
1704 		break;
1705 	}
1706 
1707 	/* Assume last link - this will be over-written by adding another */
1708 	entry->descriptor.dnlau = cpu_to_be32(0);
1709 	entry->descriptor.dnlal = cpu_to_be32(TSI148_LCSR_DNLAL_LLA);
1710 
1711 	/* Fill out destination part */
1712 	switch (dest->type) {
1713 	case VME_DMA_PCI:
1714 		pci_attr = dest->private;
1715 
1716 		reg_split((unsigned long long)pci_attr->address, &address_high,
1717 			&address_low);
1718 		entry->descriptor.ddau = cpu_to_be32(address_high);
1719 		entry->descriptor.ddal = cpu_to_be32(address_low);
1720 		entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI);
1721 		break;
1722 	case VME_DMA_VME:
1723 		vme_attr = dest->private;
1724 
1725 		reg_split((unsigned long long)vme_attr->address, &address_high,
1726 			&address_low);
1727 		entry->descriptor.ddau = cpu_to_be32(address_high);
1728 		entry->descriptor.ddal = cpu_to_be32(address_low);
1729 		entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME);
1730 
1731 		retval = tsi148_dma_set_vme_dest_attributes(
1732 			tsi148_bridge->parent, &entry->descriptor.ddat,
1733 			vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1734 		if (retval < 0)
1735 			goto err_dest;
1736 		break;
1737 	default:
1738 		dev_err(tsi148_bridge->parent, "Invalid destination type\n");
1739 		retval = -EINVAL;
1740 		goto err_dest;
1741 		break;
1742 	}
1743 
1744 	/* Fill out count */
1745 	entry->descriptor.dcnt = cpu_to_be32((u32)count);
1746 
1747 	/* Add to list */
1748 	list_add_tail(&entry->list, &list->entries);
1749 
1750 	entry->dma_handle = dma_map_single(tsi148_bridge->parent,
1751 					   &entry->descriptor,
1752 					   sizeof(entry->descriptor),
1753 					   DMA_TO_DEVICE);
1754 	if (dma_mapping_error(tsi148_bridge->parent, entry->dma_handle)) {
1755 		dev_err(tsi148_bridge->parent, "DMA mapping error\n");
1756 		retval = -EINVAL;
1757 		goto err_dma;
1758 	}
1759 
1760 	/* Fill out the previous descriptor's "Next Address" */
1761 	if (entry->list.prev != &list->entries) {
1762 		reg_split((unsigned long long)entry->dma_handle, &address_high,
1763 			&address_low);
1764 		prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
1765 				  list);
1766 		prev->descriptor.dnlau = cpu_to_be32(address_high);
1767 		prev->descriptor.dnlal = cpu_to_be32(address_low);
1768 
1769 	}
1770 
1771 	return 0;
1772 
1773 err_dma:
1774 err_dest:
1775 err_source:
1776 err_align:
1777 	kfree(entry);
1778 err_mem:
1779 	return retval;
1780 }
1781 
1782 /*
1783  * Check whether the provided DMA channel is busy (returns 0 if busy, 1 if idle).
1784  */
tsi148_dma_busy(struct vme_bridge * tsi148_bridge,int channel)1785 static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
1786 {
1787 	u32 tmp;
1788 	struct tsi148_driver *bridge;
1789 
1790 	bridge = tsi148_bridge->driver_priv;
1791 
1792 	tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1793 		TSI148_LCSR_OFFSET_DSTA);
1794 
1795 	if (tmp & TSI148_LCSR_DSTA_BSY)
1796 		return 0;
1797 	else
1798 		return 1;
1799 
1800 }
1801 
1802 /*
1803  * Execute a previously generated link list
1804  *
1805  * XXX Need to provide control register configuration.
1806  */
tsi148_dma_list_exec(struct vme_dma_list * list)1807 static int tsi148_dma_list_exec(struct vme_dma_list *list)
1808 {
1809 	struct vme_dma_resource *ctrlr;
1810 	int channel, retval;
1811 	struct tsi148_dma_entry *entry;
1812 	u32 bus_addr_high, bus_addr_low;
1813 	u32 val, dctlreg = 0;
1814 	struct vme_bridge *tsi148_bridge;
1815 	struct tsi148_driver *bridge;
1816 
1817 	ctrlr = list->parent;
1818 
1819 	tsi148_bridge = ctrlr->parent;
1820 
1821 	bridge = tsi148_bridge->driver_priv;
1822 
1823 	mutex_lock(&ctrlr->mtx);
1824 
1825 	channel = ctrlr->number;
1826 
1827 	if (!list_empty(&ctrlr->running)) {
1828 		/*
1829 		 * XXX We have an active DMA transfer and currently haven't
1830 		 *     sorted out the mechanism for "pending" DMA transfers.
1831 		 *     Return busy.
1832 		 */
1833 		/* Need to add to pending here */
1834 		mutex_unlock(&ctrlr->mtx);
1835 		return -EBUSY;
1836 	} else {
1837 		list_add(&list->list, &ctrlr->running);
1838 	}
1839 
1840 	/* Get first bus address and write into registers */
1841 	entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
1842 		list);
1843 
1844 	mutex_unlock(&ctrlr->mtx);
1845 
1846 	reg_split(entry->dma_handle, &bus_addr_high, &bus_addr_low);
1847 
1848 	iowrite32be(bus_addr_high, bridge->base +
1849 		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
1850 	iowrite32be(bus_addr_low, bridge->base +
1851 		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
1852 
1853 	dctlreg = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1854 		TSI148_LCSR_OFFSET_DCTL);
1855 
1856 	/* Start the operation */
1857 	iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
1858 		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1859 
1860 	retval = wait_event_interruptible(bridge->dma_queue[channel],
1861 		tsi148_dma_busy(ctrlr->parent, channel));
1862 
1863 	if (retval) {
1864 		iowrite32be(dctlreg | TSI148_LCSR_DCTL_ABT, bridge->base +
1865 			TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1866 		/* Wait for the operation to abort */
1867 		wait_event(bridge->dma_queue[channel],
1868 			   tsi148_dma_busy(ctrlr->parent, channel));
1869 		retval = -EINTR;
1870 		goto exit;
1871 	}
1872 
1873 	/*
1874 	 * Read the status register; it remains valid until we kick off a
1875 	 * new transfer.
1876 	 */
1877 	val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1878 		TSI148_LCSR_OFFSET_DSTA);
1879 
1880 	if (val & TSI148_LCSR_DSTA_VBE) {
1881 		dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
1882 		retval = -EIO;
1883 	}
1884 
1885 exit:
1886 	/* Remove list from running list */
1887 	mutex_lock(&ctrlr->mtx);
1888 	list_del(&list->list);
1889 	mutex_unlock(&ctrlr->mtx);
1890 
1891 	return retval;
1892 }
1893 
1894 /*
1895  * Clean up a previously generated link list
1896  *
1897  * This is a separate function - don't assume that the chain can't be reused.
1898  */
tsi148_dma_list_empty(struct vme_dma_list * list)1899 static int tsi148_dma_list_empty(struct vme_dma_list *list)
1900 {
1901 	struct list_head *pos, *temp;
1902 	struct tsi148_dma_entry *entry;
1903 
1904 	struct vme_bridge *tsi148_bridge = list->parent->parent;
1905 
1906 	/* detach and free each entry */
1907 	list_for_each_safe(pos, temp, &list->entries) {
1908 		list_del(pos);
1909 		entry = list_entry(pos, struct tsi148_dma_entry, list);
1910 
1911 		dma_unmap_single(tsi148_bridge->parent, entry->dma_handle,
1912 			sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1913 		kfree(entry);
1914 	}
1915 
1916 	return 0;
1917 }
1918 
1919 /*
1920  * All 4 location monitors reside at the same base - this is therefore a
1921  * system-wide configuration.
1922  *
1923  * This does not enable the location monitor - that should be done when the
1924  * first callback is attached and disabled when the last callback is removed.
1925  */
tsi148_lm_set(struct vme_lm_resource * lm,unsigned long long lm_base,u32 aspace,u32 cycle)1926 static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
1927 	u32 aspace, u32 cycle)
1928 {
1929 	u32 lm_base_high, lm_base_low, lm_ctl = 0;
1930 	int i;
1931 	struct vme_bridge *tsi148_bridge;
1932 	struct tsi148_driver *bridge;
1933 
1934 	tsi148_bridge = lm->parent;
1935 
1936 	bridge = tsi148_bridge->driver_priv;
1937 
1938 	mutex_lock(&lm->mtx);
1939 
1940 	/* If we already have a callback attached, we can't move it! */
1941 	for (i = 0; i < lm->monitors; i++) {
1942 		if (bridge->lm_callback[i]) {
1943 			mutex_unlock(&lm->mtx);
1944 			dev_err(tsi148_bridge->parent, "Location monitor "
1945 				"callback attached, can't reset\n");
1946 			return -EBUSY;
1947 		}
1948 	}
1949 
1950 	switch (aspace) {
1951 	case VME_A16:
1952 		lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
1953 		break;
1954 	case VME_A24:
1955 		lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
1956 		break;
1957 	case VME_A32:
1958 		lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
1959 		break;
1960 	case VME_A64:
1961 		lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
1962 		break;
1963 	default:
1964 		mutex_unlock(&lm->mtx);
1965 		dev_err(tsi148_bridge->parent, "Invalid address space\n");
1966 		return -EINVAL;
1967 		break;
1968 	}
1969 
1970 	if (cycle & VME_SUPER)
1971 		lm_ctl |= TSI148_LCSR_LMAT_SUPR;
1972 	if (cycle & VME_USER)
1973 		lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
1974 	if (cycle & VME_PROG)
1975 		lm_ctl |= TSI148_LCSR_LMAT_PGM;
1976 	if (cycle & VME_DATA)
1977 		lm_ctl |= TSI148_LCSR_LMAT_DATA;
1978 
1979 	reg_split(lm_base, &lm_base_high, &lm_base_low);
1980 
1981 	iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
1982 	iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
1983 	iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
1984 
1985 	mutex_unlock(&lm->mtx);
1986 
1987 	return 0;
1988 }
1989 
1990 /* Get the location monitor configuration and return whether it is enabled
1991  * or disabled.
1992  */
tsi148_lm_get(struct vme_lm_resource * lm,unsigned long long * lm_base,u32 * aspace,u32 * cycle)1993 static int tsi148_lm_get(struct vme_lm_resource *lm,
1994 	unsigned long long *lm_base, u32 *aspace, u32 *cycle)
1995 {
1996 	u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
1997 	struct tsi148_driver *bridge;
1998 
1999 	bridge = lm->parent->driver_priv;
2000 
2001 	mutex_lock(&lm->mtx);
2002 
2003 	lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
2004 	lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
2005 	lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2006 
2007 	reg_join(lm_base_high, lm_base_low, lm_base);
2008 
2009 	if (lm_ctl & TSI148_LCSR_LMAT_EN)
2010 		enabled = 1;
2011 
2012 	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
2013 		*aspace |= VME_A16;
2014 
2015 	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
2016 		*aspace |= VME_A24;
2017 
2018 	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
2019 		*aspace |= VME_A32;
2020 
2021 	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
2022 		*aspace |= VME_A64;
2023 
2024 
2025 	if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
2026 		*cycle |= VME_SUPER;
2027 	if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
2028 		*cycle |= VME_USER;
2029 	if (lm_ctl & TSI148_LCSR_LMAT_PGM)
2030 		*cycle |= VME_PROG;
2031 	if (lm_ctl & TSI148_LCSR_LMAT_DATA)
2032 		*cycle |= VME_DATA;
2033 
2034 	mutex_unlock(&lm->mtx);
2035 
2036 	return enabled;
2037 }
2038 
2039 /*
2040  * Attach a callback to a specific location monitor.
2041  *
2042  * The callback will be passed the data pointer registered with it.
2043  */
tsi148_lm_attach(struct vme_lm_resource * lm,int monitor,void (* callback)(void *),void * data)2044 static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
2045 	void (*callback)(void *), void *data)
2046 {
2047 	u32 lm_ctl, tmp;
2048 	struct vme_bridge *tsi148_bridge;
2049 	struct tsi148_driver *bridge;
2050 
2051 	tsi148_bridge = lm->parent;
2052 
2053 	bridge = tsi148_bridge->driver_priv;
2054 
2055 	mutex_lock(&lm->mtx);
2056 
2057 	/* Ensure that the location monitor is configured - need PGM or DATA */
2058 	lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2059 	if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
2060 		mutex_unlock(&lm->mtx);
2061 		dev_err(tsi148_bridge->parent, "Location monitor not properly "
2062 			"configured\n");
2063 		return -EINVAL;
2064 	}
2065 
2066 	/* Check that a callback isn't already attached */
2067 	if (bridge->lm_callback[monitor]) {
2068 		mutex_unlock(&lm->mtx);
2069 		dev_err(tsi148_bridge->parent, "Existing callback attached\n");
2070 		return -EBUSY;
2071 	}
2072 
2073 	/* Attach callback */
2074 	bridge->lm_callback[monitor] = callback;
2075 	bridge->lm_data[monitor] = data;
2076 
2077 	/* Enable Location Monitor interrupt */
2078 	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2079 	tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
2080 	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
2081 
2082 	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2083 	tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
2084 	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2085 
2086 	/* Ensure that the global Location Monitor Enable bit is set */
2087 	if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
2088 		lm_ctl |= TSI148_LCSR_LMAT_EN;
2089 		iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
2090 	}
2091 
2092 	mutex_unlock(&lm->mtx);
2093 
2094 	return 0;
2095 }
2096 
2097 /*
2098  * Detach a callback function from a specific location monitor.
2099  */
tsi148_lm_detach(struct vme_lm_resource * lm,int monitor)2100 static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
2101 {
2102 	u32 lm_en, tmp;
2103 	struct tsi148_driver *bridge;
2104 
2105 	bridge = lm->parent->driver_priv;
2106 
2107 	mutex_lock(&lm->mtx);
2108 
2109 	/* Disable Location Monitor and ensure previous interrupts are clear */
2110 	lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2111 	lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
2112 	iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
2113 
2114 	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2115 	tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
2116 	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2117 
2118 	iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
2119 		 bridge->base + TSI148_LCSR_INTC);
2120 
2121 	/* Detach callback */
2122 	bridge->lm_callback[monitor] = NULL;
2123 	bridge->lm_data[monitor] = NULL;
2124 
2125 	/* If all location monitors are disabled, disable the global Location Monitor */
2126 	if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
2127 			TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
2128 		tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2129 		tmp &= ~TSI148_LCSR_LMAT_EN;
2130 		iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
2131 	}
2132 
2133 	mutex_unlock(&lm->mtx);
2134 
2135 	return 0;
2136 }
2137 
2138 /*
2139  * Determine Geographical Addressing
2140  */
tsi148_slot_get(struct vme_bridge * tsi148_bridge)2141 static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
2142 {
2143 	u32 slot = 0;
2144 	struct tsi148_driver *bridge;
2145 
2146 	bridge = tsi148_bridge->driver_priv;
2147 
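	/* Use the geographic address from VSTAT unless overridden by geoid */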
2148 	if (!geoid) {
2149 		slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
2150 		slot = slot & TSI148_LCSR_VSTAT_GA_M;
2151 	} else
2152 		slot = geoid;
2153 
2154 	return (int)slot;
2155 }
2156 
tsi148_alloc_consistent(struct device * parent,size_t size,dma_addr_t * dma)2157 static void *tsi148_alloc_consistent(struct device *parent, size_t size,
2158 	dma_addr_t *dma)
2159 {
2160 	struct pci_dev *pdev;
2161 
2162 	/* Find pci_dev container of dev */
2163 	pdev = to_pci_dev(parent);
2164 
2165 	return pci_alloc_consistent(pdev, size, dma);
2166 }
2167 
tsi148_free_consistent(struct device * parent,size_t size,void * vaddr,dma_addr_t dma)2168 static void tsi148_free_consistent(struct device *parent, size_t size,
2169 	void *vaddr, dma_addr_t dma)
2170 {
2171 	struct pci_dev *pdev;
2172 
2173 	/* Find pci_dev container of dev */
2174 	pdev = to_pci_dev(parent);
2175 
2176 	pci_free_consistent(pdev, size, vaddr, dma);
2177 }
2178 
2179 /*
2180  * Configure CR/CSR space
2181  *
2182  * Access to the CR/CSR can be configured at power-up. The location of the
2183  * CR/CSR registers in the CR/CSR address space is determined by the board's
2184  * Auto-ID or Geographic address. This function ensures that the window is
2185  * enabled at an offset consistent with the board's geographic address.
2186  *
2187  * Each board has a 512kB window, with the highest 4kB being used for the
2188  * board's registers, so there is a fixed-length 508kB window which must
2189  * be mapped onto PCI memory.
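 *
 * For example, a board with geographic address 5 has its CR/CSR window at
 * offset 5 * 512kB = 0x280000 within the CR/CSR address space.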
2190  */
tsi148_crcsr_init(struct vme_bridge * tsi148_bridge,struct pci_dev * pdev)2191 static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
2192 	struct pci_dev *pdev)
2193 {
2194 	u32 cbar, crat, vstat;
2195 	u32 crcsr_bus_high, crcsr_bus_low;
2196 	int retval;
2197 	struct tsi148_driver *bridge;
2198 
2199 	bridge = tsi148_bridge->driver_priv;
2200 
2201 	/* Allocate mem for CR/CSR image */
2202 	bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
2203 						     &bridge->crcsr_bus);
2204 	if (!bridge->crcsr_kernel) {
2205 		dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
2206 			"CR/CSR image\n");
2207 		return -ENOMEM;
2208 	}
2209 
2210 	reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
2211 
2212 	iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
2213 	iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
2214 
2215 	/* Ensure that the CR/CSR is configured at the correct offset */
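	/* The CBAR BAR field starts at bit 3, hence the shifts by 3 below */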
2216 	cbar = ioread32be(bridge->base + TSI148_CBAR);
2217 	cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
2218 
2219 	vstat = tsi148_slot_get(tsi148_bridge);
2220 
2221 	if (cbar != vstat) {
2222 		cbar = vstat;
2223 		dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
2224 		iowrite32be(cbar<<3, bridge->base + TSI148_CBAR);
2225 	}
2226 	dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);
2227 
2228 	crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2229 	if (crat & TSI148_LCSR_CRAT_EN)
2230 		dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
2231 	else {
2232 		dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
2233 		iowrite32be(crat | TSI148_LCSR_CRAT_EN,
2234 			bridge->base + TSI148_LCSR_CRAT);
2235 	}
2236 
2237 	/* If we want flushed, error-checked writes, set up a window
2238 	 * over the CR/CSR registers. We read from this window to safely flush
2239 	 * VME writes through.
2240 	 */
2241 	if (err_chk) {
2242 		retval = tsi148_master_set(bridge->flush_image, 1,
2243 			(vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
2244 			VME_D16);
2245 		if (retval)
2246 			dev_err(tsi148_bridge->parent, "Configuring flush image"
2247 				" failed\n");
2248 	}
2249 
2250 	return 0;
2251 
2252 }
2253 
tsi148_crcsr_exit(struct vme_bridge * tsi148_bridge,struct pci_dev * pdev)2254 static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
2255 	struct pci_dev *pdev)
2256 {
2257 	u32 crat;
2258 	struct tsi148_driver *bridge;
2259 
2260 	bridge = tsi148_bridge->driver_priv;
2261 
2262 	/* Turn off CR/CSR space */
2263 	crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2264 	iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
2265 		bridge->base + TSI148_LCSR_CRAT);
2266 
2267 	/* Free image */
2268 	iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
2269 	iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
2270 
2271 	pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
2272 		bridge->crcsr_bus);
2273 }
2274 
tsi148_probe(struct pci_dev * pdev,const struct pci_device_id * id)2275 static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2276 {
2277 	int retval, i, master_num;
2278 	u32 data;
2279 	struct list_head *pos = NULL, *n;
2280 	struct vme_bridge *tsi148_bridge;
2281 	struct tsi148_driver *tsi148_device;
2282 	struct vme_master_resource *master_image;
2283 	struct vme_slave_resource *slave_image;
2284 	struct vme_dma_resource *dma_ctrlr;
2285 	struct vme_lm_resource *lm;
2286 
2287 	/* If we want to support more than one of each bridge, we need to
2288 	 * dynamically generate this so we get one per device
2289 	 */
2290 	tsi148_bridge = kzalloc(sizeof(*tsi148_bridge), GFP_KERNEL);
2291 	if (!tsi148_bridge) {
2292 		retval = -ENOMEM;
2293 		goto err_struct;
2294 	}
2295 	vme_init_bridge(tsi148_bridge);
2296 
2297 	tsi148_device = kzalloc(sizeof(*tsi148_device), GFP_KERNEL);
2298 	if (!tsi148_device) {
2299 		retval = -ENOMEM;
2300 		goto err_driver;
2301 	}
2302 
2303 	tsi148_bridge->driver_priv = tsi148_device;
2304 
2305 	/* Enable the device */
2306 	retval = pci_enable_device(pdev);
2307 	if (retval) {
2308 		dev_err(&pdev->dev, "Unable to enable device\n");
2309 		goto err_enable;
2310 	}
2311 
2312 	/* Map Registers */
2313 	retval = pci_request_regions(pdev, driver_name);
2314 	if (retval) {
2315 		dev_err(&pdev->dev, "Unable to reserve resources\n");
2316 		goto err_resource;
2317 	}
2318 
2319 	/* map registers in BAR 0 */
2320 	tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
2321 		4096);
2322 	if (!tsi148_device->base) {
2323 		dev_err(&pdev->dev, "Unable to remap CRG region\n");
2324 		retval = -EIO;
2325 		goto err_remap;
2326 	}
2327 
2328 	/* Check to see if the mapping worked out */
2329 	data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
2330 	if (data != PCI_VENDOR_ID_TUNDRA) {
2331 		dev_err(&pdev->dev, "CRG region check failed\n");
2332 		retval = -EIO;
2333 		goto err_test;
2334 	}
2335 
2336 	/* Initialize wait queues & mutual exclusion flags */
2337 	init_waitqueue_head(&tsi148_device->dma_queue[0]);
2338 	init_waitqueue_head(&tsi148_device->dma_queue[1]);
2339 	init_waitqueue_head(&tsi148_device->iack_queue);
2340 	mutex_init(&tsi148_device->vme_int);
2341 	mutex_init(&tsi148_device->vme_rmw);
2342 
2343 	tsi148_bridge->parent = &pdev->dev;
2344 	strcpy(tsi148_bridge->name, driver_name);
2345 
2346 	/* Setup IRQ */
2347 	retval = tsi148_irq_init(tsi148_bridge);
2348 	if (retval != 0) {
2349 		dev_err(&pdev->dev, "Chip Initialization failed.\n");
2350 		goto err_irq;
2351 	}
2352 
2353 	/* If we are going to flush writes, we need to read from the VME bus.
2354 	 * We need to do this safely, thus we read the device's own CR/CSR
2355 	 * register. To do this we must set up a window in CR/CSR space and
2356 	 * hence have one less master window resource available.
2357 	 */
2358 	master_num = TSI148_MAX_MASTER;
2359 	if (err_chk) {
2360 		master_num--;
2361 
2362 		tsi148_device->flush_image =
2363 			kmalloc(sizeof(*tsi148_device->flush_image),
2364 				GFP_KERNEL);
2365 		if (!tsi148_device->flush_image) {
2366 			retval = -ENOMEM;
2367 			goto err_master;
2368 		}
2369 		tsi148_device->flush_image->parent = tsi148_bridge;
2370 		spin_lock_init(&tsi148_device->flush_image->lock);
2371 		tsi148_device->flush_image->locked = 1;
2372 		tsi148_device->flush_image->number = master_num;
2373 		memset(&tsi148_device->flush_image->bus_resource, 0,
2374 		       sizeof(tsi148_device->flush_image->bus_resource));
2375 		tsi148_device->flush_image->kern_base  = NULL;
2376 	}
2377 
2378 	/* Add master windows to list */
2379 	for (i = 0; i < master_num; i++) {
2380 		master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
2381 		if (!master_image) {
2382 			retval = -ENOMEM;
2383 			goto err_master;
2384 		}
2385 		master_image->parent = tsi148_bridge;
2386 		spin_lock_init(&master_image->lock);
2387 		master_image->locked = 0;
2388 		master_image->number = i;
2389 		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2390 			VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
2391 			VME_USER3 | VME_USER4;
2392 		master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2393 			VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2394 			VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2395 			VME_PROG | VME_DATA;
2396 		master_image->width_attr = VME_D16 | VME_D32;
2397 		memset(&master_image->bus_resource, 0,
2398 		       sizeof(master_image->bus_resource));
2399 		master_image->kern_base  = NULL;
2400 		list_add_tail(&master_image->list,
2401 			&tsi148_bridge->master_resources);
2402 	}
2403 
2404 	/* Add slave windows to list */
2405 	for (i = 0; i < TSI148_MAX_SLAVE; i++) {
2406 		slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
2407 		if (!slave_image) {
2408 			retval = -ENOMEM;
2409 			goto err_slave;
2410 		}
2411 		slave_image->parent = tsi148_bridge;
2412 		mutex_init(&slave_image->mtx);
2413 		slave_image->locked = 0;
2414 		slave_image->number = i;
2415 		slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2416 			VME_A64;
2417 		slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2418 			VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2419 			VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2420 			VME_PROG | VME_DATA;
2421 		list_add_tail(&slave_image->list,
2422 			&tsi148_bridge->slave_resources);
2423 	}
2424 
2425 	/* Add dma engines to list */
2426 	for (i = 0; i < TSI148_MAX_DMA; i++) {
2427 		dma_ctrlr = kmalloc(sizeof(*dma_ctrlr), GFP_KERNEL);
2428 		if (!dma_ctrlr) {
2429 			retval = -ENOMEM;
2430 			goto err_dma;
2431 		}
2432 		dma_ctrlr->parent = tsi148_bridge;
2433 		mutex_init(&dma_ctrlr->mtx);
2434 		dma_ctrlr->locked = 0;
2435 		dma_ctrlr->number = i;
2436 		dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
2437 			VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
2438 			VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
2439 			VME_DMA_PATTERN_TO_MEM;
2440 		INIT_LIST_HEAD(&dma_ctrlr->pending);
2441 		INIT_LIST_HEAD(&dma_ctrlr->running);
2442 		list_add_tail(&dma_ctrlr->list,
2443 			&tsi148_bridge->dma_resources);
2444 	}
2445 
2446 	/* Add location monitor to list */
2447 	lm = kmalloc(sizeof(*lm), GFP_KERNEL);
2448 	if (!lm) {
2449 		retval = -ENOMEM;
2450 		goto err_lm;
2451 	}
2452 	lm->parent = tsi148_bridge;
2453 	mutex_init(&lm->mtx);
2454 	lm->locked = 0;
2455 	lm->number = 1;
2456 	lm->monitors = 4;
2457 	list_add_tail(&lm->list, &tsi148_bridge->lm_resources);
2458 
2459 	tsi148_bridge->slave_get = tsi148_slave_get;
2460 	tsi148_bridge->slave_set = tsi148_slave_set;
2461 	tsi148_bridge->master_get = tsi148_master_get;
2462 	tsi148_bridge->master_set = tsi148_master_set;
2463 	tsi148_bridge->master_read = tsi148_master_read;
2464 	tsi148_bridge->master_write = tsi148_master_write;
2465 	tsi148_bridge->master_rmw = tsi148_master_rmw;
2466 	tsi148_bridge->dma_list_add = tsi148_dma_list_add;
2467 	tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
2468 	tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
2469 	tsi148_bridge->irq_set = tsi148_irq_set;
2470 	tsi148_bridge->irq_generate = tsi148_irq_generate;
2471 	tsi148_bridge->lm_set = tsi148_lm_set;
2472 	tsi148_bridge->lm_get = tsi148_lm_get;
2473 	tsi148_bridge->lm_attach = tsi148_lm_attach;
2474 	tsi148_bridge->lm_detach = tsi148_lm_detach;
2475 	tsi148_bridge->slot_get = tsi148_slot_get;
2476 	tsi148_bridge->alloc_consistent = tsi148_alloc_consistent;
2477 	tsi148_bridge->free_consistent = tsi148_free_consistent;
2478 
2479 	data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2480 	dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2481 		(data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
2482 	if (!geoid)
2483 		dev_info(&pdev->dev, "VME geographical address is %d\n",
2484 			data & TSI148_LCSR_VSTAT_GA_M);
2485 	else
2486 		dev_info(&pdev->dev, "VME geographical address is set to %d\n",
2487 			geoid);
2488 
2489 	dev_info(&pdev->dev, "VME write flush and error check is %s\n",
2490 		err_chk ? "enabled" : "disabled");
2491 
2492 	retval = tsi148_crcsr_init(tsi148_bridge, pdev);
2493 	if (retval) {
2494 		dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2495 		goto err_crcsr;
2496 	}
2497 
2498 	retval = vme_register_bridge(tsi148_bridge);
2499 	if (retval != 0) {
2500 		dev_err(&pdev->dev, "Chip Registration failed.\n");
2501 		goto err_reg;
2502 	}
2503 
2504 	pci_set_drvdata(pdev, tsi148_bridge);
2505 
2506 	/* Clear VME bus "board fail", and "power-up reset" lines */
2507 	data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2508 	data &= ~TSI148_LCSR_VSTAT_BRDFL;
2509 	data |= TSI148_LCSR_VSTAT_CPURST;
2510 	iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
2511 
2512 	return 0;
2513 
2514 err_reg:
2515 	tsi148_crcsr_exit(tsi148_bridge, pdev);
2516 err_crcsr:
2517 err_lm:
2518 	/* resources are stored in link list */
2519 	list_for_each_safe(pos, n, &tsi148_bridge->lm_resources) {
2520 		lm = list_entry(pos, struct vme_lm_resource, list);
2521 		list_del(pos);
2522 		kfree(lm);
2523 	}
2524 err_dma:
2525 	/* resources are stored in link list */
2526 	list_for_each_safe(pos, n, &tsi148_bridge->dma_resources) {
2527 		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2528 		list_del(pos);
2529 		kfree(dma_ctrlr);
2530 	}
2531 err_slave:
2532 	/* resources are stored in link list */
2533 	list_for_each_safe(pos, n, &tsi148_bridge->slave_resources) {
2534 		slave_image = list_entry(pos, struct vme_slave_resource, list);
2535 		list_del(pos);
2536 		kfree(slave_image);
2537 	}
2538 err_master:
2539 	/* resources are stored in link list */
2540 	list_for_each_safe(pos, n, &tsi148_bridge->master_resources) {
2541 		master_image = list_entry(pos, struct vme_master_resource,
2542 			list);
2543 		list_del(pos);
2544 		kfree(master_image);
2545 	}
2546 
2547 	tsi148_irq_exit(tsi148_bridge, pdev);
2548 err_irq:
2549 err_test:
2550 	iounmap(tsi148_device->base);
2551 err_remap:
2552 	pci_release_regions(pdev);
2553 err_resource:
2554 	pci_disable_device(pdev);
2555 err_enable:
2556 	kfree(tsi148_device);
2557 err_driver:
2558 	kfree(tsi148_bridge);
2559 err_struct:
2560 	return retval;
2561 
2562 }
2563 
tsi148_remove(struct pci_dev * pdev)2564 static void tsi148_remove(struct pci_dev *pdev)
2565 {
2566 	struct list_head *pos = NULL;
2567 	struct list_head *tmplist;
2568 	struct vme_master_resource *master_image;
2569 	struct vme_slave_resource *slave_image;
2570 	struct vme_dma_resource *dma_ctrlr;
2571 	int i;
2572 	struct tsi148_driver *bridge;
2573 	struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
2574 
2575 	bridge = tsi148_bridge->driver_priv;
2576 
2577 
2578 	dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
2579 
2580 	/*
2581 	 *  Shutdown all inbound and outbound windows.
2582 	 */
2583 	for (i = 0; i < 8; i++) {
2584 		iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
2585 			TSI148_LCSR_OFFSET_ITAT);
2586 		iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
2587 			TSI148_LCSR_OFFSET_OTAT);
2588 	}
2589 
2590 	/*
2591 	 *  Shutdown Location monitor.
2592 	 */
2593 	iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
2594 
2595 	/*
2596 	 *  Shutdown CRG map.
2597 	 */
2598 	iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);
2599 
2600 	/*
2601 	 *  Clear error status.
2602 	 */
2603 	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
2604 	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
2605 	iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);
2606 
2607 	/*
2608 	 *  Remove VIRQ interrupt (if any)
2609 	 */
2610 	if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
2611 		iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);
2612 
2613 	/*
2614 	 *  Map all Interrupts to PCI INTA
2615 	 */
2616 	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
2617 	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
2618 
2619 	tsi148_irq_exit(tsi148_bridge, pdev);
2620 
2621 	vme_unregister_bridge(tsi148_bridge);
2622 
2623 	tsi148_crcsr_exit(tsi148_bridge, pdev);
2624 
2625 	/* resources are stored in link list */
2626 	list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
2627 		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2628 		list_del(pos);
2629 		kfree(dma_ctrlr);
2630 	}
2631 
2632 	/* resources are stored in link list */
2633 	list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
2634 		slave_image = list_entry(pos, struct vme_slave_resource, list);
2635 		list_del(pos);
2636 		kfree(slave_image);
2637 	}
2638 
2639 	/* resources are stored in link list */
2640 	list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
2641 		master_image = list_entry(pos, struct vme_master_resource,
2642 			list);
2643 		list_del(pos);
2644 		kfree(master_image);
2645 	}
2646 
2647 	iounmap(bridge->base);
2648 
2649 	pci_release_regions(pdev);
2650 
2651 	pci_disable_device(pdev);
2652 
2653 	kfree(tsi148_bridge->driver_priv);
2654 
2655 	kfree(tsi148_bridge);
2656 }
2657 
2658 module_pci_driver(tsi148_driver);
2659 
2660 MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
2661 module_param(err_chk, bool, 0);
2662 
2663 MODULE_PARM_DESC(geoid, "Override geographical addressing");
2664 module_param(geoid, int, 0);
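
/*
 * Example (illustrative values): load with VME error checking enabled and
 * the geographic address forced to slot 3:
 *
 *   modprobe vme_tsi148 err_chk=1 geoid=3
 */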
2665 
2666 MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
2667 MODULE_LICENSE("GPL");
2668