1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Support for the Tundra TSI148 VME-PCI Bridge Chip
4 *
5 * Author: Martyn Welch <martyn.welch@ge.com>
6 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
7 *
8 * Based on work by Tom Armistead and Ajit Prem
9 * Copyright 2004 Motorola Inc.
10 */
11
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/mm.h>
15 #include <linux/types.h>
16 #include <linux/errno.h>
17 #include <linux/proc_fs.h>
18 #include <linux/pci.h>
19 #include <linux/poll.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/interrupt.h>
22 #include <linux/spinlock.h>
23 #include <linux/sched.h>
24 #include <linux/slab.h>
25 #include <linux/time.h>
26 #include <linux/io.h>
27 #include <linux/uaccess.h>
28 #include <linux/byteorder/generic.h>
29 #include <linux/vme.h>
30
31 #include "../vme_bridge.h"
32 #include "vme_tsi148.h"
33
34 static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
35 static void tsi148_remove(struct pci_dev *);
36
37
38 /* Module parameters */
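/*
 * err_chk enables VME bus error checking around master window accesses
 * (see tsi148_master_read/write below); geoid, if non-zero, is used
 * elsewhere in the driver to override the geographical (slot) address.
 */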
39 static bool err_chk;
40 static int geoid;
41
42 static const char driver_name[] = "vme_tsi148";
43
44 static const struct pci_device_id tsi148_ids[] = {
45 { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
46 { },
47 };
48
49 MODULE_DEVICE_TABLE(pci, tsi148_ids);
50
51 static struct pci_driver tsi148_driver = {
52 .name = driver_name,
53 .id_table = tsi148_ids,
54 .probe = tsi148_probe,
55 .remove = tsi148_remove,
56 };
57
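/* Combine the high and low 32-bit halves of a register pair into a 64-bit value. */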
58 static void reg_join(unsigned int high, unsigned int low,
59 unsigned long long *variable)
60 {
61 *variable = (unsigned long long)high << 32;
62 *variable |= (unsigned long long)low;
63 }
64
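/* Split a 64-bit value into the high and low 32-bit halves of a register pair. */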
65 static void reg_split(unsigned long long variable, unsigned int *high,
66 unsigned int *low)
67 {
68 *low = (unsigned int)variable & 0xFFFFFFFF;
69 *high = (unsigned int)(variable >> 32);
70 }
71
72 /*
73 * Wakes up DMA queue.
74 */
75 static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
76 int channel_mask)
77 {
78 u32 serviced = 0;
79
80 if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
81 wake_up(&bridge->dma_queue[0]);
82 serviced |= TSI148_LCSR_INTC_DMA0C;
83 }
84 if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
85 wake_up(&bridge->dma_queue[1]);
86 serviced |= TSI148_LCSR_INTC_DMA1C;
87 }
88
89 return serviced;
90 }
91
92 /*
93 * Call the location monitor callbacks for any triggered monitors.
94 */
95 static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
96 {
97 int i;
98 u32 serviced = 0;
99
100 for (i = 0; i < 4; i++) {
101 if (stat & TSI148_LCSR_INTS_LMS[i]) {
102 /* We only enable interrupts if the callback is set */
103 bridge->lm_callback[i](bridge->lm_data[i]);
104 serviced |= TSI148_LCSR_INTC_LMC[i];
105 }
106 }
107
108 return serviced;
109 }
110
111 /*
112 * Handle mailbox interrupts by reporting the received value.
113 *
114 * XXX This functionality is not exposed up through the API.
115 */
116 static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
117 {
118 int i;
119 u32 val;
120 u32 serviced = 0;
121 struct tsi148_driver *bridge;
122
123 bridge = tsi148_bridge->driver_priv;
124
125 for (i = 0; i < 4; i++) {
126 if (stat & TSI148_LCSR_INTS_MBS[i]) {
127 val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
128 dev_err(tsi148_bridge->parent, "VME Mailbox %d received"
129 ": 0x%x\n", i, val);
130 serviced |= TSI148_LCSR_INTC_MBC[i];
131 }
132 }
133
134 return serviced;
135 }
136
137 /*
138 * Display error & status message when PERR (PCI) exception interrupt occurs.
139 */
140 static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
141 {
142 struct tsi148_driver *bridge;
143
144 bridge = tsi148_bridge->driver_priv;
145
146 dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, "
147 "attributes: %08x\n",
148 ioread32be(bridge->base + TSI148_LCSR_EDPAU),
149 ioread32be(bridge->base + TSI148_LCSR_EDPAL),
150 ioread32be(bridge->base + TSI148_LCSR_EDPAT));
151
152 dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split "
153 "completion reg: %08x\n",
154 ioread32be(bridge->base + TSI148_LCSR_EDPXA),
155 ioread32be(bridge->base + TSI148_LCSR_EDPXS));
156
157 iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
158
159 return TSI148_LCSR_INTC_PERRC;
160 }
161
162 /*
163 * Save address and status when VME error interrupt occurs.
164 */
165 static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
166 {
167 unsigned int error_addr_high, error_addr_low;
168 unsigned long long error_addr;
169 u32 error_attrib;
170 int error_am;
171 struct tsi148_driver *bridge;
172
173 bridge = tsi148_bridge->driver_priv;
174
175 error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
176 error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
177 error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
178 error_am = (error_attrib & TSI148_LCSR_VEAT_AM_M) >> 8;
179
180 reg_join(error_addr_high, error_addr_low, &error_addr);
181
182 /* Check for exception register overflow (we have lost error data) */
183 if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
184 dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow "
185 "Occurred\n");
186 }
187
188 if (err_chk)
189 vme_bus_error_handler(tsi148_bridge, error_addr, error_am);
190 else
191 dev_err(tsi148_bridge->parent,
192 "VME Bus Error at address: 0x%llx, attributes: %08x\n",
193 error_addr, error_attrib);
194
195 /* Clear Status */
196 iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
197
198 return TSI148_LCSR_INTC_VERRC;
199 }
200
201 /*
202 * Wake up IACK queue.
203 */
204 static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
205 {
206 wake_up(&bridge->iack_queue);
207
208 return TSI148_LCSR_INTC_IACKC;
209 }
210
211 /*
212 * Call the VME bus interrupt callback if provided.
213 */
214 static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
215 u32 stat)
216 {
217 int vec, i, serviced = 0;
218 struct tsi148_driver *bridge;
219
220 bridge = tsi148_bridge->driver_priv;
221
222 for (i = 7; i > 0; i--) {
223 if (stat & (1 << i)) {
224 /*
225 * Note: Even though the registers are defined as
226 * 32-bits in the spec, we only want to issue 8-bit
227 * IACK cycles on the bus, read from offset 3.
228 */
229 vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
230
231 vme_irq_handler(tsi148_bridge, i, vec);
232
233 serviced |= (1 << i);
234 }
235 }
236
237 return serviced;
238 }
239
240 /*
241 * Top level interrupt handler. Calls the appropriate sub handler(s) and
242 * then clears the serviced interrupt status bits.
243 */
244 static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
245 {
246 u32 stat, enable, serviced = 0;
247 struct vme_bridge *tsi148_bridge;
248 struct tsi148_driver *bridge;
249
250 tsi148_bridge = ptr;
251
252 bridge = tsi148_bridge->driver_priv;
253
254 /* Determine which interrupts are unmasked and set */
255 enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
256 stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
257
258 /* Only look at unmasked interrupts */
259 stat &= enable;
260
261 if (unlikely(!stat))
262 return IRQ_NONE;
263
264 /* Call subhandlers as appropriate */
265 /* DMA irqs */
266 if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
267 serviced |= tsi148_DMA_irqhandler(bridge, stat);
268
269 /* Location monitor irqs */
270 if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
271 TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
272 serviced |= tsi148_LM_irqhandler(bridge, stat);
273
274 /* Mail box irqs */
275 if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
276 TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
277 serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);
278
279 /* PCI bus error */
280 if (stat & TSI148_LCSR_INTS_PERRS)
281 serviced |= tsi148_PERR_irqhandler(tsi148_bridge);
282
283 /* VME bus error */
284 if (stat & TSI148_LCSR_INTS_VERRS)
285 serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
286
287 /* IACK irq */
288 if (stat & TSI148_LCSR_INTS_IACKS)
289 serviced |= tsi148_IACK_irqhandler(bridge);
290
291 /* VME bus irqs */
292 if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
293 TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
294 TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
295 TSI148_LCSR_INTS_IRQ1S))
296 serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
297
298 /* Clear serviced interrupts */
299 iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
300
301 return IRQ_HANDLED;
302 }
303
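/*
 * Request the PCI interrupt line and enable the bridge's core interrupt
 * sources (DMA, mailbox, PCI/VME error and IACK).
 */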
304 static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
305 {
306 int result;
307 unsigned int tmp;
308 struct pci_dev *pdev;
309 struct tsi148_driver *bridge;
310
311 pdev = to_pci_dev(tsi148_bridge->parent);
312
313 bridge = tsi148_bridge->driver_priv;
314
315 result = request_irq(pdev->irq,
316 tsi148_irqhandler,
317 IRQF_SHARED,
318 driver_name, tsi148_bridge);
319 if (result) {
320 dev_err(tsi148_bridge->parent, "Can't get assigned pci irq "
321 "vector %02X\n", pdev->irq);
322 return result;
323 }
324
325 /* Enable and unmask interrupts */
326 tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
327 TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
328 TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
329 TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
330 TSI148_LCSR_INTEO_IACKEO;
331
332 /* This leaves the following interrupts masked.
333 * TSI148_LCSR_INTEO_VIEEO
334 * TSI148_LCSR_INTEO_SYSFLEO
335 * TSI148_LCSR_INTEO_ACFLEO
336 */
337
338 /* Don't enable Location Monitor interrupts here - they will be
339 * enabled when the location monitors are properly configured and
340 * a callback has been attached.
341 * TSI148_LCSR_INTEO_LM0EO
342 * TSI148_LCSR_INTEO_LM1EO
343 * TSI148_LCSR_INTEO_LM2EO
344 * TSI148_LCSR_INTEO_LM3EO
345 */
346
347 /* Don't enable VME interrupts until we add a handler, else the board
348 * will respond to it and we don't want that unless it knows how to
349 * properly deal with it.
350 * TSI148_LCSR_INTEO_IRQ7EO
351 * TSI148_LCSR_INTEO_IRQ6EO
352 * TSI148_LCSR_INTEO_IRQ5EO
353 * TSI148_LCSR_INTEO_IRQ4EO
354 * TSI148_LCSR_INTEO_IRQ3EO
355 * TSI148_LCSR_INTEO_IRQ2EO
356 * TSI148_LCSR_INTEO_IRQ1EO
357 */
358
359 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
360 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
361
362 return 0;
363 }
364
365 static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
366 struct pci_dev *pdev)
367 {
368 struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
369
370 /* Turn off interrupts */
371 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
372 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
373
374 /* Clear all interrupts */
375 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
376
377 /* Detach interrupt handler */
378 free_irq(pdev->irq, tsi148_bridge);
379 }
380
381 /*
382 * Check to see if an IACK has been received, return true (1) or false (0).
383 */
384 static int tsi148_iack_received(struct tsi148_driver *bridge)
385 {
386 u32 tmp;
387
388 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
389
390 if (tmp & TSI148_LCSR_VICR_IRQS)
391 return 0;
392 else
393 return 1;
394 }
395
396 /*
397 * Configure VME interrupt
398 */
399 static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
400 int state, int sync)
401 {
402 struct pci_dev *pdev;
403 u32 tmp;
404 struct tsi148_driver *bridge;
405
406 bridge = tsi148_bridge->driver_priv;
407
408 /* We need to do the ordering differently for enabling and disabling */
409 if (state == 0) {
410 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
411 tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
412 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
413
414 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
415 tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
416 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
417
418 if (sync != 0) {
419 pdev = to_pci_dev(tsi148_bridge->parent);
420 synchronize_irq(pdev->irq);
421 }
422 } else {
423 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
424 tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
425 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
426
427 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
428 tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
429 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
430 }
431 }
432
433 /*
434 * Generate a VME bus interrupt at the requested level & vector. Wait for
435 * interrupt to be acked.
436 */
437 static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
438 int statid)
439 {
440 u32 tmp;
441 struct tsi148_driver *bridge;
442
443 bridge = tsi148_bridge->driver_priv;
444
445 mutex_lock(&bridge->vme_int);
446
447 /* Read VICR register */
448 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
449
450 /* Set Status/ID */
451 tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
452 (statid & TSI148_LCSR_VICR_STID_M);
453 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
454
455 /* Assert VMEbus IRQ */
456 tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
457 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
458
459 /* XXX Consider implementing a timeout? */
460 wait_event_interruptible(bridge->iack_queue,
461 tsi148_iack_received(bridge));
462
463 mutex_unlock(&bridge->vme_int);
464
465 return 0;
466 }
467
468 /*
469 * Initialize a slave window with the requested attributes.
470 */
471 static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
472 unsigned long long vme_base, unsigned long long size,
473 dma_addr_t pci_base, u32 aspace, u32 cycle)
474 {
475 unsigned int i, addr = 0, granularity = 0;
476 unsigned int temp_ctl = 0;
477 unsigned int vme_base_low, vme_base_high;
478 unsigned int vme_bound_low, vme_bound_high;
479 unsigned int pci_offset_low, pci_offset_high;
480 unsigned long long vme_bound, pci_offset;
481 struct vme_bridge *tsi148_bridge;
482 struct tsi148_driver *bridge;
483
484 tsi148_bridge = image->parent;
485 bridge = tsi148_bridge->driver_priv;
486
487 i = image->number;
488
489 switch (aspace) {
490 case VME_A16:
491 granularity = 0x10;
492 addr |= TSI148_LCSR_ITAT_AS_A16;
493 break;
494 case VME_A24:
495 granularity = 0x1000;
496 addr |= TSI148_LCSR_ITAT_AS_A24;
497 break;
498 case VME_A32:
499 granularity = 0x10000;
500 addr |= TSI148_LCSR_ITAT_AS_A32;
501 break;
502 case VME_A64:
503 granularity = 0x10000;
504 addr |= TSI148_LCSR_ITAT_AS_A64;
505 break;
506 default:
507 dev_err(tsi148_bridge->parent, "Invalid address space\n");
508 return -EINVAL;
509 }
510
511 /* Convert 64-bit variables to 2x 32-bit variables */
512 reg_split(vme_base, &vme_base_high, &vme_base_low);
513
514 /*
515 * Bound address is a valid address for the window, adjust
516 * accordingly
517 */
518 vme_bound = vme_base + size - granularity;
519 reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
520 pci_offset = (unsigned long long)pci_base - vme_base;
521 reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
522
523 if (vme_base_low & (granularity - 1)) {
524 dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
525 return -EINVAL;
526 }
527 if (vme_bound_low & (granularity - 1)) {
528 dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
529 return -EINVAL;
530 }
531 if (pci_offset_low & (granularity - 1)) {
532 dev_err(tsi148_bridge->parent, "Invalid PCI Offset "
533 "alignment\n");
534 return -EINVAL;
535 }
536
537 /* Disable while we are mucking around */
538 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
539 TSI148_LCSR_OFFSET_ITAT);
540 temp_ctl &= ~TSI148_LCSR_ITAT_EN;
541 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
542 TSI148_LCSR_OFFSET_ITAT);
543
544 /* Setup mapping */
545 iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
546 TSI148_LCSR_OFFSET_ITSAU);
547 iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
548 TSI148_LCSR_OFFSET_ITSAL);
549 iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
550 TSI148_LCSR_OFFSET_ITEAU);
551 iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
552 TSI148_LCSR_OFFSET_ITEAL);
553 iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
554 TSI148_LCSR_OFFSET_ITOFU);
555 iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
556 TSI148_LCSR_OFFSET_ITOFL);
557
558 /* Setup 2eSST speeds */
559 temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
560 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
561 case VME_2eSST160:
562 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
563 break;
564 case VME_2eSST267:
565 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
566 break;
567 case VME_2eSST320:
568 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
569 break;
570 }
571
572 /* Setup cycle types */
573 temp_ctl &= ~(0x1F << 7);
574 if (cycle & VME_BLT)
575 temp_ctl |= TSI148_LCSR_ITAT_BLT;
576 if (cycle & VME_MBLT)
577 temp_ctl |= TSI148_LCSR_ITAT_MBLT;
578 if (cycle & VME_2eVME)
579 temp_ctl |= TSI148_LCSR_ITAT_2eVME;
580 if (cycle & VME_2eSST)
581 temp_ctl |= TSI148_LCSR_ITAT_2eSST;
582 if (cycle & VME_2eSSTB)
583 temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
584
585 /* Setup address space */
586 temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
587 temp_ctl |= addr;
588
589 temp_ctl &= ~0xF;
590 if (cycle & VME_SUPER)
591 temp_ctl |= TSI148_LCSR_ITAT_SUPR;
592 if (cycle & VME_USER)
593 temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
594 if (cycle & VME_PROG)
595 temp_ctl |= TSI148_LCSR_ITAT_PGM;
596 if (cycle & VME_DATA)
597 temp_ctl |= TSI148_LCSR_ITAT_DATA;
598
599 /* Write ctl reg without enable */
600 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
601 TSI148_LCSR_OFFSET_ITAT);
602
603 if (enabled)
604 temp_ctl |= TSI148_LCSR_ITAT_EN;
605
606 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
607 TSI148_LCSR_OFFSET_ITAT);
608
609 return 0;
610 }
611
612 /*
613 * Get slave window configuration.
614 */
615 static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
616 unsigned long long *vme_base, unsigned long long *size,
617 dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
618 {
619 unsigned int i, granularity = 0, ctl = 0;
620 unsigned int vme_base_low, vme_base_high;
621 unsigned int vme_bound_low, vme_bound_high;
622 unsigned int pci_offset_low, pci_offset_high;
623 unsigned long long vme_bound, pci_offset;
624 struct tsi148_driver *bridge;
625
626 bridge = image->parent->driver_priv;
627
628 i = image->number;
629
630 /* Read registers */
631 ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
632 TSI148_LCSR_OFFSET_ITAT);
633
634 vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
635 TSI148_LCSR_OFFSET_ITSAU);
636 vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
637 TSI148_LCSR_OFFSET_ITSAL);
638 vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
639 TSI148_LCSR_OFFSET_ITEAU);
640 vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
641 TSI148_LCSR_OFFSET_ITEAL);
642 pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
643 TSI148_LCSR_OFFSET_ITOFU);
644 pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
645 TSI148_LCSR_OFFSET_ITOFL);
646
647 /* Combine 2x 32-bit variables into 64-bit variables */
648 reg_join(vme_base_high, vme_base_low, vme_base);
649 reg_join(vme_bound_high, vme_bound_low, &vme_bound);
650 reg_join(pci_offset_high, pci_offset_low, &pci_offset);
651
652 *pci_base = (dma_addr_t)(*vme_base + pci_offset);
653
654 *enabled = 0;
655 *aspace = 0;
656 *cycle = 0;
657
658 if (ctl & TSI148_LCSR_ITAT_EN)
659 *enabled = 1;
660
661 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
662 granularity = 0x10;
663 *aspace |= VME_A16;
664 }
665 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
666 granularity = 0x1000;
667 *aspace |= VME_A24;
668 }
669 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
670 granularity = 0x10000;
671 *aspace |= VME_A32;
672 }
673 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
674 granularity = 0x10000;
675 *aspace |= VME_A64;
676 }
677
678 /* Need granularity before we set the size */
679 *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
680
681
682 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
683 *cycle |= VME_2eSST160;
684 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
685 *cycle |= VME_2eSST267;
686 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
687 *cycle |= VME_2eSST320;
688
689 if (ctl & TSI148_LCSR_ITAT_BLT)
690 *cycle |= VME_BLT;
691 if (ctl & TSI148_LCSR_ITAT_MBLT)
692 *cycle |= VME_MBLT;
693 if (ctl & TSI148_LCSR_ITAT_2eVME)
694 *cycle |= VME_2eVME;
695 if (ctl & TSI148_LCSR_ITAT_2eSST)
696 *cycle |= VME_2eSST;
697 if (ctl & TSI148_LCSR_ITAT_2eSSTB)
698 *cycle |= VME_2eSSTB;
699
700 if (ctl & TSI148_LCSR_ITAT_SUPR)
701 *cycle |= VME_SUPER;
702 if (ctl & TSI148_LCSR_ITAT_NPRIV)
703 *cycle |= VME_USER;
704 if (ctl & TSI148_LCSR_ITAT_PGM)
705 *cycle |= VME_PROG;
706 if (ctl & TSI148_LCSR_ITAT_DATA)
707 *cycle |= VME_DATA;
708
709 return 0;
710 }
711
712 /*
713 * Allocate and map PCI Resource
714 */
715 static int tsi148_alloc_resource(struct vme_master_resource *image,
716 unsigned long long size)
717 {
718 unsigned long long existing_size;
719 int retval = 0;
720 struct pci_dev *pdev;
721 struct vme_bridge *tsi148_bridge;
722
723 tsi148_bridge = image->parent;
724
725 pdev = to_pci_dev(tsi148_bridge->parent);
726
727 existing_size = (unsigned long long)(image->bus_resource.end -
728 image->bus_resource.start);
729
730 /* If the existing size is OK, return */
731 if ((size != 0) && (existing_size == (size - 1)))
732 return 0;
733
734 if (existing_size != 0) {
735 iounmap(image->kern_base);
736 image->kern_base = NULL;
737 kfree(image->bus_resource.name);
738 release_resource(&image->bus_resource);
739 memset(&image->bus_resource, 0, sizeof(image->bus_resource));
740 }
741
742 /* Exit here if size is zero */
743 if (size == 0)
744 return 0;
745
746 if (!image->bus_resource.name) {
747 image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
748 if (!image->bus_resource.name) {
749 retval = -ENOMEM;
750 goto err_name;
751 }
752 }
753
754 sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
755 image->number);
756
757 image->bus_resource.start = 0;
758 image->bus_resource.end = (unsigned long)size;
759 image->bus_resource.flags = IORESOURCE_MEM;
760
761 retval = pci_bus_alloc_resource(pdev->bus,
762 &image->bus_resource, size, 0x10000, PCIBIOS_MIN_MEM,
763 0, NULL, NULL);
764 if (retval) {
765 dev_err(tsi148_bridge->parent, "Failed to allocate mem "
766 "resource for window %d size 0x%lx start 0x%lx\n",
767 image->number, (unsigned long)size,
768 (unsigned long)image->bus_resource.start);
769 goto err_resource;
770 }
771
772 image->kern_base = ioremap(
773 image->bus_resource.start, size);
774 if (!image->kern_base) {
775 dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
776 retval = -ENOMEM;
777 goto err_remap;
778 }
779
780 return 0;
781
782 err_remap:
783 release_resource(&image->bus_resource);
784 err_resource:
785 kfree(image->bus_resource.name);
786 memset(&image->bus_resource, 0, sizeof(image->bus_resource));
787 err_name:
788 return retval;
789 }
790
791 /*
792 * Free and unmap PCI Resource
793 */
794 static void tsi148_free_resource(struct vme_master_resource *image)
795 {
796 iounmap(image->kern_base);
797 image->kern_base = NULL;
798 release_resource(&image->bus_resource);
799 kfree(image->bus_resource.name);
800 memset(&image->bus_resource, 0, sizeof(image->bus_resource));
801 }
802
803 /*
804 * Set the attributes of an outbound window.
805 */
806 static int tsi148_master_set(struct vme_master_resource *image, int enabled,
807 unsigned long long vme_base, unsigned long long size, u32 aspace,
808 u32 cycle, u32 dwidth)
809 {
810 int retval = 0;
811 unsigned int i;
812 unsigned int temp_ctl = 0;
813 unsigned int pci_base_low, pci_base_high;
814 unsigned int pci_bound_low, pci_bound_high;
815 unsigned int vme_offset_low, vme_offset_high;
816 unsigned long long pci_bound, vme_offset, pci_base;
817 struct vme_bridge *tsi148_bridge;
818 struct tsi148_driver *bridge;
819 struct pci_bus_region region;
820 struct pci_dev *pdev;
821
822 tsi148_bridge = image->parent;
823
824 bridge = tsi148_bridge->driver_priv;
825
826 pdev = to_pci_dev(tsi148_bridge->parent);
827
828 /* Verify input data */
829 if (vme_base & 0xFFFF) {
830 dev_err(tsi148_bridge->parent, "Invalid VME Window "
831 "alignment\n");
832 retval = -EINVAL;
833 goto err_window;
834 }
835
836 if ((size == 0) && (enabled != 0)) {
837 dev_err(tsi148_bridge->parent, "Size must be non-zero for "
838 "enabled windows\n");
839 retval = -EINVAL;
840 goto err_window;
841 }
842
843 spin_lock(&image->lock);
844
845 /* Let's allocate the resource here rather than further up the stack as
846 * it avoids pushing loads of bus dependent stuff up the stack. If size
847 * is zero, any existing resource will be freed.
848 */
849 retval = tsi148_alloc_resource(image, size);
850 if (retval) {
851 spin_unlock(&image->lock);
852 dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
853 "resource\n");
854 goto err_res;
855 }
856
857 if (size == 0) {
858 pci_base = 0;
859 pci_bound = 0;
860 vme_offset = 0;
861 } else {
862 pcibios_resource_to_bus(pdev->bus, &region,
863 &image->bus_resource);
864 pci_base = region.start;
865
866 /*
867 * Bound address is a valid address for the window, adjust
868 * according to window granularity.
869 */
870 pci_bound = pci_base + (size - 0x10000);
871 vme_offset = vme_base - pci_base;
872 }
873
874 /* Convert 64-bit variables to 2x 32-bit variables */
875 reg_split(pci_base, &pci_base_high, &pci_base_low);
876 reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
877 reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
878
879 if (pci_base_low & 0xFFFF) {
880 spin_unlock(&image->lock);
881 dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
882 retval = -EINVAL;
883 goto err_gran;
884 }
885 if (pci_bound_low & 0xFFFF) {
886 spin_unlock(&image->lock);
887 dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
888 retval = -EINVAL;
889 goto err_gran;
890 }
891 if (vme_offset_low & 0xFFFF) {
892 spin_unlock(&image->lock);
893 dev_err(tsi148_bridge->parent, "Invalid VME Offset "
894 "alignment\n");
895 retval = -EINVAL;
896 goto err_gran;
897 }
898
899 i = image->number;
900
901 /* Disable while we are mucking around */
902 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
903 TSI148_LCSR_OFFSET_OTAT);
904 temp_ctl &= ~TSI148_LCSR_OTAT_EN;
905 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
906 TSI148_LCSR_OFFSET_OTAT);
907
908 /* Setup 2eSST speeds */
909 temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
910 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
911 case VME_2eSST160:
912 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
913 break;
914 case VME_2eSST267:
915 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
916 break;
917 case VME_2eSST320:
918 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
919 break;
920 }
921
922 /* Setup cycle types */
923 if (cycle & VME_BLT) {
924 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
925 temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
926 }
927 if (cycle & VME_MBLT) {
928 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
929 temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
930 }
931 if (cycle & VME_2eVME) {
932 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
933 temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
934 }
935 if (cycle & VME_2eSST) {
936 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
937 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
938 }
939 if (cycle & VME_2eSSTB) {
940 dev_warn(tsi148_bridge->parent, "Currently not setting "
941 "Broadcast Select Registers\n");
942 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
943 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
944 }
945
946 /* Setup data width */
947 temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
948 switch (dwidth) {
949 case VME_D16:
950 temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
951 break;
952 case VME_D32:
953 temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
954 break;
955 default:
956 spin_unlock(&image->lock);
957 dev_err(tsi148_bridge->parent, "Invalid data width\n");
958 retval = -EINVAL;
959 goto err_dwidth;
960 }
961
962 /* Setup address space */
963 temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
964 switch (aspace) {
965 case VME_A16:
966 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
967 break;
968 case VME_A24:
969 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
970 break;
971 case VME_A32:
972 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
973 break;
974 case VME_A64:
975 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
976 break;
977 case VME_CRCSR:
978 temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
979 break;
980 case VME_USER1:
981 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
982 break;
983 case VME_USER2:
984 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
985 break;
986 case VME_USER3:
987 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
988 break;
989 case VME_USER4:
990 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
991 break;
992 default:
993 spin_unlock(&image->lock);
994 dev_err(tsi148_bridge->parent, "Invalid address space\n");
995 retval = -EINVAL;
996 goto err_aspace;
997 }
998
999 temp_ctl &= ~(3<<4);
1000 if (cycle & VME_SUPER)
1001 temp_ctl |= TSI148_LCSR_OTAT_SUP;
1002 if (cycle & VME_PROG)
1003 temp_ctl |= TSI148_LCSR_OTAT_PGM;
1004
1005 /* Setup mapping */
1006 iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
1007 TSI148_LCSR_OFFSET_OTSAU);
1008 iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
1009 TSI148_LCSR_OFFSET_OTSAL);
1010 iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
1011 TSI148_LCSR_OFFSET_OTEAU);
1012 iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
1013 TSI148_LCSR_OFFSET_OTEAL);
1014 iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
1015 TSI148_LCSR_OFFSET_OTOFU);
1016 iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
1017 TSI148_LCSR_OFFSET_OTOFL);
1018
1019 /* Write ctl reg without enable */
1020 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1021 TSI148_LCSR_OFFSET_OTAT);
1022
1023 if (enabled)
1024 temp_ctl |= TSI148_LCSR_OTAT_EN;
1025
1026 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1027 TSI148_LCSR_OFFSET_OTAT);
1028
1029 spin_unlock(&image->lock);
1030 return 0;
1031
1032 err_aspace:
1033 err_dwidth:
1034 err_gran:
1035 tsi148_free_resource(image);
1036 err_res:
1037 err_window:
1038 return retval;
1039
1040 }
1041
1042 /*
1043 * Get the attributes of an outbound window.
1044 *
1045 * XXX Not parsing prefetch information.
1046 */
1047 static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
1048 unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1049 u32 *cycle, u32 *dwidth)
1050 {
1051 unsigned int i, ctl;
1052 unsigned int pci_base_low, pci_base_high;
1053 unsigned int pci_bound_low, pci_bound_high;
1054 unsigned int vme_offset_low, vme_offset_high;
1055
1056 unsigned long long pci_base, pci_bound, vme_offset;
1057 struct tsi148_driver *bridge;
1058
1059 bridge = image->parent->driver_priv;
1060
1061 i = image->number;
1062
1063 ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1064 TSI148_LCSR_OFFSET_OTAT);
1065
1066 pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1067 TSI148_LCSR_OFFSET_OTSAU);
1068 pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1069 TSI148_LCSR_OFFSET_OTSAL);
1070 pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1071 TSI148_LCSR_OFFSET_OTEAU);
1072 pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1073 TSI148_LCSR_OFFSET_OTEAL);
1074 vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1075 TSI148_LCSR_OFFSET_OTOFU);
1076 vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1077 TSI148_LCSR_OFFSET_OTOFL);
1078
1079 /* Combine 2x 32-bit variables into 64-bit variables */
1080 reg_join(pci_base_high, pci_base_low, &pci_base);
1081 reg_join(pci_bound_high, pci_bound_low, &pci_bound);
1082 reg_join(vme_offset_high, vme_offset_low, &vme_offset);
1083
1084 *vme_base = pci_base + vme_offset;
1085 *size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
1086
1087 *enabled = 0;
1088 *aspace = 0;
1089 *cycle = 0;
1090 *dwidth = 0;
1091
1092 if (ctl & TSI148_LCSR_OTAT_EN)
1093 *enabled = 1;
1094
1095 /* Setup address space */
1096 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
1097 *aspace |= VME_A16;
1098 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
1099 *aspace |= VME_A24;
1100 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
1101 *aspace |= VME_A32;
1102 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
1103 *aspace |= VME_A64;
1104 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
1105 *aspace |= VME_CRCSR;
1106 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
1107 *aspace |= VME_USER1;
1108 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
1109 *aspace |= VME_USER2;
1110 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
1111 *aspace |= VME_USER3;
1112 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
1113 *aspace |= VME_USER4;
1114
1115 /* Setup 2eSST speeds */
1116 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
1117 *cycle |= VME_2eSST160;
1118 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
1119 *cycle |= VME_2eSST267;
1120 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
1121 *cycle |= VME_2eSST320;
1122
1123 /* Setup cycle types */
1124 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
1125 *cycle |= VME_SCT;
1126 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
1127 *cycle |= VME_BLT;
1128 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
1129 *cycle |= VME_MBLT;
1130 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
1131 *cycle |= VME_2eVME;
1132 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
1133 *cycle |= VME_2eSST;
1134 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
1135 *cycle |= VME_2eSSTB;
1136
1137 if (ctl & TSI148_LCSR_OTAT_SUP)
1138 *cycle |= VME_SUPER;
1139 else
1140 *cycle |= VME_USER;
1141
1142 if (ctl & TSI148_LCSR_OTAT_PGM)
1143 *cycle |= VME_PROG;
1144 else
1145 *cycle |= VME_DATA;
1146
1147 /* Setup data width */
1148 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
1149 *dwidth = VME_D16;
1150 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
1151 *dwidth = VME_D32;
1152
1153 return 0;
1154 }
1155
1156
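/*
 * Get the attributes of an outbound window, holding the image lock while
 * the registers are read.
 */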
1157 static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
1158 unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1159 u32 *cycle, u32 *dwidth)
1160 {
1161 int retval;
1162
1163 spin_lock(&image->lock);
1164
1165 retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
1166 cycle, dwidth);
1167
1168 spin_unlock(&image->lock);
1169
1170 return retval;
1171 }
1172
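/*
 * Read from a master window into buf, using the widest naturally-aligned
 * accesses possible so that D16/D32 cycles are preserved on the VME bus.
 */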
1173 static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1174 size_t count, loff_t offset)
1175 {
1176 int retval, enabled;
1177 unsigned long long vme_base, size;
1178 u32 aspace, cycle, dwidth;
1179 struct vme_error_handler *handler = NULL;
1180 struct vme_bridge *tsi148_bridge;
1181 void __iomem *addr = image->kern_base + offset;
1182 unsigned int done = 0;
1183 unsigned int count32;
1184
1185 tsi148_bridge = image->parent;
1186
1187 spin_lock(&image->lock);
1188
1189 if (err_chk) {
1190 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace,
1191 &cycle, &dwidth);
1192 handler = vme_register_error_handler(tsi148_bridge, aspace,
1193 vme_base + offset, count);
1194 if (!handler) {
1195 spin_unlock(&image->lock);
1196 return -ENOMEM;
1197 }
1198 }
1199
1200 /* The following code handles VME address alignment. We cannot use
1201 * memcpy_xxx here because it may cut data transfers into 8-bit
1202 * cycles when D16 or D32 cycles are required on the VME bus.
1203 * On the other hand, the bridge itself assures that the maximum data
1204 * cycle configured for the transfer is used and splits it
1205 * automatically for non-aligned addresses, so we don't want the
1206 * overhead of needlessly forcing small transfers for the entire cycle.
1207 */
1208 if ((uintptr_t)addr & 0x1) {
1209 *(u8 *)buf = ioread8(addr);
1210 done += 1;
1211 if (done == count)
1212 goto out;
1213 }
1214 if ((uintptr_t)(addr + done) & 0x2) {
1215 if ((count - done) < 2) {
1216 *(u8 *)(buf + done) = ioread8(addr + done);
1217 done += 1;
1218 goto out;
1219 } else {
1220 *(u16 *)(buf + done) = ioread16(addr + done);
1221 done += 2;
1222 }
1223 }
1224
1225 count32 = (count - done) & ~0x3;
1226 while (done < count32) {
1227 *(u32 *)(buf + done) = ioread32(addr + done);
1228 done += 4;
1229 }
1230
1231 if ((count - done) & 0x2) {
1232 *(u16 *)(buf + done) = ioread16(addr + done);
1233 done += 2;
1234 }
1235 if ((count - done) & 0x1) {
1236 *(u8 *)(buf + done) = ioread8(addr + done);
1237 done += 1;
1238 }
1239
1240 out:
1241 retval = count;
1242
1243 if (err_chk) {
1244 if (handler->num_errors) {
1245 dev_err(image->parent->parent,
1246 "First VME read error detected an at address 0x%llx\n",
1247 handler->first_error);
1248 retval = handler->first_error - (vme_base + offset);
1249 }
1250 vme_unregister_error_handler(handler);
1251 }
1252
1253 spin_unlock(&image->lock);
1254
1255 return retval;
1256 }
1257
1258
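/*
 * Write buf out through a master window, mirroring the alignment handling
 * used in tsi148_master_read(). When error checking is enabled, posted
 * writes are flushed with a read from the bridge's CR/CSR image before
 * errors are checked.
 */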
1259 static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1260 size_t count, loff_t offset)
1261 {
1262 int retval = 0, enabled;
1263 unsigned long long vme_base, size;
1264 u32 aspace, cycle, dwidth;
1265 void __iomem *addr = image->kern_base + offset;
1266 unsigned int done = 0;
1267 unsigned int count32;
1268
1269 struct vme_error_handler *handler = NULL;
1270 struct vme_bridge *tsi148_bridge;
1271 struct tsi148_driver *bridge;
1272
1273 tsi148_bridge = image->parent;
1274
1275 bridge = tsi148_bridge->driver_priv;
1276
1277 spin_lock(&image->lock);
1278
1279 if (err_chk) {
1280 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace,
1281 &cycle, &dwidth);
1282 handler = vme_register_error_handler(tsi148_bridge, aspace,
1283 vme_base + offset, count);
1284 if (!handler) {
1285 spin_unlock(&image->lock);
1286 return -ENOMEM;
1287 }
1288 }
1289
1290 /* Here we apply the same strategy as in the master_read
1291 * function in order to ensure the correct cycles are used.
1292 */
1293 if ((uintptr_t)addr & 0x1) {
1294 iowrite8(*(u8 *)buf, addr);
1295 done += 1;
1296 if (done == count)
1297 goto out;
1298 }
1299 if ((uintptr_t)(addr + done) & 0x2) {
1300 if ((count - done) < 2) {
1301 iowrite8(*(u8 *)(buf + done), addr + done);
1302 done += 1;
1303 goto out;
1304 } else {
1305 iowrite16(*(u16 *)(buf + done), addr + done);
1306 done += 2;
1307 }
1308 }
1309
1310 count32 = (count - done) & ~0x3;
1311 while (done < count32) {
1312 iowrite32(*(u32 *)(buf + done), addr + done);
1313 done += 4;
1314 }
1315
1316 if ((count - done) & 0x2) {
1317 iowrite16(*(u16 *)(buf + done), addr + done);
1318 done += 2;
1319 }
1320 if ((count - done) & 0x1) {
1321 iowrite8(*(u8 *)(buf + done), addr + done);
1322 done += 1;
1323 }
1324
1325 out:
1326 retval = count;
1327
1328 /*
1329 * Writes are posted. We need to do a read on the VME bus to flush out
1330 * all of the writes before we check for errors. We can't guarantee
1331 * that reading the data we have just written is safe. It is believed
1332 * that there isn't any read/write re-ordering, so we can read any
1333 * location in VME space, so let's read the Device ID from the tsi148's
1334 * own registers as mapped into CR/CSR space.
1335 *
1336 * We check for saved errors in the written address range/space.
1337 */
1338
1339 if (err_chk) {
1340 ioread16(bridge->flush_image->kern_base + 0x7F000);
1341
1342 if (handler->num_errors) {
1343 dev_warn(tsi148_bridge->parent,
1344 "First VME write error detected an at address 0x%llx\n",
1345 handler->first_error);
1346 retval = handler->first_error - (vme_base + offset);
1347 }
1348 vme_unregister_error_handler(handler);
1349 }
1350
1351 spin_unlock(&image->lock);
1352
1353 return retval;
1354 }
1355
1356 /*
1357 * Perform an RMW cycle on the VME bus.
1358 *
1359 * Requires a previously configured master window, returns final value.
1360 */
1361 static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
1362 unsigned int mask, unsigned int compare, unsigned int swap,
1363 loff_t offset)
1364 {
1365 unsigned long long pci_addr;
1366 unsigned int pci_addr_high, pci_addr_low;
1367 u32 tmp, result;
1368 int i;
1369 struct tsi148_driver *bridge;
1370
1371 bridge = image->parent->driver_priv;
1372
1373 /* Find the PCI address that maps to the desired VME address */
1374 i = image->number;
1375
1376 /* Locking as we can only do one of these at a time */
1377 mutex_lock(&bridge->vme_rmw);
1378
1379 /* Lock image */
1380 spin_lock(&image->lock);
1381
1382 pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1383 TSI148_LCSR_OFFSET_OTSAU);
1384 pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1385 TSI148_LCSR_OFFSET_OTSAL);
1386
1387 reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1388 reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1389
1390 /* Configure registers */
1391 iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
1392 iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
1393 iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
1394 iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
1395 iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
1396
1397 /* Enable RMW */
1398 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1399 tmp |= TSI148_LCSR_VMCTRL_RMWEN;
1400 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1401
1402 /* Kick process off with a read to the required address. */
1403 result = ioread32be(image->kern_base + offset);
1404
1405 /* Disable RMW */
1406 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1407 tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
1408 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1409
1410 spin_unlock(&image->lock);
1411
1412 mutex_unlock(&bridge->vme_rmw);
1413
1414 return result;
1415 }
1416
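/*
 * Translate VME address space, cycle and data width attributes into the
 * DMA source attribute (DSAT) register format for a descriptor.
 */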
1417 static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
1418 u32 aspace, u32 cycle, u32 dwidth)
1419 {
1420 u32 val;
1421
1422 val = be32_to_cpu(*attr);
1423
1424 /* Setup 2eSST speeds */
1425 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1426 case VME_2eSST160:
1427 val |= TSI148_LCSR_DSAT_2eSSTM_160;
1428 break;
1429 case VME_2eSST267:
1430 val |= TSI148_LCSR_DSAT_2eSSTM_267;
1431 break;
1432 case VME_2eSST320:
1433 val |= TSI148_LCSR_DSAT_2eSSTM_320;
1434 break;
1435 }
1436
1437 /* Setup cycle types */
1438 if (cycle & VME_SCT)
1439 val |= TSI148_LCSR_DSAT_TM_SCT;
1440
1441 if (cycle & VME_BLT)
1442 val |= TSI148_LCSR_DSAT_TM_BLT;
1443
1444 if (cycle & VME_MBLT)
1445 val |= TSI148_LCSR_DSAT_TM_MBLT;
1446
1447 if (cycle & VME_2eVME)
1448 val |= TSI148_LCSR_DSAT_TM_2eVME;
1449
1450 if (cycle & VME_2eSST)
1451 val |= TSI148_LCSR_DSAT_TM_2eSST;
1452
1453 if (cycle & VME_2eSSTB) {
1454 dev_err(dev, "Currently not setting Broadcast Select "
1455 "Registers\n");
1456 val |= TSI148_LCSR_DSAT_TM_2eSSTB;
1457 }
1458
1459 /* Setup data width */
1460 switch (dwidth) {
1461 case VME_D16:
1462 val |= TSI148_LCSR_DSAT_DBW_16;
1463 break;
1464 case VME_D32:
1465 val |= TSI148_LCSR_DSAT_DBW_32;
1466 break;
1467 default:
1468 dev_err(dev, "Invalid data width\n");
1469 return -EINVAL;
1470 }
1471
1472 /* Setup address space */
1473 switch (aspace) {
1474 case VME_A16:
1475 val |= TSI148_LCSR_DSAT_AMODE_A16;
1476 break;
1477 case VME_A24:
1478 val |= TSI148_LCSR_DSAT_AMODE_A24;
1479 break;
1480 case VME_A32:
1481 val |= TSI148_LCSR_DSAT_AMODE_A32;
1482 break;
1483 case VME_A64:
1484 val |= TSI148_LCSR_DSAT_AMODE_A64;
1485 break;
1486 case VME_CRCSR:
1487 val |= TSI148_LCSR_DSAT_AMODE_CRCSR;
1488 break;
1489 case VME_USER1:
1490 val |= TSI148_LCSR_DSAT_AMODE_USER1;
1491 break;
1492 case VME_USER2:
1493 val |= TSI148_LCSR_DSAT_AMODE_USER2;
1494 break;
1495 case VME_USER3:
1496 val |= TSI148_LCSR_DSAT_AMODE_USER3;
1497 break;
1498 case VME_USER4:
1499 val |= TSI148_LCSR_DSAT_AMODE_USER4;
1500 break;
1501 default:
1502 dev_err(dev, "Invalid address space\n");
1503 return -EINVAL;
1504 }
1505
1506 if (cycle & VME_SUPER)
1507 val |= TSI148_LCSR_DSAT_SUP;
1508 if (cycle & VME_PROG)
1509 val |= TSI148_LCSR_DSAT_PGM;
1510
1511 *attr = cpu_to_be32(val);
1512
1513 return 0;
1514 }
1515
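/*
 * Translate VME address space, cycle and data width attributes into the
 * DMA destination attribute (DDAT) register format for a descriptor.
 */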
1516 static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
1517 u32 aspace, u32 cycle, u32 dwidth)
1518 {
1519 u32 val;
1520
1521 val = be32_to_cpu(*attr);
1522
1523 /* Setup 2eSST speeds */
1524 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1525 case VME_2eSST160:
1526 val |= TSI148_LCSR_DDAT_2eSSTM_160;
1527 break;
1528 case VME_2eSST267:
1529 val |= TSI148_LCSR_DDAT_2eSSTM_267;
1530 break;
1531 case VME_2eSST320:
1532 val |= TSI148_LCSR_DDAT_2eSSTM_320;
1533 break;
1534 }
1535
1536 /* Setup cycle types */
1537 if (cycle & VME_SCT)
1538 val |= TSI148_LCSR_DDAT_TM_SCT;
1539
1540 if (cycle & VME_BLT)
1541 val |= TSI148_LCSR_DDAT_TM_BLT;
1542
1543 if (cycle & VME_MBLT)
1544 val |= TSI148_LCSR_DDAT_TM_MBLT;
1545
1546 if (cycle & VME_2eVME)
1547 val |= TSI148_LCSR_DDAT_TM_2eVME;
1548
1549 if (cycle & VME_2eSST)
1550 val |= TSI148_LCSR_DDAT_TM_2eSST;
1551
1552 if (cycle & VME_2eSSTB) {
1553 dev_err(dev, "Currently not setting Broadcast Select "
1554 "Registers\n");
1555 val |= TSI148_LCSR_DDAT_TM_2eSSTB;
1556 }
1557
1558 /* Setup data width */
1559 switch (dwidth) {
1560 case VME_D16:
1561 val |= TSI148_LCSR_DDAT_DBW_16;
1562 break;
1563 case VME_D32:
1564 val |= TSI148_LCSR_DDAT_DBW_32;
1565 break;
1566 default:
1567 dev_err(dev, "Invalid data width\n");
1568 return -EINVAL;
1569 }
1570
1571 /* Setup address space */
1572 switch (aspace) {
1573 case VME_A16:
1574 val |= TSI148_LCSR_DDAT_AMODE_A16;
1575 break;
1576 case VME_A24:
1577 val |= TSI148_LCSR_DDAT_AMODE_A24;
1578 break;
1579 case VME_A32:
1580 val |= TSI148_LCSR_DDAT_AMODE_A32;
1581 break;
1582 case VME_A64:
1583 val |= TSI148_LCSR_DDAT_AMODE_A64;
1584 break;
1585 case VME_CRCSR:
1586 val |= TSI148_LCSR_DDAT_AMODE_CRCSR;
1587 break;
1588 case VME_USER1:
1589 val |= TSI148_LCSR_DDAT_AMODE_USER1;
1590 break;
1591 case VME_USER2:
1592 val |= TSI148_LCSR_DDAT_AMODE_USER2;
1593 break;
1594 case VME_USER3:
1595 val |= TSI148_LCSR_DDAT_AMODE_USER3;
1596 break;
1597 case VME_USER4:
1598 val |= TSI148_LCSR_DDAT_AMODE_USER4;
1599 break;
1600 default:
1601 dev_err(dev, "Invalid address space\n");
1602 return -EINVAL;
1603 }
1604
1605 if (cycle & VME_SUPER)
1606 val |= TSI148_LCSR_DDAT_SUP;
1607 if (cycle & VME_PROG)
1608 val |= TSI148_LCSR_DDAT_PGM;
1609
1610 *attr = cpu_to_be32(val);
1611
1612 return 0;
1613 }
1614
1615 /*
1616 * Add a link list descriptor to the list
1617 *
1618 * Note: DMA engine expects the DMA descriptor to be big endian.
1619 */
1620 static int tsi148_dma_list_add(struct vme_dma_list *list,
1621 struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
1622 {
1623 struct tsi148_dma_entry *entry, *prev;
1624 u32 address_high, address_low, val;
1625 struct vme_dma_pattern *pattern_attr;
1626 struct vme_dma_pci *pci_attr;
1627 struct vme_dma_vme *vme_attr;
1628 int retval = 0;
1629 struct vme_bridge *tsi148_bridge;
1630
1631 tsi148_bridge = list->parent->parent;
1632
1633 /* Descriptor must be aligned on 64-bit boundaries */
1634 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1635 if (!entry) {
1636 retval = -ENOMEM;
1637 goto err_mem;
1638 }
1639
1640 /* Test descriptor alignment */
1641 if ((unsigned long)&entry->descriptor & 0x7) {
1642 dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
1643 "byte boundary as required: %p\n",
1644 &entry->descriptor);
1645 retval = -EINVAL;
1646 goto err_align;
1647 }
1648
1649 /* Given we are going to fill out the structure, we probably don't
1650 * need to zero it, but better safe than sorry for now.
1651 */
1652 memset(&entry->descriptor, 0, sizeof(entry->descriptor));
1653
1654 /* Fill out source part */
1655 switch (src->type) {
1656 case VME_DMA_PATTERN:
1657 pattern_attr = src->private;
1658
1659 entry->descriptor.dsal = cpu_to_be32(pattern_attr->pattern);
1660
1661 val = TSI148_LCSR_DSAT_TYP_PAT;
1662
1663 /* Default behaviour is 32 bit pattern */
1664 if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
1665 val |= TSI148_LCSR_DSAT_PSZ;
1666
1667 /* It seems that the default behaviour is to increment */
1668 if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
1669 val |= TSI148_LCSR_DSAT_NIN;
1670 entry->descriptor.dsat = cpu_to_be32(val);
1671 break;
1672 case VME_DMA_PCI:
1673 pci_attr = src->private;
1674
1675 reg_split((unsigned long long)pci_attr->address, &address_high,
1676 &address_low);
1677 entry->descriptor.dsau = cpu_to_be32(address_high);
1678 entry->descriptor.dsal = cpu_to_be32(address_low);
1679 entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI);
1680 break;
1681 case VME_DMA_VME:
1682 vme_attr = src->private;
1683
1684 reg_split((unsigned long long)vme_attr->address, &address_high,
1685 &address_low);
1686 entry->descriptor.dsau = cpu_to_be32(address_high);
1687 entry->descriptor.dsal = cpu_to_be32(address_low);
1688 entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME);
1689
1690 retval = tsi148_dma_set_vme_src_attributes(
1691 tsi148_bridge->parent, &entry->descriptor.dsat,
1692 vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1693 if (retval < 0)
1694 goto err_source;
1695 break;
1696 default:
1697 dev_err(tsi148_bridge->parent, "Invalid source type\n");
1698 retval = -EINVAL;
1699 goto err_source;
1700 }
1701
1702 /* Assume last link - this will be over-written by adding another */
1703 entry->descriptor.dnlau = cpu_to_be32(0);
1704 entry->descriptor.dnlal = cpu_to_be32(TSI148_LCSR_DNLAL_LLA);
1705
1706 /* Fill out destination part */
1707 switch (dest->type) {
1708 case VME_DMA_PCI:
1709 pci_attr = dest->private;
1710
1711 reg_split((unsigned long long)pci_attr->address, &address_high,
1712 &address_low);
1713 entry->descriptor.ddau = cpu_to_be32(address_high);
1714 entry->descriptor.ddal = cpu_to_be32(address_low);
1715 entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI);
1716 break;
1717 case VME_DMA_VME:
1718 vme_attr = dest->private;
1719
1720 reg_split((unsigned long long)vme_attr->address, &address_high,
1721 &address_low);
1722 entry->descriptor.ddau = cpu_to_be32(address_high);
1723 entry->descriptor.ddal = cpu_to_be32(address_low);
1724 entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME);
1725
1726 retval = tsi148_dma_set_vme_dest_attributes(
1727 tsi148_bridge->parent, &entry->descriptor.ddat,
1728 vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1729 if (retval < 0)
1730 goto err_dest;
1731 break;
1732 default:
1733 dev_err(tsi148_bridge->parent, "Invalid destination type\n");
1734 retval = -EINVAL;
1735 goto err_dest;
1736 }
1737
1738 /* Fill out count */
1739 entry->descriptor.dcnt = cpu_to_be32((u32)count);
1740
1741 /* Add to list */
1742 list_add_tail(&entry->list, &list->entries);
1743
1744 entry->dma_handle = dma_map_single(tsi148_bridge->parent,
1745 &entry->descriptor,
1746 sizeof(entry->descriptor),
1747 DMA_TO_DEVICE);
1748 if (dma_mapping_error(tsi148_bridge->parent, entry->dma_handle)) {
1749 dev_err(tsi148_bridge->parent, "DMA mapping error\n");
1750 retval = -EINVAL;
1751 goto err_dma;
1752 }
1753
1754 /* Fill out the previous descriptor's "Next Address" */
1755 if (entry->list.prev != &list->entries) {
1756 reg_split((unsigned long long)entry->dma_handle, &address_high,
1757 &address_low);
1758 prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
1759 list);
1760 prev->descriptor.dnlau = cpu_to_be32(address_high);
1761 prev->descriptor.dnlal = cpu_to_be32(address_low);
1762
1763 }
1764
1765 return 0;
1766
1767 err_dma:
1768 list_del(&entry->list);
1769 err_dest:
1770 err_source:
1771 err_align:
1772 kfree(entry);
1773 err_mem:
1774 return retval;
1775 }
1776
1777 /*
1778 * Check the provided DMA channel: returns 1 when idle (not busy), 0 otherwise.
1779 */
1780 static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
1781 {
1782 u32 tmp;
1783 struct tsi148_driver *bridge;
1784
1785 bridge = tsi148_bridge->driver_priv;
1786
1787 tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1788 TSI148_LCSR_OFFSET_DSTA);
1789
1790 if (tmp & TSI148_LCSR_DSTA_BSY)
1791 return 0;
1792 else
1793 return 1;
1794
1795 }
1796
1797 /*
1798 * Execute a previously generated link list
1799 *
1800 * XXX Need to provide control register configuration.
1801 */
1802 static int tsi148_dma_list_exec(struct vme_dma_list *list)
1803 {
1804 struct vme_dma_resource *ctrlr;
1805 int channel, retval;
1806 struct tsi148_dma_entry *entry;
1807 u32 bus_addr_high, bus_addr_low;
1808 u32 val, dctlreg = 0;
1809 struct vme_bridge *tsi148_bridge;
1810 struct tsi148_driver *bridge;
1811
1812 ctrlr = list->parent;
1813
1814 tsi148_bridge = ctrlr->parent;
1815
1816 bridge = tsi148_bridge->driver_priv;
1817
1818 mutex_lock(&ctrlr->mtx);
1819
1820 channel = ctrlr->number;
1821
1822 if (!list_empty(&ctrlr->running)) {
1823 /*
1824 * XXX We have an active DMA transfer and currently haven't
1825 * sorted out the mechanism for "pending" DMA transfers.
1826 * Return busy.
1827 */
1828 /* Need to add to pending here */
1829 mutex_unlock(&ctrlr->mtx);
1830 return -EBUSY;
1831 } else {
1832 list_add(&list->list, &ctrlr->running);
1833 }
1834
1835 /* Get first bus address and write into registers */
1836 entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
1837 list);
1838
1839 mutex_unlock(&ctrlr->mtx);
1840
1841 reg_split(entry->dma_handle, &bus_addr_high, &bus_addr_low);
1842
1843 iowrite32be(bus_addr_high, bridge->base +
1844 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
1845 iowrite32be(bus_addr_low, bridge->base +
1846 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
1847
1848 dctlreg = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1849 TSI148_LCSR_OFFSET_DCTL);
1850
1851 /* Start the operation */
1852 iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
1853 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1854
1855 retval = wait_event_interruptible(bridge->dma_queue[channel],
1856 tsi148_dma_busy(ctrlr->parent, channel));
1857
1858 if (retval) {
1859 iowrite32be(dctlreg | TSI148_LCSR_DCTL_ABT, bridge->base +
1860 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1861 /* Wait for the operation to abort */
1862 wait_event(bridge->dma_queue[channel],
1863 tsi148_dma_busy(ctrlr->parent, channel));
1864 retval = -EINTR;
1865 goto exit;
1866 }
1867
1868 /*
1869 * Read status register, this register is valid until we kick off a
1870 * new transfer.
1871 */
1872 val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1873 TSI148_LCSR_OFFSET_DSTA);
1874
1875 if (val & TSI148_LCSR_DSTA_VBE) {
1876 dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
1877 retval = -EIO;
1878 }
1879
1880 exit:
1881 /* Remove list from running list */
1882 mutex_lock(&ctrlr->mtx);
1883 list_del(&list->list);
1884 mutex_unlock(&ctrlr->mtx);
1885
1886 return retval;
1887 }
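
/*
 * Illustrative sketch (not part of the driver): a VME device driver
 * normally reaches tsi148_dma_list_add()/tsi148_dma_list_exec() through
 * the VME core rather than calling them directly. The helper names and
 * signatures below (vme_dma_request(), vme_new_dma_list(),
 * vme_dma_pci_attribute(), vme_dma_vme_attribute(), vme_dma_list_add(),
 * vme_dma_list_exec(), vme_dma_list_free()) are assumed from the VME
 * core API and may differ between kernel versions:
 *
 *	res = vme_dma_request(vdev, VME_DMA_MEM_TO_VME);
 *	list = vme_new_dma_list(res);
 *	src = vme_dma_pci_attribute(buf_dma);
 *	dest = vme_dma_vme_attribute(0x20000000, VME_A32, VME_SCT, VME_D32);
 *	vme_dma_list_add(list, src, dest, 4096);
 *	vme_dma_list_exec(list);
 *	vme_dma_list_free(list);
 */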
1888
1889 /*
1890 * Clean up a previously generated link list
1891 *
1892 * This is kept as a separate function; don't assume that the chain can't be reused.
1893 */
1894 static int tsi148_dma_list_empty(struct vme_dma_list *list)
1895 {
1896 struct list_head *pos, *temp;
1897 struct tsi148_dma_entry *entry;
1898
1899 struct vme_bridge *tsi148_bridge = list->parent->parent;
1900
1901 /* detach and free each entry */
1902 list_for_each_safe(pos, temp, &list->entries) {
1903 list_del(pos);
1904 entry = list_entry(pos, struct tsi148_dma_entry, list);
1905
1906 dma_unmap_single(tsi148_bridge->parent, entry->dma_handle,
1907 sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1908 kfree(entry);
1909 }
1910
1911 return 0;
1912 }
1913
1914 /*
1915 * All 4 location monitors reside at the same base - this is therefore a
1916 * system-wide configuration.
1917 *
1918 * This does not enable the location monitor - that should be done when the
1919 * first callback is attached and disabled when the last callback is removed.
1920 */
1921 static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
1922 u32 aspace, u32 cycle)
1923 {
1924 u32 lm_base_high, lm_base_low, lm_ctl = 0;
1925 int i;
1926 struct vme_bridge *tsi148_bridge;
1927 struct tsi148_driver *bridge;
1928
1929 tsi148_bridge = lm->parent;
1930
1931 bridge = tsi148_bridge->driver_priv;
1932
1933 mutex_lock(&lm->mtx);
1934
1935 /* If we already have a callback attached, we can't move it! */
1936 for (i = 0; i < lm->monitors; i++) {
1937 if (bridge->lm_callback[i]) {
1938 mutex_unlock(&lm->mtx);
1939 dev_err(tsi148_bridge->parent, "Location monitor "
1940 "callback attached, can't reset\n");
1941 return -EBUSY;
1942 }
1943 }
1944
1945 switch (aspace) {
1946 case VME_A16:
1947 lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
1948 break;
1949 case VME_A24:
1950 lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
1951 break;
1952 case VME_A32:
1953 lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
1954 break;
1955 case VME_A64:
1956 lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
1957 break;
1958 default:
1959 mutex_unlock(&lm->mtx);
1960 dev_err(tsi148_bridge->parent, "Invalid address space\n");
1961 return -EINVAL;
1962 }
1963
1964 if (cycle & VME_SUPER)
1965 lm_ctl |= TSI148_LCSR_LMAT_SUPR;
1966 if (cycle & VME_USER)
1967 lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
1968 if (cycle & VME_PROG)
1969 lm_ctl |= TSI148_LCSR_LMAT_PGM;
1970 if (cycle & VME_DATA)
1971 lm_ctl |= TSI148_LCSR_LMAT_DATA;
1972
1973 reg_split(lm_base, &lm_base_high, &lm_base_low);
1974
1975 iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
1976 iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
1977 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
1978
1979 mutex_unlock(&lm->mtx);
1980
1981 return 0;
1982 }
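
/*
 * Illustrative sketch (not part of the driver): since all four monitors
 * share one base address, a consumer sets the base once through the VME
 * core and then attaches callbacks per monitor index. The vme_lm_*
 * helper names and signatures below are assumed from the VME core API
 * and may differ between kernel versions:
 *
 *	lm_res = vme_lm_request(vdev);
 *	vme_lm_set(lm_res, 0x20000000, VME_A32, VME_USER | VME_DATA);
 */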
1983
1984 /* Get the configuration of the location monitor and return whether it is
1985 * enabled or disabled.
1986 */
1987 static int tsi148_lm_get(struct vme_lm_resource *lm,
1988 unsigned long long *lm_base, u32 *aspace, u32 *cycle)
1989 {
1990 u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
1991 struct tsi148_driver *bridge;
1992
1993 bridge = lm->parent->driver_priv;
1994
1995 mutex_lock(&lm->mtx);
1996
1997 lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
1998 lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
1999 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2000
2001 reg_join(lm_base_high, lm_base_low, lm_base);
2002
2003 if (lm_ctl & TSI148_LCSR_LMAT_EN)
2004 enabled = 1;
2005
2006 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
2007 *aspace |= VME_A16;
2008
2009 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
2010 *aspace |= VME_A24;
2011
2012 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
2013 *aspace |= VME_A32;
2014
2015 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
2016 *aspace |= VME_A64;
2017
2019 if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
2020 *cycle |= VME_SUPER;
2021 if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
2022 *cycle |= VME_USER;
2023 if (lm_ctl & TSI148_LCSR_LMAT_PGM)
2024 *cycle |= VME_PROG;
2025 if (lm_ctl & TSI148_LCSR_LMAT_DATA)
2026 *cycle |= VME_DATA;
2027
2028 mutex_unlock(&lm->mtx);
2029
2030 return enabled;
2031 }
2032
2033 /*
2034 * Attach a callback to a specific location monitor.
2035 *
2036 * The callback will be passed the data pointer registered when it was attached.
2037 */
2038 static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
2039 void (*callback)(void *), void *data)
2040 {
2041 u32 lm_ctl, tmp;
2042 struct vme_bridge *tsi148_bridge;
2043 struct tsi148_driver *bridge;
2044
2045 tsi148_bridge = lm->parent;
2046
2047 bridge = tsi148_bridge->driver_priv;
2048
2049 mutex_lock(&lm->mtx);
2050
2051 /* Ensure that the location monitor is configured - need PGM or DATA */
2052 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2053 if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
2054 mutex_unlock(&lm->mtx);
2055 dev_err(tsi148_bridge->parent, "Location monitor not properly "
2056 "configured\n");
2057 return -EINVAL;
2058 }
2059
2060 /* Check that a callback isn't already attached */
2061 if (bridge->lm_callback[monitor]) {
2062 mutex_unlock(&lm->mtx);
2063 dev_err(tsi148_bridge->parent, "Existing callback attached\n");
2064 return -EBUSY;
2065 }
2066
2067 /* Attach callback */
2068 bridge->lm_callback[monitor] = callback;
2069 bridge->lm_data[monitor] = data;
2070
2071 /* Enable Location Monitor interrupt */
2072 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2073 tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
2074 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
2075
2076 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2077 tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
2078 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2079
2080 /* Ensure that the global Location Monitor Enable bit is set */
2081 if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
2082 lm_ctl |= TSI148_LCSR_LMAT_EN;
2083 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
2084 }
2085
2086 mutex_unlock(&lm->mtx);
2087
2088 return 0;
2089 }
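
/*
 * Illustrative sketch (not part of the driver): the attached callback is
 * called with the data pointer supplied here and runs from the bridge's
 * interrupt path, so it should not sleep. The vme_lm_attach() helper
 * name and signature are assumed from the VME core API and may differ
 * between kernel versions:
 *
 *	static void my_lm_callback(void *data)
 *	{
 *		struct my_device *dev = data;
 *		...
 *	}
 *
 *	vme_lm_attach(lm_res, 0, my_lm_callback, my_dev);
 */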
2090
2091 /*
2092 * Detach a callback function from a specific location monitor.
2093 */
2094 static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
2095 {
2096 u32 lm_en, tmp;
2097 struct tsi148_driver *bridge;
2098
2099 bridge = lm->parent->driver_priv;
2100
2101 mutex_lock(&lm->mtx);
2102
2103 /* Disable Location Monitor and ensure previous interrupts are clear */
2104 lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2105 lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
2106 iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
2107
2108 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2109 tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
2110 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2111
2112 iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
2113 bridge->base + TSI148_LCSR_INTC);
2114
2115 /* Detach callback */
2116 bridge->lm_callback[monitor] = NULL;
2117 bridge->lm_data[monitor] = NULL;
2118
2119 /* If all location monitors disabled, disable global Location Monitor */
2120 if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
2121 TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
2122 tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2123 tmp &= ~TSI148_LCSR_LMAT_EN;
2124 iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
2125 }
2126
2127 mutex_unlock(&lm->mtx);
2128
2129 return 0;
2130 }
2131
2132 /*
2133 * Determine Geographical Addressing
2134 */
2135 static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
2136 {
2137 u32 slot = 0;
2138 struct tsi148_driver *bridge;
2139
2140 bridge = tsi148_bridge->driver_priv;
2141
2142 if (!geoid) {
2143 slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
2144 slot = slot & TSI148_LCSR_VSTAT_GA_M;
2145 } else {
2146 slot = geoid;
2147 }
2148 return (int)slot;
2149 }
2150
2151 static void *tsi148_alloc_consistent(struct device *parent, size_t size,
2152 dma_addr_t *dma)
2153 {
2154 struct pci_dev *pdev;
2155
2156 /* Find pci_dev container of dev */
2157 pdev = to_pci_dev(parent);
2158
2159 return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
2160 }
2161
2162 static void tsi148_free_consistent(struct device *parent, size_t size,
2163 void *vaddr, dma_addr_t dma)
2164 {
2165 struct pci_dev *pdev;
2166
2167 /* Find pci_dev container of dev */
2168 pdev = to_pci_dev(parent);
2169
2170 dma_free_coherent(&pdev->dev, size, vaddr, dma);
2171 }
2172
2173 /*
2174 * Configure CR/CSR space
2175 *
2176 * Access to the CR/CSR space can be configured at power-up. The location of
2177 * the CR/CSR registers in the CR/CSR address space is determined by the
2178 * board's Auto-ID or geographic address. This function ensures that the
2179 * window is enabled at an offset consistent with the board's geographic address.
2180 *
2181 * Each board has a 512 kB window, with the highest 4 kB used for the
2182 * board's registers. This means there is a fixed-length 508 kB window which
2183 * must be mapped onto PCI memory.
2184 */
2185 static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
2186 struct pci_dev *pdev)
2187 {
2188 u32 cbar, crat, vstat;
2189 u32 crcsr_bus_high, crcsr_bus_low;
2190 int retval;
2191 struct tsi148_driver *bridge;
2192
2193 bridge = tsi148_bridge->driver_priv;
2194
2195 /* Allocate mem for CR/CSR image */
2196 bridge->crcsr_kernel = dma_alloc_coherent(&pdev->dev,
2197 VME_CRCSR_BUF_SIZE,
2198 &bridge->crcsr_bus, GFP_KERNEL);
2199 if (!bridge->crcsr_kernel) {
2200 dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
2201 "CR/CSR image\n");
2202 return -ENOMEM;
2203 }
2204
2205 reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
2206
2207 iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
2208 iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
2209
2210 /* Ensure that the CR/CSR is configured at the correct offset */
2211 cbar = ioread32be(bridge->base + TSI148_CBAR);
2212 cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
2213
2214 vstat = tsi148_slot_get(tsi148_bridge);
2215
2216 if (cbar != vstat) {
2217 cbar = vstat;
2218 dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
2219 iowrite32be(cbar<<3, bridge->base + TSI148_CBAR);
2220 }
2221 dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);
2222
2223 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2224 if (crat & TSI148_LCSR_CRAT_EN) {
2225 dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
2226 } else {
2227 dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
2228 iowrite32be(crat | TSI148_LCSR_CRAT_EN,
2229 bridge->base + TSI148_LCSR_CRAT);
2230 }
2231
2232 /* If we want flushed, error-checked writes, set up a window
2233 * over the CR/CSR registers. We read from here to safely flush
2234 * through VME writes.
2235 */
2236 if (err_chk) {
2237 retval = tsi148_master_set(bridge->flush_image, 1,
2238 (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
2239 VME_D16);
2240 if (retval)
2241 dev_err(tsi148_bridge->parent, "Configuring flush image"
2242 " failed\n");
2243 }
2244
2245 return 0;
2247 }
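
/*
 * Worked example (illustrative): with a geographical address of 3, the
 * board's CR/CSR window starts at 3 * 0x80000 = 0x180000 in CR/CSR
 * space. The top 4 kB of that 512 kB window (0x1FF000 - 0x1FFFFF) holds
 * the board's own registers, and the remaining 508 kB is backed by the
 * coherent buffer whose bus address is programmed into CROU/CROL above.
 */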
2248
2249 static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
2250 struct pci_dev *pdev)
2251 {
2252 u32 crat;
2253 struct tsi148_driver *bridge;
2254
2255 bridge = tsi148_bridge->driver_priv;
2256
2257 /* Turn off CR/CSR space */
2258 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2259 iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
2260 bridge->base + TSI148_LCSR_CRAT);
2261
2262 /* Free image */
2263 iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
2264 iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
2265
2266 dma_free_coherent(&pdev->dev, VME_CRCSR_BUF_SIZE,
2267 bridge->crcsr_kernel, bridge->crcsr_bus);
2268 }
2269
2270 static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2271 {
2272 int retval, i, master_num;
2273 u32 data;
2274 struct list_head *pos = NULL, *n;
2275 struct vme_bridge *tsi148_bridge;
2276 struct tsi148_driver *tsi148_device;
2277 struct vme_master_resource *master_image;
2278 struct vme_slave_resource *slave_image;
2279 struct vme_dma_resource *dma_ctrlr;
2280 struct vme_lm_resource *lm;
2281
2282 /* If we want to support more than one of each bridge, we need to
2283 * dynamically generate this so we get one per device
2284 */
2285 tsi148_bridge = kzalloc(sizeof(*tsi148_bridge), GFP_KERNEL);
2286 if (!tsi148_bridge) {
2287 retval = -ENOMEM;
2288 goto err_struct;
2289 }
2290 vme_init_bridge(tsi148_bridge);
2291
2292 tsi148_device = kzalloc(sizeof(*tsi148_device), GFP_KERNEL);
2293 if (!tsi148_device) {
2294 retval = -ENOMEM;
2295 goto err_driver;
2296 }
2297
2298 tsi148_bridge->driver_priv = tsi148_device;
2299
2300 /* Enable the device */
2301 retval = pci_enable_device(pdev);
2302 if (retval) {
2303 dev_err(&pdev->dev, "Unable to enable device\n");
2304 goto err_enable;
2305 }
2306
2307 /* Map Registers */
2308 retval = pci_request_regions(pdev, driver_name);
2309 if (retval) {
2310 dev_err(&pdev->dev, "Unable to reserve resources\n");
2311 goto err_resource;
2312 }
2313
2314 /* map registers in BAR 0 */
2315 tsi148_device->base = ioremap(pci_resource_start(pdev, 0),
2316 4096);
2317 if (!tsi148_device->base) {
2318 dev_err(&pdev->dev, "Unable to remap CRG region\n");
2319 retval = -EIO;
2320 goto err_remap;
2321 }
2322
2323 /* Check to see if the mapping worked out */
2324 data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
2325 if (data != PCI_VENDOR_ID_TUNDRA) {
2326 dev_err(&pdev->dev, "CRG region check failed\n");
2327 retval = -EIO;
2328 goto err_test;
2329 }
2330
2331 /* Initialize wait queues & mutual exclusion flags */
2332 init_waitqueue_head(&tsi148_device->dma_queue[0]);
2333 init_waitqueue_head(&tsi148_device->dma_queue[1]);
2334 init_waitqueue_head(&tsi148_device->iack_queue);
2335 mutex_init(&tsi148_device->vme_int);
2336 mutex_init(&tsi148_device->vme_rmw);
2337
2338 tsi148_bridge->parent = &pdev->dev;
2339 strcpy(tsi148_bridge->name, driver_name);
2340
2341 /* Setup IRQ */
2342 retval = tsi148_irq_init(tsi148_bridge);
2343 if (retval != 0) {
2344 dev_err(&pdev->dev, "Chip Initialization failed.\n");
2345 goto err_irq;
2346 }
2347
2348 /* If we are going to flush writes, we need to read from the VME bus.
2349 * We need to do this safely, thus we read the device's own CR/CSR
2350 * register. To do this we must set up a window in CR/CSR space and
2351 * hence have one less master window resource available.
2352 */
2353 master_num = TSI148_MAX_MASTER;
2354 if (err_chk) {
2355 master_num--;
2356
2357 tsi148_device->flush_image =
2358 kmalloc(sizeof(*tsi148_device->flush_image),
2359 GFP_KERNEL);
2360 if (!tsi148_device->flush_image) {
2361 retval = -ENOMEM;
2362 goto err_master;
2363 }
2364 tsi148_device->flush_image->parent = tsi148_bridge;
2365 spin_lock_init(&tsi148_device->flush_image->lock);
2366 tsi148_device->flush_image->locked = 1;
2367 tsi148_device->flush_image->number = master_num;
2368 memset(&tsi148_device->flush_image->bus_resource, 0,
2369 sizeof(tsi148_device->flush_image->bus_resource));
2370 tsi148_device->flush_image->kern_base = NULL;
2371 }
2372
2373 /* Add master windows to list */
2374 for (i = 0; i < master_num; i++) {
2375 master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
2376 if (!master_image) {
2377 retval = -ENOMEM;
2378 goto err_master;
2379 }
2380 master_image->parent = tsi148_bridge;
2381 spin_lock_init(&master_image->lock);
2382 master_image->locked = 0;
2383 master_image->number = i;
2384 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2385 VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
2386 VME_USER3 | VME_USER4;
2387 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2388 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2389 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2390 VME_PROG | VME_DATA;
2391 master_image->width_attr = VME_D16 | VME_D32;
2392 memset(&master_image->bus_resource, 0,
2393 sizeof(master_image->bus_resource));
2394 master_image->kern_base = NULL;
2395 list_add_tail(&master_image->list,
2396 &tsi148_bridge->master_resources);
2397 }
2398
2399 /* Add slave windows to list */
2400 for (i = 0; i < TSI148_MAX_SLAVE; i++) {
2401 slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
2402 if (!slave_image) {
2403 retval = -ENOMEM;
2404 goto err_slave;
2405 }
2406 slave_image->parent = tsi148_bridge;
2407 mutex_init(&slave_image->mtx);
2408 slave_image->locked = 0;
2409 slave_image->number = i;
2410 slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2411 VME_A64;
2412 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2413 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2414 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2415 VME_PROG | VME_DATA;
2416 list_add_tail(&slave_image->list,
2417 &tsi148_bridge->slave_resources);
2418 }
2419
2420 /* Add dma engines to list */
2421 for (i = 0; i < TSI148_MAX_DMA; i++) {
2422 dma_ctrlr = kmalloc(sizeof(*dma_ctrlr), GFP_KERNEL);
2423 if (!dma_ctrlr) {
2424 retval = -ENOMEM;
2425 goto err_dma;
2426 }
2427 dma_ctrlr->parent = tsi148_bridge;
2428 mutex_init(&dma_ctrlr->mtx);
2429 dma_ctrlr->locked = 0;
2430 dma_ctrlr->number = i;
2431 dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
2432 VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
2433 VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
2434 VME_DMA_PATTERN_TO_MEM;
2435 INIT_LIST_HEAD(&dma_ctrlr->pending);
2436 INIT_LIST_HEAD(&dma_ctrlr->running);
2437 list_add_tail(&dma_ctrlr->list,
2438 &tsi148_bridge->dma_resources);
2439 }
2440
2441 /* Add location monitor to list */
2442 lm = kmalloc(sizeof(*lm), GFP_KERNEL);
2443 if (!lm) {
2444 retval = -ENOMEM;
2445 goto err_lm;
2446 }
2447 lm->parent = tsi148_bridge;
2448 mutex_init(&lm->mtx);
2449 lm->locked = 0;
2450 lm->number = 1;
2451 lm->monitors = 4;
2452 list_add_tail(&lm->list, &tsi148_bridge->lm_resources);
2453
2454 tsi148_bridge->slave_get = tsi148_slave_get;
2455 tsi148_bridge->slave_set = tsi148_slave_set;
2456 tsi148_bridge->master_get = tsi148_master_get;
2457 tsi148_bridge->master_set = tsi148_master_set;
2458 tsi148_bridge->master_read = tsi148_master_read;
2459 tsi148_bridge->master_write = tsi148_master_write;
2460 tsi148_bridge->master_rmw = tsi148_master_rmw;
2461 tsi148_bridge->dma_list_add = tsi148_dma_list_add;
2462 tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
2463 tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
2464 tsi148_bridge->irq_set = tsi148_irq_set;
2465 tsi148_bridge->irq_generate = tsi148_irq_generate;
2466 tsi148_bridge->lm_set = tsi148_lm_set;
2467 tsi148_bridge->lm_get = tsi148_lm_get;
2468 tsi148_bridge->lm_attach = tsi148_lm_attach;
2469 tsi148_bridge->lm_detach = tsi148_lm_detach;
2470 tsi148_bridge->slot_get = tsi148_slot_get;
2471 tsi148_bridge->alloc_consistent = tsi148_alloc_consistent;
2472 tsi148_bridge->free_consistent = tsi148_free_consistent;
2473
2474 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2475 dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2476 (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
2477 if (!geoid)
2478 dev_info(&pdev->dev, "VME geographical address is %d\n",
2479 data & TSI148_LCSR_VSTAT_GA_M);
2480 else
2481 dev_info(&pdev->dev, "VME geographical address is set to %d\n",
2482 geoid);
2483
2484 dev_info(&pdev->dev, "VME write flush and error check is %s\n",
2485 err_chk ? "enabled" : "disabled");
2486
2487 retval = tsi148_crcsr_init(tsi148_bridge, pdev);
2488 if (retval) {
2489 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2490 goto err_crcsr;
2491 }
2492
2493 retval = vme_register_bridge(tsi148_bridge);
2494 if (retval != 0) {
2495 dev_err(&pdev->dev, "Chip Registration failed.\n");
2496 goto err_reg;
2497 }
2498
2499 pci_set_drvdata(pdev, tsi148_bridge);
2500
2501 /* Clear VME bus "board fail", and "power-up reset" lines */
2502 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2503 data &= ~TSI148_LCSR_VSTAT_BRDFL;
2504 data |= TSI148_LCSR_VSTAT_CPURST;
2505 iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
2506
2507 return 0;
2508
2509 err_reg:
2510 tsi148_crcsr_exit(tsi148_bridge, pdev);
2511 err_crcsr:
2512 err_lm:
2513 /* resources are stored in a linked list */
2514 list_for_each_safe(pos, n, &tsi148_bridge->lm_resources) {
2515 lm = list_entry(pos, struct vme_lm_resource, list);
2516 list_del(pos);
2517 kfree(lm);
2518 }
2519 err_dma:
2520 /* resources are stored in a linked list */
2521 list_for_each_safe(pos, n, &tsi148_bridge->dma_resources) {
2522 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2523 list_del(pos);
2524 kfree(dma_ctrlr);
2525 }
2526 err_slave:
2527 /* resources are stored in a linked list */
2528 list_for_each_safe(pos, n, &tsi148_bridge->slave_resources) {
2529 slave_image = list_entry(pos, struct vme_slave_resource, list);
2530 list_del(pos);
2531 kfree(slave_image);
2532 }
2533 err_master:
2534 /* resources are stored in a linked list */
2535 list_for_each_safe(pos, n, &tsi148_bridge->master_resources) {
2536 master_image = list_entry(pos, struct vme_master_resource,
2537 list);
2538 list_del(pos);
2539 kfree(master_image);
2540 }
2541
2542 tsi148_irq_exit(tsi148_bridge, pdev);
2543 err_irq:
2544 err_test:
2545 iounmap(tsi148_device->base);
2546 err_remap:
2547 pci_release_regions(pdev);
2548 err_resource:
2549 pci_disable_device(pdev);
2550 err_enable:
2551 kfree(tsi148_device);
2552 err_driver:
2553 kfree(tsi148_bridge);
2554 err_struct:
2555 return retval;
2557 }
2558
2559 static void tsi148_remove(struct pci_dev *pdev)
2560 {
2561 struct list_head *pos = NULL;
2562 struct list_head *tmplist;
2563 struct vme_master_resource *master_image;
2564 struct vme_slave_resource *slave_image;
2565 struct vme_dma_resource *dma_ctrlr;
2566 int i;
2567 struct tsi148_driver *bridge;
2568 struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
2569
2570 bridge = tsi148_bridge->driver_priv;
2571
2573 dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
2574
2575 /*
2576 * Shutdown all inbound and outbound windows.
2577 */
2578 for (i = 0; i < 8; i++) {
2579 iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
2580 TSI148_LCSR_OFFSET_ITAT);
2581 iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
2582 TSI148_LCSR_OFFSET_OTAT);
2583 }
2584
2585 /*
2586 * Shutdown Location monitor.
2587 */
2588 iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
2589
2590 /*
2591 * Shutdown CRG map.
2592 */
2593 iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);
2594
2595 /*
2596 * Clear error status.
2597 */
2598 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
2599 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
2600 iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);
2601
2602 /*
2603 * Remove VIRQ interrupt (if any)
2604 */
2605 if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
2606 iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);
2607
2608 /*
2609 * Map all Interrupts to PCI INTA
2610 */
2611 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
2612 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
2613
2614 tsi148_irq_exit(tsi148_bridge, pdev);
2615
2616 vme_unregister_bridge(tsi148_bridge);
2617
2618 tsi148_crcsr_exit(tsi148_bridge, pdev);
2619
2620 /* resources are stored in a linked list */
2621 list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
2622 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2623 list_del(pos);
2624 kfree(dma_ctrlr);
2625 }
2626
2627 /* resources are stored in a linked list */
2628 list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
2629 slave_image = list_entry(pos, struct vme_slave_resource, list);
2630 list_del(pos);
2631 kfree(slave_image);
2632 }
2633
2634 /* resources are stored in a linked list */
2635 list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
2636 master_image = list_entry(pos, struct vme_master_resource,
2637 list);
2638 list_del(pos);
2639 kfree(master_image);
2640 }
2641
2642 iounmap(bridge->base);
2643
2644 pci_release_regions(pdev);
2645
2646 pci_disable_device(pdev);
2647
2648 kfree(tsi148_bridge->driver_priv);
2649
2650 kfree(tsi148_bridge);
2651 }
2652
2653 module_pci_driver(tsi148_driver);
2654
2655 MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
2656 module_param(err_chk, bool, 0);
2657
2658 MODULE_PARM_DESC(geoid, "Override geographical addressing");
2659 module_param(geoid, int, 0);
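
/*
 * Example (illustrative): loading the driver with VME error checking
 * enabled and the geographical address forced to slot 5, assuming the
 * module is built as vme_tsi148:
 *
 *	modprobe vme_tsi148 err_chk=1 geoid=5
 */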
2660
2661 MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
2662 MODULE_LICENSE("GPL");
2663