1 /******************************************************************************
2 iphase.c: Device driver for Interphase ATM PCI adapter cards
3 Author: Peter Wang <pwang@iphase.com>
4 Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5 Interphase Corporation <www.iphase.com>
6 Version: 1.0
7 *******************************************************************************
8
9 This software may be used and distributed according to the terms
10 of the GNU General Public License (GPL), incorporated herein by reference.
11 Drivers based on this skeleton fall under the GPL and must retain
12 the authorship (implicit copyright) notice.
13
14 This program is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 General Public License for more details.
18
19 Modified from an incomplete driver for Interphase 5575 1KVC 1M card which
20 was originally written by Monalisa Agrawal at UNH. Now this driver
supports a variety of variants of Interphase ATM PCI (i)Chip adapter
22 card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM)
23 in terms of PHY type, the size of control memory and the size of
24 packet memory. The following are the change log and history:
25
26 Bugfix the Mona's UBR driver.
27 Modify the basic memory allocation and dma logic.
28 Port the driver to the latest kernel from 2.0.46.
29 Complete the ABR logic of the driver, and added the ABR work-
around for the hardware anomalies.
31 Add the CBR support.
32 Add the flow control logic to the driver to allow rate-limit VC.
33 Add 4K VC support to the board with 512K control memory.
34 Add the support of all the variants of the Interphase ATM PCI
35 (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36 (25M UTP25) and x531 (DS3 and E3).
37 Add SMP support.
38
39 Support and updates available at: ftp://ftp.iphase.com/pub/atm
40
41 *******************************************************************************/
42
43 #include <linux/module.h>
44 #include <linux/kernel.h>
45 #include <linux/mm.h>
46 #include <linux/pci.h>
47 #include <linux/errno.h>
48 #include <linux/atm.h>
49 #include <linux/atmdev.h>
50 #include <linux/ctype.h>
51 #include <linux/sonet.h>
52 #include <linux/skbuff.h>
53 #include <linux/time.h>
54 #include <linux/delay.h>
55 #include <linux/uio.h>
56 #include <linux/init.h>
57 #include <linux/interrupt.h>
58 #include <linux/wait.h>
59 #include <linux/slab.h>
60 #include <asm/io.h>
61 #include <linux/atomic.h>
62 #include <linux/uaccess.h>
63 #include <asm/string.h>
64 #include <asm/byteorder.h>
65 #include <linux/vmalloc.h>
66 #include <linux/jiffies.h>
67 #include <linux/nospec.h>
68 #include "iphase.h"
69 #include "suni.h"
/*
 * swap_byte_order() - byte-swap the low 16 bits of @x.
 * The argument and the whole expansion are parenthesized so the macro is
 * safe with compound expressions such as swap_byte_order(a + b).
 */
#define swap_byte_order(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))
71
/* SUNI PHY private data hung off an atm_dev; @dev is parenthesized so the
 * macro works with any pointer-valued expression. */
#define PRIV(dev) ((struct suni_priv *) (dev)->phy_data)
73
/* Forward declarations for helpers defined later in this file. */
static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
static void desc_dbg(IADEV *iadev);

/* Per-board bookkeeping; at most 8 adapters are supported. */
static IADEV *ia_dev[8];
static struct atm_dev *_ia_dev[8];
static int iadev_count;                       /* presumably the number of probed boards -- maintained by probe code */
static void ia_led_timer(struct timer_list *unused);
static DEFINE_TIMER(ia_timer, ia_led_timer);  /* periodic LED/status timer */
/* Buffer-pool tunables; the DFL_* defaults can be overridden as module
 * parameters and are scaled down in ia_hw_type() for small-memory boards. */
static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
/* Debug category bitmask for the IF_* logging macros; 0 = quiet. */
static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
            |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0;

module_param(IA_TX_BUF, int, 0);
module_param(IA_TX_BUF_SZ, int, 0);
module_param(IA_RX_BUF, int, 0);
module_param(IA_RX_BUF_SZ, int, 0);
module_param(IADebugFlag, uint, 0644);

MODULE_LICENSE("GPL");
94
95 /**************************** IA_LIB **********************************/
96
/* Reset a TX return queue to the empty state. */
static void ia_init_rtn_q (IARTN_Q *que)
{
	que->next = que->tail = NULL;
}
102
/* Push an already-allocated node @data onto the head of the return queue. */
static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data)
{
	if (!que->next) {
		/* queue was empty: node becomes both head and tail */
		data->next = NULL;
		que->next = que->tail = data;
	} else {
		data->next = que->next;
		que->next = data;
	}
}
114
/* Append a descriptor-table snapshot (by value) at the tail of the return
 * queue.  The node is allocated atomically (callable from IRQ context).
 * Returns 1 on success, -ENOMEM on allocation failure. */
static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
	IARTN_Q *node;

	node = kmalloc(sizeof(*node), GFP_ATOMIC);
	if (node == NULL)
		return -ENOMEM;
	node->data = data;
	node->next = NULL;
	if (que->next) {
		que->tail->next = node;
		que->tail = node;
	} else {
		que->next = que->tail = node;
	}
	return 1;
}
129
/* Pop and return the head of the return queue, or NULL when empty.
 * The caller takes ownership of the node and must kfree() it. */
static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
	IARTN_Q *head = que->next;

	if (head == NULL)
		return NULL;
	if (head == que->tail)
		que->next = que->tail = NULL;	/* removed the last node */
	else
		que->next = head->next;
	return head;
}
141
/*
 * ia_hack_tcq() - reclaim transmit descriptors completed by the SAR.
 *
 * Walks the Transmit Complete Queue from the driver's shadow write pointer
 * (host_tcq_wr) up to the hardware write pointer, releasing each finished
 * descriptor back to its VC.  For slow VCs (pcr below dev->rate_limit) the
 * skb is additionally queued on tx_return_q for ia_tx_poll() to free.
 *
 * Bugfix: on a corrupt table entry (NULL iavcc) the original code executed
 * 'continue' without advancing host_tcq_wr, spinning forever on the same
 * TCQ entry.  Such entries are now skipped and the walk continues.
 */
static void ia_hack_tcq(IADEV *dev) {

	u_short desc1;
	u_short tcq_wr;
	struct ia_vcc *iavcc_r = NULL;

	tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
	while (dev->host_tcq_wr != tcq_wr) {
		desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
		if (desc1 && !dev->desc_tbl[desc1 -1].timestamp) {
			/* already recycled by the recovery path in get_desc() */
			IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
			*(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
		}
		else if (desc1) {	/* in-flight descriptor completed */
			if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) {
				printk("IA: Fatal err in get_desc\n");
				/* skip the corrupt entry but keep walking */
			} else {
				iavcc_r->vc_desc_cnt--;
				dev->desc_tbl[desc1 -1].timestamp = 0;
				IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
						dev->desc_tbl[desc1 -1].txskb, desc1);)
				if (iavcc_r->pcr < dev->rate_limit) {
					IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
					if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
						printk("ia_hack_tcq: No memory available\n");
				}
				dev->desc_tbl[desc1 -1].iavcc = NULL;
				dev->desc_tbl[desc1 -1].txskb = NULL;
			}
		}
		dev->host_tcq_wr += 2;
		if (dev->host_tcq_wr > dev->ffL.tcq_ed)
			dev->host_tcq_wr = dev->ffL.tcq_st;	/* wrap the ring */
	}
} /* ia_hack_tcq */
178
/*
 * get_desc() - allocate a free TX descriptor number from the TCQ.
 * @iavcc: unused here (the descriptor is bound to a VC by the caller).
 *
 * First reclaims completed descriptors via ia_hack_tcq().  Then, at most
 * once every 50 jiffies (or immediately when the TCQ looks empty), scans
 * the whole descriptor table for entries whose per-VC timeout (ltimeout)
 * has expired and force-recycles them by rewinding the TCQ read pointer
 * and writing the stale descriptor number back into the queue.
 *
 * Returns the 1-based descriptor number, or 0xFFFF when none is free.
 */
static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
	u_short desc_num, i;
	struct sk_buff *skb;
	struct ia_vcc *iavcc_r = NULL;
	unsigned long delta;
	static unsigned long timer = 0;	/* jiffies of last recovery scan; shared by all boards */
	int ltimeout;

	ia_hack_tcq (dev);
	if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
		timer = jiffies;
		i=0;
		while (i < dev->num_tx_desc) {
			if (!dev->desc_tbl[i].timestamp) {
				i++;
				continue;	/* descriptor not in flight */
			}
			ltimeout = dev->desc_tbl[i].iavcc->ltimeout;
			delta = jiffies - dev->desc_tbl[i].timestamp;
			if (delta >= ltimeout) {
				IF_ABR(printk("RECOVER run!! desc_tbl %d = %d delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
				/* rewind the TCQ read pointer and push the stale
				 * descriptor number back into the queue */
				if (dev->ffL.tcq_rd == dev->ffL.tcq_st)
					dev->ffL.tcq_rd = dev->ffL.tcq_ed;
				else
					dev->ffL.tcq_rd -= 2;
				*(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
				if (!(skb = dev->desc_tbl[i].txskb) ||
				    !(iavcc_r = dev->desc_tbl[i].iavcc))
					printk("Fatal err, desc table vcc or skb is NULL\n");
				else
					iavcc_r->vc_desc_cnt--;
				dev->desc_tbl[i].timestamp = 0;
				dev->desc_tbl[i].iavcc = NULL;
				dev->desc_tbl[i].txskb = NULL;
			}
			i++;
		} /* while */
	}
	if (dev->ffL.tcq_rd == dev->host_tcq_wr)
		return 0xFFFF;	/* queue empty - nothing to hand out */

	/* Get the next available descriptor number from TCQ */
	desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);

	/* skip cleared (0) entries and descriptors still marked in-flight */
	while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
		dev->ffL.tcq_rd += 2;
		if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
			dev->ffL.tcq_rd = dev->ffL.tcq_st;	/* wrap */
		if (dev->ffL.tcq_rd == dev->host_tcq_wr)
			return 0xFFFF;
		desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
	}

	/* get system time */
	dev->desc_tbl[desc_num -1].timestamp = jiffies;
	return desc_num;
}
236
/*
 * clear_lockup() - detect and break an ABR segmentation lockup on @vcc.
 *
 * Invoked periodically; every 5th call (vc_status.cnt reaches 5) it samples
 * the VC's main/extended table entries.  A lockup is assumed when the VC
 * still has a last_desc pending and either (a) its state field stays in
 * ABR_STATE across a 10us delay, or (b) its cell slot and fraction have not
 * moved since the previous sample.  Recovery forces the VC back to the idle
 * state, re-seeds its VCI into a free slot of the ABR schedule table and
 * re-enables the segmentation engine and its interrupts.
 */
static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
	u_char foundLockUp;
	vcstatus_t *vcstatus;
	u_short *shd_tbl;
	u_short tempCellSlot, tempFract;
	struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
	struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
	u_int i;

	if (vcc->qos.txtp.traffic_class == ATM_ABR) {
		vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
		vcstatus->cnt++;
		foundLockUp = 0;
		if( vcstatus->cnt == 0x05 ) {	/* only probe every 5th invocation */
			abr_vc += vcc->vci;
			eabr_vc += vcc->vci;
			if( eabr_vc->last_desc ) {
				if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
					/* Wait for 10 Micro sec */
					udelay(10);
					/* still stuck in the same state -> lockup */
					if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
						foundLockUp = 1;
				}
				else {
					/* no progress since the last sample? */
					tempCellSlot = abr_vc->last_cell_slot;
					tempFract = abr_vc->fraction;
					if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
					   && (tempFract == dev->testTable[vcc->vci]->fract))
						foundLockUp = 1;
					dev->testTable[vcc->vci]->lastTime = tempCellSlot;
					dev->testTable[vcc->vci]->fract = tempFract;
				}
			} /* last descriptor */
			vcstatus->cnt = 0;
		} /* vcstatus->cnt */

		if (foundLockUp) {
			IF_ABR(printk("LOCK UP found\n");)
			/* change the seg engine mode; restored to T_ONLINE below */
			writew(0xFFFD, dev->seg_reg+MODE_REG_0);
			/* Wait for 10 Micro sec */
			udelay(10);
			abr_vc->status &= 0xFFF8;
			abr_vc->status |= 0x0001;	/* state is idle */
			shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;
			/* find a free (zero) slot in the schedule table for this VC */
			for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
			if (i < dev->num_vc)
				shd_tbl[i] = vcc->vci;
			else
				IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
			writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
			writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
			writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);
			vcstatus->cnt = 0;
		} /* foundLockUp */

	} /* if an ABR VC */


}
296
297 /*
298 ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
299 **
300 ** +----+----+------------------+-------------------------------+
301 ** | R | NZ | 5-bit exponent | 9-bit mantissa |
302 ** +----+----+------------------+-------------------------------+
303 **
304 ** R = reserved (written as 0)
305 ** NZ = 0 if 0 cells/sec; 1 otherwise
306 **
307 ** if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
308 */
309 static u16
cellrate_to_float(u32 cr)310 cellrate_to_float(u32 cr)
311 {
312
313 #define NZ 0x4000
314 #define M_BITS 9 /* Number of bits in mantissa */
315 #define E_BITS 5 /* Number of bits in exponent */
316 #define M_MASK 0x1ff
317 #define E_MASK 0x1f
318 u16 flot;
319 u32 tmp = cr & 0x00ffffff;
320 int i = 0;
321 if (cr == 0)
322 return 0;
323 while (tmp != 1) {
324 tmp >>= 1;
325 i++;
326 }
327 if (i == M_BITS)
328 flot = NZ | (i << M_BITS) | (cr & M_MASK);
329 else if (i < M_BITS)
330 flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
331 else
332 flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
333 return flot;
334 }
335
#if 0
/*
** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
** Inverse of cellrate_to_float(); currently compiled out (unused).
*/
static u32
float_to_cellrate(u16 rate)
{
	u32	exp, mantissa, cps;
	if ((rate & NZ) == 0)
		return 0;	/* NZ clear means 0 cells/sec */
	exp = (rate >> M_BITS) & E_MASK;
	mantissa = rate & M_MASK;
	if (exp == 0)
		return 1;
	cps = (1 << M_BITS) | mantissa;	/* restore the implicit leading 1 */
	if (exp == M_BITS)
		cps = cps;
	else if (exp > M_BITS)
		cps <<= (exp - M_BITS);
	else
		cps >>= (M_BITS - exp);
	return cps;
}
#endif
360
/* Load @srv_p with the driver's default ABR service parameters, with the
 * peak rate pinned to the board's line rate. */
static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
	srv_p->class_type = ATM_ABR;
	/* cell rates */
	srv_p->pcr  = dev->LineRate;
	srv_p->mcr  = 0;
	srv_p->icr  = 0x055cb7;
	/* ABR control-loop parameters */
	srv_p->tbe  = 0xffffff;
	srv_p->frtt = 0x3a;
	srv_p->rif  = 0xf;
	srv_p->rdf  = 0xb;
	srv_p->nrm  = 0x4;
	srv_p->trm  = 0x7;
	srv_p->cdf  = 0x3;
	srv_p->adtf = 50;
}
375
/*
 * ia_open_abr_vc() - program an ABR VC into the SAR's per-VCI tables.
 * @flag: 1 = initialize the forward (FFRED/segmentation) entry,
 *        0 = initialize the reverse (RFRED/reassembly) entry.
 *
 * Converts the srv_cls_param_t ABR parameters (pcr/mcr/icr, nrm, trm, cdf,
 * adtf, rif, rdf, tbe, frtt) into the hardware's floating-point and packed
 * bit-field formats and writes them into the table entry for vcc->vci.
 * Always returns 0 (the parameter sanity checks are compiled out).
 */
static int
ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p,
               struct atm_vcc *vcc, u8 flag)
{
	f_vc_abr_entry  *f_abr_vc;
	r_vc_abr_entry  *r_abr_vc;
	u32 icr;
	u8 trm, nrm, crm;
	u16 adtf, air, *ptr16;

	f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
	f_abr_vc += vcc->vci;
	switch (flag) {
	case 1: /* FFRED initialization */
#if 0  /* sanity check */
		if (srv_p->pcr == 0)
			return INVALID_PCR;
		if (srv_p->pcr > dev->LineRate)
			srv_p->pcr = dev->LineRate;
		if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
			return MCR_UNAVAILABLE;
		if (srv_p->mcr > srv_p->pcr)
			return INVALID_MCR;
		if (!(srv_p->icr))
			srv_p->icr = srv_p->pcr;
		if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
			return INVALID_ICR;
		if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
			return INVALID_TBE;
		if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
			return INVALID_FRTT;
		if (srv_p->nrm > MAX_NRM)
			return INVALID_NRM;
		if (srv_p->trm > MAX_TRM)
			return INVALID_TRM;
		if (srv_p->adtf > MAX_ADTF)
			return INVALID_ADTF;
		else if (srv_p->adtf == 0)
			srv_p->adtf = 1;
		if (srv_p->cdf > MAX_CDF)
			return INVALID_CDF;
		if (srv_p->rif > MAX_RIF)
			return INVALID_RIF;
		if (srv_p->rdf > MAX_RDF)
			return INVALID_RDF;
#endif
		memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
		f_abr_vc->f_vc_type = ABR;
		nrm = 2 << srv_p->nrm;	/* (2 ** (srv_p->nrm +1)) */
					/* i.e 2**n = 2 << (n-1) */
		f_abr_vc->f_nrm = nrm << 8 | nrm;
		trm = 100000/(2 << (16 - srv_p->trm));
		if ( trm == 0) trm = 1;
		/* pack nrm exponent, MRM and trm into one 16-bit field */
		f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
		crm = srv_p->tbe / nrm;
		if (crm == 0) crm = 1;
		f_abr_vc->f_crm = crm & 0xff;
		f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
		/* initial cell rate is capped by tbe/frtt */
		icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
				((srv_p->tbe/srv_p->frtt)*1000000) :
				(1000000/(srv_p->frtt/srv_p->tbe)));
		f_abr_vc->f_icr = cellrate_to_float(icr);
		adtf = (10000 * srv_p->adtf)/8192;
		if (adtf == 0) adtf = 1;
		f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
		f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
		f_abr_vc->f_acr = f_abr_vc->f_icr;	/* start at the initial rate */
		f_abr_vc->f_status = 0x0042;
		break;
	case 0: /* RFRED initialization */
		ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize);
		*(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
		r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
		r_abr_vc += vcc->vci;
		r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
		air = srv_p->pcr << (15 - srv_p->rif);
		if (air == 0) air = 1;
		r_abr_vc->r_air = cellrate_to_float(air);
		dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
		dev->sum_mcr += srv_p->mcr;	/* account the reserved minimum rate */
		dev->n_abr++;
		break;
	default:
		break;
	}
	return 0;
}
/*
 * ia_cbr_setup() - reserve CBR schedule-table slots for @vcc.
 *
 * Converts the VC's peak cell rate into a number of schedule-table entries
 * (one entry per dev->Granularity cells/sec, rounded up when the requested
 * rate is closer to the next step) and spreads that many copies of the VCI
 * as evenly as possible through the table: from each ideal slot it probes
 * outward (ideal - inc, ideal + inc, ...) until a free (zero) slot is
 * found.  Enables CBR in STPARMS when the first CBR VC appears.
 *
 * Returns 0 on success, -1 if no PCR was given, -EBUSY if insufficient
 * CBR bandwidth remains.
 */
static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
   u32 rateLow=0, rateHigh, rate;
   int entries;
   struct ia_vcc *ia_vcc;

   int   idealSlot =0, testSlot, toBeAssigned, inc;
   u32   spacing;
   u16  *SchedTbl, *TstSchedTbl;
   u16  cbrVC, vcIndex;
   u32   fracSlot    = 0;
   u32   sp_mod      = 0;
   u32   sp_mod2     = 0;

   /* IpAdjustTrafficParams */
   if (vcc->qos.txtp.max_pcr <= 0) {
      IF_ERR(printk("PCR for CBR not defined\n");)
      return -1;
   }
   rate = vcc->qos.txtp.max_pcr;
   entries = rate / dev->Granularity;
   IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
                entries, rate, dev->Granularity);)
   if (entries < 1)
      IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
   rateLow  =  entries * dev->Granularity;
   rateHigh = (entries + 1) * dev->Granularity;
   /* round up when the requested rate is much closer to the next step */
   if (3*(rate - rateLow) > (rateHigh - rate))
      entries++;
   if (entries > dev->CbrRemEntries) {
      IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
      IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
                       entries, dev->CbrRemEntries);)
      return -EBUSY;
   }

   ia_vcc = INPH_IA_VCC(vcc);
   ia_vcc->NumCbrEntry = entries;
   dev->sum_mcr += entries * dev->Granularity;
   /* IaFFrednInsertCbrSched */
   // Starting at an arbitrary location, place the entries into the table
   // as smoothly as possible
   cbrVC   = 0;
   spacing = dev->CbrTotEntries / entries;
   sp_mod  = dev->CbrTotEntries % entries; // get modulo
   toBeAssigned = entries;
   fracSlot = 0;
   vcIndex  = vcc->vci;
   IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
   while (toBeAssigned)
   {
      // If this is the first time, start the table loading for this connection
      // as close to entryPoint as possible.
      if (toBeAssigned == entries)
      {
         idealSlot = dev->CbrEntryPt;
         dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
         if (dev->CbrEntryPt >= dev->CbrTotEntries)
            dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
      } else {
         idealSlot += (u32)(spacing + fracSlot); // Point to the next location
         // in the table that would be smoothest
         fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
         sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
      }
      if (idealSlot >= (int)dev->CbrTotEntries)
         idealSlot -= dev->CbrTotEntries;
      // Continuously check around this ideal value until a null
      // location is encountered.
      SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize);
      inc = 0;
      testSlot = idealSlot;
      TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
      IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
                testSlot, TstSchedTbl,toBeAssigned);)
      memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
      while (cbrVC)  // If another VC at this location, we have to keep looking
      {
          inc++;
          // probe below the ideal slot first ...
          testSlot = idealSlot - inc;
          if (testSlot < 0) { // Wrap if necessary
             testSlot += dev->CbrTotEntries;
             IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
                                                       SchedTbl,testSlot);)
          }
          TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
          if (!cbrVC)
             break;
          // ... then the mirror slot above it
          testSlot = idealSlot + inc;
          if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
             testSlot -= dev->CbrTotEntries;
             IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
             IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
                                            testSlot, toBeAssigned);)
          }
          // set table index and read in value
          TstSchedTbl = (u16*)(SchedTbl + testSlot);
          IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
                          TstSchedTbl,cbrVC,inc);)
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
       } /* while */
       // Move this VCI number into this location of the CBR Sched table.
       memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
       dev->CbrRemEntries--;
       toBeAssigned--;
   } /* while */

   /* IaFFrednCbrEnable */
   dev->NumEnabledCBR++;
   if (dev->NumEnabledCBR == 1) {
       writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
       IF_CBR(printk("CBR is enabled\n");)
   }
   return 0;
}
ia_cbrVc_close(struct atm_vcc * vcc)577 static void ia_cbrVc_close (struct atm_vcc *vcc) {
578 IADEV *iadev;
579 u16 *SchedTbl, NullVci = 0;
580 u32 i, NumFound;
581
582 iadev = INPH_IA_DEV(vcc->dev);
583 iadev->NumEnabledCBR--;
584 SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
585 if (iadev->NumEnabledCBR == 0) {
586 writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
587 IF_CBR (printk("CBR support disabled\n");)
588 }
589 NumFound = 0;
590 for (i=0; i < iadev->CbrTotEntries; i++)
591 {
592 if (*SchedTbl == vcc->vci) {
593 iadev->CbrRemEntries++;
594 *SchedTbl = NullVci;
595 IF_CBR(NumFound++;)
596 }
597 SchedTbl++;
598 }
599 IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
600 }
601
/* Number of free TX descriptors currently available in the TCQ, after
 * first reclaiming any completed ones.  Each TCQ entry is 2 bytes. */
static int ia_avail_descs(IADEV *iadev) {
	int count;

	ia_hack_tcq(iadev);
	if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
		count = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
	else	/* the ring has wrapped */
		count = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 +
			 iadev->host_tcq_wr - iadev->ffL.tcq_st) / 2;
	return count;
}
612
613 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
614
/*
 * ia_que_tx() - drain the software TX backlog into free hardware
 * descriptors.  Skbs whose VC has vanished or is no longer ready are
 * dropped (and the drain stops); if ia_pkt_tx() fails the skb is pushed
 * back to the head of the backlog.  Always returns 0.
 */
static int ia_que_tx (IADEV *iadev) {
	struct sk_buff *skb;
	struct atm_vcc *vcc;
	int avail = ia_avail_descs(iadev);

	while (avail && (skb = skb_dequeue(&iadev->tx_backlog)) != NULL) {
		vcc = ATM_SKB(skb)->vcc;
		if (vcc == NULL) {
			dev_kfree_skb_any(skb);
			printk("ia_que_tx: Null vcc\n");
			break;
		}
		if (!test_bit(ATM_VF_READY,&vcc->flags)) {
			dev_kfree_skb_any(skb);
			printk("Free the SKB on closed vci %d \n", vcc->vci);
			break;
		}
		if (ia_pkt_tx(vcc, skb))
			skb_queue_head(&iadev->tx_backlog, skb);
		avail--;
	}
	return 0;
}
639
/*
 * ia_tx_poll() - free skbs for descriptors completed by the hardware.
 *
 * Reclaims descriptors (ia_hack_tcq) and then drains tx_return_q.  For
 * each returned skb, entries are popped from the owning VC's txing_skb
 * list up to and including that skb; each popped skb is handed back via
 * vcc->pop() (or freed).  If the skb is not found on the list, the return
 * entry is re-queued at the head and the scan stops.  Finally ia_que_tx()
 * is kicked to refill the freed descriptors from the backlog.
 */
static void ia_tx_poll (IADEV *iadev) {
	struct atm_vcc *vcc = NULL;
	struct sk_buff *skb = NULL, *skb1 = NULL;
	struct ia_vcc *iavcc;
	IARTN_Q * rtne;

	ia_hack_tcq(iadev);
	while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
		skb = rtne->data.txskb;
		if (!skb) {
			printk("ia_tx_poll: skb is null\n");
			goto out;
		}
		vcc = ATM_SKB(skb)->vcc;
		if (!vcc) {
			printk("ia_tx_poll: vcc is null\n");
			dev_kfree_skb_any(skb);
			goto out;
		}

		iavcc = INPH_IA_VCC(vcc);
		if (!iavcc) {
			printk("ia_tx_poll: iavcc is null\n");
			dev_kfree_skb_any(skb);
			goto out;
		}

		/* release every queued skb up to (but not including) ours */
		skb1 = skb_dequeue(&iavcc->txing_skb);
		while (skb1 && (skb1 != skb)) {
			if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
				printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
			}
			IF_ERR(printk("Release the SKB not match\n");)
			if ((vcc->pop) && (skb1->len != 0))
			{
				vcc->pop(vcc, skb1);
				IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
						(long)skb1);)
			}
			else
				dev_kfree_skb_any(skb1);
			skb1 = skb_dequeue(&iavcc->txing_skb);
		}
		if (!skb1) {
			/* our skb was not on the list: put the entry back and stop */
			IF_EVENT(printk("IA: Vci %d - skb not found requeued\n",vcc->vci);)
			ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
			break;
		}
		if ((vcc->pop) && (skb->len != 0))
		{
			vcc->pop(vcc, skb);
			IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
		}
		else
			dev_kfree_skb_any(skb);
		kfree(rtne);
	}
	ia_que_tx(iadev);
out:
	return;
}
#if 0
/* Currently unused (compiled out), kept for reference: write @val to the
 * serial NVRAM/EEPROM word at @addr.  Bit-bangs a write-enable command,
 * the WRITE command and the 16 data bits MSB (D15) first, polls NVDO
 * until the part signals completion, then write-protects it again. */
static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
{
	u32 t;
	int i;
	/*
	 * Issue a command to enable writes to the NOVRAM
	 */
	NVRAM_CMD (EXTEND + EWEN);
	NVRAM_CLR_CE;
	/*
	 * issue the write command
	 */
	NVRAM_CMD(IAWRITE + addr);
	/*
	 * Send the data, starting with D15, then D14, and so on for 16 bits
	 */
	for (i=15; i>=0; i--) {
		NVRAM_CLKOUT (val & 0x8000);
		val <<= 1;
	}
	NVRAM_CLR_CE;
	CFG_OR(NVCE);
	t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
	while (!(t & NVDO))
		t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);

	NVRAM_CLR_CE;
	/*
	 * disable writes again
	 */
	NVRAM_CMD(EXTEND + EWDS)
	NVRAM_CLR_CE;
	CFG_AND(~NVDI);
}
#endif
737
/*
 * ia_eeprom_get() - read one 16-bit word from the serial EEPROM.
 * @addr: word address inside the EEPROM.
 *
 * Bit-bangs an IAREAD command through the NVRAM_* helpers and shifts the
 * 16 result bits in, D15 first.  Returns the assembled word.
 */
static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
{
	u_short val;
	u32 t;
	int i;
	/*
	 * Read the first bit that was clocked with the falling edge of the
	 * the last command data clock
	 */
	NVRAM_CMD(IAREAD + addr);
	/*
	 * Now read the rest of the bits, the next bit read is D14, then D13,
	 * and so on.
	 */
	val = 0;
	for (i=15; i>=0; i--) {
		NVRAM_CLKIN(t);
		val |= (t << i);
	}
	NVRAM_CLR_CE;
	CFG_AND(~NVDI);
	return val;
}
761
/*
 * ia_hw_type() - derive the adapter configuration from EEPROM word 25.
 *
 * MEM_SIZE_MASK selects the on-board packet-memory size, which scales the
 * default number of TX/RX buffers (full / half / eighth of the DFL_*
 * defaults; explicit module parameters are honored as-is).  FE_MASK
 * selects the front-end PHY type, from which the usable line rate in
 * cells/sec is computed (the 26/27 factor accounts for framing overhead).
 */
static void ia_hw_type(IADEV *iadev) {
	u_short memType = ia_eeprom_get(iadev, 25);
	iadev->memType = memType;
	if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
		iadev->num_tx_desc = IA_TX_BUF;
		iadev->tx_buf_sz = IA_TX_BUF_SZ;
		iadev->num_rx_desc = IA_RX_BUF;
		iadev->rx_buf_sz = IA_RX_BUF_SZ;
	} else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
		/* halve the buffer counts unless the user overrode them */
		if (IA_TX_BUF == DFL_TX_BUFFERS)
			iadev->num_tx_desc = IA_TX_BUF / 2;
		else
			iadev->num_tx_desc = IA_TX_BUF;
		iadev->tx_buf_sz = IA_TX_BUF_SZ;
		if (IA_RX_BUF == DFL_RX_BUFFERS)
			iadev->num_rx_desc = IA_RX_BUF / 2;
		else
			iadev->num_rx_desc = IA_RX_BUF;
		iadev->rx_buf_sz = IA_RX_BUF_SZ;
	}
	else {
		/* smallest memory option: one eighth of the defaults */
		if (IA_TX_BUF == DFL_TX_BUFFERS)
			iadev->num_tx_desc = IA_TX_BUF / 8;
		else
			iadev->num_tx_desc = IA_TX_BUF;
		iadev->tx_buf_sz = IA_TX_BUF_SZ;
		if (IA_RX_BUF == DFL_RX_BUFFERS)
			iadev->num_rx_desc = IA_RX_BUF / 8;
		else
			iadev->num_rx_desc = IA_RX_BUF;
		iadev->rx_buf_sz = IA_RX_BUF_SZ;
	}
	/* RX packet RAM starts right after the TX buffer area */
	iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
	IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
		iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
		iadev->rx_buf_sz, iadev->rx_pkt_ram);)

#if 0
	if ((memType & FE_MASK) == FE_SINGLE_MODE) {
		iadev->phy_type = PHY_OC3C_S;
	else if ((memType & FE_MASK) == FE_UTP_OPTION)
		iadev->phy_type = PHY_UTP155;
	else
		iadev->phy_type = PHY_OC3C_M;
#endif

	iadev->phy_type = memType & FE_MASK;
	IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
		memType,iadev->phy_type);)
	if (iadev->phy_type == FE_25MBIT_PHY)
		iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
	else if (iadev->phy_type == FE_DS3_PHY)
		iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
	else if (iadev->phy_type == FE_E3_PHY)
		iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
	else
		iadev->LineRate = (u32)(ATM_OC3_PCR);
	IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)

}
822
823 static u32 ia_phy_read32(struct iadev_priv *ia, unsigned int reg)
824 {
825 return readl(ia->phy + (reg >> 2));
826 }
827
828 static void ia_phy_write32(struct iadev_priv *ia, unsigned int reg, u32 val)
829 {
830 writel(val, ia->phy + (reg >> 2));
831 }
832
833 static void ia_frontend_intr(struct iadev_priv *iadev)
834 {
835 u32 status;
836
837 if (iadev->phy_type & FE_25MBIT_PHY) {
838 status = ia_phy_read32(iadev, MB25_INTR_STATUS);
839 iadev->carrier_detect = (status & MB25_IS_GSB) ? 1 : 0;
840 } else if (iadev->phy_type & FE_DS3_PHY) {
841 ia_phy_read32(iadev, SUNI_DS3_FRM_INTR_STAT);
842 status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
843 iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
844 } else if (iadev->phy_type & FE_E3_PHY) {
845 ia_phy_read32(iadev, SUNI_E3_FRM_MAINT_INTR_IND);
846 status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
847 iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
848 } else {
849 status = ia_phy_read32(iadev, SUNI_RSOP_STATUS);
850 iadev->carrier_detect = (status & SUNI_LOSV) ? 0 : 1;
851 }
852
853 printk(KERN_INFO "IA: SUNI carrier %s\n",
854 iadev->carrier_detect ? "detected" : "lost signal");
855 }
856
857 static void ia_mb25_init(struct iadev_priv *iadev)
858 {
859 #if 0
860 mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
861 #endif
862 ia_phy_write32(iadev, MB25_MASTER_CTRL, MB25_MC_DRIC | MB25_MC_DREC);
863 ia_phy_write32(iadev, MB25_DIAG_CONTROL, 0);
864
865 iadev->carrier_detect =
866 (ia_phy_read32(iadev, MB25_INTR_STATUS) & MB25_IS_GSB) ? 1 : 0;
867 }
868
/* One PHY initialization step: write @val to register offset @reg. */
struct ia_reg {
	u16 reg;
	u16 val;
};
873
874 static void ia_phy_write(struct iadev_priv *iadev,
875 const struct ia_reg *regs, int len)
876 {
877 while (len--) {
878 ia_phy_write32(iadev, regs->reg, regs->val);
879 regs++;
880 }
881 }
882
/* Configure the PM7345 SUNI front end for DS3 framing and record the
 * current loss-of-signal state in carrier_detect. */
static void ia_suni_pm7345_init_ds3(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_ds3_init[] = {
		{ SUNI_DS3_FRM_INTR_ENBL, 0x17 },
		{ SUNI_DS3_FRM_CFG, 0x01 },
		{ SUNI_DS3_TRAN_CFG, 0x01 },
		{ SUNI_CONFIG, 0 },
		{ SUNI_SPLR_CFG, 0 },
		{ SUNI_SPLT_CFG, 0 }
	};
	u32 status;

	status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
	iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;

	ia_phy_write(iadev, suni_ds3_init, ARRAY_SIZE(suni_ds3_init));
}
900
/* Configure the PM7345 SUNI front end for E3 framing and record the
 * current loss-of-signal state in carrier_detect. */
static void ia_suni_pm7345_init_e3(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_e3_init[] = {
		{ SUNI_E3_FRM_FRAM_OPTIONS, 0x04 },
		{ SUNI_E3_FRM_MAINT_OPTIONS, 0x20 },
		{ SUNI_E3_FRM_FRAM_INTR_ENBL, 0x1d },
		{ SUNI_E3_FRM_MAINT_INTR_ENBL, 0x30 },
		{ SUNI_E3_TRAN_STAT_DIAG_OPTIONS, 0 },
		{ SUNI_E3_TRAN_FRAM_OPTIONS, 0x01 },
		{ SUNI_CONFIG, SUNI_PM7345_E3ENBL },
		{ SUNI_SPLR_CFG, 0x41 },
		{ SUNI_SPLT_CFG, 0x41 }
	};
	u32 status;

	status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
	iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
	ia_phy_write(iadev, suni_e3_init, ARRAY_SIZE(suni_e3_init));
}
920
/*
 * ia_suni_pm7345_init() - common PM7345 SUNI initialization.
 *
 * First programs the framer for DS3 or E3 depending on phy_type, then
 * applies the common cell-processor register table (interrupt enables,
 * idle/cell header patterns and masks, TX cell-processor setup) and
 * finally clears all loopback modes in SUNI_CONFIG.
 */
static void ia_suni_pm7345_init(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_init[] = {
		/* Enable RSOP loss of signal interrupt. */
		{ SUNI_INTR_ENBL, 0x28 },
		/* Clear error counters. */
		{ SUNI_ID_RESET, 0 },
		/* Clear "PMCTST" in master test register. */
		{ SUNI_MASTER_TEST, 0 },

		{ SUNI_RXCP_CTRL, 0x2c },
		{ SUNI_RXCP_FCTRL, 0x81 },

		{ SUNI_RXCP_IDLE_PAT_H1, 0 },
		{ SUNI_RXCP_IDLE_PAT_H2, 0 },
		{ SUNI_RXCP_IDLE_PAT_H3, 0 },
		{ SUNI_RXCP_IDLE_PAT_H4, 0x01 },

		{ SUNI_RXCP_IDLE_MASK_H1, 0xff },
		{ SUNI_RXCP_IDLE_MASK_H2, 0xff },
		{ SUNI_RXCP_IDLE_MASK_H3, 0xff },
		{ SUNI_RXCP_IDLE_MASK_H4, 0xfe },

		{ SUNI_RXCP_CELL_PAT_H1, 0 },
		{ SUNI_RXCP_CELL_PAT_H2, 0 },
		{ SUNI_RXCP_CELL_PAT_H3, 0 },
		{ SUNI_RXCP_CELL_PAT_H4, 0x01 },

		{ SUNI_RXCP_CELL_MASK_H1, 0xff },
		{ SUNI_RXCP_CELL_MASK_H2, 0xff },
		{ SUNI_RXCP_CELL_MASK_H3, 0xff },
		{ SUNI_RXCP_CELL_MASK_H4, 0xff },

		{ SUNI_TXCP_CTRL, 0xa4 },
		{ SUNI_TXCP_INTR_EN_STS, 0x10 },
		{ SUNI_TXCP_IDLE_PAT_H5, 0x55 }
	};

	if (iadev->phy_type & FE_DS3_PHY)
		ia_suni_pm7345_init_ds3(iadev);
	else
		ia_suni_pm7345_init_e3(iadev);

	ia_phy_write(iadev, suni_init, ARRAY_SIZE(suni_init));

	/* clear line/cell/diag/PHY loopback bits */
	ia_phy_write32(iadev, SUNI_CONFIG, ia_phy_read32(iadev, SUNI_CONFIG) &
		~(SUNI_PM7345_LLB | SUNI_PM7345_CLB |
		  SUNI_PM7345_DLB | SUNI_PM7345_PLB));
#ifdef __SNMP__
	suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
#endif /* __SNMP__ */
	return;
}
974
975
976 /***************************** IA_LIB END *****************************/
977
978 #ifdef CONFIG_ATM_IA_DEBUG
979 static int tcnter = 0;
980 static void xdump( u_char* cp, int length, char* prefix )
981 {
982 int col, count;
983 u_char prntBuf[120];
984 u_char* pBuf = prntBuf;
985 count = 0;
986 while(count < length){
987 pBuf += sprintf( pBuf, "%s", prefix );
988 for(col = 0;count + col < length && col < 16; col++){
989 if (col != 0 && (col % 4) == 0)
990 pBuf += sprintf( pBuf, " " );
991 pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
992 }
993 while(col++ < 16){ /* pad end of buffer with blanks */
994 if ((col % 4) == 0)
995 sprintf( pBuf, " " );
996 pBuf += sprintf( pBuf, " " );
997 }
998 pBuf += sprintf( pBuf, " " );
999 for(col = 0;count + col < length && col < 16; col++){
1000 u_char c = cp[count + col];
1001
1002 if (isascii(c) && isprint(c))
1003 pBuf += sprintf(pBuf, "%c", c);
1004 else
1005 pBuf += sprintf(pBuf, ".");
1006 }
1007 printk("%s\n", prntBuf);
1008 count += col;
1009 pBuf = prntBuf;
1010 }
1011
1012 } /* close xdump(... */
1013 #endif /* CONFIG_ATM_IA_DEBUG */
1014
1015
1016 static struct atm_dev *ia_boards = NULL;
1017
/* The control-memory base addresses scale with the amount of memory fitted
 * on the adapter: the nominal constants assume a 128KB part, so each macro
 * multiplies by (iadev->mem / 128K) to get this board's actual base. */
#define ACTUAL_RAM_BASE \
	RAM_BASE*((iadev->mem)/(128 * 1024))
#define ACTUAL_SEG_RAM_BASE \
	IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
#define ACTUAL_REASS_RAM_BASE \
	IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1024
1025
1026 /*-- some utilities and memory allocation stuff will come here -------------*/
1027
/*
 * desc_dbg - debug dump of the transmit-complete queue (TCQ) state.
 *
 * Prints the hardware TCQ write pointer and the descriptors at/before it,
 * the driver's cached TCQ read/write pointers, every slot between the TCQ
 * start and end registers, and the per-descriptor timestamp table.
 * Diagnostic aid only: reads segmentation registers/RAM, changes no state.
 */
static void desc_dbg(IADEV *iadev) {

	u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
	u32 i;
	void __iomem *tmp;
	// regval = readl((u32)ia_cmds->maddr);
	tcq_wr_ptr = readw(iadev->seg_reg+TCQ_WR_PTR);
	/* last completed descriptor sits just behind the write pointer */
	printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
		tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
		readw(iadev->seg_ram+tcq_wr_ptr-2));
	printk(" host_tcq_wr = 0x%x host_tcq_rd = 0x%x \n", iadev->host_tcq_wr,
		iadev->ffL.tcq_rd);
	tcq_st_ptr = readw(iadev->seg_reg+TCQ_ST_ADR);
	tcq_ed_ptr = readw(iadev->seg_reg+TCQ_ED_ADR);
	printk("tcq_st_ptr = 0x%x tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
	i = 0;
	/* walk every 16-bit TCQ slot from start to end address */
	while (tcq_st_ptr != tcq_ed_ptr) {
		tmp = iadev->seg_ram+tcq_st_ptr;
		printk("TCQ slot %d desc = %d Addr = %p\n", i++, readw(tmp), tmp);
		tcq_st_ptr += 2;
	}
	for(i=0; i <iadev->num_tx_desc; i++)
		printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
}
1052
1053
1054 /*----------------------------- Receiving side stuff --------------------------*/
1055
/*
 * rx_excp_rcvd - drain the reassembly exception queue.
 *
 * The entire body is compiled out (#if 0): per the original note, servicing
 * the exception queue this way caused too many exception interrupts, so the
 * handler is currently a no-op.  The disabled code walks the exception queue
 * until EXCPQ_EMPTY, reading each (vci, error) pair and advancing/wrapping
 * the read pointer, but never uses the values (stats update is a TODO).
 */
static void rx_excp_rcvd(struct atm_dev *dev)
{
#if 0 /* closing the receiving size will cause too many excp int */
	IADEV *iadev;
	u_short state;
	u_short excpq_rd_ptr;
	//u_short *ptr;
	int vci, error = 1;
	iadev = INPH_IA_DEV(dev);
	state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
	while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)
	{ printk("state = %x \n", state);
		excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
		printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr);
		if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
			IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
		// TODO: update exception stat
		vci = readw(iadev->reass_ram+excpq_rd_ptr);
		error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;
		// pwang_test
		excpq_rd_ptr += 4;
		if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))
			excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
		writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
		state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
	}
#endif
}
1084
1085 static void free_desc(struct atm_dev *dev, int desc)
1086 {
1087 IADEV *iadev;
1088 iadev = INPH_IA_DEV(dev);
1089 writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr);
1090 iadev->rfL.fdq_wr +=2;
1091 if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1092 iadev->rfL.fdq_wr = iadev->rfL.fdq_st;
1093 writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);
1094 }
1095
1096
/*
 * rx_pkt - service one entry of the packet-complete queue (PCQ).
 *
 * Pops the next completed descriptor off the PCQ, validates it and its
 * buffer descriptor, charges an skb to the owning VCC and queues a DMA
 * (DLE) to move the PDU from adapter packet RAM into the skb.  On any
 * validation failure the descriptor is returned to the free queue.
 *
 * Returns 0 on success or after a handled drop, -EINVAL if the PCQ is
 * empty, -1 on a bad descriptor or missing VCC.
 */
static int rx_pkt(struct atm_dev *dev)
{
	IADEV *iadev;
	struct atm_vcc *vcc;
	unsigned short status;
	struct rx_buf_desc __iomem *buf_desc_ptr;
	int desc;
	struct dle* wr_ptr;
	int len;
	struct sk_buff *skb;
	u_int buf_addr, dma_addr;

	iadev = INPH_IA_DEV(dev);
	/* queue empty: our cached read pointer caught up with the hardware
	   write pointer */
	if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff))
	{
		printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);
		return -EINVAL;
	}
	/* mask 1st 3 bits to get the actual descno. */
	desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;
	IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n",
		iadev->reass_ram, iadev->rfL.pcq_rd, desc);
		printk(" pcq_wr_ptr = 0x%x\n",
		readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
	/* update the read pointer - maybe we should do this in the end;
	   wraps from the queue end back to its start */
	if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed)
		iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
	else
		iadev->rfL.pcq_rd += 2;
	writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);

	/* get the buffer desc entry.
		update stuff. - doesn't seem to be any update necessary
	*/
	buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
	/* make the ptr point to the corresponding buffer desc entry */
	buf_desc_ptr += desc;
	/* sanity: descriptor number in range and VC index within num_vc */
	if (!desc || (desc > iadev->num_rx_desc) ||
		((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
		free_desc(dev, desc);
		IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
		return -1;
	}
	vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];
	if (!vcc)
	{
		free_desc(dev, desc);
		printk("IA: null vcc, drop PDU\n");
		return -1;
	}


	/* might want to check the status bits for errors */
	status = (u_short) (buf_desc_ptr->desc_mode);
	if (status & (RX_CER | RX_PTE | RX_OFL))
	{
		/* hardware flagged CRC error, packet timeout or overflow */
		atomic_inc(&vcc->stats->rx_err);
		IF_ERR(printk("IA: bad packet, dropping it");)
		if (status & RX_CER) {
			IF_ERR(printk(" cause: packet CRC error\n");)
		}
		else if (status & RX_PTE) {
			IF_ERR(printk(" cause: packet time out\n");)
		}
		else {
			IF_ERR(printk(" cause: buffer overflow\n");)
		}
		goto out_free_desc;
	}

	/*
		build DLE.
	*/

	/* PDU length = how far the adapter's DMA pointer advanced past the
	   buffer start */
	buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
	dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
	len = dma_addr - buf_addr;
	if (len > iadev->rx_buf_sz) {
		printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
		atomic_inc(&vcc->stats->rx_err);
		goto out_free_desc;
	}

	/* allocate an skb charged against the VCC's receive quota */
	if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
		if (vcc->vci < 32)
			printk("Drop control packets\n");
		goto out_free_desc;
	}
	skb_put(skb,len);
	// pwang_test
	ATM_SKB(skb)->vcc = vcc;
	ATM_DESC(skb) = desc;
	/* rx_dle_intr() will dequeue this once the DMA completes */
	skb_queue_tail(&iadev->rx_dma_q, skb);

	/* Build the DLE structure */
	wr_ptr = iadev->rx_dle_q.write;
	wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
		len, DMA_FROM_DEVICE);
	wr_ptr->local_pkt_addr = buf_addr;
	wr_ptr->bytes = len; /* We don't know this do we ?? */
	wr_ptr->mode = DMA_INT_ENABLE;

	/* should take care of wrap around here too. */
	if(++wr_ptr == iadev->rx_dle_q.end)
		wr_ptr = iadev->rx_dle_q.start;
	iadev->rx_dle_q.write = wr_ptr;
	udelay(1);
	/* Increment transaction counter */
	writel(1, iadev->dma+IPHASE5575_RX_COUNTER);
out:	return 0;
out_free_desc:
	free_desc(dev, desc);
	goto out;
}
1211
/*
 * rx_intr - reassembly (receive) interrupt dispatcher.
 *
 * Reads the reassembly interrupt status and handles, in order: completed
 * packets (drain the PCQ via rx_pkt), free-queue exhaustion (workaround
 * that recycles all descriptors when reception has stalled), exception
 * PDUs, and raw cells (currently only logged).
 */
static void rx_intr(struct atm_dev *dev)
{
	IADEV *iadev;
	u_short status;
	u_short state, i;

	iadev = INPH_IA_DEV(dev);
	status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
	IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
	if (status & RX_PKT_RCVD)
	{
		/* do something */
		/* Basically recvd an interrupt for receiving a packet.
		A descriptor would have been written to the packet complete
		queue. Get all the descriptors and set up dma to move the
		packets till the packet complete queue is empty..
		*/
		state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
		IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
		while(!(state & PCQ_EMPTY))
		{
			rx_pkt(dev);
			state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
		}
		iadev->rxing = 1;
	}
	if (status & RX_FREEQ_EMPT)
	{
		/* free queue ran dry: snapshot the packet count, and if no
		   packets arrive for ~50 jiffies, force-free every descriptor
		   and unmask only the free-queue/exception interrupts */
		if (iadev->rxing) {
			iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
			iadev->rx_tmp_jif = jiffies;
			iadev->rxing = 0;
		}
		else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
			((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
			for (i = 1; i <= iadev->num_rx_desc; i++)
				free_desc(dev, i);
			printk("Test logic RUN!!!!\n");
			writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
			iadev->rxing = 1;
		}
		IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
	}

	if (status & RX_EXCP_RCVD)
	{
		/* probably need to handle the exception queue also. */
		IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
		rx_excp_rcvd(dev);
	}


	if (status & RX_RAW_RCVD)
	{
		/* need to handle the raw incoming cells. This depends on
		whether we have programmed to receive the raw cells or not.
		Else ignore. */
		IF_EVENT(printk("Rx intr status: RX_RAW_RCVD %08x\n", status);)
	}
}
1272
1273
/*
 * rx_dle_intr - receive-DMA completion handler.
 *
 * Walks the rx DLE ring from our cached read pointer up to the hardware's
 * current list position, and for each completed DMA: dequeues the matching
 * skb from rx_dma_q, frees its adapter descriptor, validates the AAL5
 * trailer length, trims the skb and pushes it up the VCC.  Finally, if
 * reception was stalled on an empty free queue, re-enables the masked
 * receive interrupts once descriptors are available again.
 */
static void rx_dle_intr(struct atm_dev *dev)
{
	IADEV *iadev;
	struct atm_vcc *vcc;
	struct sk_buff *skb;
	int desc;
	u_short state;
	struct dle *dle, *cur_dle;
	u_int dle_lp;
	int len;
	iadev = INPH_IA_DEV(dev);

	/* free all the dles done, that is just update our own dle read pointer
	- do we really need to do this. Think not. */
	/* DMA is done, just get all the recevie buffers from the rx dma queue
	and push them up to the higher layer protocol. Also free the desc
	associated with the buffer. */
	dle = iadev->rx_dle_q.read;
	/* hardware list address, wrapped to ring size; >>4 converts the byte
	   offset to a dle index (sizeof(struct dle) == 16) */
	dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
	cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));
	while(dle != cur_dle)
	{
		/* free the DMAed skb */
		skb = skb_dequeue(&iadev->rx_dma_q);
		if (!skb)
			goto INCR_DLE;
		desc = ATM_DESC(skb);
		free_desc(dev, desc);

		if (!(len = skb->len))
		{
			printk("rx_dle_intr: skb len 0\n");
			dev_kfree_skb_any(skb);
		}
		else
		{
			struct cpcs_trailer *trailer;
			u_short length;
			struct ia_vcc *ia_vcc;

			/* NOTE(review): this unmaps the *write* pointer's mapping,
			   not that of the dle currently being retired; looks like it
			   should be dle->sys_pkt_addr — verify against hardware docs */
			dma_unmap_single(&iadev->pci->dev, iadev->rx_dle_q.write->sys_pkt_addr,
				len, DMA_FROM_DEVICE);
			/* no VCC related housekeeping done as yet. lets see */
			vcc = ATM_SKB(skb)->vcc;
			if (!vcc) {
				printk("IA: null vcc\n");
				dev_kfree_skb_any(skb);
				goto INCR_DLE;
			}
			ia_vcc = INPH_IA_VCC(vcc);
			if (ia_vcc == NULL)
			{
				atomic_inc(&vcc->stats->rx_err);
				atm_return(vcc, skb->truesize);
				dev_kfree_skb_any(skb);
				goto INCR_DLE;
			}
			// get real pkt length pwang_test
			trailer = (struct cpcs_trailer*)((u_char *)skb->data +
				skb->len - sizeof(*trailer));
			length = swap_byte_order(trailer->length);
			/* drop if the trailer length is impossible for this buffer */
			if ((length > iadev->rx_buf_sz) || (length >
				(skb->len - sizeof(struct cpcs_trailer))))
			{
				atomic_inc(&vcc->stats->rx_err);
				IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
					length, skb->len);)
				atm_return(vcc, skb->truesize);
				dev_kfree_skb_any(skb);
				goto INCR_DLE;
			}
			skb_trim(skb, length);

			/* Display the packet */
			IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);
			xdump(skb->data, skb->len, "RX: ");
			printk("\n");)

			IF_RX(printk("rx_dle_intr: skb push");)
			vcc->push(vcc,skb);
			atomic_inc(&vcc->stats->rx);
			iadev->rx_pkt_cnt++;
		}
INCR_DLE:
		if (++dle == iadev->rx_dle_q.end)
			dle = iadev->rx_dle_q.start;
	}
	iadev->rx_dle_q.read = dle;

	/* if the interrupts are masked because there were no free desc available,
	unmask them now. */
	if (!iadev->rxing) {
		state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
		if (!(state & FREEQ_EMPTY)) {
			state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
			writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
				iadev->reass_reg+REASS_MASK_REG);
			iadev->rxing++;
		}
	}
}
1375
1376
1377 static int open_rx(struct atm_vcc *vcc)
1378 {
1379 IADEV *iadev;
1380 u_short __iomem *vc_table;
1381 u_short __iomem *reass_ptr;
1382 IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
1383
1384 if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
1385 iadev = INPH_IA_DEV(vcc->dev);
1386 if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
1387 if (iadev->phy_type & FE_25MBIT_PHY) {
1388 printk("IA: ABR not support\n");
1389 return -EINVAL;
1390 }
1391 }
1392 /* Make only this VCI in the vc table valid and let all
1393 others be invalid entries */
1394 vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
1395 vc_table += vcc->vci;
1396 /* mask the last 6 bits and OR it with 3 for 1K VCs */
1397
1398 *vc_table = vcc->vci << 6;
1399 /* Also keep a list of open rx vcs so that we can attach them with
1400 incoming PDUs later. */
1401 if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
1402 (vcc->qos.txtp.traffic_class == ATM_ABR))
1403 {
1404 srv_cls_param_t srv_p;
1405 init_abr_vc(iadev, &srv_p);
1406 ia_open_abr_vc(iadev, &srv_p, vcc, 0);
1407 }
1408 else { /* for UBR later may need to add CBR logic */
1409 reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
1410 reass_ptr += vcc->vci;
1411 *reass_ptr = NO_AAL5_PKT;
1412 }
1413
1414 if (iadev->rx_open[vcc->vci])
1415 printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",
1416 vcc->dev->number, vcc->vci);
1417 iadev->rx_open[vcc->vci] = vcc;
1418 return 0;
1419 }
1420
/*
 * rx_init - one-time initialisation of the receive (reassembly) side.
 *
 * Allocates the rx DLE ring in coherent DMA memory, programs the
 * reassembly unit's control-memory map (buffer descriptors, free queue,
 * packet-complete queue, exception queue, reassembly/VC/ABR tables),
 * caches the queue pointers in iadev->rfL, sets timeout registers,
 * allocates the rx_open[] VCC array and finally puts the reassembler
 * online.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int rx_init(struct atm_dev *dev)
{
	IADEV *iadev;
	struct rx_buf_desc __iomem *buf_desc_ptr;
	unsigned long rx_pkt_start = 0;
	void *dle_addr;
	struct abr_vc_table *abr_vc_table;
	u16 *vc_table;
	u16 *reass_table;
	int i,j, vcsize_sel;
	u_short freeq_st_adr;
	u_short *freeq_start;

	iadev = INPH_IA_DEV(dev);
	//  spin_lock_init(&iadev->rx_lock);

	/* Allocate 4k bytes - more aligned than needed (4k boundary) */
	dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
					&iadev->rx_dle_dma, GFP_KERNEL);
	if (!dle_addr) {
		printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
		goto err_out;
	}
	iadev->rx_dle_q.start = (struct dle *)dle_addr;
	iadev->rx_dle_q.read = iadev->rx_dle_q.start;
	iadev->rx_dle_q.write = iadev->rx_dle_q.start;
	iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
	/* the end of the dle q points to the entry after the last
	DLE that can be used. */

	/* write the upper 20 bits of the start address to rx list address register */
	/* We know this is 32bit bus addressed so the following is safe */
	writel(iadev->rx_dle_dma & 0xfffff000,
		iadev->dma + IPHASE5575_RX_LIST_ADDR);
	IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
		iadev->dma+IPHASE5575_TX_LIST_ADDR,
		readl(iadev->dma + IPHASE5575_TX_LIST_ADDR));
	printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
		iadev->dma+IPHASE5575_RX_LIST_ADDR,
		readl(iadev->dma + IPHASE5575_RX_LIST_ADDR));)

	/* mask all interrupts, take the unit offline and reset it before
	   reprogramming */
	writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
	writew(0, iadev->reass_reg+MODE_REG);
	writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);

	/* Receive side control memory map
	   -------------------------------

	   Buffer descr	0x0000 (736 - 23K)
	   VP Table	0x5c00 (256 - 512)
	   Except q	0x5e00 (128 - 512)
	   Free buffer q	0x6000 (1K - 2K)
	   Packet comp q	0x6800 (1K - 2K)
	   Reass Table	0x7000 (1K - 2K)
	   VC Table	0x7800 (1K - 2K)
	   ABR VC Table	0x8000 (1K - 32K)
	*/

	/* Base address for Buffer Descriptor Table */
	writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);
	/* Set the buffer size register */
	writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);

	/* Initialize each entry in the Buffer Descriptor Table */
	iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
	buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
	/* entry 0 is unused (descriptor numbers start at 1) */
	memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
	buf_desc_ptr++;
	rx_pkt_start = iadev->rx_pkt_ram;
	for(i=1; i<=iadev->num_rx_desc; i++)
	{
		memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
		buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;
		buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;
		buf_desc_ptr++;
		rx_pkt_start += iadev->rx_buf_sz;
	}
	IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
	/* free-descriptor queue: start/end/read at base, write at end so the
	   queue initially holds every descriptor */
	i = FREE_BUF_DESC_Q*iadev->memSize;
	writew(i >> 16,  iadev->reass_reg+REASS_QUEUE_BASE);
	writew(i, iadev->reass_reg+FREEQ_ST_ADR);
	writew(i+iadev->num_rx_desc*sizeof(u_short),
		iadev->reass_reg+FREEQ_ED_ADR);
	writew(i, iadev->reass_reg+FREEQ_RD_PTR);
	writew(i+iadev->num_rx_desc*sizeof(u_short),
		iadev->reass_reg+FREEQ_WR_PTR);
	/* Fill the FREEQ with all the free descriptors. */
	freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);
	freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);
	for(i=1; i<=iadev->num_rx_desc; i++)
	{
		*freeq_start = (u_short)i;
		freeq_start++;
	}
	IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
	/* Packet Complete Queue */
	i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
	writew(i, iadev->reass_reg+PCQ_ST_ADR);
	writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
	writew(i, iadev->reass_reg+PCQ_RD_PTR);
	writew(i, iadev->reass_reg+PCQ_WR_PTR);

	/* Exception Queue */
	i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
	writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
	writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q),
		iadev->reass_reg+EXCP_Q_ED_ADR);
	writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
	writew(i, iadev->reass_reg+EXCP_Q_WR_PTR);

	/* Load local copy of FREEQ and PCQ ptrs */
	iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
	iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
	iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
	iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
	iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
	iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
	iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
	iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;

	IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
		iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
		iadev->rfL.pcq_wr);)
	/* just for check - no VP TBL */
	/* VP Table */
	/* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
	/* initialize VP Table for invalid VPIs
	- I guess we can write all 1s or 0x000f in the entire memory
	space or something similar.
	*/

	/* This seems to work and looks right to me too !!! */
	i = REASS_TABLE * iadev->memSize;
	writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
	/* initialize Reassembly table to I don't know what ???? */
	reass_table = (u16 *)(iadev->reass_ram+i);
	j = REASS_TABLE_SZ * iadev->memSize;
	for(i=0; i < j; i++)
		*reass_table++ = NO_AAL5_PKT;
	/* derive vcsize_sel = log2(8192 / num_vc); assumes num_vc is a
	   power of two no larger than 8K */
	i = 8*1024;
	vcsize_sel = 0;
	while (i != iadev->num_vc) {
		i /= 2;
		vcsize_sel++;
	}
	i = RX_VC_TABLE * iadev->memSize;
	writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
	vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
	j = RX_VC_TABLE_SZ * iadev->memSize;
	for(i = 0; i < j; i++)
	{
		/* shift the reassembly pointer by 3 + lower 3 bits of
		vc_lkup_base register (=3 for 1K VCs) and the last byte
		is those low 3 bits.
		Shall program this later.
		*/
		*vc_table = (i << 6) | 15; /* for invalid VCI */
		vc_table++;
	}
	/* ABR VC table */
	i = ABR_VC_TABLE * iadev->memSize;
	writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);

	i = ABR_VC_TABLE * iadev->memSize;
	abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);
	/* NOTE(review): the element count below reuses REASS_TABLE_SZ; it
	   looks like it was intended to be the ABR VC table size — verify */
	j = REASS_TABLE_SZ * iadev->memSize;
	memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
	for(i = 0; i < j; i++) {
		abr_vc_table->rdf = 0x0003;
		abr_vc_table->air = 0x5eb1;
		abr_vc_table++;
	}

	/* Initialize other registers */

	/* VP Filter Register set for VC Reassembly only */
	writew(0xff00, iadev->reass_reg+VP_FILTER);
	writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
	writew(0x1,  iadev->reass_reg+PROTOCOL_ID);

	/* Packet Timeout Count  related Registers :
	Set packet timeout to occur in about 3 seconds
	Set Packet Aging Interval count register to overflow in about 4 us
	*/
	writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );

	i = (j >> 6) & 0xFF;
	j += 2 * (j - 1);
	i |= ((j << 2) & 0xFF00);
	writew(i, iadev->reass_reg+TMOUT_RANGE);

	/* initiate the desc_tble */
	for(i=0; i<iadev->num_tx_desc;i++)
		iadev->desc_tbl[i].timestamp = 0;

	/* to clear the interrupt status register - read it */
	readw(iadev->reass_reg+REASS_INTR_STATUS_REG);

	/* Mask Register - clear it */
	writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);

	skb_queue_head_init(&iadev->rx_dma_q);
	iadev->rx_free_desc_qhead = NULL;

	iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
	if (!iadev->rx_open) {
		printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
		dev->number);
		goto err_free_dle;
	}

	iadev->rxing = 1;
	iadev->rx_pkt_cnt = 0;
	/* Mode Register */
	writew(R_ONLINE, iadev->reass_reg+MODE_REG);
	return 0;

err_free_dle:
	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
			iadev->rx_dle_dma);
err_out:
	return -ENOMEM;
}
1644
1645
1646 /*
1647 The memory map suggested in appendix A and the coding for it.
1648 Keeping it around just in case we change our mind later.
1649
1650 Buffer descr 0x0000 (128 - 4K)
1651 UBR sched 0x1000 (1K - 4K)
1652 UBR Wait q 0x2000 (1K - 4K)
	Commn queues	0x3000	Packet Ready, Transmit comp(0x3100)
1654 (128 - 256) each
1655 extended VC 0x4000 (1K - 8K)
1656 ABR sched 0x6000 and ABR wait queue (1K - 2K) each
1657 CBR sched 0x7000 (as needed)
1658 VC table 0x8000 (1K - 32K)
1659 */
1660
1661 static void tx_intr(struct atm_dev *dev)
1662 {
1663 IADEV *iadev;
1664 unsigned short status;
1665 unsigned long flags;
1666
1667 iadev = INPH_IA_DEV(dev);
1668
1669 status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
1670 if (status & TRANSMIT_DONE){
1671
1672 IF_EVENT(printk("Transmit Done Intr logic run\n");)
1673 spin_lock_irqsave(&iadev->tx_lock, flags);
1674 ia_tx_poll(iadev);
1675 spin_unlock_irqrestore(&iadev->tx_lock, flags);
1676 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1677 if (iadev->close_pending)
1678 wake_up(&iadev->close_wait);
1679 }
1680 if (status & TCQ_NOT_EMPTY)
1681 {
1682 IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)
1683 }
1684 }
1685
/*
 * tx_dle_intr - transmit-DMA completion handler.
 *
 * Walks the tx DLE ring from our cached read pointer to the hardware's
 * current list position, dequeuing each DMAed skb from tx_dma_q, unmapping
 * its buffer, and either completing it to the VCC (pop/free) or - for
 * rate-limited VCs - parking it on the per-VC txing_skb list for the flow
 * control logic.  Runs under the tx lock.
 */
static void tx_dle_intr(struct atm_dev *dev)
{
	IADEV *iadev;
	struct dle *dle, *cur_dle;
	struct sk_buff *skb;
	struct atm_vcc *vcc;
	struct ia_vcc  *iavcc;
	u_int dle_lp;
	unsigned long flags;

	iadev = INPH_IA_DEV(dev);
	spin_lock_irqsave(&iadev->tx_lock, flags);
	dle = iadev->tx_dle_q.read;
	/* wrap the hardware list address to the ring; >>4 = byte offset to
	   dle index (sizeof(struct dle) == 16) */
	dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) &
				(sizeof(struct dle)*DLE_ENTRIES - 1);
	cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
	while (dle != cur_dle)
	{
		/* free the DMAed skb */
		skb = skb_dequeue(&iadev->tx_dma_q);
		if (!skb) break;

		/* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
		/* NOTE(review): (dle - start) is an element count, yet the
		   modulus is 2*sizeof(struct dle) bytes — the units look mixed;
		   verify this condition really selects every other dle pair */
		if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
			dma_unmap_single(&iadev->pci->dev, dle->sys_pkt_addr, skb->len,
					DMA_TO_DEVICE);
		}
		vcc = ATM_SKB(skb)->vcc;
		if (!vcc) {
			printk("tx_dle_intr: vcc is null\n");
			/* NOTE(review): early return leaves tx_dle_q.read
			   uncommitted; remaining entries are revisited on the
			   next interrupt */
			spin_unlock_irqrestore(&iadev->tx_lock, flags);
			dev_kfree_skb_any(skb);

			return;
		}
		iavcc = INPH_IA_VCC(vcc);
		if (!iavcc) {
			printk("tx_dle_intr: iavcc is null\n");
			spin_unlock_irqrestore(&iadev->tx_lock, flags);
			dev_kfree_skb_any(skb);
			return;
		}
		if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
			if ((vcc->pop) && (skb->len != 0))
			{
				vcc->pop(vcc, skb);
			}
			else {
				dev_kfree_skb_any(skb);
			}
		}
		else { /* Hold the rate-limited skb for flow control */
			IA_SKB_STATE(skb) |= IA_DLED;
			skb_queue_tail(&iavcc->txing_skb, skb);
		}
		IF_EVENT(printk("tx_dle_intr: enque skb = 0x%p \n", skb);)
		if (++dle == iadev->tx_dle_q.end)
			dle = iadev->tx_dle_q.start;
	}
	iadev->tx_dle_q.read = dle;
	spin_unlock_irqrestore(&iadev->tx_lock, flags);
}
1748
/*
 * open_tx - set up the transmit side of a VCC.
 *
 * Validates the traffic class against the PHY capabilities, derives the
 * effective PCR and the close-timeout, sizes the socket send buffer for
 * rate-limited VCs, zeroes and fills this VCI's main/extended VC table
 * entries, and programs the scheduler for UBR, ABR or CBR as requested.
 *
 * Returns 0 on success; -EINVAL for unsupported class/SDU size, -EBUSY
 * when an ABR MCR would oversubscribe the line, -1 or ia_cbr_setup()'s
 * error for CBR failures.
 */
static int open_tx(struct atm_vcc *vcc)
{
	struct ia_vcc *ia_vcc;
	IADEV *iadev;
	struct main_vc *vc;
	struct ext_vc *evc;
	int ret;
	IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)
	if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
	iadev = INPH_IA_DEV(vcc->dev);

	/* the 25 Mbit PHY supports neither ABR nor CBR */
	if (iadev->phy_type & FE_25MBIT_PHY) {
		if (vcc->qos.txtp.traffic_class == ATM_ABR) {
			printk("IA:  ABR not support\n");
			return -EINVAL;
		}
		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			printk("IA:  CBR not support\n");
			return -EINVAL;
		}
	}
	ia_vcc =  INPH_IA_VCC(vcc);
	memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
	if (vcc->qos.txtp.max_sdu >
			(iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
		printk("IA:  SDU size over (%d) the configured SDU size %d\n",
			vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
		/* NOTE(review): this path frees ia_vcc while later error
		   returns below do not — confirm ownership of vcc->dev_data */
		vcc->dev_data = NULL;
		kfree(ia_vcc);
		return -EINVAL;
	}
	ia_vcc->vc_desc_cnt = 0;
	ia_vcc->txing = 1;

	/* find pcr */
	if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR)
		vcc->qos.txtp.pcr = iadev->LineRate;
	else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
		vcc->qos.txtp.pcr = iadev->LineRate;
	else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0))
		vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
	if (vcc->qos.txtp.pcr > iadev->LineRate)
		vcc->qos.txtp.pcr = iadev->LineRate;
	ia_vcc->pcr = vcc->qos.txtp.pcr;

	/* close-timeout scales inversely with the rate */
	if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
	else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
	else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
	else ia_vcc->ltimeout = 2700 * HZ / ia_vcc->pcr;
	/* NOTE(review): the rate_limit condition is tested twice in a row;
	   the two blocks could be merged */
	if (ia_vcc->pcr < iadev->rate_limit)
		skb_queue_head_init (&ia_vcc->txing_skb);
	if (ia_vcc->pcr < iadev->rate_limit) {
		struct sock *sk = sk_atm(vcc);

		/* size the socket send buffer to the rate tier */
		if (vcc->qos.txtp.max_sdu != 0) {
			if (ia_vcc->pcr > 60000)
				sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
			else if (ia_vcc->pcr > 2000)
				sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
			else
				sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
		}
		else
			sk->sk_sndbuf = 24576;
	}

	vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
	evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
	vc += vcc->vci;
	evc += vcc->vci;
	memset((caddr_t)vc, 0, sizeof(*vc));
	memset((caddr_t)evc, 0, sizeof(*evc));

	/* store the most significant 4 bits of vci as the last 4 bits
	of first part of atm header.
	store the last 12 bits of vci as first 12 bits of the second
	part of the atm header.
	*/
	evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
	evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;

	/* check the following for different traffic classes */
	if (vcc->qos.txtp.traffic_class == ATM_UBR)
	{
		vc->type = UBR;
		vc->status = CRC_APPEND;
		vc->acr = cellrate_to_float(iadev->LineRate);
		if (vcc->qos.txtp.pcr > 0)
			vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);
		IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
			vcc->qos.txtp.max_pcr,vc->acr);)
	}
	else if (vcc->qos.txtp.traffic_class == ATM_ABR)
	{	srv_cls_param_t srv_p;
		IF_ABR(printk("Tx ABR VCC\n");)
		init_abr_vc(iadev, &srv_p);
		/* override defaults with any caller-supplied ABR parameters */
		if (vcc->qos.txtp.pcr > 0)
			srv_p.pcr = vcc->qos.txtp.pcr;
		if (vcc->qos.txtp.min_pcr > 0) {
			int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
			if (tmpsum > iadev->LineRate)
				return -EBUSY;
			srv_p.mcr = vcc->qos.txtp.min_pcr;
			iadev->sum_mcr += vcc->qos.txtp.min_pcr;
		}
		else srv_p.mcr = 0;
		if (vcc->qos.txtp.icr)
			srv_p.icr = vcc->qos.txtp.icr;
		if (vcc->qos.txtp.tbe)
			srv_p.tbe = vcc->qos.txtp.tbe;
		if (vcc->qos.txtp.frtt)
			srv_p.frtt = vcc->qos.txtp.frtt;
		if (vcc->qos.txtp.rif)
			srv_p.rif = vcc->qos.txtp.rif;
		if (vcc->qos.txtp.rdf)
			srv_p.rdf = vcc->qos.txtp.rdf;
		if (vcc->qos.txtp.nrm_pres)
			srv_p.nrm = vcc->qos.txtp.nrm;
		if (vcc->qos.txtp.trm_pres)
			srv_p.trm = vcc->qos.txtp.trm;
		if (vcc->qos.txtp.adtf_pres)
			srv_p.adtf = vcc->qos.txtp.adtf;
		if (vcc->qos.txtp.cdf_pres)
			srv_p.cdf = vcc->qos.txtp.cdf;
		if (srv_p.icr > srv_p.pcr)
			srv_p.icr = srv_p.pcr;
		IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d  mcr = %d\n",
			srv_p.pcr, srv_p.mcr);)
		ia_open_abr_vc(iadev, &srv_p, vcc, 1);
	} else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
		if (iadev->phy_type & FE_25MBIT_PHY) {
			printk("IA:  CBR not support\n");
			return -EINVAL;
		}
		if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
			IF_CBR(printk("PCR is not available\n");)
			return -1;
		}
		vc->type = CBR;
		vc->status = CRC_APPEND;
		if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {
			return ret;
		}
	} else {
		printk("iadev:  Non UBR, ABR and CBR traffic not supported\n");
	}

	iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
	IF_EVENT(printk("ia open_tx returning \n");)
	return 0;
}
1900
1901
/*
 * tx_init - one-time initialization of the transmit (segmentation) side
 * of the (i)Chip SAR for @dev.
 *
 * Allocates the host-side TX DLE ring and per-descriptor CPCS-trailer
 * buffers, lays out the segmentation control memory (buffer descriptor
 * table, TCQ/PRQ, CBR/UBR/ABR schedule tables and wait queues, VC
 * tables) and programs the matching segmentation (FFRED) registers.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure; partial
 * allocations are unwound through the goto chain at the bottom.
 */
static int tx_init(struct atm_dev *dev)
{
	IADEV *iadev;
	struct tx_buf_desc *buf_desc_ptr;
	unsigned int tx_pkt_start;
	void *dle_addr;
	int i;
	u_short tcq_st_adr;
	u_short *tcq_start;
	u_short prq_st_adr;
	u_short *prq_start;
	struct main_vc *vc;
	struct ext_vc *evc;
	u_short tmp16;
	u32 vcsize_sel;

	iadev = INPH_IA_DEV(dev);
	spin_lock_init(&iadev->tx_lock);

	IF_INIT(printk("Tx MASK REG: 0x%0x\n",
		readw(iadev->seg_reg+SEG_MASK_REG));)

	/* Allocate 4k (boundary aligned) bytes */
	dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
				      &iadev->tx_dle_dma, GFP_KERNEL);
	if (!dle_addr) {
		printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
		goto err_out;
	}
	/* DLE queue is a ring; read == write means empty. */
	iadev->tx_dle_q.start = (struct dle*)dle_addr;
	iadev->tx_dle_q.read = iadev->tx_dle_q.start;
	iadev->tx_dle_q.write = iadev->tx_dle_q.start;
	iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);

	/* write the upper 20 bits of the start address to tx list address register */
	writel(iadev->tx_dle_dma & 0xfffff000,
	       iadev->dma + IPHASE5575_TX_LIST_ADDR);
	/* Mask all segmentation interrupts, idle the mode register and
	 * reset the segmentation engine before touching control memory. */
	writew(0xffff, iadev->seg_reg+SEG_MASK_REG);
	writew(0, iadev->seg_reg+MODE_REG_0);
	writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);
	/* Table offsets scale with memSize (1 for 1K-VC boards, 4 for 4K-VC). */
	iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
	iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
	iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;

	/*
	   Transmit side control memory map
	   --------------------------------
	 Buffer descr	0x0000 (128 - 4K)
	 Commn queues	0x1000	Transmit comp, Packet ready(0x1400)
					(512 - 1K) each
					TCQ - 4K, PRQ - 5K
	 CBR Table	0x1800 (as needed) - 6K
	 UBR Table	0x3000 (1K - 4K) - 12K
	 UBR Wait queue	0x4000 (1K - 4K) - 16K
	 ABR sched	0x5000	and ABR wait queue (1K - 2K) each
				ABR Tbl - 20K, ABR Wq - 22K
	 extended VC	0x6000 (1K - 8K) - 24K
	 VC Table	0x8000 (1K - 32K) - 32K

	 Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl
	 and Wait q, which can be allotted later.
	*/

	/* Buffer Descriptor Table Base address */
	writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);

	/* initialize each entry in the buffer descriptor table */
	/* Descriptor 0 is never used; clear it and start packet RAM
	 * carving from descriptor 1. */
	buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
	memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
	buf_desc_ptr++;
	tx_pkt_start = TX_PACKET_RAM;
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
		buf_desc_ptr->desc_mode = AAL5;
		/* each descriptor owns a tx_buf_sz slice of packet RAM */
		buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;
		buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;
		buf_desc_ptr++;
		tx_pkt_start += iadev->tx_buf_sz;
	}
	/* One host-side cpcs_trailer_desc per hardware descriptor. */
	iadev->tx_buf = kmalloc_array(iadev->num_tx_desc,
				      sizeof(*iadev->tx_buf),
				      GFP_KERNEL);
	if (!iadev->tx_buf) {
		printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
		goto err_free_dle;
	}
	for (i= 0; i< iadev->num_tx_desc; i++)
	{
		struct cpcs_trailer *cpcs;

		/* GFP_DMA: the trailer is DMAed to the card */
		cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
		if(!cpcs) {
			printk(KERN_ERR DEV_LABEL " couldn't get freepage\n");
			goto err_free_tx_bufs;
		}
		iadev->tx_buf[i].cpcs = cpcs;
		iadev->tx_buf[i].dma_addr = dma_map_single(&iadev->pci->dev,
							   cpcs,
							   sizeof(*cpcs),
							   DMA_TO_DEVICE);
	}
	/* desc -> (iavcc, skb) mapping, filled in at transmit time */
	iadev->desc_tbl = kmalloc_array(iadev->num_tx_desc,
					sizeof(*iadev->desc_tbl),
					GFP_KERNEL);
	if (!iadev->desc_tbl) {
		printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
		goto err_free_all_tx_bufs;
	}

	/* Communication Queues base address */
	i = TX_COMP_Q * iadev->memSize;
	writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);

	/* Transmit Complete Queue */
	writew(i, iadev->seg_reg+TCQ_ST_ADR);
	writew(i, iadev->seg_reg+TCQ_RD_PTR);
	writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR);
	iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
	writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
	       iadev->seg_reg+TCQ_ED_ADR);
	/* Fill the TCQ with all the free descriptors. */
	/* The TCQ doubles as the free-descriptor pool: seed it with 1..N. */
	tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);
	tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		*tcq_start = (u_short)i;
		tcq_start++;
	}

	/* Packet Ready Queue */
	i = PKT_RDY_Q * iadev->memSize;
	writew(i, iadev->seg_reg+PRQ_ST_ADR);
	writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
	       iadev->seg_reg+PRQ_ED_ADR);
	writew(i, iadev->seg_reg+PRQ_RD_PTR);
	writew(i, iadev->seg_reg+PRQ_WR_PTR);

	/* Load local copy of PRQ and TCQ ptrs */
	iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
	iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
	iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;

	iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
	iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
	iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;

	/* Just for safety initializing the queue to have desc 1 always */
	/* Fill the PRQ with all the free descriptors. */
	/* NOTE(review): the entries are written as 0, although the comment
	 * above says "desc 1" - pre-existing discrepancy, left as-is. */
	prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);
	prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		*prq_start = (u_short)0;	/* desc 1 in all entries */
		prq_start++;
	}
	/* CBR Table */
	IF_INIT(printk("Start CBR Init\n");)
#if 1  /* for 1K VC board, CBR_PTR_BASE is 0 */
	writew(0,iadev->seg_reg+CBR_PTR_BASE);
#else /* Charlie's logic is wrong ? */
	tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
	IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
	writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
#endif

	IF_INIT(printk("value in register = 0x%x\n",
	       readw(iadev->seg_reg+CBR_PTR_BASE));)
	/* CBR table offsets are in 16-bit words, hence >> 1 */
	tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
	writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
	IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
	       readw(iadev->seg_reg+CBR_TAB_BEG));)
	writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
	tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
	writew(tmp16, iadev->seg_reg+CBR_TAB_END);
	IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
	       iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
	IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
	  readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
	  readw(iadev->seg_reg+CBR_TAB_END+1));)

	/* Initialize the CBR Scheduling Table */
	memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize,
	          0, iadev->num_vc*6);
	/* Each VC consumes 6 bytes => num_vc*3 16-bit schedule entries. */
	iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
	iadev->CbrEntryPt = 0;
	iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
	iadev->NumEnabledCBR = 0;

	/* UBR scheduling Table and wait queue */
	/* initialize all bytes of UBR scheduler table and wait queue to 0
		- SCHEDSZ is 1K (# of entries).
		- UBR Table size is 4K
		- UBR wait queue is 4K
	   since the table and wait queues are contiguous, all the bytes
	   can be initialized by one memeset.
	*/

	/* Derive VCT_BASE's size-select field: vcsize_sel = log2(8K/num_vc).
	 * num_vc is always a power of two (1024 or 4096), so this loop
	 * terminates. */
	vcsize_sel = 0;
	i = 8*1024;
	while (i != iadev->num_vc) {
		i /= 2;
		vcsize_sel++;
	}

	i = MAIN_VC_TABLE * iadev->memSize;
	writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
	i = EXT_VC_TABLE * iadev->memSize;
	writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
	i = UBR_SCHED_TABLE * iadev->memSize;
	writew((i & 0xffff) >> 11, iadev->seg_reg+UBR_SBPTR_BASE);
	i = UBR_WAIT_Q * iadev->memSize;
	writew((i >> 7) & 0xffff, iadev->seg_reg+UBRWQ_BASE);
	memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
	       0, iadev->num_vc*8);
	/* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/
	/* initialize all bytes of ABR scheduler table and wait queue to 0
		- SCHEDSZ is 1K (# of entries).
		- ABR Table size is 2K
		- ABR wait queue is 2K
	   since the table and wait queues are contiguous, all the bytes
	   can be initialized by one memeset.
	*/
	i = ABR_SCHED_TABLE * iadev->memSize;
	writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
	i = ABR_WAIT_Q * iadev->memSize;
	writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);

	i = ABR_SCHED_TABLE*iadev->memSize;
	memset((caddr_t)(iadev->seg_ram+i), 0, iadev->num_vc*4);
	vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
	evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
	/* Per-VC software bookkeeping (rate-limit state, VC status). */
	iadev->testTable = kmalloc_array(iadev->num_vc,
					 sizeof(*iadev->testTable),
					 GFP_KERNEL);
	if (!iadev->testTable) {
		printk("Get freepage failed\n");
		goto err_free_desc_tbl;
	}
	for(i=0; i<iadev->num_vc; i++)
	{
		/* clear both hardware VC table entries and allocate the
		 * matching software entry, defaulting every VC to UBR */
		memset((caddr_t)vc, 0, sizeof(*vc));
		memset((caddr_t)evc, 0, sizeof(*evc));
		iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
						GFP_KERNEL);
		if (!iadev->testTable[i])
			goto err_free_test_tables;
		iadev->testTable[i]->lastTime = 0;
		iadev->testTable[i]->fract = 0;
		iadev->testTable[i]->vc_status = VC_UBR;
		vc++;
		evc++;
	}

	/* Other Initialization */

	/* Max Rate Register */
	if (iadev->phy_type & FE_25MBIT_PHY) {
		writew(RATE25, iadev->seg_reg+MAXRATE);
		writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
	}
	else {
		writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
		/* ABR only makes sense on the non-25Mbit PHYs */
		writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
	}
	/* Set Idle Header Registers to be sure */
	writew(0, iadev->seg_reg+IDLEHEADHI);
	writew(0, iadev->seg_reg+IDLEHEADLO);

	/* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
	writew(0xaa00, iadev->seg_reg+ABRUBR_ARB);

	iadev->close_pending = 0;
	init_waitqueue_head(&iadev->close_wait);
	init_waitqueue_head(&iadev->timeout_wait);
	skb_queue_head_init(&iadev->tx_dma_q);
	ia_init_rtn_q(&iadev->tx_return_q);

	/* RM Cell Protocol ID and Message Type */
	writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);
	skb_queue_head_init (&iadev->tx_backlog);

	/* Mode Register 1 */
	writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);

	/* Mode Register 0 */
	writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);

	/* Interrupt Status Register - read to clear */
	readw(iadev->seg_reg+SEG_INTR_STATUS_REG);

	/* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */
	writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
	writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
	iadev->tx_pkt_cnt = 0;
	iadev->rate_limit = iadev->LineRate / 3;

	return 0;

err_free_test_tables:
	/* i is the index of the entry whose allocation failed */
	while (--i >= 0)
		kfree(iadev->testTable[i]);
	kfree(iadev->testTable);
err_free_desc_tbl:
	kfree(iadev->desc_tbl);
err_free_all_tx_bufs:
	/* all num_tx_desc cpcs buffers were allocated; free them all */
	i = iadev->num_tx_desc;
err_free_tx_bufs:
	while (--i >= 0) {
		struct cpcs_trailer_desc *desc = iadev->tx_buf + i;

		dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
				 sizeof(*desc->cpcs), DMA_TO_DEVICE);
		kfree(desc->cpcs);
	}
	kfree(iadev->tx_buf);
err_free_dle:
	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
			  iadev->tx_dle_dma);
err_out:
	return -ENOMEM;
}
2224
/*
 * ia_int - shared interrupt handler for the adapter.
 *
 * Loops while any of the low 7 bits of the bus status register are set,
 * dispatching to the reassembly (RX), segmentation (TX), DLE-done and
 * front-end sub-handlers.  STAT_DLERINT/STAT_DLETINT are write-1-to-clear
 * and must be acknowledged here; the other bits are cleared by their
 * respective sub-handlers.  Returns IRQ_HANDLED if any work was done.
 */
static irqreturn_t ia_int(int irq, void *dev_id)
{
   struct atm_dev *dev;
   IADEV *iadev;
   unsigned int status;
   int handled = 0;

   dev = dev_id;
   iadev = INPH_IA_DEV(dev);
   /* re-read status until all pending sources have been serviced */
   while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))
   {
	handled = 1;
        IF_EVENT(printk("ia_int: status = 0x%x\n", status);)
	if (status & STAT_REASSINT)
	{
	   /* reassembly (receive) event pending */
	   IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);)
	   rx_intr(dev);
	}
	if (status & STAT_DLERINT)
	{
	   /* Clear this bit by writing a 1 to it. */
	   writel(STAT_DLERINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
	   rx_dle_intr(dev);
	}
	if (status & STAT_SEGINT)
	{
	   /* segmentation (transmit) event pending */
	   IF_EVENT(printk("IA: tx_intr \n");)
	   tx_intr(dev);
	}
	if (status & STAT_DLETINT)
	{
	   /* TX DLE completion: write-1-to-clear, then reap finished DLEs */
	   writel(STAT_DLETINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
	   tx_dle_intr(dev);
	}
	if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))
	{
           /* only the front-end interrupt is acted upon; ERR/MARK are
              read-and-ignored here */
           if (status & STAT_FEINT)
               ia_frontend_intr(iadev);
	}
   }
   return IRQ_RETVAL(handled);
}
2269
2270
2271
2272 /*----------------------------- entries --------------------------------*/
2273 static int get_esi(struct atm_dev *dev)
2274 {
2275 IADEV *iadev;
2276 int i;
2277 u32 mac1;
2278 u16 mac2;
2279
2280 iadev = INPH_IA_DEV(dev);
2281 mac1 = cpu_to_be32(le32_to_cpu(readl(
2282 iadev->reg+IPHASE5575_MAC1)));
2283 mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));
2284 IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)
2285 for (i=0; i<MAC1_LEN; i++)
2286 dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));
2287
2288 for (i=0; i<MAC2_LEN; i++)
2289 dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));
2290 return 0;
2291 }
2292
2293 static int reset_sar(struct atm_dev *dev)
2294 {
2295 IADEV *iadev;
2296 int i, error;
2297 unsigned int pci[64];
2298
2299 iadev = INPH_IA_DEV(dev);
2300 for (i = 0; i < 64; i++) {
2301 error = pci_read_config_dword(iadev->pci, i * 4, &pci[i]);
2302 if (error != PCIBIOS_SUCCESSFUL)
2303 return error;
2304 }
2305 writel(0, iadev->reg+IPHASE5575_EXT_RESET);
2306 for (i = 0; i < 64; i++) {
2307 error = pci_write_config_dword(iadev->pci, i * 4, pci[i]);
2308 if (error != PCIBIOS_SUCCESSFUL)
2309 return error;
2310 }
2311 udelay(5);
2312 return 0;
2313 }
2314
2315
/*
 * ia_init - probe-time initialization: read PCI config, size BAR0 to
 * identify the board variant (1K-VC vs 4K-VC), ioremap the register/RAM
 * window, read the ESI and reset the SAR.
 *
 * Returns 0 on success, a negative errno on config/mapping failure, or
 * 1 if the SAR reset fails (pre-existing convention, kept as-is).
 */
static int ia_init(struct atm_dev *dev)
{
	IADEV *iadev;
	unsigned long real_base;
	void __iomem *base;
	unsigned short command;
	int error, i;

	/* The device has been identified and registered. Now we read
	   necessary configuration info like memory base address,
	   interrupt number etc */

	IF_INIT(printk(">ia_init\n");)
	dev->ci_range.vpi_bits = 0;
	dev->ci_range.vci_bits = NR_VCI_LD;

	iadev = INPH_IA_DEV(dev);
	real_base = pci_resource_start (iadev->pci, 0);
	iadev->irq = iadev->pci->irq;

	error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
	if (error) {
		printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",
				dev->number,error);
		return -EINVAL;
	}
	IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
			dev->number, iadev->pci->revision, real_base, iadev->irq);)

	/* find mapping size of board */

	iadev->pci_map_size = pci_resource_len(iadev->pci, 0);

	/* BAR0 length identifies the board variant:
	   1 MB window -> 4K-VC board, 256 KB window -> 1K-VC board. */
	if (iadev->pci_map_size == 0x100000){
		iadev->num_vc = 4096;
		dev->ci_range.vci_bits = NR_VCI_4K_LD;
		iadev->memSize = 4;
	}
	else if (iadev->pci_map_size == 0x40000) {
		iadev->num_vc = 1024;
		iadev->memSize = 1;
	}
	else {
		printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
		return -EINVAL;
	}
	IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)

	/* enable bus mastering */
	pci_set_master(iadev->pci);

	/*
	 * Delay at least 1us before doing any mem accesses (how 'bout 10?)
	 */
	udelay(10);

	/* mapping the physical address to a virtual address in address space */
	base = ioremap(real_base,iadev->pci_map_size);  /* ioremap is not resolved ??? */

	if (!base)
	{
		printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
			    dev->number);
		return -ENOMEM;
	}
	IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
			dev->number, iadev->pci->revision, base, iadev->irq);)

	/* filling the iphase dev structure */
	iadev->mem = iadev->pci_map_size /2;
	iadev->real_base = real_base;
	iadev->base = base;

	/* Bus Interface Control Registers */
	iadev->reg = base + REG_BASE;
	/* Segmentation Control Registers */
	iadev->seg_reg = base + SEG_BASE;
	/* Reassembly Control Registers */
	iadev->reass_reg = base + REASS_BASE;
	/* Front end/ DMA control registers */
	iadev->phy = base + PHY_BASE;
	iadev->dma = base + PHY_BASE;
	/* RAM - Segmentation RAm and Reassembly RAM */
	iadev->ram = base + ACTUAL_RAM_BASE;
	iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;
	iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;

	/* lets print out the above */
	IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n",
	  iadev->reg,iadev->seg_reg,iadev->reass_reg,
	  iadev->phy, iadev->ram, iadev->seg_ram,
	  iadev->reass_ram);)

	/* lets try reading the MAC address */
	error = get_esi(dev);
	if (error) {
	  /* unwind the mapping on failure */
	  iounmap(iadev->base);
	  return error;
	}
	printk("IA: ");
	for (i=0; i < ESI_LEN; i++)
		printk("%s%02X",i ? "-" : "",dev->esi[i]);
	printk("\n");

	/* reset SAR */
	if (reset_sar(dev)) {
	   iounmap(iadev->base);
	   printk("IA: reset SAR fail, please try again\n");
	   return 1;
	}
	return 0;
}
2428
2429 static void ia_update_stats(IADEV *iadev) {
2430 if (!iadev->carrier_detect)
2431 return;
2432 iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2433 iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2434 iadev->drop_rxpkt += readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2435 iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2436 iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2437 iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2438 return;
2439 }
2440
2441 static void ia_led_timer(struct timer_list *unused) {
2442 unsigned long flags;
2443 static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2444 u_char i;
2445 static u32 ctrl_reg;
2446 for (i = 0; i < iadev_count; i++) {
2447 if (ia_dev[i]) {
2448 ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2449 if (blinking[i] == 0) {
2450 blinking[i]++;
2451 ctrl_reg &= (~CTRL_LED);
2452 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2453 ia_update_stats(ia_dev[i]);
2454 }
2455 else {
2456 blinking[i] = 0;
2457 ctrl_reg |= CTRL_LED;
2458 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2459 spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2460 if (ia_dev[i]->close_pending)
2461 wake_up(&ia_dev[i]->close_wait);
2462 ia_tx_poll(ia_dev[i]);
2463 spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2464 }
2465 }
2466 }
2467 mod_timer(&ia_timer, jiffies + HZ / 4);
2468 return;
2469 }
2470
2471 static void ia_phy_put(struct atm_dev *dev, unsigned char value,
2472 unsigned long addr)
2473 {
2474 writel(value, INPH_IA_DEV(dev)->phy+addr);
2475 }
2476
2477 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)
2478 {
2479 return readl(INPH_IA_DEV(dev)->phy+addr);
2480 }
2481
2482 static void ia_free_tx(IADEV *iadev)
2483 {
2484 int i;
2485
2486 kfree(iadev->desc_tbl);
2487 for (i = 0; i < iadev->num_vc; i++)
2488 kfree(iadev->testTable[i]);
2489 kfree(iadev->testTable);
2490 for (i = 0; i < iadev->num_tx_desc; i++) {
2491 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2492
2493 dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
2494 sizeof(*desc->cpcs), DMA_TO_DEVICE);
2495 kfree(desc->cpcs);
2496 }
2497 kfree(iadev->tx_buf);
2498 dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2499 iadev->tx_dle_dma);
2500 }
2501
2502 static void ia_free_rx(IADEV *iadev)
2503 {
2504 kfree(iadev->rx_open);
2505 dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2506 iadev->rx_dle_dma);
2507 }
2508
/*
 * ia_start - bring the adapter online: claim the IRQ, enable PCI
 * memory/master, program the bus control register, run tx_init()/
 * rx_init(), release the front-end reset and initialize whichever PHY
 * the board carries.
 *
 * Returns 0 on success or a negative errno; all acquired resources are
 * released through the goto cleanup chain on failure.
 */
static int ia_start(struct atm_dev *dev)
{
	IADEV *iadev;
	int error;
	unsigned char phy;
	u32 ctrl_reg;
	IF_EVENT(printk(">ia_start\n");)
	iadev = INPH_IA_DEV(dev);
	if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
		printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
		    dev->number, iadev->irq);
		error = -EAGAIN;
		goto err_out;
	}
	/* @@@ should release IRQ on error */
	/* enabling memory + master */
	if ((error = pci_write_config_word(iadev->pci,
				PCI_COMMAND,
				PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))
	{
		printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"
		    "master (0x%x)\n",dev->number, error);
		error = -EIO;
		goto err_free_irq;
	}
	udelay(10);

	/* Maybe we should reset the front end, initialize Bus Interface Control
	   Registers and see. */

	IF_INIT(printk("Bus ctrl reg: %08x\n",
			readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
	/* Preserve only the LED and front-end-reset bits, then enable the
	   burst sizes and unmask all interrupt sources. */
	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))
			| CTRL_B8
			| CTRL_B16
			| CTRL_B32
			| CTRL_B48
			| CTRL_B64
			| CTRL_B128
			| CTRL_ERRMASK
			| CTRL_DLETMASK		/* shud be removed l8r */
			| CTRL_DLERMASK
			| CTRL_SEGMASK
			| CTRL_REASSMASK
			| CTRL_FEMASK
			| CTRL_CSPREEMPT;

	writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);

	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
			readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));
	   printk("Bus status reg after init: %08x\n",
			readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)

	/* determine PHY variant, then set up both data directions */
	ia_hw_type(iadev);
	error = tx_init(dev);
	if (error)
		goto err_free_irq;
	error = rx_init(dev);
	if (error)
		goto err_free_tx;

	/* take the front end out of reset */
	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
			readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
	phy = 0; /* resolve compiler complaint */
	IF_INIT (
	if ((phy=ia_phy_get(dev,0)) == 0x30)
		printk("IA: pm5346,rev.%d\n",phy&0x0f);
	else
		printk("IA: utopia,rev.%0x\n",phy);)

	/* PHY-specific bring-up: 25M and DS3/E3 are driven directly,
	   everything else goes through the generic SUNI driver. */
	if (iadev->phy_type &  FE_25MBIT_PHY)
	   ia_mb25_init(iadev);
	else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
	   ia_suni_pm7345_init(iadev);
	else {
		error = suni_init(dev);
		if (error)
			goto err_free_rx;
		if (dev->phy->start) {
			error = dev->phy->start(dev);
			if (error)
				goto err_free_rx;
		}
		/* Get iadev->carrier_detect status */
		ia_frontend_intr(iadev);
	}
	return 0;

err_free_rx:
	ia_free_rx(iadev);
err_free_tx:
	ia_free_tx(iadev);
err_free_irq:
	free_irq(iadev->irq, dev);
err_out:
	return error;
}
2610
/*
 * ia_close - tear down a VC.
 *
 * For the TX direction: waits ~500ms for in-flight traffic, purges this
 * VCC's packets from the device backlog, then waits (bounded by a
 * PCR-derived timeout) for outstanding descriptors to complete before
 * resetting the per-VC software state and any CBR schedule entries.
 * For the RX direction: restores the reassembly/VC/ABR table entries
 * and drains pending receive DLEs.  Finally frees the per-VCC state.
 */
static void ia_close(struct atm_vcc *vcc)
{
	DEFINE_WAIT(wait);
	u16 *vc_table;
	IADEV *iadev;
	struct ia_vcc *ia_vcc;
	struct sk_buff *skb = NULL;
	struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
	unsigned long closetime, flags;

	iadev = INPH_IA_DEV(vcc->dev);
	ia_vcc = INPH_IA_VCC(vcc);
	if (!ia_vcc) return;

	IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n",
	                ia_vcc->vc_desc_cnt,vcc->vci);)
	clear_bit(ATM_VF_READY,&vcc->flags);
	skb_queue_head_init (&tmp_tx_backlog);
	skb_queue_head_init (&tmp_vcc_backlog);
	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		iadev->close_pending++;
		/* give in-flight transmissions ~500ms to drain */
		prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(500));
		finish_wait(&iadev->timeout_wait, &wait);
		spin_lock_irqsave(&iadev->tx_lock, flags);
		/* drop this VCC's packets from the backlog; keep others by
		   parking them on a temporary queue and re-queueing after */
		while((skb = skb_dequeue(&iadev->tx_backlog))) {
			if (ATM_SKB(skb)->vcc == vcc){
				if (vcc->pop) vcc->pop(vcc, skb);
				else dev_kfree_skb_any(skb);
			}
			else
				skb_queue_tail(&tmp_tx_backlog, skb);
		}
		while((skb = skb_dequeue(&tmp_tx_backlog)))
			skb_queue_tail(&iadev->tx_backlog, skb);
		IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);)
		/* timeout scales inversely with the VC's PCR, minimum 1 jiffy */
		closetime = 300000 / ia_vcc->pcr;
		if (closetime == 0)
			closetime = 1;
		spin_unlock_irqrestore(&iadev->tx_lock, flags);
		/* woken from ia_led_timer() once all descriptors complete */
		wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
		spin_lock_irqsave(&iadev->tx_lock, flags);
		iadev->close_pending--;
		/* reset the per-VC software rate-limit state back to UBR */
		iadev->testTable[vcc->vci]->lastTime = 0;
		iadev->testTable[vcc->vci]->fract = 0;
		iadev->testTable[vcc->vci]->vc_status = VC_UBR;
		if (vcc->qos.txtp.traffic_class == ATM_ABR) {
			if (vcc->qos.txtp.min_pcr > 0)
				iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
		}
		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			ia_vcc = INPH_IA_VCC(vcc);
			iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
			ia_cbrVc_close (vcc);
		}
		spin_unlock_irqrestore(&iadev->tx_lock, flags);
	}

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		// reset reass table
		vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
		vc_table += vcc->vci;
		*vc_table = NO_AAL5_PKT;
		// reset vc table
		vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
		vc_table += vcc->vci;
		*vc_table = (vcc->vci << 6) | 15;
		if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
			/* restore the ABR VC entry's default rate parameters */
			struct abr_vc_table __iomem *abr_vc_table =
			                (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
			abr_vc_table +=  vcc->vci;
			abr_vc_table->rdf = 0x0003;
			abr_vc_table->air = 0x5eb1;
		}
		// Drain the packets
		rx_dle_intr(vcc->dev);
		iadev->rx_open[vcc->vci] = NULL;
	}
	kfree(INPH_IA_VCC(vcc));
	ia_vcc = NULL;
	vcc->dev_data = NULL;
	clear_bit(ATM_VF_ADDR,&vcc->flags);
	return;
}
2695
2696 static int ia_open(struct atm_vcc *vcc)
2697 {
2698 struct ia_vcc *ia_vcc;
2699 int error;
2700 if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
2701 {
2702 IF_EVENT(printk("ia: not partially allocated resources\n");)
2703 vcc->dev_data = NULL;
2704 }
2705 if (vcc->vci != ATM_VPI_UNSPEC && vcc->vpi != ATM_VCI_UNSPEC)
2706 {
2707 IF_EVENT(printk("iphase open: unspec part\n");)
2708 set_bit(ATM_VF_ADDR,&vcc->flags);
2709 }
2710 if (vcc->qos.aal != ATM_AAL5)
2711 return -EINVAL;
2712 IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n",
2713 vcc->dev->number, vcc->vpi, vcc->vci);)
2714
2715 /* Device dependent initialization */
2716 ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);
2717 if (!ia_vcc) return -ENOMEM;
2718 vcc->dev_data = ia_vcc;
2719
2720 if ((error = open_rx(vcc)))
2721 {
2722 IF_EVENT(printk("iadev: error in open_rx, closing\n");)
2723 ia_close(vcc);
2724 return error;
2725 }
2726
2727 if ((error = open_tx(vcc)))
2728 {
2729 IF_EVENT(printk("iadev: error in open_tx, closing\n");)
2730 ia_close(vcc);
2731 return error;
2732 }
2733
2734 set_bit(ATM_VF_READY,&vcc->flags);
2735
2736 #if 0
2737 {
2738 static u8 first = 1;
2739 if (first) {
2740 ia_timer.expires = jiffies + 3*HZ;
2741 add_timer(&ia_timer);
2742 first = 0;
2743 }
2744 }
2745 #endif
2746 IF_EVENT(printk("ia open returning\n");)
2747 return 0;
2748 }
2749
/*
 * ia_change_qos - atmdev_ops QoS-change hook.
 * Not implemented for this hardware: the request is accepted and
 * reported as success without touching the VC.
 */
static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
{
	IF_EVENT(printk(">ia_change_qos\n");)
	return 0;
}
2755
2756 static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2757 {
2758 IA_CMDBUF ia_cmds;
2759 IADEV *iadev;
2760 int i, board;
2761 u16 __user *tmps;
2762 IF_EVENT(printk(">ia_ioctl\n");)
2763 if (cmd != IA_CMD) {
2764 if (!dev->phy->ioctl) return -EINVAL;
2765 return dev->phy->ioctl(dev,cmd,arg);
2766 }
2767 if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
2768 board = ia_cmds.status;
2769
2770 if ((board < 0) || (board > iadev_count))
2771 board = 0;
2772 board = array_index_nospec(board, iadev_count + 1);
2773
2774 iadev = ia_dev[board];
2775 switch (ia_cmds.cmd) {
2776 case MEMDUMP:
2777 {
2778 switch (ia_cmds.sub_cmd) {
2779 case MEMDUMP_SEGREG:
2780 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2781 tmps = (u16 __user *)ia_cmds.buf;
2782 for(i=0; i<0x80; i+=2, tmps++)
2783 if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2784 ia_cmds.status = 0;
2785 ia_cmds.len = 0x80;
2786 break;
2787 case MEMDUMP_REASSREG:
2788 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2789 tmps = (u16 __user *)ia_cmds.buf;
2790 for(i=0; i<0x80; i+=2, tmps++)
2791 if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2792 ia_cmds.status = 0;
2793 ia_cmds.len = 0x80;
2794 break;
2795 case MEMDUMP_FFL:
2796 {
2797 ia_regs_t *regs_local;
2798 ffredn_t *ffL;
2799 rfredn_t *rfL;
2800
2801 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2802 regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2803 if (!regs_local) return -ENOMEM;
2804 ffL = ®s_local->ffredn;
2805 rfL = ®s_local->rfredn;
2806 /* Copy real rfred registers into the local copy */
2807 for (i=0; i<(sizeof (rfredn_t))/4; i++)
2808 ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2809 /* Copy real ffred registers into the local copy */
2810 for (i=0; i<(sizeof (ffredn_t))/4; i++)
2811 ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2812
2813 if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2814 kfree(regs_local);
2815 return -EFAULT;
2816 }
2817 kfree(regs_local);
2818 printk("Board %d registers dumped\n", board);
2819 ia_cmds.status = 0;
2820 }
2821 break;
2822 case READ_REG:
2823 {
2824 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2825 desc_dbg(iadev);
2826 ia_cmds.status = 0;
2827 }
2828 break;
2829 case 0x6:
2830 {
2831 ia_cmds.status = 0;
2832 printk("skb = 0x%p\n", skb_peek(&iadev->tx_backlog));
2833 printk("rtn_q: 0x%p\n",ia_deque_rtn_q(&iadev->tx_return_q));
2834 }
2835 break;
2836 case 0x8:
2837 {
2838 struct k_sonet_stats *stats;
2839 stats = &PRIV(_ia_dev[board])->sonet_stats;
2840 printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2841 printk("line_bip : %d\n", atomic_read(&stats->line_bip));
2842 printk("path_bip : %d\n", atomic_read(&stats->path_bip));
2843 printk("line_febe : %d\n", atomic_read(&stats->line_febe));
2844 printk("path_febe : %d\n", atomic_read(&stats->path_febe));
2845 printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
2846 printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2847 printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
2848 printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
2849 }
2850 ia_cmds.status = 0;
2851 break;
2852 case 0x9:
2853 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2854 for (i = 1; i <= iadev->num_rx_desc; i++)
2855 free_desc(_ia_dev[board], i);
2856 writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD),
2857 iadev->reass_reg+REASS_MASK_REG);
2858 iadev->rxing = 1;
2859
2860 ia_cmds.status = 0;
2861 break;
2862
2863 case 0xb:
2864 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2865 ia_frontend_intr(iadev);
2866 break;
2867 case 0xa:
2868 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2869 {
2870 ia_cmds.status = 0;
2871 IADebugFlag = ia_cmds.maddr;
2872 printk("New debug option loaded\n");
2873 }
2874 break;
2875 default:
2876 ia_cmds.status = 0;
2877 break;
2878 }
2879 }
2880 break;
2881 default:
2882 break;
2883
2884 }
2885 return 0;
2886 }
2887
2888 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2889 IADEV *iadev;
2890 struct dle *wr_ptr;
2891 struct tx_buf_desc __iomem *buf_desc_ptr;
2892 int desc;
2893 int comp_code;
2894 int total_len;
2895 struct cpcs_trailer *trailer;
2896 struct ia_vcc *iavcc;
2897
2898 iadev = INPH_IA_DEV(vcc->dev);
2899 iavcc = INPH_IA_VCC(vcc);
2900 if (!iavcc->txing) {
2901 printk("discard packet on closed VC\n");
2902 if (vcc->pop)
2903 vcc->pop(vcc, skb);
2904 else
2905 dev_kfree_skb_any(skb);
2906 return 0;
2907 }
2908
2909 if (skb->len > iadev->tx_buf_sz - 8) {
2910 printk("Transmit size over tx buffer size\n");
2911 if (vcc->pop)
2912 vcc->pop(vcc, skb);
2913 else
2914 dev_kfree_skb_any(skb);
2915 return 0;
2916 }
2917 if ((unsigned long)skb->data & 3) {
2918 printk("Misaligned SKB\n");
2919 if (vcc->pop)
2920 vcc->pop(vcc, skb);
2921 else
2922 dev_kfree_skb_any(skb);
2923 return 0;
2924 }
2925 /* Get a descriptor number from our free descriptor queue
2926 We get the descr number from the TCQ now, since I am using
2927 the TCQ as a free buffer queue. Initially TCQ will be
2928 initialized with all the descriptors and is hence, full.
2929 */
2930 desc = get_desc (iadev, iavcc);
2931 if (desc == 0xffff)
2932 return 1;
2933 comp_code = desc >> 13;
2934 desc &= 0x1fff;
2935
2936 if ((desc == 0) || (desc > iadev->num_tx_desc))
2937 {
2938 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
2939 atomic_inc(&vcc->stats->tx);
2940 if (vcc->pop)
2941 vcc->pop(vcc, skb);
2942 else
2943 dev_kfree_skb_any(skb);
2944 return 0; /* return SUCCESS */
2945 }
2946
2947 if (comp_code)
2948 {
2949 IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n",
2950 desc, comp_code);)
2951 }
2952
2953 /* remember the desc and vcc mapping */
2954 iavcc->vc_desc_cnt++;
2955 iadev->desc_tbl[desc-1].iavcc = iavcc;
2956 iadev->desc_tbl[desc-1].txskb = skb;
2957 IA_SKB_STATE(skb) = 0;
2958
2959 iadev->ffL.tcq_rd += 2;
2960 if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2961 iadev->ffL.tcq_rd = iadev->ffL.tcq_st;
2962 writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
2963
2964 /* Put the descriptor number in the packet ready queue
2965 and put the updated write pointer in the DLE field
2966 */
2967 *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc;
2968
2969 iadev->ffL.prq_wr += 2;
2970 if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2971 iadev->ffL.prq_wr = iadev->ffL.prq_st;
2972
2973 /* Figure out the exact length of the packet and padding required to
2974 make it aligned on a 48 byte boundary. */
2975 total_len = skb->len + sizeof(struct cpcs_trailer);
2976 total_len = ((total_len + 47) / 48) * 48;
2977 IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)
2978
2979 /* Put the packet in a tx buffer */
2980 trailer = iadev->tx_buf[desc-1].cpcs;
2981 IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
2982 skb, skb->data, skb->len, desc);)
2983 trailer->control = 0;
2984 /*big endian*/
2985 trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
2986 trailer->crc32 = 0; /* not needed - dummy bytes */
2987
2988 /* Display the packet */
2989 IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n",
2990 skb->len, tcnter++);
2991 xdump(skb->data, skb->len, "TX: ");
2992 printk("\n");)
2993
2994 /* Build the buffer descriptor */
2995 buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
2996 buf_desc_ptr += desc; /* points to the corresponding entry */
2997 buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;
2998 /* Huh ? p.115 of users guide describes this as a read-only register */
2999 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
3000 buf_desc_ptr->vc_index = vcc->vci;
3001 buf_desc_ptr->bytes = total_len;
3002
3003 if (vcc->qos.txtp.traffic_class == ATM_ABR)
3004 clear_lockup (vcc, iadev);
3005
3006 /* Build the DLE structure */
3007 wr_ptr = iadev->tx_dle_q.write;
3008 memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));
3009 wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
3010 skb->len, DMA_TO_DEVICE);
3011 wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
3012 buf_desc_ptr->buf_start_lo;
3013 /* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
3014 wr_ptr->bytes = skb->len;
3015
3016 /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
3017 if ((wr_ptr->bytes >> 2) == 0xb)
3018 wr_ptr->bytes = 0x30;
3019
3020 wr_ptr->mode = TX_DLE_PSI;
3021 wr_ptr->prq_wr_ptr_data = 0;
3022
3023 /* end is not to be used for the DLE q */
3024 if (++wr_ptr == iadev->tx_dle_q.end)
3025 wr_ptr = iadev->tx_dle_q.start;
3026
3027 /* Build trailer dle */
3028 wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3029 wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) |
3030 buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3031
3032 wr_ptr->bytes = sizeof(struct cpcs_trailer);
3033 wr_ptr->mode = DMA_INT_ENABLE;
3034 wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
3035
3036 /* end is not to be used for the DLE q */
3037 if (++wr_ptr == iadev->tx_dle_q.end)
3038 wr_ptr = iadev->tx_dle_q.start;
3039
3040 iadev->tx_dle_q.write = wr_ptr;
3041 ATM_DESC(skb) = vcc->vci;
3042 skb_queue_tail(&iadev->tx_dma_q, skb);
3043
3044 atomic_inc(&vcc->stats->tx);
3045 iadev->tx_pkt_cnt++;
3046 /* Increment transaction counter */
3047 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
3048
3049 #if 0
3050 /* add flow control logic */
3051 if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3052 if (iavcc->vc_desc_cnt > 10) {
3053 vcc->tx_quota = vcc->tx_quota * 3 / 4;
3054 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3055 iavcc->flow_inc = -1;
3056 iavcc->saved_tx_quota = vcc->tx_quota;
3057 } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3058 // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3059 printk("Tx2: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3060 iavcc->flow_inc = 0;
3061 }
3062 }
3063 #endif
3064 IF_TX(printk("ia send done\n");)
3065 return 0;
3066 }
3067
3068 static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3069 {
3070 IADEV *iadev;
3071 unsigned long flags;
3072
3073 iadev = INPH_IA_DEV(vcc->dev);
3074 if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3075 {
3076 if (!skb)
3077 printk(KERN_CRIT "null skb in ia_send\n");
3078 else dev_kfree_skb_any(skb);
3079 return -EINVAL;
3080 }
3081 spin_lock_irqsave(&iadev->tx_lock, flags);
3082 if (!test_bit(ATM_VF_READY,&vcc->flags)){
3083 dev_kfree_skb_any(skb);
3084 spin_unlock_irqrestore(&iadev->tx_lock, flags);
3085 return -EINVAL;
3086 }
3087 ATM_SKB(skb)->vcc = vcc;
3088
3089 if (skb_peek(&iadev->tx_backlog)) {
3090 skb_queue_tail(&iadev->tx_backlog, skb);
3091 }
3092 else {
3093 if (ia_pkt_tx (vcc, skb)) {
3094 skb_queue_tail(&iadev->tx_backlog, skb);
3095 }
3096 }
3097 spin_unlock_irqrestore(&iadev->tx_lock, flags);
3098 return 0;
3099
3100 }
3101
3102 static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3103 {
3104 int left = *pos, n;
3105 char *tmpPtr;
3106 IADEV *iadev = INPH_IA_DEV(dev);
3107 if(!left--) {
3108 if (iadev->phy_type == FE_25MBIT_PHY) {
3109 n = sprintf(page, " Board Type : Iphase5525-1KVC-128K\n");
3110 return n;
3111 }
3112 if (iadev->phy_type == FE_DS3_PHY)
3113 n = sprintf(page, " Board Type : Iphase-ATM-DS3");
3114 else if (iadev->phy_type == FE_E3_PHY)
3115 n = sprintf(page, " Board Type : Iphase-ATM-E3");
3116 else if (iadev->phy_type == FE_UTP_OPTION)
3117 n = sprintf(page, " Board Type : Iphase-ATM-UTP155");
3118 else
3119 n = sprintf(page, " Board Type : Iphase-ATM-OC3");
3120 tmpPtr = page + n;
3121 if (iadev->pci_map_size == 0x40000)
3122 n += sprintf(tmpPtr, "-1KVC-");
3123 else
3124 n += sprintf(tmpPtr, "-4KVC-");
3125 tmpPtr = page + n;
3126 if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3127 n += sprintf(tmpPtr, "1M \n");
3128 else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3129 n += sprintf(tmpPtr, "512K\n");
3130 else
3131 n += sprintf(tmpPtr, "128K\n");
3132 return n;
3133 }
3134 if (!left) {
3135 return sprintf(page, " Number of Tx Buffer: %u\n"
3136 " Size of Tx Buffer : %u\n"
3137 " Number of Rx Buffer: %u\n"
3138 " Size of Rx Buffer : %u\n"
3139 " Packets Received : %u\n"
3140 " Packets Transmitted: %u\n"
3141 " Cells Received : %u\n"
3142 " Cells Transmitted : %u\n"
3143 " Board Dropped Cells: %u\n"
3144 " Board Dropped Pkts : %u\n",
3145 iadev->num_tx_desc, iadev->tx_buf_sz,
3146 iadev->num_rx_desc, iadev->rx_buf_sz,
3147 iadev->rx_pkt_cnt, iadev->tx_pkt_cnt,
3148 iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3149 iadev->drop_rxcell, iadev->drop_rxpkt);
3150 }
3151 return 0;
3152 }
3153
/* Operations exported to the ATM core; registered via atm_dev_register()
 * in ia_init_one(). Callbacks not listed (e.g. .getsockopt) are left
 * NULL and handled by the core's defaults. */
static const struct atmdev_ops ops = {
	.open = ia_open,
	.close = ia_close,
	.ioctl = ia_ioctl,
	.send = ia_send,
	.phy_put = ia_phy_put,
	.phy_get = ia_phy_get,
	.change_qos = ia_change_qos,
	.proc_read = ia_proc_read,
	.owner = THIS_MODULE,
};
3165
3166 static int ia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3167 {
3168 struct atm_dev *dev;
3169 IADEV *iadev;
3170 int ret;
3171
3172 iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
3173 if (!iadev) {
3174 ret = -ENOMEM;
3175 goto err_out;
3176 }
3177
3178 iadev->pci = pdev;
3179
3180 IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3181 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3182 if (pci_enable_device(pdev)) {
3183 ret = -ENODEV;
3184 goto err_out_free_iadev;
3185 }
3186 dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL);
3187 if (!dev) {
3188 ret = -ENOMEM;
3189 goto err_out_disable_dev;
3190 }
3191 dev->dev_data = iadev;
3192 IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
3193 IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
3194 iadev->LineRate);)
3195
3196 pci_set_drvdata(pdev, dev);
3197
3198 ia_dev[iadev_count] = iadev;
3199 _ia_dev[iadev_count] = dev;
3200 iadev_count++;
3201 if (ia_init(dev) || ia_start(dev)) {
3202 IF_INIT(printk("IA register failed!\n");)
3203 iadev_count--;
3204 ia_dev[iadev_count] = NULL;
3205 _ia_dev[iadev_count] = NULL;
3206 ret = -EINVAL;
3207 goto err_out_deregister_dev;
3208 }
3209 IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3210
3211 iadev->next_board = ia_boards;
3212 ia_boards = dev;
3213
3214 return 0;
3215
3216 err_out_deregister_dev:
3217 atm_dev_deregister(dev);
3218 err_out_disable_dev:
3219 pci_disable_device(pdev);
3220 err_out_free_iadev:
3221 kfree(iadev);
3222 err_out:
3223 return ret;
3224 }
3225
/*
 * ia_remove_one - PCI remove callback.
 *
 * Tears down one adapter in the reverse order of ia_init_one()/ia_start():
 * quiesce the PHY first so no further interrupts fire, then detach from
 * the ATM core, then release hardware and memory resources. The sequence
 * below is order-dependent; do not reorder.
 */
static void ia_remove_one(struct pci_dev *pdev)
{
	struct atm_dev *dev = pci_get_drvdata(pdev);
	IADEV *iadev = INPH_IA_DEV(dev);

	/* Disable phy interrupts (mask the SUNI loss-of-signal source). */
	ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
		SUNI_RSOP_CIE);
	udelay(1);

	if (dev->phy && dev->phy->stop)
		dev->phy->stop(dev);

	/* De-register device */
	free_irq(iadev->irq, dev);
	/* Shrink the module-global adapter tables; NOTE(review): this
	 * assumes this adapter occupies the last slot — confirm against
	 * how ia_init_one() fills ia_dev[]/ _ia_dev[]. */
	iadev_count--;
	ia_dev[iadev_count] = NULL;
	_ia_dev[iadev_count] = NULL;
	IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
	atm_dev_deregister(dev);

	/* Release MMIO mapping and the PCI device itself. */
	iounmap(iadev->base);
	pci_disable_device(pdev);

	/* Free rx/tx DMA buffers and descriptor bookkeeping. */
	ia_free_rx(iadev);
	ia_free_tx(iadev);

	kfree(iadev);
}
3255
3256 static const struct pci_device_id ia_pci_tbl[] = {
3257 { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3258 { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3259 { 0,}
3260 };
3261 MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3262
/* PCI driver glue: binds ia_init_one()/ia_remove_one() to the device
 * IDs in ia_pci_tbl; registered from ia_module_init(). */
static struct pci_driver ia_driver = {
	.name =         DEV_LABEL,
	.id_table =     ia_pci_tbl,
	.probe =        ia_init_one,
	.remove =       ia_remove_one,
};
3269
3270 static int __init ia_module_init(void)
3271 {
3272 int ret;
3273
3274 ret = pci_register_driver(&ia_driver);
3275 if (ret >= 0) {
3276 ia_timer.expires = jiffies + 3*HZ;
3277 add_timer(&ia_timer);
3278 } else
3279 printk(KERN_ERR DEV_LABEL ": no adapter found\n");
3280 return ret;
3281 }
3282
/*
 * ia_module_exit - module exit point.
 *
 * Unregisters the PCI driver (detaching all adapters via
 * ia_remove_one()) and then synchronously stops the housekeeping
 * timer, waiting for any in-flight timer handler to finish.
 */
static void __exit ia_module_exit(void)
{
	pci_unregister_driver(&ia_driver);

	del_timer_sync(&ia_timer);
}
3289
/* Hook the module entry/exit points into the kernel module loader. */
module_init(ia_module_init);
module_exit(ia_module_exit);
3292