• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /******************************************************************************
2          iphase.c: Device driver for Interphase ATM PCI adapter cards
3                     Author: Peter Wang  <pwang@iphase.com>
4 		   Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5                    Interphase Corporation  <www.iphase.com>
6                                Version: 1.0
7 *******************************************************************************
8 
9       This software may be used and distributed according to the terms
10       of the GNU General Public License (GPL), incorporated herein by reference.
11       Drivers based on this skeleton fall under the GPL and must retain
12       the authorship (implicit copyright) notice.
13 
14       This program is distributed in the hope that it will be useful, but
15       WITHOUT ANY WARRANTY; without even the implied warranty of
16       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17       General Public License for more details.
18 
19       Modified from an incomplete driver for Interphase 5575 1KVC 1M card which
20       was originally written by Monalisa Agrawal at UNH. Now this driver
21       supports a variety of variants of Interphase ATM PCI (i)Chip adapter
22       card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM)
23       in terms of PHY type, the size of control memory and the size of
24       packet memory. The following are the change log and history:
25 
26           Bugfix the Mona's UBR driver.
27           Modify the basic memory allocation and dma logic.
28           Port the driver to the latest kernel from 2.0.46.
29           Complete the ABR logic of the driver, and added the ABR work-
30               around for the hardware anomalies.
31           Add the CBR support.
32 	  Add the flow control logic to the driver to allow rate-limit VC.
33           Add 4K VC support to the board with 512K control memory.
34           Add the support of all the variants of the Interphase ATM PCI
35           (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36           (25M UTP25) and x531 (DS3 and E3).
37           Add SMP support.
38 
39       Support and updates available at: ftp://ftp.iphase.com/pub/atm
40 
41 *******************************************************************************/
42 
43 #include <linux/module.h>
44 #include <linux/kernel.h>
45 #include <linux/mm.h>
46 #include <linux/pci.h>
47 #include <linux/errno.h>
48 #include <linux/atm.h>
49 #include <linux/atmdev.h>
50 #include <linux/ctype.h>
51 #include <linux/sonet.h>
52 #include <linux/skbuff.h>
53 #include <linux/time.h>
54 #include <linux/delay.h>
55 #include <linux/uio.h>
56 #include <linux/init.h>
57 #include <linux/interrupt.h>
58 #include <linux/wait.h>
59 #include <linux/slab.h>
60 #include <asm/io.h>
61 #include <linux/atomic.h>
62 #include <linux/uaccess.h>
63 #include <asm/string.h>
64 #include <asm/byteorder.h>
65 #include <linux/vmalloc.h>
66 #include <linux/jiffies.h>
67 #include <linux/nospec.h>
68 #include "iphase.h"
69 #include "suni.h"
/* Swap the two bytes of a 16-bit value.  Every use of the argument is
 * parenthesized so the macro is safe when invoked with a compound
 * expression such as swap_byte_order(a | b). */
#define swap_byte_order(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))
71 
72 #define PRIV(dev) ((struct suni_priv *) dev->phy_data)
73 
74 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
75 static void desc_dbg(IADEV *iadev);
76 
77 static IADEV *ia_dev[8];
78 static struct atm_dev *_ia_dev[8];
79 static int iadev_count;
80 static void ia_led_timer(struct timer_list *unused);
81 static DEFINE_TIMER(ia_timer, ia_led_timer);
82 static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
83 static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
84 static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
85             |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0;
86 
87 module_param(IA_TX_BUF, int, 0);
88 module_param(IA_TX_BUF_SZ, int, 0);
89 module_param(IA_RX_BUF, int, 0);
90 module_param(IA_RX_BUF_SZ, int, 0);
91 module_param(IADebugFlag, uint, 0644);
92 
93 MODULE_LICENSE("GPL");
94 
95 /**************************** IA_LIB **********************************/
96 
/* Reset a transmit-return queue to the empty state (no head, no tail). */
static void ia_init_rtn_q (IARTN_Q *que)
{
   que->tail = NULL;
   que->next = NULL;
}
102 
/* Push @data onto the head of return queue @que; an empty queue makes
 * @data both head and tail. */
static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data)
{
   data->next = NULL;
   if (que->next) {
      data->next = que->next;
      que->next = data;
   } else {
      que->next = que->tail = data;
   }
}
114 
/* Append a copy of @data at the tail of @que.  The node is allocated
 * with GFP_ATOMIC so this is callable from interrupt context.
 * Returns 1 on success or -ENOMEM on allocation failure. */
static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
   IARTN_Q *node = kmalloc(sizeof(*node), GFP_ATOMIC);

   if (!node)
      return -ENOMEM;
   node->data = data;
   node->next = NULL;
   if (!que->next) {
      que->next = que->tail = node;
   } else {
      que->tail->next = node;
      que->tail = node;
   }
   return 1;
}
129 
/* Pop and return the head element of @que, or NULL when the queue is
 * empty.  The caller owns the returned node and must kfree() it. */
static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
   IARTN_Q *head = que->next;

   if (!head)
      return NULL;
   if (head == que->tail)
      que->next = que->tail = NULL;
   else
      que->next = head->next;
   return head;
}
141 
/*
 * Walk the Transmit Complete Queue from the host's read position up to
 * the hardware write pointer, reclaiming descriptors the adapter has
 * finished with.  Completed descriptors of rate-limited VCs are pushed
 * onto dev->tx_return_q for ia_tx_poll() to release.
 *
 * Bug fix: the original code executed "continue" when the descriptor's
 * iavcc back-pointer was NULL, skipping the host_tcq_wr advance at the
 * bottom of the loop and spinning forever on the same TCQ slot.  We now
 * log the error and still advance past the bad slot.
 */
static void ia_hack_tcq(IADEV *dev) {

  u_short 		desc1;
  u_short		tcq_wr;
  struct ia_vcc         *iavcc_r = NULL;

  tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
  while (dev->host_tcq_wr != tcq_wr) {
     desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
     if (!desc1) ;   /* empty slot - nothing to reclaim */
     else if (!dev->desc_tbl[desc1 -1].timestamp) {
        /* descriptor already recovered elsewhere - just clear the slot */
        IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
        *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
     }
     else if (dev->desc_tbl[desc1 -1].timestamp) {
        if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) {
           /* do NOT "continue" here - fall through so the read pointer
            * still advances below, otherwise we loop forever */
           printk("IA: Fatal err in get_desc\n");
        }
        else {
           iavcc_r->vc_desc_cnt--;
           dev->desc_tbl[desc1 -1].timestamp = 0;
           IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
                                      dev->desc_tbl[desc1 -1].txskb, desc1);)
           if (iavcc_r->pcr < dev->rate_limit) {
              IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
              if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
                 printk("ia_hack_tcq: No memory available\n");
           }
           dev->desc_tbl[desc1 -1].iavcc = NULL;
           dev->desc_tbl[desc1 -1].txskb = NULL;
        }
     }
     /* advance the host read pointer, wrapping at the end of the TCQ */
     dev->host_tcq_wr += 2;
     if (dev->host_tcq_wr > dev->ffL.tcq_ed)
        dev->host_tcq_wr = dev->ffL.tcq_st;
  }
} /* ia_hack_tcq */
178 
/*
 * Allocate a free transmit descriptor number from the TCQ.
 *
 * First reclaims completed descriptors via ia_hack_tcq().  At most once
 * every 50 jiffies (or whenever the TCQ looks empty) it also scans the
 * whole descriptor table and force-recovers descriptors whose per-VC
 * timeout has expired, parking their numbers back in the TCQ.
 *
 * Returns the 1-based descriptor number, or 0xFFFF when none is free.
 * NOTE(review): the static "timer" is shared across all adapters and is
 * not protected by a lock - presumably benign, but confirm for SMP.
 */
static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
  u_short 		desc_num, i;
  struct ia_vcc         *iavcc_r = NULL;
  unsigned long delta;
  static unsigned long timer = 0;   /* jiffies of the last recovery scan */
  int ltimeout;

  ia_hack_tcq (dev);
  if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
     timer = jiffies;
     i=0;
     while (i < dev->num_tx_desc) {
        if (!dev->desc_tbl[i].timestamp) {
           i++;
           continue;
        }
        ltimeout = dev->desc_tbl[i].iavcc->ltimeout;
        delta = jiffies - dev->desc_tbl[i].timestamp;
        if (delta >= ltimeout) {
           IF_ABR(printk("RECOVER run!! desc_tbl %d = %d  delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
           /* back the TCQ read pointer up one slot (wrapping) and park
            * the recovered descriptor number there */
           if (dev->ffL.tcq_rd == dev->ffL.tcq_st)
              dev->ffL.tcq_rd =  dev->ffL.tcq_ed;
           else
              dev->ffL.tcq_rd -= 2;
           *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
           if (!dev->desc_tbl[i].txskb || !(iavcc_r = dev->desc_tbl[i].iavcc))
              printk("Fatal err, desc table vcc or skb is NULL\n");
           else
              iavcc_r->vc_desc_cnt--;
           dev->desc_tbl[i].timestamp = 0;
           dev->desc_tbl[i].iavcc = NULL;
           dev->desc_tbl[i].txskb = NULL;
        }
        i++;
     } /* while */
  }
  if (dev->ffL.tcq_rd == dev->host_tcq_wr)
     return 0xFFFF;   /* TCQ empty - no descriptor available */

  /* Get the next available descriptor number from TCQ */
  desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);

  /* skip zero entries and descriptors still in flight (nonzero
   * timestamp), advancing and wrapping the read pointer */
  while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
     dev->ffL.tcq_rd += 2;
     if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
	dev->ffL.tcq_rd = dev->ffL.tcq_st;
     if (dev->ffL.tcq_rd == dev->host_tcq_wr)
        return 0xFFFF;
     desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
  }

  /* get system time */
  dev->desc_tbl[desc_num -1].timestamp = jiffies;
  return desc_num;
}
234 
/*
 * Work around a hardware anomaly where an ABR VC's segmentation engine
 * locks up.  Every 5th call per VC the transmit state is sampled; a VC
 * is considered locked up when it still has a last descriptor pending
 * and either stays in ABR_STATE across a 10us delay or its schedule
 * slot/fraction registers have not moved since the previous sample.
 * Recovery: halt the SEG engine, force the VC back to idle, re-insert
 * it into the ABR schedule table, and restart the engine.
 */
static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
  u_char          	foundLockUp;
  vcstatus_t		*vcstatus;
  u_short               *shd_tbl;
  u_short               tempCellSlot, tempFract;
  struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
  struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
  u_int  i;

  if (vcc->qos.txtp.traffic_class == ATM_ABR) {
     vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
     vcstatus->cnt++;
     foundLockUp = 0;
     if( vcstatus->cnt == 0x05 ) {   /* sample only every 5th invocation */
        abr_vc += vcc->vci;
	eabr_vc += vcc->vci;
	if( eabr_vc->last_desc ) {
	   if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
              /* Wait for 10 Micro sec */
              udelay(10);
              /* still stuck in the same state after the delay -> lockup */
	      if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
		 foundLockUp = 1;
           }
	   else {
	      /* compare schedule position against the previous sample;
	       * no movement means the VC has stalled */
	      tempCellSlot = abr_vc->last_cell_slot;
              tempFract    = abr_vc->fraction;
              if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
                         && (tempFract == dev->testTable[vcc->vci]->fract))
	         foundLockUp = 1;
              dev->testTable[vcc->vci]->lastTime = tempCellSlot;
              dev->testTable[vcc->vci]->fract = tempFract;
	   }
        } /* last descriptor */
        vcstatus->cnt = 0;
     } /* vcstatus->cnt */

     if (foundLockUp) {
        IF_ABR(printk("LOCK UP found\n");)
        /* halt the segmentation engine before touching VC state */
	writew(0xFFFD, dev->seg_reg+MODE_REG_0);
        /* Wait for 10 Micro sec */
        udelay(10);
        abr_vc->status &= 0xFFF8;
        abr_vc->status |= 0x0001;  /* state is idle */
        /* find the first free slot in the ABR schedule table and
         * re-insert this VCI so it gets serviced again */
	shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;
	for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
	if (i < dev->num_vc)
           shd_tbl[i] = vcc->vci;
        else
           IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
        /* restart the engine and re-arm transmit-done interrupts */
        writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
        writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
        writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);
	vcstatus->cnt = 0;
     } /* foundLockUp */

  } /* if an ABR VC */


}
294 
295 /*
296 ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
297 **
298 **  +----+----+------------------+-------------------------------+
299 **  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
300 **  +----+----+------------------+-------------------------------+
301 **
302 **    R = reserved (written as 0)
303 **    NZ = 0 if 0 cells/sec; 1 otherwise
304 **
305 **    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
306 */
307 static u16
cellrate_to_float(u32 cr)308 cellrate_to_float(u32 cr)
309 {
310 
311 #define	NZ 		0x4000
312 #define	M_BITS		9		/* Number of bits in mantissa */
313 #define	E_BITS		5		/* Number of bits in exponent */
314 #define	M_MASK		0x1ff
315 #define	E_MASK		0x1f
316   u16   flot;
317   u32	tmp = cr & 0x00ffffff;
318   int 	i   = 0;
319   if (cr == 0)
320      return 0;
321   while (tmp != 1) {
322      tmp >>= 1;
323      i++;
324   }
325   if (i == M_BITS)
326      flot = NZ | (i << M_BITS) | (cr & M_MASK);
327   else if (i < M_BITS)
328      flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
329   else
330      flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
331   return flot;
332 }
333 
#if 0
/*
** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
** (Dead code: compiled out, kept for reference.  Inverse of
** cellrate_to_float(); note the "cps = cps" branch is a no-op.)
*/
static u32
float_to_cellrate(u16 rate)
{
  u32   exp, mantissa, cps;
  if ((rate & NZ) == 0)
     return 0;
  exp = (rate >> M_BITS) & E_MASK;
  mantissa = rate & M_MASK;
  if (exp == 0)
     return 1;
  /* restore the implicit leading 1 of the mantissa */
  cps = (1 << M_BITS) | mantissa;
  if (exp == M_BITS)
     cps = cps;
  else if (exp > M_BITS)
     cps <<= (exp - M_BITS);
  else
     cps >>= (M_BITS - exp);
  return cps;
}
#endif
358 
/* Populate @srv_p with the driver's default ABR service parameters,
 * using the adapter's full line rate as the peak cell rate and no
 * guaranteed minimum. */
static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
  srv_p->class_type = ATM_ABR;
  srv_p->pcr  = dev->LineRate;	/* peak cell rate = line rate */
  srv_p->mcr  = 0;		/* minimum cell rate */
  srv_p->icr  = 0x055cb7;	/* initial cell rate */
  srv_p->tbe  = 0xffffff;	/* transient buffer exposure */
  srv_p->frtt = 0x3a;		/* fixed round-trip time */
  srv_p->rif  = 0xf;		/* rate increase factor */
  srv_p->rdf  = 0xb;		/* rate decrease factor */
  srv_p->nrm  = 0x4;
  srv_p->trm  = 0x7;
  srv_p->cdf  = 0x3;		/* cutoff decrease factor */
  srv_p->adtf = 50;		/* ACR decrease time factor */
}
373 
/*
 * Program the adapter's per-VC ABR tables for @vcc.
 *
 * flag == 1: initialize the forward (FFRED / segmentation) VC entry
 *            from the service parameters in @srv_p.
 * flag == 0: initialize the reverse (RFRED / reassembly) VC entry and
 *            account the MCR against the adapter totals.
 * Returns 0 (the parameter sanity checks are compiled out).
 */
static int
ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p,
                                                struct atm_vcc *vcc, u8 flag)
{
  f_vc_abr_entry  *f_abr_vc;
  r_vc_abr_entry  *r_abr_vc;
  u32		icr;
  u8		trm, nrm, crm;
  u16		adtf, air, *ptr16;
  f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
  f_abr_vc += vcc->vci;
  switch (flag) {
     case 1: /* FFRED initialization */
#if 0  /* sanity check */
       if (srv_p->pcr == 0)
          return INVALID_PCR;
       if (srv_p->pcr > dev->LineRate)
          srv_p->pcr = dev->LineRate;
       if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
	  return MCR_UNAVAILABLE;
       if (srv_p->mcr > srv_p->pcr)
	  return INVALID_MCR;
       if (!(srv_p->icr))
	  srv_p->icr = srv_p->pcr;
       if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
	  return INVALID_ICR;
       if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
	  return INVALID_TBE;
       if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
	  return INVALID_FRTT;
       if (srv_p->nrm > MAX_NRM)
	  return INVALID_NRM;
       if (srv_p->trm > MAX_TRM)
	  return INVALID_TRM;
       if (srv_p->adtf > MAX_ADTF)
          return INVALID_ADTF;
       else if (srv_p->adtf == 0)
	  srv_p->adtf = 1;
       if (srv_p->cdf > MAX_CDF)
	  return INVALID_CDF;
       if (srv_p->rif > MAX_RIF)
	  return INVALID_RIF;
       if (srv_p->rdf > MAX_RDF)
	  return INVALID_RDF;
#endif
       memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
       f_abr_vc->f_vc_type = ABR;
       nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
			          /* i.e 2**n = 2 << (n-1) */
       f_abr_vc->f_nrm = nrm << 8 | nrm;
       trm = 100000/(2 << (16 - srv_p->trm));
       if ( trm == 0) trm = 1;
       f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
       crm = srv_p->tbe / nrm;
       if (crm == 0) crm = 1;
       f_abr_vc->f_crm = crm & 0xff;
       f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
       /* ICR is capped at TBE/FRTT cells/sec (FRTT is in microseconds,
        * hence the 1000000 scaling in both divide orders) */
       icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
				((srv_p->tbe/srv_p->frtt)*1000000) :
				(1000000/(srv_p->frtt/srv_p->tbe)));
       f_abr_vc->f_icr = cellrate_to_float(icr);
       adtf = (10000 * srv_p->adtf)/8192;
       if (adtf == 0) adtf = 1;
       f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
       f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
       f_abr_vc->f_acr = f_abr_vc->f_icr;   /* start at the initial rate */
       f_abr_vc->f_status = 0x0042;
       break;
    case 0: /* RFRED initialization */
       /* mark the reassembly-table entry for this VCI as ABR */
       ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize);
       *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
       r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
       r_abr_vc += vcc->vci;
       r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
       air = srv_p->pcr << (15 - srv_p->rif);
       if (air == 0) air = 1;
       r_abr_vc->r_air = cellrate_to_float(air);
       dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
       dev->sum_mcr	   += srv_p->mcr;
       dev->n_abr++;
       break;
    default:
       break;
  }
  return	0;
}
/*
 * Reserve CBR bandwidth for @vcc and spread its entries as evenly as
 * possible through the CBR schedule table.
 *
 * The requested PCR is rounded to a whole number of table entries
 * (granularity dev->Granularity cells/sec per entry).  Each entry is
 * placed at the ideal equidistant slot; if that slot is taken, the
 * nearest free slot (searched alternately below/above) is used.
 * Enables the CBR scheduler on the first active CBR VC.
 *
 * Returns 0 on success, -1 if no PCR was given, -EBUSY if the table
 * has insufficient free entries.
 */
static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
   u32 rateLow=0, rateHigh, rate;
   int entries;
   struct ia_vcc *ia_vcc;

   int   idealSlot =0, testSlot, toBeAssigned, inc;
   u32   spacing;
   u16  *SchedTbl, *TstSchedTbl;
   u16  cbrVC, vcIndex;
   u32   fracSlot    = 0;
   u32   sp_mod      = 0;
   u32   sp_mod2     = 0;

   /* IpAdjustTrafficParams */
   if (vcc->qos.txtp.max_pcr <= 0) {
      IF_ERR(printk("PCR for CBR not defined\n");)
      return -1;
   }
   rate = vcc->qos.txtp.max_pcr;
   entries = rate / dev->Granularity;
   IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
                                entries, rate, dev->Granularity);)
   if (entries < 1)
      IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
   /* round up when the requested rate is closer to the next multiple
    * of the granularity (biased 3:1 towards rounding up) */
   rateLow  =  entries * dev->Granularity;
   rateHigh = (entries + 1) * dev->Granularity;
   if (3*(rate - rateLow) > (rateHigh - rate))
      entries++;
   if (entries > dev->CbrRemEntries) {
      IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
      IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
                                       entries, dev->CbrRemEntries);)
      return -EBUSY;
   }

   ia_vcc = INPH_IA_VCC(vcc);
   ia_vcc->NumCbrEntry = entries;
   dev->sum_mcr += entries * dev->Granularity;
   /* IaFFrednInsertCbrSched */
   // Starting at an arbitrary location, place the entries into the table
   // as smoothly as possible
   cbrVC   = 0;
   spacing = dev->CbrTotEntries / entries;
   sp_mod  = dev->CbrTotEntries % entries; // get modulo
   toBeAssigned = entries;
   fracSlot = 0;
   vcIndex  = vcc->vci;
   IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
   while (toBeAssigned)
   {
      // If this is the first time, start the table loading for this connection
      // as close to entryPoint as possible.
      if (toBeAssigned == entries)
      {
         idealSlot = dev->CbrEntryPt;
         dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
         if (dev->CbrEntryPt >= dev->CbrTotEntries)
            dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
      } else {
         idealSlot += (u32)(spacing + fracSlot); // Point to the next location
         // in the table that would be  smoothest
         fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
         sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
      }
      if (idealSlot >= (int)dev->CbrTotEntries)
         idealSlot -= dev->CbrTotEntries;
      // Continuously check around this ideal value until a null
      // location is encountered.
      SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize);
      inc = 0;
      testSlot = idealSlot;
      TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
      IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
                                testSlot, TstSchedTbl,toBeAssigned);)
      memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
      while (cbrVC)  // If another VC at this location, we have to keep looking
      {
          // probe alternately further below and above the ideal slot
          inc++;
          testSlot = idealSlot - inc;
          if (testSlot < 0) { // Wrap if necessary
             testSlot += dev->CbrTotEntries;
             IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
                                                       SchedTbl,testSlot);)
          }
          TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
          if (!cbrVC)
             break;
          testSlot = idealSlot + inc;
          if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
             testSlot -= dev->CbrTotEntries;
             IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
             IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
                                            testSlot, toBeAssigned);)
          }
          // set table index and read in value
          TstSchedTbl = (u16*)(SchedTbl + testSlot);
          IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
                          TstSchedTbl,cbrVC,inc);)
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
       } /* while */
       // Move this VCI number into this location of the CBR Sched table.
       memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
       dev->CbrRemEntries--;
       toBeAssigned--;
   } /* while */

   /* IaFFrednCbrEnable */
   dev->NumEnabledCBR++;
   if (dev->NumEnabledCBR == 1) {
       writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
       IF_CBR(printk("CBR is enabled\n");)
   }
   return 0;
}
ia_cbrVc_close(struct atm_vcc * vcc)575 static void ia_cbrVc_close (struct atm_vcc *vcc) {
576    IADEV *iadev;
577    u16 *SchedTbl, NullVci = 0;
578    u32 i, NumFound;
579 
580    iadev = INPH_IA_DEV(vcc->dev);
581    iadev->NumEnabledCBR--;
582    SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
583    if (iadev->NumEnabledCBR == 0) {
584       writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
585       IF_CBR (printk("CBR support disabled\n");)
586    }
587    NumFound = 0;
588    for (i=0; i < iadev->CbrTotEntries; i++)
589    {
590       if (*SchedTbl == vcc->vci) {
591          iadev->CbrRemEntries++;
592          *SchedTbl = NullVci;
593          IF_CBR(NumFound++;)
594       }
595       SchedTbl++;
596    }
597    IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
598 }
599 
/* Return the number of free TX descriptors currently readable from the
 * TCQ (each slot is 2 bytes, hence the /2), reclaiming completed ones
 * first via ia_hack_tcq(). */
static int ia_avail_descs(IADEV *iadev) {
   int avail;

   ia_hack_tcq(iadev);
   if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
      avail = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
   else
      /* the region wraps around the end of the TCQ */
      avail = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 +
               iadev->host_tcq_wr - iadev->ffL.tcq_st) / 2;
   return avail;
}
610 
611 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
612 
/* Drain the software TX backlog while descriptors are available,
 * discarding packets whose VC is missing or no longer ready and
 * re-queueing (at the head, to keep ordering) any packet the hardware
 * path rejects.  Always returns 0. */
static int ia_que_tx (IADEV *iadev) {
   struct sk_buff *skb;
   struct atm_vcc *vcc;
   int num_desc = ia_avail_descs(iadev);

   while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
      vcc = ATM_SKB(skb)->vcc;
      if (!vcc) {
         dev_kfree_skb_any(skb);
         printk("ia_que_tx: Null vcc\n");
         break;
      }
      if (!test_bit(ATM_VF_READY,&vcc->flags)) {
         dev_kfree_skb_any(skb);
         printk("Free the SKB on closed vci %d \n", vcc->vci);
         break;
      }
      if (ia_pkt_tx (vcc, skb))
         skb_queue_head(&iadev->tx_backlog, skb);
      num_desc--;
   }
   return 0;
}
637 
/*
 * Release completed transmit skbs.
 *
 * Reclaims descriptors via ia_hack_tcq(), then pops entries from
 * tx_return_q.  For each returned skb, every older skb still on the
 * VC's txing_skb list is released first (warning if it never got
 * IA_TX_DONE).  If the returned skb is not found on the list the entry
 * is pushed back and polling stops.  Finally the software backlog is
 * drained through ia_que_tx().
 */
static void ia_tx_poll (IADEV *iadev) {
   struct atm_vcc *vcc = NULL;
   struct sk_buff *skb = NULL, *skb1 = NULL;
   struct ia_vcc *iavcc;
   IARTN_Q *  rtne;

   ia_hack_tcq(iadev);
   while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
       skb = rtne->data.txskb;
       if (!skb) {
           printk("ia_tx_poll: skb is null\n");
           goto out;
       }
       vcc = ATM_SKB(skb)->vcc;
       if (!vcc) {
           printk("ia_tx_poll: vcc is null\n");
           dev_kfree_skb_any(skb);
	   goto out;
       }

       iavcc = INPH_IA_VCC(vcc);
       if (!iavcc) {
           printk("ia_tx_poll: iavcc is null\n");
           dev_kfree_skb_any(skb);
	   goto out;
       }

       /* release every skb queued before the one just completed */
       skb1 = skb_dequeue(&iavcc->txing_skb);
       while (skb1 && (skb1 != skb)) {
          if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
             printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
          }
          IF_ERR(printk("Release the SKB not match\n");)
          if ((vcc->pop) && (skb1->len != 0))
          {
             vcc->pop(vcc, skb1);
             IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
                                                          (long)skb1);)
          }
          else
             dev_kfree_skb_any(skb1);
          skb1 = skb_dequeue(&iavcc->txing_skb);
       }
       if (!skb1) {
          /* completed skb not on the txing list - put the return-queue
           * entry back (note rtne is intentionally NOT freed here) */
          IF_EVENT(printk("IA: Vci %d - skb not found requeued\n",vcc->vci);)
          ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
          break;
       }
       if ((vcc->pop) && (skb->len != 0))
       {
          vcc->pop(vcc, skb);
          IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
       }
       else
          dev_kfree_skb_any(skb);
       kfree(rtne);
    }
    ia_que_tx(iadev);
out:
    return;
}
#if 0
/* Write a 16-bit word to the serial NOVRAM (EEPROM) at @addr.
 * Dead code: compiled out; the bit-banging is driven entirely by the
 * NVRAM_* macros from iphase.h.
 * NOTE(review): the NVRAM_CMD(EXTEND + EWDS) line below is missing a
 * trailing semicolon - harmless while the block stays #if 0'd, but it
 * would need fixing before re-enabling. */
static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
{
        u32	t;
	int	i;
	/*
	 * Issue a command to enable writes to the NOVRAM
	 */
	NVRAM_CMD (EXTEND + EWEN);
	NVRAM_CLR_CE;
	/*
	 * issue the write command
	 */
	NVRAM_CMD(IAWRITE + addr);
	/*
	 * Send the data, starting with D15, then D14, and so on for 16 bits
	 */
	for (i=15; i>=0; i--) {
		NVRAM_CLKOUT (val & 0x8000);
		val <<= 1;
	}
	NVRAM_CLR_CE;
	CFG_OR(NVCE);
	/* wait for the NOVRAM to signal write completion via NVDO */
	t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
	while (!(t & NVDO))
		t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);

	NVRAM_CLR_CE;
	/*
	 * disable writes again
	 */
	NVRAM_CMD(EXTEND + EWDS)
	NVRAM_CLR_CE;
	CFG_AND(~NVDI);
}
#endif
735 
/* Read a 16-bit word from the serial NOVRAM (EEPROM) at @addr.
 * The clocking is done by the NVRAM_* bit-bang macros from iphase.h;
 * NVRAM_CLKIN leaves the sampled data bit in @t. */
static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
{
	u_short	val;
        u32	t;
	int	i;
	/*
	 * Read the first bit that was clocked with the falling edge of
	 * the last command data clock
	 */
	NVRAM_CMD(IAREAD + addr);
	/*
	 * Now read the rest of the bits, the next bit read is D14, then D13,
	 * and so on.
	 */
	val = 0;
	for (i=15; i>=0; i--) {
		NVRAM_CLKIN(t);
		val |= (t << i);	/* assemble MSB first */
	}
	NVRAM_CLR_CE;
	CFG_AND(~NVDI);
	return val;
}
759 
/*
 * Probe the adapter variant from EEPROM word 25 and size the driver
 * accordingly: buffer counts are scaled down for 512K (half) and
 * smaller (eighth) control memories unless the user overrode the
 * module parameters, and the line rate is derived from the PHY type
 * (25Mbit UTP, DS3, E3, or OC3 otherwise).
 */
static void ia_hw_type(IADEV *iadev) {
   u_short memType = ia_eeprom_get(iadev, 25);
   iadev->memType = memType;
   if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
      iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
      /* only scale down when the user kept the defaults */
      if (IA_TX_BUF == DFL_TX_BUFFERS)
        iadev->num_tx_desc = IA_TX_BUF / 2;
      else
        iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      if (IA_RX_BUF == DFL_RX_BUFFERS)
        iadev->num_rx_desc = IA_RX_BUF / 2;
      else
        iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   }
   else {
      if (IA_TX_BUF == DFL_TX_BUFFERS)
        iadev->num_tx_desc = IA_TX_BUF / 8;
      else
        iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      if (IA_RX_BUF == DFL_RX_BUFFERS)
        iadev->num_rx_desc = IA_RX_BUF / 8;
      else
        iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   }
   /* RX packet RAM starts right after the TX buffer area */
   iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
   IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
         iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
         iadev->rx_buf_sz, iadev->rx_pkt_ram);)

#if 0
   /* dead code (note: this block would not even compile - the first
    * "if" has an unbalanced brace) */
   if ((memType & FE_MASK) == FE_SINGLE_MODE) {
      iadev->phy_type = PHY_OC3C_S;
   else if ((memType & FE_MASK) == FE_UTP_OPTION)
      iadev->phy_type = PHY_UTP155;
   else
     iadev->phy_type = PHY_OC3C_M;
#endif

   iadev->phy_type = memType & FE_MASK;
   IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
                                         memType,iadev->phy_type);)
   /* line rate = payload fraction (26/27 cells) of the PHY bit rate,
    * expressed in cells/sec (53 bytes per cell) */
   if (iadev->phy_type == FE_25MBIT_PHY)
      iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_DS3_PHY)
      iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_E3_PHY)
      iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
   else
       iadev->LineRate = (u32)(ATM_OC3_PCR);
   IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)

}
820 
821 static u32 ia_phy_read32(struct iadev_priv *ia, unsigned int reg)
822 {
823 	return readl(ia->phy + (reg >> 2));
824 }
825 
826 static void ia_phy_write32(struct iadev_priv *ia, unsigned int reg, u32 val)
827 {
828 	writel(val, ia->phy + (reg >> 2));
829 }
830 
/* PHY front-end interrupt handler: acknowledge the pending interrupt
 * for the installed PHY type and refresh carrier_detect from the
 * corresponding loss-of-signal status bit.  The "unused" reads of the
 * interrupt-status registers are presumably read-to-clear - confirm
 * against the PM7345/SUNI datasheets before removing them. */
static void ia_frontend_intr(struct iadev_priv *iadev)
{
	u32 status;

	if (iadev->phy_type & FE_25MBIT_PHY) {
		status = ia_phy_read32(iadev, MB25_INTR_STATUS);
		iadev->carrier_detect = (status & MB25_IS_GSB) ? 1 : 0;
	} else if (iadev->phy_type & FE_DS3_PHY) {
		ia_phy_read32(iadev, SUNI_DS3_FRM_INTR_STAT);
		status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
		iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
	} else if (iadev->phy_type & FE_E3_PHY) {
		ia_phy_read32(iadev, SUNI_E3_FRM_MAINT_INTR_IND);
		status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
		iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
	} else {
		/* default: OC3 SUNI */
		status = ia_phy_read32(iadev, SUNI_RSOP_STATUS);
		iadev->carrier_detect = (status & SUNI_LOSV) ? 0 : 1;
	}

	printk(KERN_INFO "IA: SUNI carrier %s\n",
		iadev->carrier_detect ? "detected" : "lost signal");
}
854 
/* Initialize the 25Mbit (MB25) PHY: program the master control and
 * clear diagnostics, then sample the good-signal bit for the initial
 * carrier state.  The #if 0 line shows the variant that would also set
 * MB25_MC_ENABLED. */
static void ia_mb25_init(struct iadev_priv *iadev)
{
#if 0
   mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
#endif
	ia_phy_write32(iadev, MB25_MASTER_CTRL, MB25_MC_DRIC | MB25_MC_DREC);
	ia_phy_write32(iadev, MB25_DIAG_CONTROL, 0);

	iadev->carrier_detect =
		(ia_phy_read32(iadev, MB25_INTR_STATUS) & MB25_IS_GSB) ? 1 : 0;
}
866 
/* One PHY initialization step: a register offset and the value to write. */
struct ia_reg {
	u16 reg;	/* byte offset passed to ia_phy_write32() */
	u16 val;	/* value written to that register */
};
871 
872 static void ia_phy_write(struct iadev_priv *iadev,
873 			 const struct ia_reg *regs, int len)
874 {
875 	while (len--) {
876 		ia_phy_write32(iadev, regs->reg, regs->val);
877 		regs++;
878 	}
879 }
880 
/*
 * ia_suni_pm7345_init_ds3 - DS3-specific PM7345 framer setup.
 * Seeds carrier_detect from the current loss-of-signal (LOSV) state,
 * then programs the DS3 framer/transmitter registers.
 */
static void ia_suni_pm7345_init_ds3(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_ds3_init[] = {
		{ SUNI_DS3_FRM_INTR_ENBL,	0x17 },
		{ SUNI_DS3_FRM_CFG,		0x01 },
		{ SUNI_DS3_TRAN_CFG,		0x01 },
		{ SUNI_CONFIG,			0 },
		{ SUNI_SPLR_CFG,		0 },
		{ SUNI_SPLT_CFG,		0 }
	};
	u32 status;

	/* LOSV set means loss of signal, i.e. no carrier. */
	status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
	iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;

	ia_phy_write(iadev, suni_ds3_init, ARRAY_SIZE(suni_ds3_init));
}
898 
/*
 * ia_suni_pm7345_init_e3 - E3-specific PM7345 framer setup.
 * Seeds carrier_detect from the current LOS state, then programs the
 * E3 framer registers (including SUNI_PM7345_E3ENBL in SUNI_CONFIG).
 */
static void ia_suni_pm7345_init_e3(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_e3_init[] = {
		{ SUNI_E3_FRM_FRAM_OPTIONS,		0x04 },
		{ SUNI_E3_FRM_MAINT_OPTIONS,		0x20 },
		{ SUNI_E3_FRM_FRAM_INTR_ENBL,		0x1d },
		{ SUNI_E3_FRM_MAINT_INTR_ENBL,		0x30 },
		{ SUNI_E3_TRAN_STAT_DIAG_OPTIONS,	0 },
		{ SUNI_E3_TRAN_FRAM_OPTIONS,		0x01 },
		{ SUNI_CONFIG,				SUNI_PM7345_E3ENBL },
		{ SUNI_SPLR_CFG,			0x41 },
		{ SUNI_SPLT_CFG,			0x41 }
	};
	u32 status;

	/* SUNI_E3_LOS set means loss of signal, i.e. no carrier. */
	status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
	iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
	ia_phy_write(iadev, suni_e3_init, ARRAY_SIZE(suni_e3_init));
}
918 
919 static void ia_suni_pm7345_init(struct iadev_priv *iadev)
920 {
921 	static const struct ia_reg suni_init[] = {
922 		/* Enable RSOP loss of signal interrupt. */
923 		{ SUNI_INTR_ENBL,		0x28 },
924 		/* Clear error counters. */
925 		{ SUNI_ID_RESET,		0 },
926 		/* Clear "PMCTST" in master test register. */
927 		{ SUNI_MASTER_TEST,		0 },
928 
929 		{ SUNI_RXCP_CTRL,		0x2c },
930 		{ SUNI_RXCP_FCTRL,		0x81 },
931 
932 		{ SUNI_RXCP_IDLE_PAT_H1,	0 },
933 		{ SUNI_RXCP_IDLE_PAT_H2,	0 },
934 		{ SUNI_RXCP_IDLE_PAT_H3,	0 },
935 		{ SUNI_RXCP_IDLE_PAT_H4,	0x01 },
936 
937 		{ SUNI_RXCP_IDLE_MASK_H1,	0xff },
938 		{ SUNI_RXCP_IDLE_MASK_H2,	0xff },
939 		{ SUNI_RXCP_IDLE_MASK_H3,	0xff },
940 		{ SUNI_RXCP_IDLE_MASK_H4,	0xfe },
941 
942 		{ SUNI_RXCP_CELL_PAT_H1,	0 },
943 		{ SUNI_RXCP_CELL_PAT_H2,	0 },
944 		{ SUNI_RXCP_CELL_PAT_H3,	0 },
945 		{ SUNI_RXCP_CELL_PAT_H4,	0x01 },
946 
947 		{ SUNI_RXCP_CELL_MASK_H1,	0xff },
948 		{ SUNI_RXCP_CELL_MASK_H2,	0xff },
949 		{ SUNI_RXCP_CELL_MASK_H3,	0xff },
950 		{ SUNI_RXCP_CELL_MASK_H4,	0xff },
951 
952 		{ SUNI_TXCP_CTRL,		0xa4 },
953 		{ SUNI_TXCP_INTR_EN_STS,	0x10 },
954 		{ SUNI_TXCP_IDLE_PAT_H5,	0x55 }
955 	};
956 
957 	if (iadev->phy_type & FE_DS3_PHY)
958 		ia_suni_pm7345_init_ds3(iadev);
959 	else
960 		ia_suni_pm7345_init_e3(iadev);
961 
962 	ia_phy_write(iadev, suni_init, ARRAY_SIZE(suni_init));
963 
964 	ia_phy_write32(iadev, SUNI_CONFIG, ia_phy_read32(iadev, SUNI_CONFIG) &
965 		~(SUNI_PM7345_LLB | SUNI_PM7345_CLB |
966 		  SUNI_PM7345_DLB | SUNI_PM7345_PLB));
967 #ifdef __SNMP__
968    suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
969 #endif /* __SNMP__ */
970    return;
971 }
972 
973 
974 /***************************** IA_LIB END *****************************/
975 
#ifdef CONFIG_ATM_IA_DEBUG
static int tcnter = 0;
/*
 * xdump - debug-build hex/ASCII dump of a buffer, 16 bytes per row.
 * Each row is "<prefix> <hex bytes, blank every 4> <printable ASCII>".
 *
 * Fix: in the pad loop for a short final row, the group-separator
 * space was written with a plain sprintf() that did not advance pBuf,
 * so the next sprintf overwrote it and the ASCII column came out
 * misaligned relative to full rows.  Advance pBuf like the hex loop
 * above does.
 */
static void xdump( u_char*  cp, int  length, char*  prefix )
{
    int col, count;
    u_char prntBuf[120];
    u_char*  pBuf = prntBuf;
    count = 0;
    while(count < length){
        pBuf += sprintf( pBuf, "%s", prefix );
        for(col = 0;count + col < length && col < 16; col++){
            if (col != 0 && (col % 4) == 0)
                pBuf += sprintf( pBuf, " " );
            pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
        }
        while(col++ < 16){      /* pad end of buffer with blanks */
            if ((col % 4) == 0)
                pBuf += sprintf( pBuf, " " );
            pBuf += sprintf( pBuf, "   " );
        }
        pBuf += sprintf( pBuf, "  " );
        for(col = 0;count + col < length && col < 16; col++){
		u_char c = cp[count + col];

		if (isascii(c) && isprint(c))
			pBuf += sprintf(pBuf, "%c", c);
		else
			pBuf += sprintf(pBuf, ".");
                }
        printk("%s\n", prntBuf);
        count += col;
        pBuf = prntBuf;
    }

}  /* close xdump(... */
#endif /* CONFIG_ATM_IA_DEBUG */
1012 
1013 
1014 static struct atm_dev *ia_boards = NULL;
1015 
1016 #define ACTUAL_RAM_BASE \
1017 	RAM_BASE*((iadev->mem)/(128 * 1024))
1018 #define ACTUAL_SEG_RAM_BASE \
1019 	IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1020 #define ACTUAL_REASS_RAM_BASE \
1021 	IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1022 
1023 
1024 /*-- some utilities and memory allocation stuff will come here -------------*/
1025 
1026 static void desc_dbg(IADEV *iadev) {
1027 
1028   u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
1029   u32 i;
1030   void __iomem *tmp;
1031   // regval = readl((u32)ia_cmds->maddr);
1032   tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
1033   printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
1034                      tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
1035                      readw(iadev->seg_ram+tcq_wr_ptr-2));
1036   printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n",  iadev->host_tcq_wr,
1037                    iadev->ffL.tcq_rd);
1038   tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
1039   tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
1040   printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
1041   i = 0;
1042   while (tcq_st_ptr != tcq_ed_ptr) {
1043       tmp = iadev->seg_ram+tcq_st_ptr;
1044       printk("TCQ slot %d desc = %d  Addr = %p\n", i++, readw(tmp), tmp);
1045       tcq_st_ptr += 2;
1046   }
1047   for(i=0; i <iadev->num_tx_desc; i++)
1048       printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
1049 }
1050 
1051 
1052 /*----------------------------- Receiving side stuff --------------------------*/
1053 
/*
 * rx_excp_rcvd - would drain the reassembly exception queue.
 * The entire body is compiled out (#if 0): per the original author's
 * note, servicing the queue this way produced too many exception
 * interrupts, so exceptions are currently left unhandled and this is
 * a no-op.
 */
static void rx_excp_rcvd(struct atm_dev *dev)
{
#if 0 /* closing the receiving size will cause too many excp int */
  IADEV *iadev;
  u_short state;
  u_short excpq_rd_ptr;
  //u_short *ptr;
  int vci, error = 1;
  iadev = INPH_IA_DEV(dev);
  state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)
  { printk("state = %x \n", state);
        excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
 printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr);
        if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
            IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
        // TODO: update exception stat
	vci = readw(iadev->reass_ram+excpq_rd_ptr);
	error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;
        // pwang_test
	excpq_rd_ptr += 4;
	if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))
 	    excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
	writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  }
#endif
}
1082 
1083 static void free_desc(struct atm_dev *dev, int desc)
1084 {
1085 	IADEV *iadev;
1086 	iadev = INPH_IA_DEV(dev);
1087         writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr);
1088 	iadev->rfL.fdq_wr +=2;
1089 	if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1090 		iadev->rfL.fdq_wr =  iadev->rfL.fdq_st;
1091 	writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);
1092 }
1093 
1094 
/*
 * rx_pkt - service one entry from the reassembly packet-complete queue.
 * Pops the next completed descriptor, validates it and its VC index,
 * charges an skb to the owning VCC and queues a receive DLE so the
 * adapter DMAs the PDU into host memory (completion is handled in
 * rx_dle_intr()).  Returns 0 on success, negative on error; every
 * error path recycles the descriptor via free_desc().
 */
static int rx_pkt(struct atm_dev *dev)
{
	IADEV *iadev;
	struct atm_vcc *vcc;
	unsigned short status;
	struct rx_buf_desc __iomem *buf_desc_ptr;
	int desc;
	struct dle* wr_ptr;
	int len;
	struct sk_buff *skb;
	u_int buf_addr, dma_addr;

	iadev = INPH_IA_DEV(dev);
	/* Queue is empty when our cached read pointer has caught up with
	 * the hardware write pointer. */
	if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff))
	{
   	    printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);
	    return -EINVAL;
	}
	/* mask 1st 3 bits to get the actual descno. */
	desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;
        IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n",
                                    iadev->reass_ram, iadev->rfL.pcq_rd, desc);
              printk(" pcq_wr_ptr = 0x%x\n",
                               readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
	/* update the read pointer  - maybe we shud do this in the end*/
	if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed)
		iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
	else
		iadev->rfL.pcq_rd += 2;
	writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);

	/* get the buffer desc entry.
		update stuff. - doesn't seem to be any update necessary
	*/
	buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
	/* make the ptr point to the corresponding buffer desc entry */
	buf_desc_ptr += desc;
	/* Reject descriptor 0, out-of-range descriptors and bogus VC
	 * indices. */
        if (!desc || (desc > iadev->num_rx_desc) ||
                      ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
            free_desc(dev, desc);
            IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
            return -1;
        }
	vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];
	if (!vcc)
	{
                free_desc(dev, desc);
		printk("IA: null vcc, drop PDU\n");
		return -1;
	}


	/* might want to check the status bits for errors */
	status = (u_short) (buf_desc_ptr->desc_mode);
	if (status & (RX_CER | RX_PTE | RX_OFL))
	{
                atomic_inc(&vcc->stats->rx_err);
		IF_ERR(printk("IA: bad packet, dropping it");)
                if (status & RX_CER) {
                    IF_ERR(printk(" cause: packet CRC error\n");)
                }
                else if (status & RX_PTE) {
                    IF_ERR(printk(" cause: packet time out\n");)
                }
                else {
                    IF_ERR(printk(" cause: buffer overflow\n");)
                }
		goto out_free_desc;
	}

	/*
		build DLE.
	*/

	/* The PDU length is how far the adapter's DMA pointer advanced
	 * past the buffer start address. */
	buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
	dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
	len = dma_addr - buf_addr;
        if (len > iadev->rx_buf_sz) {
           printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
           atomic_inc(&vcc->stats->rx_err);
	   goto out_free_desc;
        }

        if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
           if (vcc->vci < 32)
              printk("Drop control packets\n");
	   goto out_free_desc;
        }
	skb_put(skb,len);
        // pwang_test
        ATM_SKB(skb)->vcc = vcc;
        ATM_DESC(skb) = desc;
	skb_queue_tail(&iadev->rx_dma_q, skb);

	/* Build the DLE structure */
	wr_ptr = iadev->rx_dle_q.write;
	wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
					      len, DMA_FROM_DEVICE);
	wr_ptr->local_pkt_addr = buf_addr;
	wr_ptr->bytes = len;	/* We don't know this do we ?? */
	wr_ptr->mode = DMA_INT_ENABLE;

	/* shud take care of wrap around here too. */
        if(++wr_ptr == iadev->rx_dle_q.end)
             wr_ptr = iadev->rx_dle_q.start;
	iadev->rx_dle_q.write = wr_ptr;
	udelay(1);
	/* Increment transaction counter */
	writel(1, iadev->dma+IPHASE5575_RX_COUNTER);
out:	return 0;
out_free_desc:
        free_desc(dev, desc);
        goto out;
}
1209 
/*
 * rx_intr - reassembly (receive-side) interrupt handler.
 * Reads the reassembly interrupt status register and services the
 * packet-complete, free-queue-empty, exception and raw-cell bits.
 */
static void rx_intr(struct atm_dev *dev)
{
  IADEV *iadev;
  u_short status;
  u_short state, i;

  iadev = INPH_IA_DEV(dev);
  status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
  IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
  if (status & RX_PKT_RCVD)
  {
	/* do something */
	/* Basically recvd an interrupt for receiving a packet.
	A descriptor would have been written to the packet complete
	queue. Get all the descriptors and set up dma to move the
	packets till the packet complete queue is empty..
	*/
	state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
        IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
	while(!(state & PCQ_EMPTY))
	{
             rx_pkt(dev);
	     state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
	}
        iadev->rxing = 1;
  }
  if (status & RX_FREEQ_EMPT)
  {
     /* Free-descriptor starvation.  First occurrence records a
      * snapshot of the packet count and time; if ~50 jiffies later no
      * packet has completed, forcibly recycle every descriptor and
      * mask the starvation interrupts (rx_dle_intr() unmasks them once
      * descriptors are free again). */
     if (iadev->rxing) {
        iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
        iadev->rx_tmp_jif = jiffies;
        iadev->rxing = 0;
     }
     else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
               ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
        for (i = 1; i <= iadev->num_rx_desc; i++)
               free_desc(dev, i);
printk("Test logic RUN!!!!\n");
        writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
        iadev->rxing = 1;
     }
     IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
  }

  if (status & RX_EXCP_RCVD)
  {
	/* probably need to handle the exception queue also. */
	IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
	rx_excp_rcvd(dev);
  }


  if (status & RX_RAW_RCVD)
  {
	/* need to handle the raw incoming cells. This depends on
	whether we have programmed to receive the raw cells or not.
	Else ignore. */
	IF_EVENT(printk("Rx intr status:  RX_RAW_RCVD %08x\n", status);)
  }
}
1270 
1271 
/*
 * rx_dle_intr - receive DMA-list-entry completion handler.
 * Walks the rx DLE ring from the cached read pointer to the adapter's
 * current list position; for each completed DMA it recycles the
 * hardware descriptor, validates the AAL5 trailer length, trims the
 * skb and pushes it up to the owning VCC.  Finally re-enables the
 * starvation interrupts masked by rx_intr() if descriptors are free.
 */
static void rx_dle_intr(struct atm_dev *dev)
{
  IADEV *iadev;
  struct atm_vcc *vcc;
  struct sk_buff *skb;
  int desc;
  u_short state;
  struct dle *dle, *cur_dle;
  u_int dle_lp;
  int len;
  iadev = INPH_IA_DEV(dev);

  /* free all the dles done, that is just update our own dle read pointer
	- do we really need to do this. Think not. */
  /* DMA is done, just get all the recevie buffers from the rx dma queue
	and push them up to the higher layer protocol. Also free the desc
	associated with the buffer. */
  dle = iadev->rx_dle_q.read;
  dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
  cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));
  while(dle != cur_dle)
  {
      /* free the DMAed skb */
      skb = skb_dequeue(&iadev->rx_dma_q);
      if (!skb)
         goto INCR_DLE;
      desc = ATM_DESC(skb);
      free_desc(dev, desc);

      if (!(len = skb->len))
      {
          printk("rx_dle_intr: skb len 0\n");
	  dev_kfree_skb_any(skb);
      }
      else
      {
          struct cpcs_trailer *trailer;
          u_short length;
          struct ia_vcc *ia_vcc;

	  /* NOTE(review): this unmaps via the ring's *write* pointer
	   * rather than the entry just completed -- looks suspicious;
	   * confirm against the DLE DMA scheme before changing. */
	  dma_unmap_single(&iadev->pci->dev, iadev->rx_dle_q.write->sys_pkt_addr,
			   len, DMA_FROM_DEVICE);
          /* no VCC related housekeeping done as yet. lets see */
          vcc = ATM_SKB(skb)->vcc;
	  if (!vcc) {
	      printk("IA: null vcc\n");
              dev_kfree_skb_any(skb);
              goto INCR_DLE;
          }
          ia_vcc = INPH_IA_VCC(vcc);
          if (ia_vcc == NULL)
          {
             atomic_inc(&vcc->stats->rx_err);
             atm_return(vcc, skb->truesize);
             dev_kfree_skb_any(skb);
             goto INCR_DLE;
           }
          // get real pkt length  pwang_test
          trailer = (struct cpcs_trailer*)((u_char *)skb->data +
                                 skb->len - sizeof(*trailer));
	  length = swap_byte_order(trailer->length);
	  /* Drop PDUs whose AAL5 trailer length cannot be valid. */
          if ((length > iadev->rx_buf_sz) || (length >
                              (skb->len - sizeof(struct cpcs_trailer))))
          {
             atomic_inc(&vcc->stats->rx_err);
             IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)",
                                                            length, skb->len);)
             atm_return(vcc, skb->truesize);
             dev_kfree_skb_any(skb);
             goto INCR_DLE;
          }
          skb_trim(skb, length);

	  /* Display the packet */
	  IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);
          xdump(skb->data, skb->len, "RX: ");
          printk("\n");)

	  IF_RX(printk("rx_dle_intr: skb push");)
	  vcc->push(vcc,skb);
	  atomic_inc(&vcc->stats->rx);
          iadev->rx_pkt_cnt++;
      }
INCR_DLE:
      if (++dle == iadev->rx_dle_q.end)
    	  dle = iadev->rx_dle_q.start;
  }
  iadev->rx_dle_q.read = dle;

  /* if the interrupts are masked because there were no free desc available,
		unmask them now. */
  if (!iadev->rxing) {
     state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
     if (!(state & FREEQ_EMPTY)) {
        state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
        writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
                                      iadev->reass_reg+REASS_MASK_REG);
        iadev->rxing++;
     }
  }
}
1373 
1374 
/*
 * open_rx - program the reassembly side for a newly opened VCC.
 * Marks the VCI valid in the receive VC lookup table, sets up ABR
 * parameters when the traffic class requires it (ABR is rejected on
 * the 25 Mbit PHY), and records the VCC in rx_open[] so incoming PDUs
 * can be demultiplexed to it.  Returns 0 on success or -EINVAL.
 */
static int open_rx(struct atm_vcc *vcc)
{
	IADEV *iadev;
	u_short __iomem *vc_table;
	u_short __iomem *reass_ptr;
	IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)

	if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
	iadev = INPH_IA_DEV(vcc->dev);
        if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
           if (iadev->phy_type & FE_25MBIT_PHY) {
               printk("IA:  ABR not support\n");
               return -EINVAL;
           }
        }
	/* Make only this VCI in the vc table valid and let all
		others be invalid entries */
	vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
	vc_table += vcc->vci;
	/* mask the last 6 bits and OR it with 3 for 1K VCs */

        *vc_table = vcc->vci << 6;
	/* Also keep a list of open rx vcs so that we can attach them with
		incoming PDUs later. */
	if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
                                (vcc->qos.txtp.traffic_class == ATM_ABR))
	{
                srv_cls_param_t srv_p;
                init_abr_vc(iadev, &srv_p);
                ia_open_abr_vc(iadev, &srv_p, vcc, 0);
	}
       	else {  /* for UBR  later may need to add CBR logic */
        	reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
           	reass_ptr += vcc->vci;
           	*reass_ptr = NO_AAL5_PKT;
       	}

	if (iadev->rx_open[vcc->vci])
		printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",
			vcc->dev->number, vcc->vci);
	iadev->rx_open[vcc->vci] = vcc;
	return 0;
}
1418 
/*
 * rx_init - one-time initialization of the reassembly (receive) side.
 * Allocates the receive DLE ring, resets the reassembly engine, lays
 * out the receive control memory (buffer descriptors, free queue,
 * packet-complete queue, exception queue, reassembly table, VC table,
 * ABR VC table), caches the hardware queue pointers, allocates the
 * rx_open[] VCC lookup array and finally puts the engine online.
 * Returns 0 on success or -ENOMEM.
 */
static int rx_init(struct atm_dev *dev)
{
	IADEV *iadev;
	struct rx_buf_desc __iomem *buf_desc_ptr;
	unsigned long rx_pkt_start = 0;
	void *dle_addr;
	struct abr_vc_table  *abr_vc_table;
	u16 *vc_table;
	u16 *reass_table;
	int i,j, vcsize_sel;
	u_short freeq_st_adr;
	u_short *freeq_start;

	iadev = INPH_IA_DEV(dev);
  //    spin_lock_init(&iadev->rx_lock);

	/* Allocate 4k bytes - more aligned than needed (4k boundary) */
	dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
				      &iadev->rx_dle_dma, GFP_KERNEL);
	if (!dle_addr)  {
		printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
		goto err_out;
	}
	iadev->rx_dle_q.start = (struct dle *)dle_addr;
	iadev->rx_dle_q.read = iadev->rx_dle_q.start;
	iadev->rx_dle_q.write = iadev->rx_dle_q.start;
	iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
	/* the end of the dle q points to the entry after the last
	DLE that can be used. */

	/* write the upper 20 bits of the start address to rx list address register */
	/* We know this is 32bit bus addressed so the following is safe */
	writel(iadev->rx_dle_dma & 0xfffff000,
	       iadev->dma + IPHASE5575_RX_LIST_ADDR);
	IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
                      iadev->dma+IPHASE5575_TX_LIST_ADDR,
                      readl(iadev->dma + IPHASE5575_TX_LIST_ADDR));
	printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
                      iadev->dma+IPHASE5575_RX_LIST_ADDR,
                      readl(iadev->dma + IPHASE5575_RX_LIST_ADDR));)

	/* Mask all interrupts, take the engine offline and reset it
	 * before programming the control memory. */
	writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
	writew(0, iadev->reass_reg+MODE_REG);
	writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);

	/* Receive side control memory map
	   -------------------------------

		Buffer descr	0x0000 (736 - 23K)
		VP Table	0x5c00 (256 - 512)
		Except q	0x5e00 (128 - 512)
		Free buffer q	0x6000 (1K - 2K)
		Packet comp q	0x6800 (1K - 2K)
		Reass Table	0x7000 (1K - 2K)
		VC Table	0x7800 (1K - 2K)
		ABR VC Table	0x8000 (1K - 32K)
	*/

	/* Base address for Buffer Descriptor Table */
	writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);
	/* Set the buffer size register */
	writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);

	/* Initialize each entry in the Buffer Descriptor Table */
        iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
	buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
	memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
	buf_desc_ptr++;
	rx_pkt_start = iadev->rx_pkt_ram;
	for(i=1; i<=iadev->num_rx_desc; i++)
	{
		memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
		buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;
		buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;
		buf_desc_ptr++;
		rx_pkt_start += iadev->rx_buf_sz;
	}
	IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
        i = FREE_BUF_DESC_Q*iadev->memSize;
	writew(i >> 16,  iadev->reass_reg+REASS_QUEUE_BASE);
        writew(i, iadev->reass_reg+FREEQ_ST_ADR);
        writew(i+iadev->num_rx_desc*sizeof(u_short),
                                         iadev->reass_reg+FREEQ_ED_ADR);
        writew(i, iadev->reass_reg+FREEQ_RD_PTR);
        writew(i+iadev->num_rx_desc*sizeof(u_short),
                                        iadev->reass_reg+FREEQ_WR_PTR);
	/* Fill the FREEQ with all the free descriptors. */
	freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);
	freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);
	for(i=1; i<=iadev->num_rx_desc; i++)
	{
		*freeq_start = (u_short)i;
		freeq_start++;
	}
	IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
        /* Packet Complete Queue */
        i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
        writew(i, iadev->reass_reg+PCQ_ST_ADR);
        writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
        writew(i, iadev->reass_reg+PCQ_RD_PTR);
        writew(i, iadev->reass_reg+PCQ_WR_PTR);

        /* Exception Queue */
        i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
        writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
        writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q),
                                             iadev->reass_reg+EXCP_Q_ED_ADR);
        writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
        writew(i, iadev->reass_reg+EXCP_Q_WR_PTR);

    	/* Load local copy of FREEQ and PCQ ptrs */
        iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
       	iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
	iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
	iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
        iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
	iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
	iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
	iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;

        IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
              iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
              iadev->rfL.pcq_wr);)
	/* just for check - no VP TBL */
	/* VP Table */
	/* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
	/* initialize VP Table for invalid VPIs
		- I guess we can write all 1s or 0x000f in the entire memory
		  space or something similar.
	*/

	/* This seems to work and looks right to me too !!! */
        i =  REASS_TABLE * iadev->memSize;
	writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
 	/* initialize Reassembly table to I don't know what ???? */
	reass_table = (u16 *)(iadev->reass_ram+i);
        j = REASS_TABLE_SZ * iadev->memSize;
	for(i=0; i < j; i++)
		*reass_table++ = NO_AAL5_PKT;
       /* Derive the VC-size select field: log2(8192 / num_vc). */
       i = 8*1024;
       vcsize_sel =  0;
       while (i != iadev->num_vc) {
          i /= 2;
          vcsize_sel++;
       }
       i = RX_VC_TABLE * iadev->memSize;
       writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
       vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
        j = RX_VC_TABLE_SZ * iadev->memSize;
	for(i = 0; i < j; i++)
	{
		/* shift the reassembly pointer by 3 + lower 3 bits of
		vc_lkup_base register (=3 for 1K VCs) and the last byte
		is those low 3 bits.
		Shall program this later.
		*/
		*vc_table = (i << 6) | 15;	/* for invalid VCI */
		vc_table++;
	}
        /* ABR VC table */
        i =  ABR_VC_TABLE * iadev->memSize;
        writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);

        i = ABR_VC_TABLE * iadev->memSize;
	abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);
        j = REASS_TABLE_SZ * iadev->memSize;
        memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
    	for(i = 0; i < j; i++) {
		abr_vc_table->rdf = 0x0003;
             	abr_vc_table->air = 0x5eb1;
	       	abr_vc_table++;
        }

	/* Initialize other registers */

	/* VP Filter Register set for VC Reassembly only */
	writew(0xff00, iadev->reass_reg+VP_FILTER);
        writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
	writew(0x1,  iadev->reass_reg+PROTOCOL_ID);

	/* Packet Timeout Count  related Registers :
	   Set packet timeout to occur in about 3 seconds
	   Set Packet Aging Interval count register to overflow in about 4 us
 	*/
        writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );

        i = (j >> 6) & 0xFF;
        j += 2 * (j - 1);
        i |= ((j << 2) & 0xFF00);
        writew(i, iadev->reass_reg+TMOUT_RANGE);

        /* initiate the desc_tble */
        for(i=0; i<iadev->num_tx_desc;i++)
            iadev->desc_tbl[i].timestamp = 0;

	/* to clear the interrupt status register - read it */
	readw(iadev->reass_reg+REASS_INTR_STATUS_REG);

	/* Mask Register - clear it */
	writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);

	skb_queue_head_init(&iadev->rx_dma_q);
	iadev->rx_free_desc_qhead = NULL;

	iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
	if (!iadev->rx_open) {
		printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
		dev->number);
		goto err_free_dle;
	}

        iadev->rxing = 1;
        iadev->rx_pkt_cnt = 0;
	/* Mode Register */
	writew(R_ONLINE, iadev->reass_reg+MODE_REG);
	return 0;

err_free_dle:
	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
			  iadev->rx_dle_dma);
err_out:
	return -ENOMEM;
}
1642 
1643 
1644 /*
1645 	The memory map suggested in appendix A and the coding for it.
1646 	Keeping it around just in case we change our mind later.
1647 
1648 		Buffer descr	0x0000 (128 - 4K)
1649 		UBR sched	0x1000 (1K - 4K)
1650 		UBR Wait q	0x2000 (1K - 4K)
		Common queues	0x3000 Packet Ready, Transmit comp(0x3100)
1652 					(128 - 256) each
1653 		extended VC	0x4000 (1K - 8K)
1654 		ABR sched	0x6000	and ABR wait queue (1K - 2K) each
1655 		CBR sched	0x7000 (as needed)
1656 		VC table	0x8000 (1K - 32K)
1657 */
1658 
/*
 * tx_intr - segmentation (transmit-side) interrupt handler.
 * On TRANSMIT_DONE: reaps completed transmissions under tx_lock,
 * writes the bit back to the status register (presumably
 * write-1-to-clear -- confirm against the (i)Chip register spec), and
 * wakes any close() waiter.  TCQ_NOT_EMPTY is only logged.
 */
static void tx_intr(struct atm_dev *dev)
{
	IADEV *iadev;
	unsigned short status;
        unsigned long flags;

	iadev = INPH_IA_DEV(dev);

	status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
        if (status & TRANSMIT_DONE){

           IF_EVENT(printk("Transmit Done Intr logic run\n");)
           spin_lock_irqsave(&iadev->tx_lock, flags);
           ia_tx_poll(iadev);
           spin_unlock_irqrestore(&iadev->tx_lock, flags);
           writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
           if (iadev->close_pending)
               wake_up(&iadev->close_wait);
        }
	if (status & TCQ_NOT_EMPTY)
	{
	    IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)
	}
}
1683 
/*
 * tx_dle_intr - transmit DMA-list-entry completion handler.
 * Walks the tx DLE ring from the cached read pointer to the adapter's
 * current list position, unmapping and releasing (or, for rate-limited
 * VCs, parking on txing_skb for flow control) each DMAed skb.
 * All ring state is protected by tx_lock.
 */
static void tx_dle_intr(struct atm_dev *dev)
{
        IADEV *iadev;
        struct dle *dle, *cur_dle;
        struct sk_buff *skb;
        struct atm_vcc *vcc;
        struct ia_vcc  *iavcc;
        u_int dle_lp;
        unsigned long flags;

        iadev = INPH_IA_DEV(dev);
        spin_lock_irqsave(&iadev->tx_lock, flags);
        dle = iadev->tx_dle_q.read;
        dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) &
                                        (sizeof(struct dle)*DLE_ENTRIES - 1);
        cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
        while (dle != cur_dle)
        {
            /* free the DMAed skb */
            skb = skb_dequeue(&iadev->tx_dma_q);
            if (!skb) break;

	    /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
	    if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
		dma_unmap_single(&iadev->pci->dev, dle->sys_pkt_addr, skb->len,
				 DMA_TO_DEVICE);
	    }
            vcc = ATM_SKB(skb)->vcc;
            if (!vcc) {
                  printk("tx_dle_intr: vcc is null\n");
		  spin_unlock_irqrestore(&iadev->tx_lock, flags);
                  dev_kfree_skb_any(skb);

                  return;
            }
            iavcc = INPH_IA_VCC(vcc);
            if (!iavcc) {
                  printk("tx_dle_intr: iavcc is null\n");
		  spin_unlock_irqrestore(&iadev->tx_lock, flags);
                  dev_kfree_skb_any(skb);
                  return;
            }
            /* Fast VCs are released immediately; slow (rate-limited)
             * VCs keep the skb queued until flow control frees it. */
            if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
               if ((vcc->pop) && (skb->len != 0))
               {
                 vcc->pop(vcc, skb);
               }
               else {
                 dev_kfree_skb_any(skb);
               }
            }
            else { /* Hold the rate-limited skb for flow control */
               IA_SKB_STATE(skb) |= IA_DLED;
               skb_queue_tail(&iavcc->txing_skb, skb);
            }
            IF_EVENT(printk("tx_dle_intr: enque skb = 0x%p \n", skb);)
            if (++dle == iadev->tx_dle_q.end)
                 dle = iadev->tx_dle_q.start;
        }
        iadev->tx_dle_q.read = dle;
        spin_unlock_irqrestore(&iadev->tx_lock, flags);
}
1746 
/*
 * Set up the transmit side of a VC: validate QoS against the PHY and buffer
 * configuration, derive the PCR and flow-control timeout, initialize the
 * main/extended VC table entries, and program the hardware for the requested
 * traffic class (UBR, ABR or CBR).  Returns 0 on success or a negative errno.
 *
 * NOTE(review): error paths are inconsistent about cleanup — the oversized-SDU
 * path frees ia_vcc and clears dev_data, while the 25-Mbit/-EBUSY/CBR failure
 * paths do not; confirm against the caller/close path which is correct.
 */
static int open_tx(struct atm_vcc *vcc)
{
	struct ia_vcc *ia_vcc;
	IADEV *iadev;
	struct main_vc *vc;
	struct ext_vc *evc;
        int ret;
	IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)
	if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
	iadev = INPH_IA_DEV(vcc->dev);

	/* The 25-Mbit front end supports neither ABR nor CBR. */
        if (iadev->phy_type & FE_25MBIT_PHY) {
           if (vcc->qos.txtp.traffic_class == ATM_ABR) {
               printk("IA:  ABR not support\n");
               return -EINVAL;
           }
	  if (vcc->qos.txtp.traffic_class == ATM_CBR) {
               printk("IA:  CBR not support\n");
               return -EINVAL;
          }
        }
        ia_vcc =  INPH_IA_VCC(vcc);
        memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
        /* The per-descriptor buffer must hold the SDU plus the CPCS trailer. */
        if (vcc->qos.txtp.max_sdu >
                         (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
           printk("IA:  SDU size over (%d) the configured SDU size %d\n",
		  vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
	   vcc->dev_data = NULL;
           kfree(ia_vcc);
           return -EINVAL;
        }
	ia_vcc->vc_desc_cnt = 0;
        ia_vcc->txing = 1;

        /* find pcr */
        /* Resolve the effective PCR: unlimited/unspecified requests get the
         * full line rate; otherwise honor max_pcr, capped at the line rate. */
        if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR)
           vcc->qos.txtp.pcr = iadev->LineRate;
        else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
           vcc->qos.txtp.pcr = iadev->LineRate;
        else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0))
           vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
        if (vcc->qos.txtp.pcr > iadev->LineRate)
             vcc->qos.txtp.pcr = iadev->LineRate;
        ia_vcc->pcr = vcc->qos.txtp.pcr;

        /* Flow-control timeout: faster VCs time out sooner. */
        if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
        else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
        else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
        else ia_vcc->ltimeout = 2700 * HZ  / ia_vcc->pcr;
        /* Rate-limited VCs get a holding queue (see tx_dle_intr) and a
         * socket send buffer sized to the SDU and rate. */
        if (ia_vcc->pcr < iadev->rate_limit)
           skb_queue_head_init (&ia_vcc->txing_skb);
        if (ia_vcc->pcr < iadev->rate_limit) {
	   struct sock *sk = sk_atm(vcc);

	   if (vcc->qos.txtp.max_sdu != 0) {
               if (ia_vcc->pcr > 60000)
                  sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
               else if (ia_vcc->pcr > 2000)
                  sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
               else
                 sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
           }
           else
             sk->sk_sndbuf = 24576;
        }

	/* Clear this VCI's entries in the main and extended VC tables. */
	vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
	evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
	vc += vcc->vci;
	evc += vcc->vci;
	memset((caddr_t)vc, 0, sizeof(*vc));
	memset((caddr_t)evc, 0, sizeof(*evc));

	/* store the most significant 4 bits of vci as the last 4 bits
		of first part of atm header.
	   store the last 12 bits of vci as first 12 bits of the second
		part of the atm header.
	*/
	evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
	evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;

	/* check the following for different traffic classes */
	if (vcc->qos.txtp.traffic_class == ATM_UBR)
	{
		/* UBR: schedule at line rate unless a PCR was requested. */
		vc->type = UBR;
                vc->status = CRC_APPEND;
		vc->acr = cellrate_to_float(iadev->LineRate);
                if (vcc->qos.txtp.pcr > 0)
                   vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);
                IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
                                             vcc->qos.txtp.max_pcr,vc->acr);)
	}
	else if (vcc->qos.txtp.traffic_class == ATM_ABR)
	{       srv_cls_param_t srv_p;
		IF_ABR(printk("Tx ABR VCC\n");)
		/* Start from the device defaults, then overlay every ABR
		 * parameter the caller supplied. */
                init_abr_vc(iadev, &srv_p);
                if (vcc->qos.txtp.pcr > 0)
                   srv_p.pcr = vcc->qos.txtp.pcr;
                if (vcc->qos.txtp.min_pcr > 0) {
                   /* Admission control: reserved MCR+CBR must fit the line. */
                   int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
                   if (tmpsum > iadev->LineRate)
                       return -EBUSY;
                   srv_p.mcr = vcc->qos.txtp.min_pcr;
                   iadev->sum_mcr += vcc->qos.txtp.min_pcr;
                }
                else srv_p.mcr = 0;
                if (vcc->qos.txtp.icr)
                   srv_p.icr = vcc->qos.txtp.icr;
                if (vcc->qos.txtp.tbe)
                   srv_p.tbe = vcc->qos.txtp.tbe;
                if (vcc->qos.txtp.frtt)
                   srv_p.frtt = vcc->qos.txtp.frtt;
                if (vcc->qos.txtp.rif)
                   srv_p.rif = vcc->qos.txtp.rif;
                if (vcc->qos.txtp.rdf)
                   srv_p.rdf = vcc->qos.txtp.rdf;
                if (vcc->qos.txtp.nrm_pres)
                   srv_p.nrm = vcc->qos.txtp.nrm;
                if (vcc->qos.txtp.trm_pres)
                   srv_p.trm = vcc->qos.txtp.trm;
                if (vcc->qos.txtp.adtf_pres)
                   srv_p.adtf = vcc->qos.txtp.adtf;
                if (vcc->qos.txtp.cdf_pres)
                   srv_p.cdf = vcc->qos.txtp.cdf;
                /* ICR can never exceed PCR. */
                if (srv_p.icr > srv_p.pcr)
                   srv_p.icr = srv_p.pcr;
                IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d  mcr = %d\n",
                                                      srv_p.pcr, srv_p.mcr);)
		ia_open_abr_vc(iadev, &srv_p, vcc, 1);
	} else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
                if (iadev->phy_type & FE_25MBIT_PHY) {
                    printk("IA:  CBR not support\n");
                    return -EINVAL;
                }
                if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
                   IF_CBR(printk("PCR is not available\n");)
                   return -1;
                }
                vc->type = CBR;
                vc->status = CRC_APPEND;
                /* Reserve slots in the CBR schedule table. */
                if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {
                    return ret;
                }
	} else {
		/* Unknown class: log it, but still mark the VC active below. */
		printk("iadev:  Non UBR, ABR and CBR traffic not supported\n");
	}

        iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
	IF_EVENT(printk("ia open_tx returning \n");)
	return 0;
}
1898 
1899 
/*
 * One-time initialization of the segmentation (transmit) side: allocates the
 * TX DLE ring, buffer descriptors, CPCS trailers and the per-VC test table;
 * programs the transmit-side control-memory layout (TCQ, PRQ, CBR/UBR/ABR
 * schedule tables, VC tables) and the segmentation registers; finally
 * unmasks TRANSMIT_DONE interrupts.  Returns 0 on success or -ENOMEM, with
 * all partial allocations rolled back via the goto-cleanup chain.
 */
static int tx_init(struct atm_dev *dev)
{
	IADEV *iadev;
	struct tx_buf_desc *buf_desc_ptr;
	unsigned int tx_pkt_start;
	void *dle_addr;
	int i;
	u_short tcq_st_adr;
	u_short *tcq_start;
	u_short prq_st_adr;
	u_short *prq_start;
	struct main_vc *vc;
	struct ext_vc *evc;
        u_short tmp16;
        u32 vcsize_sel;

	iadev = INPH_IA_DEV(dev);
        spin_lock_init(&iadev->tx_lock);

	IF_INIT(printk("Tx MASK REG: 0x%0x\n",
                                readw(iadev->seg_reg+SEG_MASK_REG));)

	/* Allocate 4k (boundary aligned) bytes */
	dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
				      &iadev->tx_dle_dma, GFP_KERNEL);
	if (!dle_addr)  {
		printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
		goto err_out;
	}
	iadev->tx_dle_q.start = (struct dle*)dle_addr;
	iadev->tx_dle_q.read = iadev->tx_dle_q.start;
	iadev->tx_dle_q.write = iadev->tx_dle_q.start;
	iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);

	/* write the upper 20 bits of the start address to tx list address register */
	writel(iadev->tx_dle_dma & 0xfffff000,
	       iadev->dma + IPHASE5575_TX_LIST_ADDR);
	/* Mask all segmentation interrupts and reset the SEG engine before
	 * laying out control memory. */
	writew(0xffff, iadev->seg_reg+SEG_MASK_REG);
	writew(0, iadev->seg_reg+MODE_REG_0);
	writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);
        iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
        iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
        iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;

	/*
	   Transmit side control memory map
	   --------------------------------
	 Buffer descr 	0x0000 (128 - 4K)
	 Commn queues	0x1000	Transmit comp, Packet ready(0x1400)
					(512 - 1K) each
					TCQ - 4K, PRQ - 5K
	 CBR Table 	0x1800 (as needed) - 6K
	 UBR Table	0x3000 (1K - 4K) - 12K
	 UBR Wait queue	0x4000 (1K - 4K) - 16K
	 ABR sched	0x5000	and ABR wait queue (1K - 2K) each
				ABR Tbl - 20K, ABR Wq - 22K
	 extended VC	0x6000 (1K - 8K) - 24K
	 VC Table	0x8000 (1K - 32K) - 32K

	Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl
	and Wait q, which can be allotted later.
	*/

	/* Buffer Descriptor Table Base address */
	writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);

	/* initialize each entry in the buffer descriptor table */
	buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
	/* Descriptor 0 is unused; descriptors 1..num_tx_desc each point at a
	 * tx_buf_sz slice of packet RAM. */
	memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
	buf_desc_ptr++;
	tx_pkt_start = TX_PACKET_RAM;
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
		buf_desc_ptr->desc_mode = AAL5;
		buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;
		buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;
		buf_desc_ptr++;
		tx_pkt_start += iadev->tx_buf_sz;
	}
	/* One DMA-mapped CPCS trailer per TX descriptor. */
	iadev->tx_buf = kmalloc_array(iadev->num_tx_desc,
				      sizeof(*iadev->tx_buf),
				      GFP_KERNEL);
        if (!iadev->tx_buf) {
            printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
	    goto err_free_dle;
        }
       	for (i= 0; i< iadev->num_tx_desc; i++)
       	{
	    struct cpcs_trailer *cpcs;

       	    cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
            if(!cpcs) {
		printk(KERN_ERR DEV_LABEL " couldn't get freepage\n");
		goto err_free_tx_bufs;
            }
	    iadev->tx_buf[i].cpcs = cpcs;
	    iadev->tx_buf[i].dma_addr = dma_map_single(&iadev->pci->dev,
						       cpcs,
						       sizeof(*cpcs),
						       DMA_TO_DEVICE);
        }
	iadev->desc_tbl = kmalloc_array(iadev->num_tx_desc,
					sizeof(*iadev->desc_tbl),
					GFP_KERNEL);
	if (!iadev->desc_tbl) {
		printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
		goto err_free_all_tx_bufs;
	}

	/* Communication Queues base address */
        i = TX_COMP_Q * iadev->memSize;
	writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);

	/* Transmit Complete Queue */
	writew(i, iadev->seg_reg+TCQ_ST_ADR);
	writew(i, iadev->seg_reg+TCQ_RD_PTR);
	writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR);
	iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
        writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
                                              iadev->seg_reg+TCQ_ED_ADR);
	/* Fill the TCQ with all the free descriptors. */
	tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);
	tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		*tcq_start = (u_short)i;
		tcq_start++;
	}

	/* Packet Ready Queue */
        i = PKT_RDY_Q * iadev->memSize;
	writew(i, iadev->seg_reg+PRQ_ST_ADR);
	writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
                                              iadev->seg_reg+PRQ_ED_ADR);
	writew(i, iadev->seg_reg+PRQ_RD_PTR);
	writew(i, iadev->seg_reg+PRQ_WR_PTR);

        /* Load local copy of PRQ and TCQ ptrs */
        iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
	iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
 	iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;

	iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
	iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
	iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;

	/* Just for safety initializing the queue to have desc 1 always */
	/* Fill the PRQ with all the free descriptors. */
	prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);
	prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		*prq_start = (u_short)0;	/* desc 1 in all entries */
		prq_start++;
	}
	/* CBR Table */
        IF_INIT(printk("Start CBR Init\n");)
#if 1  /* for 1K VC board, CBR_PTR_BASE is 0 */
        writew(0,iadev->seg_reg+CBR_PTR_BASE);
#else /* Charlie's logic is wrong ? */
        tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
        IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
        writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
#endif

        IF_INIT(printk("value in register = 0x%x\n",
                                   readw(iadev->seg_reg+CBR_PTR_BASE));)
        tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
        writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
        IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
                                        readw(iadev->seg_reg+CBR_TAB_BEG));)
        writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
        tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
        writew(tmp16, iadev->seg_reg+CBR_TAB_END);
        IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
               iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
        IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
          readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
          readw(iadev->seg_reg+CBR_TAB_END+1));)

        /* Initialize the CBR Scheduling Table */
        memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize,
                                                          0, iadev->num_vc*6);
        iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
        iadev->CbrEntryPt = 0;
        iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
        iadev->NumEnabledCBR = 0;

	/* UBR scheduling Table and wait queue */
	/* initialize all bytes of UBR scheduler table and wait queue to 0
		- SCHEDSZ is 1K (# of entries).
		- UBR Table size is 4K
		- UBR wait queue is 4K
	   since the table and wait queues are contiguous, all the bytes
	   can be initialized by one memset.
	*/

        /* Derive VC-table size select: vcsize_sel = log2(8192 / num_vc). */
        vcsize_sel = 0;
        i = 8*1024;
        while (i != iadev->num_vc) {
          i /= 2;
          vcsize_sel++;
        }

        i = MAIN_VC_TABLE * iadev->memSize;
        writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
        i =  EXT_VC_TABLE * iadev->memSize;
        writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
        i = UBR_SCHED_TABLE * iadev->memSize;
        writew((i & 0xffff) >> 11,  iadev->seg_reg+UBR_SBPTR_BASE);
        i = UBR_WAIT_Q * iadev->memSize;
        writew((i >> 7) & 0xffff,  iadev->seg_reg+UBRWQ_BASE);
 	memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
                                                       0, iadev->num_vc*8);
	/* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/
	/* initialize all bytes of ABR scheduler table and wait queue to 0
		- SCHEDSZ is 1K (# of entries).
		- ABR Table size is 2K
		- ABR wait queue is 2K
	   since the table and wait queues are contiguous, all the bytes
	   can be initialized by one memset.
	*/
        i = ABR_SCHED_TABLE * iadev->memSize;
        writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
        i = ABR_WAIT_Q * iadev->memSize;
        writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);

        i = ABR_SCHED_TABLE*iadev->memSize;
	memset((caddr_t)(iadev->seg_ram+i),  0, iadev->num_vc*4);
	vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
	evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
	/* Per-VC software state used by the rate-test logic in open_tx(). */
	iadev->testTable = kmalloc_array(iadev->num_vc,
					 sizeof(*iadev->testTable),
					 GFP_KERNEL);
        if (!iadev->testTable) {
           printk("Get freepage  failed\n");
	   goto err_free_desc_tbl;
        }
	for(i=0; i<iadev->num_vc; i++)
	{
		memset((caddr_t)vc, 0, sizeof(*vc));
		memset((caddr_t)evc, 0, sizeof(*evc));
                iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
						GFP_KERNEL);
		if (!iadev->testTable[i])
			goto err_free_test_tables;
              	iadev->testTable[i]->lastTime = 0;
 		iadev->testTable[i]->fract = 0;
                iadev->testTable[i]->vc_status = VC_UBR;
		vc++;
		evc++;
	}

	/* Other Initialization */

	/* Max Rate Register */
        if (iadev->phy_type & FE_25MBIT_PHY) {
	   writew(RATE25, iadev->seg_reg+MAXRATE);
	   writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
        }
        else {
	   writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
	   writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
        }
	/* Set Idle Header Registers to be sure */
	writew(0, iadev->seg_reg+IDLEHEADHI);
	writew(0, iadev->seg_reg+IDLEHEADLO);

	/* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
        writew(0xaa00, iadev->seg_reg+ABRUBR_ARB);

        iadev->close_pending = 0;
        init_waitqueue_head(&iadev->close_wait);
        init_waitqueue_head(&iadev->timeout_wait);
	skb_queue_head_init(&iadev->tx_dma_q);
	ia_init_rtn_q(&iadev->tx_return_q);

	/* RM Cell Protocol ID and Message Type */
	writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);
        skb_queue_head_init (&iadev->tx_backlog);

	/* Mode Register 1 */
	writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);

	/* Mode Register 0 */
	writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);

	/* Interrupt Status Register - read to clear */
	readw(iadev->seg_reg+SEG_INTR_STATUS_REG);

	/* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */
        writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
        writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
        iadev->tx_pkt_cnt = 0;
        iadev->rate_limit = iadev->LineRate / 3;

	return 0;

err_free_test_tables:
	while (--i >= 0)
		kfree(iadev->testTable[i]);
	kfree(iadev->testTable);
err_free_desc_tbl:
	kfree(iadev->desc_tbl);
err_free_all_tx_bufs:
	i = iadev->num_tx_desc;
err_free_tx_bufs:
	while (--i >= 0) {
		struct cpcs_trailer_desc *desc = iadev->tx_buf + i;

		dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
				 sizeof(*desc->cpcs), DMA_TO_DEVICE);
		kfree(desc->cpcs);
	}
	kfree(iadev->tx_buf);
err_free_dle:
	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
			  iadev->tx_dle_dma);
err_out:
	return -ENOMEM;
}
2222 
/*
 * Top-level (shared) interrupt handler.  Loops while any of the low 7 bits
 * of the bus status register are set, dispatching to the RX/TX and DLE
 * sub-handlers, and returns whether any work was done.
 */
static irqreturn_t ia_int(int irq, void *dev_id)
{
   struct atm_dev *dev;
   IADEV *iadev;
   unsigned int status;
   int handled = 0;

   dev = dev_id;
   iadev = INPH_IA_DEV(dev);
   /* Re-read the status register until all interrupt sources are clear. */
   while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))
   {
	handled = 1;
        IF_EVENT(printk("ia_int: status = 0x%x\n", status);)
	if (status & STAT_REASSINT)
	{
	   /* Reassembly (receive) interrupt. */
	   IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);)
	   rx_intr(dev);
	}
	if (status & STAT_DLERINT)
	{
	   /* Clear this bit by writing a 1 to it. */
	   writel(STAT_DLERINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
	   rx_dle_intr(dev);
	}
	if (status & STAT_SEGINT)
	{
	   /* Segmentation (transmit) interrupt. */
           IF_EVENT(printk("IA: tx_intr \n");)
	   tx_intr(dev);
	}
	if (status & STAT_DLETINT)
	{
	   /* TX DMA-list completion: ack (write-1-to-clear), then service. */
	   writel(STAT_DLETINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
	   tx_dle_intr(dev);
	}
	if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))
	{
	   /* Only front-end interrupts get handling; ERR/MARK are ignored. */
           if (status & STAT_FEINT)
               ia_frontend_intr(iadev);
	}
   }
   return IRQ_RETVAL(handled);
}
2267 
2268 
2269 
2270 /*----------------------------- entries --------------------------------*/
2271 static int get_esi(struct atm_dev *dev)
2272 {
2273 	IADEV *iadev;
2274 	int i;
2275 	u32 mac1;
2276 	u16 mac2;
2277 
2278 	iadev = INPH_IA_DEV(dev);
2279 	mac1 = cpu_to_be32(le32_to_cpu(readl(
2280 				iadev->reg+IPHASE5575_MAC1)));
2281 	mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));
2282 	IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)
2283 	for (i=0; i<MAC1_LEN; i++)
2284 		dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));
2285 
2286 	for (i=0; i<MAC2_LEN; i++)
2287 		dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));
2288 	return 0;
2289 }
2290 
2291 static int reset_sar(struct atm_dev *dev)
2292 {
2293 	IADEV *iadev;
2294 	int i, error;
2295 	unsigned int pci[64];
2296 
2297 	iadev = INPH_IA_DEV(dev);
2298 	for (i = 0; i < 64; i++) {
2299 		error = pci_read_config_dword(iadev->pci, i * 4, &pci[i]);
2300 		if (error != PCIBIOS_SUCCESSFUL)
2301 			return error;
2302 	}
2303 	writel(0, iadev->reg+IPHASE5575_EXT_RESET);
2304 	for (i = 0; i < 64; i++) {
2305 		error = pci_write_config_dword(iadev->pci, i * 4, pci[i]);
2306 		if (error != PCIBIOS_SUCCESSFUL)
2307 			return error;
2308 	}
2309 	udelay(5);
2310 	return 0;
2311 }
2312 
2313 
/*
 * Probe-time initialization: read PCI configuration, size the board from its
 * BAR length, ioremap the registers/RAM, carve up the register windows,
 * fetch the ESI and reset the SAR.  Returns 0 on success, a negative errno,
 * or 1 when the SAR reset fails (caller treats non-zero as failure).
 */
static int ia_init(struct atm_dev *dev)
{
	IADEV *iadev;
	unsigned long real_base;
	void __iomem *base;
	unsigned short command;
	int error, i;

	/* The device has been identified and registered. Now we read
	   necessary configuration info like memory base address,
	   interrupt number etc */

	IF_INIT(printk(">ia_init\n");)
	dev->ci_range.vpi_bits = 0;
	dev->ci_range.vci_bits = NR_VCI_LD;

	iadev = INPH_IA_DEV(dev);
	real_base = pci_resource_start (iadev->pci, 0);
	iadev->irq = iadev->pci->irq;

	error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
	if (error) {
		printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",
				dev->number,error);
		return -EINVAL;
	}
	IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
			dev->number, iadev->pci->revision, real_base, iadev->irq);)

	/* find mapping size of board */

	iadev->pci_map_size = pci_resource_len(iadev->pci, 0);

	/* BAR size distinguishes the 4K-VC board from the 1K-VC board. */
        if (iadev->pci_map_size == 0x100000){
          iadev->num_vc = 4096;
	  dev->ci_range.vci_bits = NR_VCI_4K_LD;
          iadev->memSize = 4;
        }
        else if (iadev->pci_map_size == 0x40000) {
          iadev->num_vc = 1024;
          iadev->memSize = 1;
        }
        else {
           printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
           return -EINVAL;
        }
	IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)

	/* enable bus mastering */
	pci_set_master(iadev->pci);

	/*
	 * Delay at least 1us before doing any mem accesses (how 'bout 10?)
	 */
	udelay(10);

	/* mapping the physical address to a virtual address in address space */
	base = ioremap(real_base,iadev->pci_map_size);  /* ioremap is not resolved ??? */

	if (!base)
	{
		printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
			    dev->number);
		return -ENOMEM;
	}
	IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
			dev->number, iadev->pci->revision, base, iadev->irq);)

	/* filling the iphase dev structure */
	iadev->mem = iadev->pci_map_size /2;
	iadev->real_base = real_base;
	iadev->base = base;

	/* Carve the single mapping into its fixed register/RAM windows. */
	/* Bus Interface Control Registers */
	iadev->reg = base + REG_BASE;
	/* Segmentation Control Registers */
	iadev->seg_reg = base + SEG_BASE;
	/* Reassembly Control Registers */
	iadev->reass_reg = base + REASS_BASE;
	/* Front end/ DMA control registers */
	iadev->phy = base + PHY_BASE;
	iadev->dma = base + PHY_BASE;
	/* RAM - Segmentation RAm and Reassembly RAM */
	iadev->ram = base + ACTUAL_RAM_BASE;
	iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;
	iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;

	/* lets print out the above */
	IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n",
          iadev->reg,iadev->seg_reg,iadev->reass_reg,
          iadev->phy, iadev->ram, iadev->seg_ram,
          iadev->reass_ram);)

	/* lets try reading the MAC address */
	error = get_esi(dev);
	if (error) {
	  iounmap(iadev->base);
	  return error;
	}
        printk("IA: ");
	for (i=0; i < ESI_LEN; i++)
                printk("%s%02X",i ? "-" : "",dev->esi[i]);
        printk("\n");

        /* reset SAR */
        if (reset_sar(dev)) {
	   iounmap(iadev->base);
           printk("IA: reset SAR fail, please try again\n");
           return 1;
        }
	return 0;
}
2426 
2427 static void ia_update_stats(IADEV *iadev) {
2428     if (!iadev->carrier_detect)
2429         return;
2430     iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2431     iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2432     iadev->drop_rxpkt +=  readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2433     iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2434     iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2435     iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2436     return;
2437 }
2438 
2439 static void ia_led_timer(struct timer_list *unused) {
2440  	unsigned long flags;
2441   	static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2442         u_char i;
2443         static u32 ctrl_reg;
2444         for (i = 0; i < iadev_count; i++) {
2445            if (ia_dev[i]) {
2446 	      ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2447 	      if (blinking[i] == 0) {
2448 		 blinking[i]++;
2449                  ctrl_reg &= (~CTRL_LED);
2450                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2451                  ia_update_stats(ia_dev[i]);
2452               }
2453               else {
2454 		 blinking[i] = 0;
2455 		 ctrl_reg |= CTRL_LED;
2456                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2457                  spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2458                  if (ia_dev[i]->close_pending)
2459                     wake_up(&ia_dev[i]->close_wait);
2460                  ia_tx_poll(ia_dev[i]);
2461                  spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2462               }
2463            }
2464         }
2465 	mod_timer(&ia_timer, jiffies + HZ / 4);
2466  	return;
2467 }
2468 
2469 static void ia_phy_put(struct atm_dev *dev, unsigned char value,
2470 	unsigned long addr)
2471 {
2472 	writel(value, INPH_IA_DEV(dev)->phy+addr);
2473 }
2474 
2475 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)
2476 {
2477 	return readl(INPH_IA_DEV(dev)->phy+addr);
2478 }
2479 
2480 static void ia_free_tx(IADEV *iadev)
2481 {
2482 	int i;
2483 
2484 	kfree(iadev->desc_tbl);
2485 	for (i = 0; i < iadev->num_vc; i++)
2486 		kfree(iadev->testTable[i]);
2487 	kfree(iadev->testTable);
2488 	for (i = 0; i < iadev->num_tx_desc; i++) {
2489 		struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2490 
2491 		dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
2492 				 sizeof(*desc->cpcs), DMA_TO_DEVICE);
2493 		kfree(desc->cpcs);
2494 	}
2495 	kfree(iadev->tx_buf);
2496 	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2497 			  iadev->tx_dle_dma);
2498 }
2499 
2500 static void ia_free_rx(IADEV *iadev)
2501 {
2502 	kfree(iadev->rx_open);
2503 	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2504 			  iadev->rx_dle_dma);
2505 }
2506 
/*
 * ia_start - bring the adapter online.
 *
 * Requests the (shared) interrupt line, enables PCI memory/bus-master
 * access, programs the bus interface control register, sizes the hardware
 * via ia_hw_type(), initializes the TX and RX engines and finally brings
 * up the front-end PHY appropriate for the detected phy_type.
 *
 * Returns 0 on success or a negative errno; on failure everything that
 * was set up so far is unwound via the goto chain at the bottom.
 */
static int ia_start(struct atm_dev *dev)
{
	IADEV *iadev;
	int error;
	unsigned char phy;
	u32 ctrl_reg;
	IF_EVENT(printk(">ia_start\n");)
	iadev = INPH_IA_DEV(dev);
        if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
                printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
                    dev->number, iadev->irq);
		error = -EAGAIN;
		goto err_out;
        }
        /* @@@ should release IRQ on error */
	/* enabling memory + master */
        if ((error = pci_write_config_word(iadev->pci,
				PCI_COMMAND,
				PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))
	{
                printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"
                    "master (0x%x)\n",dev->number, error);
		error = -EIO;
		goto err_free_irq;
        }
	udelay(10);

	/* Maybe we should reset the front end, initialize Bus Interface Control
		Registers and see. */

	IF_INIT(printk("Bus ctrl reg: %08x\n",
                            readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
	/* Preserve only LED and front-end-reset bits; set the burst-size
	 * enables and unmask error/DLE/seg/reass/front-end interrupts. */
	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))
			| CTRL_B8
			| CTRL_B16
			| CTRL_B32
			| CTRL_B48
			| CTRL_B64
			| CTRL_B128
			| CTRL_ERRMASK
			| CTRL_DLETMASK		/* shud be removed l8r */
			| CTRL_DLERMASK
			| CTRL_SEGMASK
			| CTRL_REASSMASK
			| CTRL_FEMASK
			| CTRL_CSPREEMPT;

       writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);

	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
                           readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));
	   printk("Bus status reg after init: %08x\n",
                            readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)

        ia_hw_type(iadev);
	error = tx_init(dev);
	if (error)
		goto err_free_irq;
	error = rx_init(dev);
	if (error)
		goto err_free_tx;

	/* Release the front end from reset now that seg/reass are set up. */
	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
       	writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
                               readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
        phy = 0; /* resolve compiler complaint */
        IF_INIT (
	if ((phy=ia_phy_get(dev,0)) == 0x30)
		printk("IA: pm5346,rev.%d\n",phy&0x0f);
	else
		printk("IA: utopia,rev.%0x\n",phy);)

	/* PHY bring-up depends on the front-end variant detected earlier. */
	if (iadev->phy_type &  FE_25MBIT_PHY)
           ia_mb25_init(iadev);
	else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
           ia_suni_pm7345_init(iadev);
	else {
		error = suni_init(dev);
		if (error)
			goto err_free_rx;
		if (dev->phy->start) {
			error = dev->phy->start(dev);
			if (error)
				goto err_free_rx;
		}
		/* Get iadev->carrier_detect status */
		ia_frontend_intr(iadev);
	}
	return 0;

err_free_rx:
	ia_free_rx(iadev);
err_free_tx:
	ia_free_tx(iadev);
err_free_irq:
	free_irq(iadev->irq, dev);
err_out:
	return error;
}
2608 
/*
 * ia_close - tear down one VCC.
 *
 * TX side: sleeps ~500 ms, flushes this VCC's packets out of the device
 * backlog (other VCCs' packets are preserved via a temporary queue),
 * waits - bounded by a timeout derived from the VCC's PCR - for the
 * outstanding descriptor count to drain, then resets the rate-table
 * entry and returns any ABR/CBR bandwidth bookkeeping (sum_mcr).
 *
 * RX side: marks the reassembly and RX VC table entries invalid,
 * restores ABR table defaults for the VCI, drains pending receive DLEs
 * and clears the rx_open[] slot.  Finally the per-VCC state is freed.
 */
static void ia_close(struct atm_vcc *vcc)
{
	DEFINE_WAIT(wait);
        u16 *vc_table;
        IADEV *iadev;
        struct ia_vcc *ia_vcc;
        struct sk_buff *skb = NULL;
        struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
        unsigned long closetime, flags;

        iadev = INPH_IA_DEV(vcc->dev);
        ia_vcc = INPH_IA_VCC(vcc);
	if (!ia_vcc) return;

        IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n",
                                              ia_vcc->vc_desc_cnt,vcc->vci);)
	clear_bit(ATM_VF_READY,&vcc->flags);
        skb_queue_head_init (&tmp_tx_backlog);
        skb_queue_head_init (&tmp_vcc_backlog);
        if (vcc->qos.txtp.traffic_class != ATM_NONE) {
           iadev->close_pending++;
	   /* give in-flight transmissions a moment before flushing */
	   prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
	   schedule_timeout(msecs_to_jiffies(500));
	   finish_wait(&iadev->timeout_wait, &wait);
           spin_lock_irqsave(&iadev->tx_lock, flags);
	   /* drop this VCC's packets; park everyone else's on tmp_tx_backlog */
           while((skb = skb_dequeue(&iadev->tx_backlog))) {
              if (ATM_SKB(skb)->vcc == vcc){
                 if (vcc->pop) vcc->pop(vcc, skb);
                 else dev_kfree_skb_any(skb);
              }
              else
                 skb_queue_tail(&tmp_tx_backlog, skb);
           }
	   /* restore the surviving packets to the device backlog */
           while((skb = skb_dequeue(&tmp_tx_backlog)))
             skb_queue_tail(&iadev->tx_backlog, skb);
           IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);)
	   /* drain timeout scales inversely with the VCC's peak cell rate */
           closetime = 300000 / ia_vcc->pcr;
           if (closetime == 0)
              closetime = 1;
           spin_unlock_irqrestore(&iadev->tx_lock, flags);
           wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
           spin_lock_irqsave(&iadev->tx_lock, flags);
           iadev->close_pending--;
	   /* reset the shaping/test table entry back to UBR defaults */
           iadev->testTable[vcc->vci]->lastTime = 0;
           iadev->testTable[vcc->vci]->fract = 0;
           iadev->testTable[vcc->vci]->vc_status = VC_UBR;
           if (vcc->qos.txtp.traffic_class == ATM_ABR) {
              if (vcc->qos.txtp.min_pcr > 0)
                 iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
           }
           if (vcc->qos.txtp.traffic_class == ATM_CBR) {
              ia_vcc = INPH_IA_VCC(vcc);
              iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
              ia_cbrVc_close (vcc);
           }
           spin_unlock_irqrestore(&iadev->tx_lock, flags);
        }

        if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
           // reset reass table
           vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
           vc_table += vcc->vci;
           *vc_table = NO_AAL5_PKT;
           // reset vc table
           vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
           vc_table += vcc->vci;
           *vc_table = (vcc->vci << 6) | 15;
           if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
              struct abr_vc_table __iomem *abr_vc_table =
                                (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
              abr_vc_table +=  vcc->vci;
              abr_vc_table->rdf = 0x0003;
              abr_vc_table->air = 0x5eb1;
           }
           // Drain the packets
           rx_dle_intr(vcc->dev);
           iadev->rx_open[vcc->vci] = NULL;
        }
	kfree(INPH_IA_VCC(vcc));
        ia_vcc = NULL;
        vcc->dev_data = NULL;
        clear_bit(ATM_VF_ADDR,&vcc->flags);
        return;
}
2693 
2694 static int ia_open(struct atm_vcc *vcc)
2695 {
2696 	struct ia_vcc *ia_vcc;
2697 	int error;
2698 	if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
2699 	{
2700 		IF_EVENT(printk("ia: not partially allocated resources\n");)
2701 		vcc->dev_data = NULL;
2702 	}
2703 	if (vcc->vci != ATM_VPI_UNSPEC && vcc->vpi != ATM_VCI_UNSPEC)
2704 	{
2705 		IF_EVENT(printk("iphase open: unspec part\n");)
2706 		set_bit(ATM_VF_ADDR,&vcc->flags);
2707 	}
2708 	if (vcc->qos.aal != ATM_AAL5)
2709 		return -EINVAL;
2710 	IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n",
2711                                  vcc->dev->number, vcc->vpi, vcc->vci);)
2712 
2713 	/* Device dependent initialization */
2714 	ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);
2715 	if (!ia_vcc) return -ENOMEM;
2716 	vcc->dev_data = ia_vcc;
2717 
2718 	if ((error = open_rx(vcc)))
2719 	{
2720 		IF_EVENT(printk("iadev: error in open_rx, closing\n");)
2721 		ia_close(vcc);
2722 		return error;
2723 	}
2724 
2725 	if ((error = open_tx(vcc)))
2726 	{
2727 		IF_EVENT(printk("iadev: error in open_tx, closing\n");)
2728 		ia_close(vcc);
2729 		return error;
2730 	}
2731 
2732 	set_bit(ATM_VF_READY,&vcc->flags);
2733 
2734 #if 0
2735         {
2736            static u8 first = 1;
2737            if (first) {
2738               ia_timer.expires = jiffies + 3*HZ;
2739               add_timer(&ia_timer);
2740               first = 0;
2741            }
2742         }
2743 #endif
2744 	IF_EVENT(printk("ia open returning\n");)
2745 	return 0;
2746 }
2747 
/*
 * ia_change_qos - QoS renegotiation hook.  Not implemented: the request
 * is accepted without touching any hardware or per-VCC state.
 */
static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
{
	IF_EVENT(printk(">ia_change_qos\n");)
	return 0;
}
2753 
2754 static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2755 {
2756    IA_CMDBUF ia_cmds;
2757    IADEV *iadev;
2758    int i, board;
2759    u16 __user *tmps;
2760    IF_EVENT(printk(">ia_ioctl\n");)
2761    if (cmd != IA_CMD) {
2762       if (!dev->phy->ioctl) return -EINVAL;
2763       return dev->phy->ioctl(dev,cmd,arg);
2764    }
2765    if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
2766    board = ia_cmds.status;
2767 
2768 	if ((board < 0) || (board > iadev_count))
2769 		board = 0;
2770 	board = array_index_nospec(board, iadev_count + 1);
2771 
2772    iadev = ia_dev[board];
2773    switch (ia_cmds.cmd) {
2774    case MEMDUMP:
2775    {
2776 	switch (ia_cmds.sub_cmd) {
2777           case MEMDUMP_SEGREG:
2778 	     if (!capable(CAP_NET_ADMIN)) return -EPERM;
2779              tmps = (u16 __user *)ia_cmds.buf;
2780              for(i=0; i<0x80; i+=2, tmps++)
2781                 if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2782              ia_cmds.status = 0;
2783              ia_cmds.len = 0x80;
2784              break;
2785           case MEMDUMP_REASSREG:
2786 	     if (!capable(CAP_NET_ADMIN)) return -EPERM;
2787              tmps = (u16 __user *)ia_cmds.buf;
2788              for(i=0; i<0x80; i+=2, tmps++)
2789                 if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2790              ia_cmds.status = 0;
2791              ia_cmds.len = 0x80;
2792              break;
2793           case MEMDUMP_FFL:
2794           {
2795              ia_regs_t       *regs_local;
2796              ffredn_t        *ffL;
2797              rfredn_t        *rfL;
2798 
2799 	     if (!capable(CAP_NET_ADMIN)) return -EPERM;
2800 	     regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2801 	     if (!regs_local) return -ENOMEM;
2802 	     ffL = &regs_local->ffredn;
2803 	     rfL = &regs_local->rfredn;
2804              /* Copy real rfred registers into the local copy */
2805  	     for (i=0; i<(sizeof (rfredn_t))/4; i++)
2806                 ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2807              	/* Copy real ffred registers into the local copy */
2808 	     for (i=0; i<(sizeof (ffredn_t))/4; i++)
2809                 ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2810 
2811              if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2812                 kfree(regs_local);
2813                 return -EFAULT;
2814              }
2815              kfree(regs_local);
2816              printk("Board %d registers dumped\n", board);
2817              ia_cmds.status = 0;
2818 	 }
2819     	     break;
2820          case READ_REG:
2821          {
2822 	     if (!capable(CAP_NET_ADMIN)) return -EPERM;
2823              desc_dbg(iadev);
2824              ia_cmds.status = 0;
2825          }
2826              break;
2827          case 0x6:
2828          {
2829              ia_cmds.status = 0;
2830              printk("skb = 0x%p\n", skb_peek(&iadev->tx_backlog));
2831              printk("rtn_q: 0x%p\n",ia_deque_rtn_q(&iadev->tx_return_q));
2832          }
2833              break;
2834          case 0x8:
2835          {
2836              struct k_sonet_stats *stats;
2837              stats = &PRIV(_ia_dev[board])->sonet_stats;
2838              printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2839              printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
2840              printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
2841              printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
2842              printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
2843              printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
2844              printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2845              printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
2846              printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
2847          }
2848             ia_cmds.status = 0;
2849             break;
2850          case 0x9:
2851 	    if (!capable(CAP_NET_ADMIN)) return -EPERM;
2852             for (i = 1; i <= iadev->num_rx_desc; i++)
2853                free_desc(_ia_dev[board], i);
2854             writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD),
2855                                             iadev->reass_reg+REASS_MASK_REG);
2856             iadev->rxing = 1;
2857 
2858             ia_cmds.status = 0;
2859             break;
2860 
2861          case 0xb:
2862 	    if (!capable(CAP_NET_ADMIN)) return -EPERM;
2863             ia_frontend_intr(iadev);
2864             break;
2865          case 0xa:
2866 	    if (!capable(CAP_NET_ADMIN)) return -EPERM;
2867          {
2868              ia_cmds.status = 0;
2869              IADebugFlag = ia_cmds.maddr;
2870              printk("New debug option loaded\n");
2871          }
2872              break;
2873          default:
2874              ia_cmds.status = 0;
2875              break;
2876       }
2877    }
2878       break;
2879    default:
2880       break;
2881 
2882    }
2883    return 0;
2884 }
2885 
/*
 * ia_pkt_tx - push one skb into the transmit engine.
 *
 * Claims a free TX descriptor from the TCQ, publishes the descriptor in
 * the packet-ready queue, fills in the on-chip buffer descriptor, and
 * builds two DLEs: one DMAing the packet payload and one DMAing the
 * CPCS trailer (which also carries the updated PRQ write pointer).
 *
 * Returns 0 when the skb has been consumed (transmitted or dropped) and
 * 1 when no descriptor is available, in which case the caller keeps the
 * skb (ia_send() backlogs it).  Caller holds iadev->tx_lock.
 */
static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
        IADEV *iadev;
        struct dle *wr_ptr;
        struct tx_buf_desc __iomem *buf_desc_ptr;
        int desc;
        int comp_code;
        int total_len;
        struct cpcs_trailer *trailer;
        struct ia_vcc *iavcc;

        iadev = INPH_IA_DEV(vcc->dev);
        iavcc = INPH_IA_VCC(vcc);
	/* VC already closed for transmit: drop silently (still "success") */
        if (!iavcc->txing) {
           printk("discard packet on closed VC\n");
           if (vcc->pop)
		vcc->pop(vcc, skb);
           else
		dev_kfree_skb_any(skb);
	   return 0;
        }

        if (skb->len > iadev->tx_buf_sz - 8) {
           printk("Transmit size over tx buffer size\n");
           if (vcc->pop)
                 vcc->pop(vcc, skb);
           else
                 dev_kfree_skb_any(skb);
          return 0;
        }
	/* payload must be 32-bit aligned for the DMA engine */
        if ((unsigned long)skb->data & 3) {
           printk("Misaligned SKB\n");
           if (vcc->pop)
                 vcc->pop(vcc, skb);
           else
                 dev_kfree_skb_any(skb);
           return 0;
        }
	/* Get a descriptor number from our free descriptor queue
	   We get the descr number from the TCQ now, since I am using
	   the TCQ as a free buffer queue. Initially TCQ will be
	   initialized with all the descriptors and is hence, full.
	*/
	desc = get_desc (iadev, iavcc);
	if (desc == 0xffff)
	    return 1;
	/* top 3 bits carry the completion code, low 13 the descriptor */
	comp_code = desc >> 13;
	desc &= 0x1fff;

	if ((desc == 0) || (desc > iadev->num_tx_desc))
	{
		IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
                atomic_inc(&vcc->stats->tx);
		if (vcc->pop)
		    vcc->pop(vcc, skb);
		else
		    dev_kfree_skb_any(skb);
		return 0;   /* return SUCCESS */
	}

	if (comp_code)
	{
	    IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n",
                                                            desc, comp_code);)
	}

        /* remember the desc and vcc mapping */
        iavcc->vc_desc_cnt++;
        iadev->desc_tbl[desc-1].iavcc = iavcc;
        iadev->desc_tbl[desc-1].txskb = skb;
        IA_SKB_STATE(skb) = 0;

	/* advance the TCQ read pointer (entries are u16), wrapping at the end */
        iadev->ffL.tcq_rd += 2;
        if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
	  	iadev->ffL.tcq_rd  = iadev->ffL.tcq_st;
	writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);

	/* Put the descriptor number in the packet ready queue
		and put the updated write pointer in the DLE field
	*/
	*(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc;

 	iadev->ffL.prq_wr += 2;
        if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
                iadev->ffL.prq_wr = iadev->ffL.prq_st;

	/* Figure out the exact length of the packet and padding required to
           make it  aligned on a 48 byte boundary.  */
	total_len = skb->len + sizeof(struct cpcs_trailer);
	total_len = ((total_len + 47) / 48) * 48;
	IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)

	/* Put the packet in a tx buffer */
	trailer = iadev->tx_buf[desc-1].cpcs;
        IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
                  skb, skb->data, skb->len, desc);)
	trailer->control = 0;
        /*big endian*/
	trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
	trailer->crc32 = 0;	/* not needed - dummy bytes */

	/* Display the packet */
	IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n",
                                                        skb->len, tcnter++);
        xdump(skb->data, skb->len, "TX: ");
        printk("\n");)

	/* Build the buffer descriptor */
	buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
	buf_desc_ptr += desc;	/* points to the corresponding entry */
	buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;
	/* Huh ? p.115 of users guide describes this as a read-only register */
        writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
	buf_desc_ptr->vc_index = vcc->vci;
	buf_desc_ptr->bytes = total_len;

        if (vcc->qos.txtp.traffic_class == ATM_ABR)
	   clear_lockup (vcc, iadev);

	/* Build the DLE structure */
	wr_ptr = iadev->tx_dle_q.write;
	memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));
	/* first DLE: stream the packet payload into the adapter buffer */
	wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
					      skb->len, DMA_TO_DEVICE);
	wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
                                                  buf_desc_ptr->buf_start_lo;
	/* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
	wr_ptr->bytes = skb->len;

        /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
        if ((wr_ptr->bytes >> 2) == 0xb)
           wr_ptr->bytes = 0x30;

	wr_ptr->mode = TX_DLE_PSI;
	wr_ptr->prq_wr_ptr_data = 0;

	/* end is not to be used for the DLE q */
	if (++wr_ptr == iadev->tx_dle_q.end)
		wr_ptr = iadev->tx_dle_q.start;

        /* Build trailer dle */
	/* second DLE: append the CPCS trailer at the 48-byte-aligned end */
        wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
        wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) |
          buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);

        wr_ptr->bytes = sizeof(struct cpcs_trailer);
        wr_ptr->mode = DMA_INT_ENABLE;
	/* this DLE also hands the updated PRQ write pointer to the chip */
        wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;

        /* end is not to be used for the DLE q */
        if (++wr_ptr == iadev->tx_dle_q.end)
                wr_ptr = iadev->tx_dle_q.start;

	iadev->tx_dle_q.write = wr_ptr;
        ATM_DESC(skb) = vcc->vci;
        skb_queue_tail(&iadev->tx_dma_q, skb);

        atomic_inc(&vcc->stats->tx);
        iadev->tx_pkt_cnt++;
	/* Increment transaction counter */
	writel(2, iadev->dma+IPHASE5575_TX_COUNTER);

#if 0
        /* add flow control logic */
        if (atomic_read(&vcc->stats->tx) % 20 == 0) {
          if (iavcc->vc_desc_cnt > 10) {
             vcc->tx_quota =  vcc->tx_quota * 3 / 4;
            printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
              iavcc->flow_inc = -1;
              iavcc->saved_tx_quota = vcc->tx_quota;
           } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
             // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
             printk("Tx2:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
              iavcc->flow_inc = 0;
           }
        }
#endif
	IF_TX(printk("ia send done\n");)
	return 0;
}
3065 
3066 static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3067 {
3068         IADEV *iadev;
3069         unsigned long flags;
3070 
3071         iadev = INPH_IA_DEV(vcc->dev);
3072         if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3073         {
3074             if (!skb)
3075                 printk(KERN_CRIT "null skb in ia_send\n");
3076             else dev_kfree_skb_any(skb);
3077             return -EINVAL;
3078         }
3079         spin_lock_irqsave(&iadev->tx_lock, flags);
3080         if (!test_bit(ATM_VF_READY,&vcc->flags)){
3081             dev_kfree_skb_any(skb);
3082             spin_unlock_irqrestore(&iadev->tx_lock, flags);
3083             return -EINVAL;
3084         }
3085         ATM_SKB(skb)->vcc = vcc;
3086 
3087         if (skb_peek(&iadev->tx_backlog)) {
3088            skb_queue_tail(&iadev->tx_backlog, skb);
3089         }
3090         else {
3091            if (ia_pkt_tx (vcc, skb)) {
3092               skb_queue_tail(&iadev->tx_backlog, skb);
3093            }
3094         }
3095         spin_unlock_irqrestore(&iadev->tx_lock, flags);
3096         return 0;
3097 
3098 }
3099 
3100 static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3101 {
3102   int   left = *pos, n;
3103   char  *tmpPtr;
3104   IADEV *iadev = INPH_IA_DEV(dev);
3105   if(!left--) {
3106      if (iadev->phy_type == FE_25MBIT_PHY) {
3107        n = sprintf(page, "  Board Type         :  Iphase5525-1KVC-128K\n");
3108        return n;
3109      }
3110      if (iadev->phy_type == FE_DS3_PHY)
3111         n = sprintf(page, "  Board Type         :  Iphase-ATM-DS3");
3112      else if (iadev->phy_type == FE_E3_PHY)
3113         n = sprintf(page, "  Board Type         :  Iphase-ATM-E3");
3114      else if (iadev->phy_type == FE_UTP_OPTION)
3115          n = sprintf(page, "  Board Type         :  Iphase-ATM-UTP155");
3116      else
3117         n = sprintf(page, "  Board Type         :  Iphase-ATM-OC3");
3118      tmpPtr = page + n;
3119      if (iadev->pci_map_size == 0x40000)
3120         n += sprintf(tmpPtr, "-1KVC-");
3121      else
3122         n += sprintf(tmpPtr, "-4KVC-");
3123      tmpPtr = page + n;
3124      if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3125         n += sprintf(tmpPtr, "1M  \n");
3126      else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3127         n += sprintf(tmpPtr, "512K\n");
3128      else
3129        n += sprintf(tmpPtr, "128K\n");
3130      return n;
3131   }
3132   if (!left) {
3133      return  sprintf(page, "  Number of Tx Buffer:  %u\n"
3134                            "  Size of Tx Buffer  :  %u\n"
3135                            "  Number of Rx Buffer:  %u\n"
3136                            "  Size of Rx Buffer  :  %u\n"
3137                            "  Packets Received   :  %u\n"
3138                            "  Packets Transmitted:  %u\n"
3139                            "  Cells Received     :  %u\n"
3140                            "  Cells Transmitted  :  %u\n"
3141                            "  Board Dropped Cells:  %u\n"
3142                            "  Board Dropped Pkts :  %u\n",
3143                            iadev->num_tx_desc,  iadev->tx_buf_sz,
3144                            iadev->num_rx_desc,  iadev->rx_buf_sz,
3145                            iadev->rx_pkt_cnt,   iadev->tx_pkt_cnt,
3146                            iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3147                            iadev->drop_rxcell, iadev->drop_rxpkt);
3148   }
3149   return 0;
3150 }
3151 
/* atmdev_ops vector handed to atm_dev_register() for this adapter family. */
static const struct atmdev_ops ops = {
	.open		= ia_open,
	.close		= ia_close,
	.ioctl		= ia_ioctl,
	.send		= ia_send,
	.phy_put	= ia_phy_put,
	.phy_get	= ia_phy_get,
	.change_qos	= ia_change_qos,
	.proc_read	= ia_proc_read,
	.owner		= THIS_MODULE,
};
3163 
/*
 * ia_init_one - PCI probe routine.
 *
 * Allocates the per-board IADEV, enables the PCI device, registers the
 * ATM device, records the board in the driver-global ia_dev[]/_ia_dev[]
 * tables, then brings the hardware up via ia_init() + ia_start().  On
 * failure everything registered so far is unwound in reverse order
 * through the goto chain.  Returns 0 on success or a negative errno.
 */
static int ia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct atm_dev *dev;
	IADEV *iadev;
	int ret;

	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
	if (!iadev) {
		ret = -ENOMEM;
		goto err_out;
	}

	iadev->pci = pdev;

	IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
		pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
	if (pci_enable_device(pdev)) {
		ret = -ENODEV;
		goto err_out_free_iadev;
	}
	dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL);
	if (!dev) {
		ret = -ENOMEM;
		goto err_out_disable_dev;
	}
	dev->dev_data = iadev;
	IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
	IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
		iadev->LineRate);)

	pci_set_drvdata(pdev, dev);

	/* record the board in the global tables before bringing it up;
	 * the slot is rolled back below if init/start fails */
	ia_dev[iadev_count] = iadev;
	_ia_dev[iadev_count] = dev;
	iadev_count++;
	if (ia_init(dev) || ia_start(dev)) {
		IF_INIT(printk("IA register failed!\n");)
		iadev_count--;
		ia_dev[iadev_count] = NULL;
		_ia_dev[iadev_count] = NULL;
		ret = -EINVAL;
		goto err_out_deregister_dev;
	}
	IF_EVENT(printk("iadev_count = %d\n", iadev_count);)

	/* link the device into the driver's singly-linked board list */
	iadev->next_board = ia_boards;
	ia_boards = dev;

	return 0;

err_out_deregister_dev:
	atm_dev_deregister(dev);
err_out_disable_dev:
	pci_disable_device(pdev);
err_out_free_iadev:
	kfree(iadev);
err_out:
	return ret;
}
3223 
/*
 * ia_remove_one - PCI remove routine; undoes ia_init_one()/ia_start().
 *
 * Masks the loss-of-signal PHY interrupt, stops the PHY, releases the
 * IRQ, removes the board from the global tables, deregisters the ATM
 * device and frees all remaining resources.
 *
 * NOTE(review): the SUNI_RSOP_CIE access below is issued unconditionally,
 * even for phy_type variants set up via ia_mb25_init()/ia_suni_pm7345_init()
 * - confirm this is harmless on those front ends.
 */
static void ia_remove_one(struct pci_dev *pdev)
{
	struct atm_dev *dev = pci_get_drvdata(pdev);
	IADEV *iadev = INPH_IA_DEV(dev);

	/* Disable phy interrupts */
	ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
				   SUNI_RSOP_CIE);
	udelay(1);

	if (dev->phy && dev->phy->stop)
		dev->phy->stop(dev);

	/* De-register device */
      	free_irq(iadev->irq, dev);
	iadev_count--;
	ia_dev[iadev_count] = NULL;
	_ia_dev[iadev_count] = NULL;
	IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
	atm_dev_deregister(dev);

      	iounmap(iadev->base);
	pci_disable_device(pdev);

	ia_free_rx(iadev);
	ia_free_tx(iadev);

      	kfree(iadev);
}
3253 
/* PCI device IDs handled by this driver (Interphase vendor, two variants). */
static const struct pci_device_id ia_pci_tbl[] = {
	{ PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3260 
/* PCI driver glue: binds ia_init_one()/ia_remove_one() to the ID table. */
static struct pci_driver ia_driver = {
	.name =         DEV_LABEL,
	.id_table =     ia_pci_tbl,
	.probe =        ia_init_one,
	.remove =       ia_remove_one,
};
3267 
3268 static int __init ia_module_init(void)
3269 {
3270 	int ret;
3271 
3272 	ret = pci_register_driver(&ia_driver);
3273 	if (ret >= 0) {
3274 		ia_timer.expires = jiffies + 3*HZ;
3275 		add_timer(&ia_timer);
3276 	} else
3277 		printk(KERN_ERR DEV_LABEL ": no adapter found\n");
3278 	return ret;
3279 }
3280 
/* Module exit: unregister the PCI driver, then stop the timer armed in
 * ia_module_init() (del_timer_sync waits for a running handler). */
static void __exit ia_module_exit(void)
{
	pci_unregister_driver(&ia_driver);

	del_timer_sync(&ia_timer);
}
3287 
/* Hook the entry/exit points into the module loader. */
module_init(ia_module_init);
module_exit(ia_module_exit);
3290