linux/drivers/atm/iphase.c
   1/******************************************************************************
   2         iphase.c: Device driver for Interphase ATM PCI adapter cards 
   3                    Author: Peter Wang  <pwang@iphase.com>            
   4                   Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
   5                   Interphase Corporation  <www.iphase.com>           
   6                               Version: 1.0                           
   7*******************************************************************************
   8      
   9      This software may be used and distributed according to the terms
  10      of the GNU General Public License (GPL), incorporated herein by reference.
  11      Drivers based on this skeleton fall under the GPL and must retain
  12      the authorship (implicit copyright) notice.
  13
  14      This program is distributed in the hope that it will be useful, but
  15      WITHOUT ANY WARRANTY; without even the implied warranty of
  16      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17      General Public License for more details.
  18      
   19      Modified from an incomplete driver for the Interphase 5575 1KVC 1M card,
   20      originally written by Monalisa Agrawal at UNH. This driver now
   21      supports a variety of variants of the Interphase ATM PCI (i)Chip adapter
   22      card family (see www.iphase.com/products/ClassSheet.cfm?ClassID=ATM),
   23      differing in PHY type, control memory size and packet memory size.
   24      The following is the change log and history:
  25     
   26          Fix bugs in Mona's UBR driver.
   27          Modify the basic memory allocation and DMA logic.
   28          Port the driver to the latest kernel from 2.0.46.
   29          Complete the ABR logic of the driver, and add the ABR work-
   30              around for the hardware anomalies.
   31          Add CBR support.
   32          Add flow control logic to the driver to allow rate-limited VCs.
   33          Add 4K VC support to boards with 512K control memory.
   34          Add support for all the variants of the Interphase ATM PCI
   35          (i)Chip adapter cards, including x575 (155M OC3 and UTP155), x525
   36          (25M UTP25) and x531 (DS3 and E3).
  37          Add SMP support.
  38
  39      Support and updates available at: ftp://ftp.iphase.com/pub/atm
  40
  41*******************************************************************************/
  42
  43#include <linux/module.h>  
  44#include <linux/kernel.h>  
  45#include <linux/mm.h>  
  46#include <linux/pci.h>  
  47#include <linux/errno.h>  
  48#include <linux/atm.h>  
  49#include <linux/atmdev.h>  
  50#include <linux/sonet.h>  
  51#include <linux/skbuff.h>  
  52#include <linux/time.h>  
  53#include <linux/delay.h>  
  54#include <linux/uio.h>  
  55#include <linux/init.h>  
  56#include <linux/interrupt.h>
  57#include <linux/wait.h>
  58#include <linux/slab.h>
  59#include <asm/system.h>  
  60#include <asm/io.h>  
  61#include <linux/atomic.h>
  62#include <asm/uaccess.h>  
  63#include <asm/string.h>  
  64#include <asm/byteorder.h>  
  65#include <linux/vmalloc.h>
  66#include <linux/jiffies.h>
  67#include "iphase.h"               
  68#include "suni.h"                 
  69#define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
  70
  71#define PRIV(dev) ((struct suni_priv *) dev->phy_data)
  72
  73static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
  74static void desc_dbg(IADEV *iadev);
  75
  76static IADEV *ia_dev[8];
  77static struct atm_dev *_ia_dev[8];
  78static int iadev_count;
  79static void ia_led_timer(unsigned long arg);
  80static DEFINE_TIMER(ia_timer, ia_led_timer, 0, 0);
  81static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
  82static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
  83static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
  84            |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0; 
  85
  86module_param(IA_TX_BUF, int, 0);
  87module_param(IA_TX_BUF_SZ, int, 0);
  88module_param(IA_RX_BUF, int, 0);
  89module_param(IA_RX_BUF_SZ, int, 0);
  90module_param(IADebugFlag, uint, 0644);
  91
  92MODULE_LICENSE("GPL");
  93
  94/**************************** IA_LIB **********************************/
  95
  96static void ia_init_rtn_q (IARTN_Q *que) 
  97{ 
  98   que->next = NULL; 
  99   que->tail = NULL; 
 100}
 101
 102static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data) 
 103{
 104   data->next = NULL;
 105   if (que->next == NULL) 
 106      que->next = que->tail = data;
 107   else {
 108      data->next = que->next;
 109      que->next = data;
 110   } 
 111   return;
 112}
 113
 114static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
 115   IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
 116   if (!entry) return -1;
 117   entry->data = data;
 118   entry->next = NULL;
 119   if (que->next == NULL) 
 120      que->next = que->tail = entry;
 121   else {
 122      que->tail->next = entry;
 123      que->tail = que->tail->next;
 124   }      
 125   return 1;
 126}
 127
 128static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
 129   IARTN_Q *tmpdata;
 130   if (que->next == NULL)
 131      return NULL;
 132   tmpdata = que->next;
 133   if ( que->next == que->tail)  
 134      que->next = que->tail = NULL;
 135   else 
 136      que->next = que->next->next;
 137   return tmpdata;
 138}
 139
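/*
 * Reclaim completed transmit descriptors.  The adapter posts finished
 * descriptor numbers into the transmit complete queue (TCQ) in segmentation
 * RAM; walk that queue from the host's cached position (host_tcq_wr) up to
 * the hardware write pointer, clear each descriptor's timestamp, drop the
 * owning VC's in-flight count, and, for rate-limited VCs, queue the entry on
 * tx_return_q so ia_tx_poll() can complete the skb later.
 */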
 140static void ia_hack_tcq(IADEV *dev) {
 141
 142  u_short               desc1;
 143  u_short               tcq_wr;
 144  struct ia_vcc         *iavcc_r = NULL; 
 145
 146  tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
 147  while (dev->host_tcq_wr != tcq_wr) {
 148     desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
 149     if (!desc1) ;
 150     else if (!dev->desc_tbl[desc1 -1].timestamp) {
 151        IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
 152        *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
 153     }                                 
 154     else if (dev->desc_tbl[desc1 -1].timestamp) {
 155        if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) { 
 156           printk("IA: Fatal err in get_desc\n");
 157           continue;
 158        }
 159        iavcc_r->vc_desc_cnt--;
 160        dev->desc_tbl[desc1 -1].timestamp = 0;
 161        IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
 162                                   dev->desc_tbl[desc1 -1].txskb, desc1);)
 163        if (iavcc_r->pcr < dev->rate_limit) {
 164           IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
 165           if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
 166              printk("ia_hack_tcq: No memory available\n");
 167        } 
 168        dev->desc_tbl[desc1 -1].iavcc = NULL;
 169        dev->desc_tbl[desc1 -1].txskb = NULL;
 170     }
 171     dev->host_tcq_wr += 2;
 172     if (dev->host_tcq_wr > dev->ffL.tcq_ed) 
 173        dev->host_tcq_wr = dev->ffL.tcq_st;
 174  }
 175} /* ia_hack_tcq */
 176
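/*
 * Fetch the next free transmit descriptor number from the TCQ, or 0xFFFF if
 * none is available.  Roughly every 50 jiffies (or whenever the queue looks
 * empty) the descriptor table is also scanned for entries whose per-VC
 * timeout (ltimeout) has expired; such descriptors are assumed lost by the
 * hardware and their numbers are pushed back onto the TCQ as a recovery
 * measure.
 */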
 177static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
 178  u_short               desc_num, i;
 179  struct sk_buff        *skb;
 180  struct ia_vcc         *iavcc_r = NULL; 
 181  unsigned long delta;
 182  static unsigned long timer = 0;
 183  int ltimeout;
 184
 185  ia_hack_tcq (dev);
 186  if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
 187     timer = jiffies; 
 188     i=0;
 189     while (i < dev->num_tx_desc) {
 190        if (!dev->desc_tbl[i].timestamp) {
 191           i++;
 192           continue;
 193        }
 194        ltimeout = dev->desc_tbl[i].iavcc->ltimeout; 
 195        delta = jiffies - dev->desc_tbl[i].timestamp;
 196        if (delta >= ltimeout) {
 197           IF_ABR(printk("RECOVER run!! desc_tbl %d = %d  delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
 198           if (dev->ffL.tcq_rd == dev->ffL.tcq_st) 
 199              dev->ffL.tcq_rd =  dev->ffL.tcq_ed;
 200           else 
 201              dev->ffL.tcq_rd -= 2;
 202           *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
 203           if (!(skb = dev->desc_tbl[i].txskb) || 
 204                          !(iavcc_r = dev->desc_tbl[i].iavcc))
 205              printk("Fatal err, desc table vcc or skb is NULL\n");
 206           else 
 207              iavcc_r->vc_desc_cnt--;
 208           dev->desc_tbl[i].timestamp = 0;
 209           dev->desc_tbl[i].iavcc = NULL;
 210           dev->desc_tbl[i].txskb = NULL;
 211        }
 212        i++;
 213     } /* while */
 214  }
 215  if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
 216     return 0xFFFF;
 217    
 218  /* Get the next available descriptor number from TCQ */
 219  desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
 220
 221  while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
 222     dev->ffL.tcq_rd += 2;
 223     if (dev->ffL.tcq_rd > dev->ffL.tcq_ed) 
 224        dev->ffL.tcq_rd = dev->ffL.tcq_st;
 225     if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
 226        return 0xFFFF; 
 227     desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
 228  }
 229
 230  /* get system time */
 231  dev->desc_tbl[desc_num -1].timestamp = jiffies;
 232  return desc_num;
 233}
 234
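/*
 * ABR lock-up detection and work-around.  Every fifth call for a given ABR
 * VC the scheduler state of that VC is sampled: if the VC stays in the ABR
 * scheduling state across a 10 usec delay, or its last_cell_slot/fraction
 * pair has not changed since the previous sample, the VC is considered
 * stuck.  Recovery forces the VC back to the idle state, drops its VCI into
 * a free slot of the ABR schedule table and puts the segmentation engine
 * back online.
 */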
 235static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
 236  u_char                foundLockUp;
 237  vcstatus_t            *vcstatus;
 238  u_short               *shd_tbl;
 239  u_short               tempCellSlot, tempFract;
 240  struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
 241  struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
 242  u_int  i;
 243
 244  if (vcc->qos.txtp.traffic_class == ATM_ABR) {
 245     vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
 246     vcstatus->cnt++;
 247     foundLockUp = 0;
 248     if( vcstatus->cnt == 0x05 ) {
 249        abr_vc += vcc->vci;
 250        eabr_vc += vcc->vci;
 251        if( eabr_vc->last_desc ) {
 252           if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
 253              /* Wait for 10 Micro sec */
 254              udelay(10);
 255              if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
 256                 foundLockUp = 1;
 257           }
 258           else {
 259              tempCellSlot = abr_vc->last_cell_slot;
 260              tempFract    = abr_vc->fraction;
 261              if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
 262                         && (tempFract == dev->testTable[vcc->vci]->fract))
 263                 foundLockUp = 1;                   
 264              dev->testTable[vcc->vci]->lastTime = tempCellSlot;   
 265              dev->testTable[vcc->vci]->fract = tempFract; 
 266           }        
 267        } /* last descriptor */            
 268        vcstatus->cnt = 0;      
 269     } /* vcstatus->cnt */
 270        
 271     if (foundLockUp) {
 272        IF_ABR(printk("LOCK UP found\n");) 
 273        writew(0xFFFD, dev->seg_reg+MODE_REG_0);
 274        /* Wait for 10 Micro sec */
 275        udelay(10); 
 276        abr_vc->status &= 0xFFF8;
 277        abr_vc->status |= 0x0001;  /* state is idle */
 278        shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;                
 279        for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
 280        if (i < dev->num_vc)
 281           shd_tbl[i] = vcc->vci;
 282        else
 283           IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
 284        writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
 285        writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
 286        writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);       
 287        vcstatus->cnt = 0;
 288     } /* foundLockUp */
 289
 290  } /* if an ABR VC */
 291
 292
 293}
 294 
 295/*
 296** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
 297**
 298**  +----+----+------------------+-------------------------------+
 299**  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
 300**  +----+----+------------------+-------------------------------+
 301** 
 302**    R = reserved (written as 0)
 303**    NZ = 0 if 0 cells/sec; 1 otherwise
 304**
 305**    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
 306*/
 307static u16
 308cellrate_to_float(u32 cr)
 309{
 310
 311#define NZ              0x4000
 312#define M_BITS          9               /* Number of bits in mantissa */
 313#define E_BITS          5               /* Number of bits in exponent */
 314#define M_MASK          0x1ff           
 315#define E_MASK          0x1f
 316  u16   flot;
 317  u32   tmp = cr & 0x00ffffff;
 318  int   i   = 0;
 319  if (cr == 0)
 320     return 0;
 321  while (tmp != 1) {
 322     tmp >>= 1;
 323     i++;
 324  }
 325  if (i == M_BITS)
 326     flot = NZ | (i << M_BITS) | (cr & M_MASK);
 327  else if (i < M_BITS)
 328     flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
 329  else
 330     flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
 331  return flot;
 332}
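/*
 * Worked example of the encoding above: an OC-3c payload rate of roughly
 * 353207 cells/sec has its most significant set bit at position 18, so the
 * exponent is 18 and the mantissa is the next nine bits,
 * (353207 >> 9) & 0x1ff = 177.  The encoded value is therefore
 * 0x4000 | (18 << 9) | 177 = 0x64b1, which decodes back to
 * (1 + 177/512) * 2^18 = 352768 cells/sec; the small error is the truncated
 * low-order mantissa bits.
 */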
 333
 334#if 0
 335/*
 336** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
 337*/
 338static u32
 339float_to_cellrate(u16 rate)
 340{
 341  u32   exp, mantissa, cps;
 342  if ((rate & NZ) == 0)
 343     return 0;
 344  exp = (rate >> M_BITS) & E_MASK;
 345  mantissa = rate & M_MASK;
 346  if (exp == 0)
 347     return 1;
 348  cps = (1 << M_BITS) | mantissa;
 349  if (exp == M_BITS)
 350     cps = cps;
 351  else if (exp > M_BITS)
 352     cps <<= (exp - M_BITS);
 353  else
 354     cps >>= (M_BITS - exp);
 355  return cps;
 356}
 357#endif 
 358
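/* Fill *srv_p with the driver's default ABR service parameters: PCR equal to
 * the line rate, MCR of zero, and fixed ICR/TBE/FRTT/RIF/RDF/Nrm/Trm/CDF/ADTF
 * values. */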
 359static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
 360  srv_p->class_type = ATM_ABR;
 361  srv_p->pcr        = dev->LineRate;
 362  srv_p->mcr        = 0;
 363  srv_p->icr        = 0x055cb7;
 364  srv_p->tbe        = 0xffffff;
 365  srv_p->frtt       = 0x3a;
 366  srv_p->rif        = 0xf;
 367  srv_p->rdf        = 0xb;
 368  srv_p->nrm        = 0x4;
 369  srv_p->trm        = 0x7;
 370  srv_p->cdf        = 0x3;
 371  srv_p->adtf       = 50;
 372}
 373
 374static int
 375ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p, 
 376                                                struct atm_vcc *vcc, u8 flag)
 377{
 378  f_vc_abr_entry  *f_abr_vc;
 379  r_vc_abr_entry  *r_abr_vc;
 380  u32           icr;
 381  u8            trm, nrm, crm;
 382  u16           adtf, air, *ptr16;      
 383  f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
 384  f_abr_vc += vcc->vci;       
 385  switch (flag) {
 386     case 1: /* FFRED initialization */
 387#if 0  /* sanity check */
 388       if (srv_p->pcr == 0)
 389          return INVALID_PCR;
 390       if (srv_p->pcr > dev->LineRate)
 391          srv_p->pcr = dev->LineRate;
 392       if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
 393          return MCR_UNAVAILABLE;
 394       if (srv_p->mcr > srv_p->pcr)
 395          return INVALID_MCR;
 396       if (!(srv_p->icr))
 397          srv_p->icr = srv_p->pcr;
 398       if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
 399          return INVALID_ICR;
 400       if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
 401          return INVALID_TBE;
 402       if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
 403          return INVALID_FRTT;
 404       if (srv_p->nrm > MAX_NRM)
 405          return INVALID_NRM;
 406       if (srv_p->trm > MAX_TRM)
 407          return INVALID_TRM;
 408       if (srv_p->adtf > MAX_ADTF)
 409          return INVALID_ADTF;
 410       else if (srv_p->adtf == 0)
 411          srv_p->adtf = 1;
 412       if (srv_p->cdf > MAX_CDF)
 413          return INVALID_CDF;
 414       if (srv_p->rif > MAX_RIF)
 415          return INVALID_RIF;
 416       if (srv_p->rdf > MAX_RDF)
 417          return INVALID_RDF;
 418#endif
 419       memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
 420       f_abr_vc->f_vc_type = ABR;
 421       nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
 422                                  /* i.e 2**n = 2 << (n-1) */
 423       f_abr_vc->f_nrm = nrm << 8 | nrm;
 424       trm = 100000/(2 << (16 - srv_p->trm));
 425       if ( trm == 0) trm = 1;
 426       f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
 427       crm = srv_p->tbe / nrm;
 428       if (crm == 0) crm = 1;
 429       f_abr_vc->f_crm = crm & 0xff;
 430       f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
 431       icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
 432                                ((srv_p->tbe/srv_p->frtt)*1000000) :
 433                                (1000000/(srv_p->frtt/srv_p->tbe)));
 434       f_abr_vc->f_icr = cellrate_to_float(icr);
 435       adtf = (10000 * srv_p->adtf)/8192;
 436       if (adtf == 0) adtf = 1; 
 437       f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
 438       f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
 439       f_abr_vc->f_acr = f_abr_vc->f_icr;
 440       f_abr_vc->f_status = 0x0042;
 441       break;
 442    case 0: /* RFRED initialization */  
 443       ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize); 
 444       *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
 445       r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
 446       r_abr_vc += vcc->vci;
 447       r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
 448       air = srv_p->pcr << (15 - srv_p->rif);
 449       if (air == 0) air = 1;
 450       r_abr_vc->r_air = cellrate_to_float(air);
 451       dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
 452       dev->sum_mcr        += srv_p->mcr;
 453       dev->n_abr++;
 454       break;
 455    default:
 456       break;
 457  }
 458  return        0;
 459}
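/*
 * Reserve slots in the CBR schedule table for this VC.  The requested PCR is
 * first quantized to whole table entries, entries = PCR / Granularity,
 * rounded up when the remainder exceeds a quarter of the granularity (the
 * 3*(rate - rateLow) > (rateHigh - rate) test below).  As a purely
 * hypothetical illustration, with a Granularity of 100 cells/sec a PCR of
 * 1234 cells/sec yields 13 entries, because 1234 is more than 25 cells/sec
 * above the 1200 boundary.  The entries are then spread as evenly as
 * possible over the CbrTotEntries slots: start near CbrEntryPt, advance by
 * CbrTotEntries/entries each time (carrying the fractional remainder in
 * sp_mod/sp_mod2), and probe slots alternately below and above the ideal
 * position until an empty one is found.
 */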
 460static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
 461   u32 rateLow=0, rateHigh, rate;
 462   int entries;
 463   struct ia_vcc *ia_vcc;
 464
 465   int   idealSlot =0, testSlot, toBeAssigned, inc;
 466   u32   spacing;
 467   u16  *SchedTbl, *TstSchedTbl;
 468   u16  cbrVC, vcIndex;
 469   u32   fracSlot    = 0;
 470   u32   sp_mod      = 0;
 471   u32   sp_mod2     = 0;
 472
 473   /* IpAdjustTrafficParams */
 474   if (vcc->qos.txtp.max_pcr <= 0) {
 475      IF_ERR(printk("PCR for CBR not defined\n");)
 476      return -1;
 477   }
 478   rate = vcc->qos.txtp.max_pcr;
 479   entries = rate / dev->Granularity;
 480   IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
 481                                entries, rate, dev->Granularity);)
 482   if (entries < 1)
 483      IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");) 
 484   rateLow  =  entries * dev->Granularity;
 485   rateHigh = (entries + 1) * dev->Granularity;
 486   if (3*(rate - rateLow) > (rateHigh - rate))
 487      entries++;
 488   if (entries > dev->CbrRemEntries) {
 489      IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
 490      IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
 491                                       entries, dev->CbrRemEntries);)
 492      return -EBUSY;
 493   }   
 494
 495   ia_vcc = INPH_IA_VCC(vcc);
 496   ia_vcc->NumCbrEntry = entries; 
 497   dev->sum_mcr += entries * dev->Granularity; 
 498   /* IaFFrednInsertCbrSched */
 499   // Starting at an arbitrary location, place the entries into the table
 500   // as smoothly as possible
 501   cbrVC   = 0;
 502   spacing = dev->CbrTotEntries / entries;
 503   sp_mod  = dev->CbrTotEntries % entries; // get modulo
 504   toBeAssigned = entries;
 505   fracSlot = 0;
 506   vcIndex  = vcc->vci;
 507   IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
 508   while (toBeAssigned)
 509   {
 510      // If this is the first time, start the table loading for this connection
 511      // as close to entryPoint as possible.
 512      if (toBeAssigned == entries)
 513      {
 514         idealSlot = dev->CbrEntryPt;
 515         dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
 516         if (dev->CbrEntryPt >= dev->CbrTotEntries) 
 517            dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
 518      } else {
 519         idealSlot += (u32)(spacing + fracSlot); // Point to the next location
 520         // in the table that would be  smoothest
 521         fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
 522         sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
 523      }
 524      if (idealSlot >= (int)dev->CbrTotEntries) 
 525         idealSlot -= dev->CbrTotEntries;  
 526      // Continuously check around this ideal value until a null
 527      // location is encountered.
 528      SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize); 
 529      inc = 0;
 530      testSlot = idealSlot;
 531      TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
 532      IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
 533                                testSlot, TstSchedTbl,toBeAssigned);)
 534      memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
 535      while (cbrVC)  // If another VC at this location, we have to keep looking
 536      {
 537          inc++;
 538          testSlot = idealSlot - inc;
 539          if (testSlot < 0) { // Wrap if necessary
 540             testSlot += dev->CbrTotEntries;
 541             IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
 542                                                       SchedTbl,testSlot);)
 543          }
 544          TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
 545          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC)); 
 546          if (!cbrVC)
 547             break;
 548          testSlot = idealSlot + inc;
 549          if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
 550             testSlot -= dev->CbrTotEntries;
 551             IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
 552             IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n", 
 553                                            testSlot, toBeAssigned);)
 554          } 
 555          // set table index and read in value
 556          TstSchedTbl = (u16*)(SchedTbl + testSlot);
 557          IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
 558                          TstSchedTbl,cbrVC,inc);)
 559          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
 560       } /* while */
 561       // Move this VCI number into this location of the CBR Sched table.
 562       memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
 563       dev->CbrRemEntries--;
 564       toBeAssigned--;
 565   } /* while */ 
 566
 567   /* IaFFrednCbrEnable */
 568   dev->NumEnabledCBR++;
 569   if (dev->NumEnabledCBR == 1) {
 570       writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
 571       IF_CBR(printk("CBR is enabled\n");)
 572   }
 573   return 0;
 574}
 575static void ia_cbrVc_close (struct atm_vcc *vcc) {
 576   IADEV *iadev;
 577   u16 *SchedTbl, NullVci = 0;
 578   u32 i, NumFound;
 579
 580   iadev = INPH_IA_DEV(vcc->dev);
 581   iadev->NumEnabledCBR--;
 582   SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
 583   if (iadev->NumEnabledCBR == 0) {
 584      writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
 585      IF_CBR (printk("CBR support disabled\n");)
 586   }
 587   NumFound = 0;
 588   for (i=0; i < iadev->CbrTotEntries; i++)
 589   {
 590      if (*SchedTbl == vcc->vci) {
 591         iadev->CbrRemEntries++;
 592         *SchedTbl = NullVci;
 593         IF_CBR(NumFound++;)
 594      }
 595      SchedTbl++;   
 596   } 
 597   IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
 598}
 599
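/*
 * Count the transmit descriptors currently available to the host after
 * reclaiming completed ones via ia_hack_tcq(): the number of two-byte TCQ
 * entries between the read pointer (ffL.tcq_rd) and the cached hardware
 * write pointer (host_tcq_wr), taking the wrap of the circular queue into
 * account.
 */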
 600static int ia_avail_descs(IADEV *iadev) {
 601   int tmp = 0;
 602   ia_hack_tcq(iadev);
 603   if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
 604      tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
 605   else
 606      tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
 607                   iadev->ffL.tcq_st) / 2;
 608   return tmp;
 609}    
 610
 611static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
 612
 613static int ia_que_tx (IADEV *iadev) { 
 614   struct sk_buff *skb;
 615   int num_desc;
 616   struct atm_vcc *vcc;
 617   num_desc = ia_avail_descs(iadev);
 618
 619   while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
 620      if (!(vcc = ATM_SKB(skb)->vcc)) {
 621         dev_kfree_skb_any(skb);
 622         printk("ia_que_tx: Null vcc\n");
 623         break;
 624      }
 625      if (!test_bit(ATM_VF_READY,&vcc->flags)) {
 626         dev_kfree_skb_any(skb);
 627         printk("Free the SKB on closed vci %d \n", vcc->vci);
 628         break;
 629      }
 630      if (ia_pkt_tx (vcc, skb)) {
 631         skb_queue_head(&iadev->tx_backlog, skb);
 632      }
 633      num_desc--;
 634   }
 635   return 0;
 636}
 637
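/*
 * Transmit completion housekeeping: drain tx_return_q (filled by
 * ia_hack_tcq()), pop every skb queued on the VC's txing_skb list up to and
 * including the completed one, and finally push any backlogged packets out
 * again through ia_que_tx().
 */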
 638static void ia_tx_poll (IADEV *iadev) {
 639   struct atm_vcc *vcc = NULL;
 640   struct sk_buff *skb = NULL, *skb1 = NULL;
 641   struct ia_vcc *iavcc;
 642   IARTN_Q *  rtne;
 643
 644   ia_hack_tcq(iadev);
 645   while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
 646       skb = rtne->data.txskb;
 647       if (!skb) {
 648           printk("ia_tx_poll: skb is null\n");
 649           goto out;
 650       }
 651       vcc = ATM_SKB(skb)->vcc;
 652       if (!vcc) {
 653           printk("ia_tx_poll: vcc is null\n");
 654           dev_kfree_skb_any(skb);
 655           goto out;
 656       }
 657
 658       iavcc = INPH_IA_VCC(vcc);
 659       if (!iavcc) {
 660           printk("ia_tx_poll: iavcc is null\n");
 661           dev_kfree_skb_any(skb);
 662           goto out;
 663       }
 664
 665       skb1 = skb_dequeue(&iavcc->txing_skb);
 666       while (skb1 && (skb1 != skb)) {
 667          if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
 668             printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
 669          }
  670          IF_ERR(printk("Releasing an SKB that does not match\n");)
 671          if ((vcc->pop) && (skb1->len != 0))
 672          {
 673             vcc->pop(vcc, skb1);
  674             IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
 675                                                          (long)skb1);)
 676          }
 677          else 
 678             dev_kfree_skb_any(skb1);
 679          skb1 = skb_dequeue(&iavcc->txing_skb);
 680       }                                                        
 681       if (!skb1) {
  682          IF_EVENT(printk("IA: Vci %d - skb not found, requeued\n",vcc->vci);)
 683          ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
 684          break;
 685       }
 686       if ((vcc->pop) && (skb->len != 0))
 687       {
 688          vcc->pop(vcc, skb);
 689          IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
 690       }
 691       else 
 692          dev_kfree_skb_any(skb);
 693       kfree(rtne);
 694    }
 695    ia_que_tx(iadev);
 696out:
 697    return;
 698}
 699#if 0
 700static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
 701{
 702        u32     t;
 703        int     i;
 704        /*
 705         * Issue a command to enable writes to the NOVRAM
 706         */
 707        NVRAM_CMD (EXTEND + EWEN);
 708        NVRAM_CLR_CE;
 709        /*
 710         * issue the write command
 711         */
 712        NVRAM_CMD(IAWRITE + addr);
 713        /* 
 714         * Send the data, starting with D15, then D14, and so on for 16 bits
 715         */
 716        for (i=15; i>=0; i--) {
 717                NVRAM_CLKOUT (val & 0x8000);
 718                val <<= 1;
 719        }
 720        NVRAM_CLR_CE;
 721        CFG_OR(NVCE);
 722        t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
 723        while (!(t & NVDO))
 724                t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
 725
 726        NVRAM_CLR_CE;
 727        /*
 728         * disable writes again
 729         */
  730        NVRAM_CMD(EXTEND + EWDS);
 731        NVRAM_CLR_CE;
 732        CFG_AND(~NVDI);
 733}
 734#endif
 735
 736static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
 737{
 738        u_short val;
 739        u32     t;
 740        int     i;
 741        /*
  742         * Read the first bit that was clocked with the falling edge of
  743         * the last command data clock
 744         */
 745        NVRAM_CMD(IAREAD + addr);
 746        /*
 747         * Now read the rest of the bits, the next bit read is D14, then D13,
 748         * and so on.
 749         */
 750        val = 0;
 751        for (i=15; i>=0; i--) {
 752                NVRAM_CLKIN(t);
 753                val |= (t << i);
 754        }
 755        NVRAM_CLR_CE;
 756        CFG_AND(~NVDI);
 757        return val;
 758}
 759
 760static void ia_hw_type(IADEV *iadev) {
 761   u_short memType = ia_eeprom_get(iadev, 25);   
 762   iadev->memType = memType;
 763   if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
 764      iadev->num_tx_desc = IA_TX_BUF;
 765      iadev->tx_buf_sz = IA_TX_BUF_SZ;
 766      iadev->num_rx_desc = IA_RX_BUF;
 767      iadev->rx_buf_sz = IA_RX_BUF_SZ; 
 768   } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
 769      if (IA_TX_BUF == DFL_TX_BUFFERS)
 770        iadev->num_tx_desc = IA_TX_BUF / 2;
 771      else 
 772        iadev->num_tx_desc = IA_TX_BUF;
 773      iadev->tx_buf_sz = IA_TX_BUF_SZ;
 774      if (IA_RX_BUF == DFL_RX_BUFFERS)
 775        iadev->num_rx_desc = IA_RX_BUF / 2;
 776      else
 777        iadev->num_rx_desc = IA_RX_BUF;
 778      iadev->rx_buf_sz = IA_RX_BUF_SZ;
 779   }
 780   else {
 781      if (IA_TX_BUF == DFL_TX_BUFFERS) 
 782        iadev->num_tx_desc = IA_TX_BUF / 8;
 783      else
 784        iadev->num_tx_desc = IA_TX_BUF;
 785      iadev->tx_buf_sz = IA_TX_BUF_SZ;
 786      if (IA_RX_BUF == DFL_RX_BUFFERS)
 787        iadev->num_rx_desc = IA_RX_BUF / 8;
 788      else
 789        iadev->num_rx_desc = IA_RX_BUF;
 790      iadev->rx_buf_sz = IA_RX_BUF_SZ; 
 791   } 
 792   iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz); 
 793   IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
 794         iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
 795         iadev->rx_buf_sz, iadev->rx_pkt_ram);)
 796
 797#if 0
  798   if ((memType & FE_MASK) == FE_SINGLE_MODE)
  799      iadev->phy_type = PHY_OC3C_S;
  800   else if ((memType & FE_MASK) == FE_UTP_OPTION)
  801      iadev->phy_type = PHY_UTP155;
  802   else
  803      iadev->phy_type = PHY_OC3C_M;
 804#endif
 805   
 806   iadev->phy_type = memType & FE_MASK;
 807   IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n", 
 808                                         memType,iadev->phy_type);)
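   /*
    * Nominal line rate in cells/sec for each PHY: the PHY bit rate divided
    * by 8 gives bytes/sec, dividing by 53 gives cells/sec, and the result is
    * scaled by 26/27; 155 Mbit parts simply use ATM_OC3_PCR.
    */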
 809   if (iadev->phy_type == FE_25MBIT_PHY) 
 810      iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
 811   else if (iadev->phy_type == FE_DS3_PHY)
 812      iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
 813   else if (iadev->phy_type == FE_E3_PHY) 
 814      iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
 815   else
 816       iadev->LineRate = (u32)(ATM_OC3_PCR);
 817   IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
 818
 819}
 820
 821static void IaFrontEndIntr(IADEV *iadev) {
 822  volatile IA_SUNI *suni;
 823  volatile ia_mb25_t *mb25;
 824  volatile suni_pm7345_t *suni_pm7345;
 825
 826  if(iadev->phy_type & FE_25MBIT_PHY) {
 827     mb25 = (ia_mb25_t*)iadev->phy;
 828     iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
 829  } else if (iadev->phy_type & FE_DS3_PHY) {
 830     suni_pm7345 = (suni_pm7345_t *)iadev->phy;
 831     /* clear FRMR interrupts */
 832     (void) suni_pm7345->suni_ds3_frm_intr_stat; 
 833     iadev->carrier_detect =  
 834           Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
 835  } else if (iadev->phy_type & FE_E3_PHY ) {
 836     suni_pm7345 = (suni_pm7345_t *)iadev->phy;
 837     (void) suni_pm7345->suni_e3_frm_maint_intr_ind;
 838     iadev->carrier_detect =
 839           Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat&SUNI_E3_LOS));
 840  }
 841  else { 
 842     suni = (IA_SUNI *)iadev->phy;
 843     (void) suni->suni_rsop_status;
 844     iadev->carrier_detect = Boolean(!(suni->suni_rsop_status & SUNI_LOSV));
 845  }
 846  if (iadev->carrier_detect)
 847    printk("IA: SUNI carrier detected\n");
 848  else
  849    printk("IA: SUNI carrier signal lost\n"); 
 850  return;
 851}
 852
 853static void ia_mb25_init (IADEV *iadev)
 854{
 855   volatile ia_mb25_t  *mb25 = (ia_mb25_t*)iadev->phy;
 856#if 0
 857   mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
 858#endif
 859   mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC;
 860   mb25->mb25_diag_control = 0;
 861   /*
 862    * Initialize carrier detect state
 863    */
 864   iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
 865   return;
 866}                   
 867
 868static void ia_suni_pm7345_init (IADEV *iadev)
 869{
 870   volatile suni_pm7345_t *suni_pm7345 = (suni_pm7345_t *)iadev->phy;
 871   if (iadev->phy_type & FE_DS3_PHY)
 872   {
 873      iadev->carrier_detect = 
 874          Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV)); 
 875      suni_pm7345->suni_ds3_frm_intr_enbl = 0x17;
 876      suni_pm7345->suni_ds3_frm_cfg = 1;
 877      suni_pm7345->suni_ds3_tran_cfg = 1;
 878      suni_pm7345->suni_config = 0;
 879      suni_pm7345->suni_splr_cfg = 0;
 880      suni_pm7345->suni_splt_cfg = 0;
 881   }
 882   else 
 883   {
 884      iadev->carrier_detect = 
 885          Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat & SUNI_E3_LOS));
 886      suni_pm7345->suni_e3_frm_fram_options = 0x4;
 887      suni_pm7345->suni_e3_frm_maint_options = 0x20;
 888      suni_pm7345->suni_e3_frm_fram_intr_enbl = 0x1d;
 889      suni_pm7345->suni_e3_frm_maint_intr_enbl = 0x30;
 890      suni_pm7345->suni_e3_tran_stat_diag_options = 0x0;
 891      suni_pm7345->suni_e3_tran_fram_options = 0x1;
 892      suni_pm7345->suni_config = SUNI_PM7345_E3ENBL;
 893      suni_pm7345->suni_splr_cfg = 0x41;
 894      suni_pm7345->suni_splt_cfg = 0x41;
 895   } 
 896   /*
 897    * Enable RSOP loss of signal interrupt.
 898    */
 899   suni_pm7345->suni_intr_enbl = 0x28;
 900 
 901   /*
 902    * Clear error counters
 903    */
 904   suni_pm7345->suni_id_reset = 0;
 905
 906   /*
 907    * Clear "PMCTST" in master test register.
 908    */
 909   suni_pm7345->suni_master_test = 0;
 910
 911   suni_pm7345->suni_rxcp_ctrl = 0x2c;
 912   suni_pm7345->suni_rxcp_fctrl = 0x81;
 913 
 914   suni_pm7345->suni_rxcp_idle_pat_h1 =
 915        suni_pm7345->suni_rxcp_idle_pat_h2 =
 916        suni_pm7345->suni_rxcp_idle_pat_h3 = 0;
 917   suni_pm7345->suni_rxcp_idle_pat_h4 = 1;
 918 
 919   suni_pm7345->suni_rxcp_idle_mask_h1 = 0xff;
 920   suni_pm7345->suni_rxcp_idle_mask_h2 = 0xff;
 921   suni_pm7345->suni_rxcp_idle_mask_h3 = 0xff;
 922   suni_pm7345->suni_rxcp_idle_mask_h4 = 0xfe;
 923 
 924   suni_pm7345->suni_rxcp_cell_pat_h1 =
 925        suni_pm7345->suni_rxcp_cell_pat_h2 =
 926        suni_pm7345->suni_rxcp_cell_pat_h3 = 0;
 927   suni_pm7345->suni_rxcp_cell_pat_h4 = 1;
 928 
 929   suni_pm7345->suni_rxcp_cell_mask_h1 =
 930        suni_pm7345->suni_rxcp_cell_mask_h2 =
 931        suni_pm7345->suni_rxcp_cell_mask_h3 =
 932        suni_pm7345->suni_rxcp_cell_mask_h4 = 0xff;
 933 
 934   suni_pm7345->suni_txcp_ctrl = 0xa4;
 935   suni_pm7345->suni_txcp_intr_en_sts = 0x10;
 936   suni_pm7345->suni_txcp_idle_pat_h5 = 0x55;
 937 
 938   suni_pm7345->suni_config &= ~(SUNI_PM7345_LLB |
 939                                 SUNI_PM7345_CLB |
 940                                 SUNI_PM7345_DLB |
 941                                  SUNI_PM7345_PLB);
 942#ifdef __SNMP__
 943   suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
 944#endif /* __SNMP__ */
 945   return;
 946}
 947
 948
 949/***************************** IA_LIB END *****************************/
 950    
 951#ifdef CONFIG_ATM_IA_DEBUG
 952static int tcnter = 0;
 953static void xdump( u_char*  cp, int  length, char*  prefix )
 954{
 955    int col, count;
 956    u_char prntBuf[120];
 957    u_char*  pBuf = prntBuf;
 958    count = 0;
 959    while(count < length){
 960        pBuf += sprintf( pBuf, "%s", prefix );
 961        for(col = 0;count + col < length && col < 16; col++){
 962            if (col != 0 && (col % 4) == 0)
 963                pBuf += sprintf( pBuf, " " );
 964            pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
 965        }
 966        while(col++ < 16){      /* pad end of buffer with blanks */
 967            if ((col % 4) == 0)
 968                sprintf( pBuf, " " );
 969            pBuf += sprintf( pBuf, "   " );
 970        }
 971        pBuf += sprintf( pBuf, "  " );
 972        for(col = 0;count + col < length && col < 16; col++){
 973            if (isprint((int)cp[count + col]))
 974                pBuf += sprintf( pBuf, "%c", cp[count + col] );
 975            else
 976                pBuf += sprintf( pBuf, "." );
 977                }
 978        printk("%s\n", prntBuf);
 979        count += col;
 980        pBuf = prntBuf;
 981    }
 982
 983}  /* close xdump(... */
 984#endif /* CONFIG_ATM_IA_DEBUG */
 985
 986  
 987static struct atm_dev *ia_boards = NULL;  
 988  
 989#define ACTUAL_RAM_BASE \
 990        RAM_BASE*((iadev->mem)/(128 * 1024))  
 991#define ACTUAL_SEG_RAM_BASE \
 992        IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
 993#define ACTUAL_REASS_RAM_BASE \
 994        IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
 995  
 996  
 997/*-- some utilities and memory allocation stuff will come here -------------*/  
 998  
 999static void desc_dbg(IADEV *iadev) {
1000
1001  u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
1002  u32 i;
1003  void __iomem *tmp;
1004  // regval = readl((u32)ia_cmds->maddr);
1005  tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
1006  printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
1007                     tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
1008                     readw(iadev->seg_ram+tcq_wr_ptr-2));
1009  printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n",  iadev->host_tcq_wr, 
1010                   iadev->ffL.tcq_rd);
1011  tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
1012  tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
1013  printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
1014  i = 0;
1015  while (tcq_st_ptr != tcq_ed_ptr) {
1016      tmp = iadev->seg_ram+tcq_st_ptr;
1017      printk("TCQ slot %d desc = %d  Addr = %p\n", i++, readw(tmp), tmp);
1018      tcq_st_ptr += 2;
1019  }
1020  for(i=0; i <iadev->num_tx_desc; i++)
1021      printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
1022} 
1023  
1024  
1025/*----------------------------- Receiving side stuff --------------------------*/  
1026 
1027static void rx_excp_rcvd(struct atm_dev *dev)  
1028{  
 1029#if 0 /* closing the receiving side will cause too many excp int */
1030  IADEV *iadev;  
1031  u_short state;  
1032  u_short excpq_rd_ptr;  
1033  //u_short *ptr;  
1034  int vci, error = 1;  
1035  iadev = INPH_IA_DEV(dev);  
1036  state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1037  while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)  
1038  { printk("state = %x \n", state); 
1039        excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;  
1040 printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr); 
1041        if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
1042            IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
1043        // TODO: update exception stat
1044        vci = readw(iadev->reass_ram+excpq_rd_ptr);  
1045        error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;  
1046        // pwang_test
1047        excpq_rd_ptr += 4;  
1048        if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))  
1049            excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
1050        writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);  
1051        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1052  }  
1053#endif
1054}  
1055  
1056static void free_desc(struct atm_dev *dev, int desc)  
1057{  
1058        IADEV *iadev;  
1059        iadev = INPH_IA_DEV(dev);  
1060        writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr); 
1061        iadev->rfL.fdq_wr +=2;
1062        if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1063                iadev->rfL.fdq_wr =  iadev->rfL.fdq_st;  
1064        writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);  
1065}  
1066  
1067  
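/*
 * Service one entry of the packet complete queue: read the completed
 * receive descriptor number, look up the owning VCC through the buffer
 * descriptor's vc_index, allocate and charge an skb for the reassembled
 * length, and append a DLE so the adapter DMAs the on-board buffer into the
 * skb.  On any error (bad descriptor, unknown VCC, CRC/timeout/overflow
 * status) the descriptor is simply returned to the free queue.
 */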
1068static int rx_pkt(struct atm_dev *dev)  
1069{  
1070        IADEV *iadev;  
1071        struct atm_vcc *vcc;  
1072        unsigned short status;  
1073        struct rx_buf_desc __iomem *buf_desc_ptr;  
1074        int desc;   
1075        struct dle* wr_ptr;  
1076        int len;  
1077        struct sk_buff *skb;  
1078        u_int buf_addr, dma_addr;  
1079
1080        iadev = INPH_IA_DEV(dev);  
1081        if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff)) 
1082        {  
1083            printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);  
1084            return -EINVAL;  
1085        }  
1086        /* mask 1st 3 bits to get the actual descno. */  
1087        desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;  
1088        IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n", 
1089                                    iadev->reass_ram, iadev->rfL.pcq_rd, desc);
1090              printk(" pcq_wr_ptr = 0x%x\n",
1091                               readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
 1092        /* update the read pointer - maybe we should do this at the end */
1093        if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed) 
1094                iadev->rfL.pcq_rd = iadev->rfL.pcq_st;  
1095        else  
1096                iadev->rfL.pcq_rd += 2;
1097        writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);  
1098  
1099        /* get the buffer desc entry.  
1100                update stuff. - doesn't seem to be any update necessary  
1101        */  
1102        buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1103        /* make the ptr point to the corresponding buffer desc entry */  
1104        buf_desc_ptr += desc;     
1105        if (!desc || (desc > iadev->num_rx_desc) || 
1106                      ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) { 
1107            free_desc(dev, desc);
1108            IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
1109            return -1;
1110        }
1111        vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];  
1112        if (!vcc)  
1113        {      
1114                free_desc(dev, desc); 
1115                printk("IA: null vcc, drop PDU\n");  
1116                return -1;  
1117        }  
1118          
1119  
1120        /* might want to check the status bits for errors */  
1121        status = (u_short) (buf_desc_ptr->desc_mode);  
1122        if (status & (RX_CER | RX_PTE | RX_OFL))  
1123        {  
1124                atomic_inc(&vcc->stats->rx_err);
1125                IF_ERR(printk("IA: bad packet, dropping it");)  
1126                if (status & RX_CER) { 
1127                    IF_ERR(printk(" cause: packet CRC error\n");)
1128                }
1129                else if (status & RX_PTE) {
1130                    IF_ERR(printk(" cause: packet time out\n");)
1131                }
1132                else {
1133                    IF_ERR(printk(" cause: buffer overflow\n");)
1134                }
1135                goto out_free_desc;
1136        }  
1137  
1138        /*  
1139                build DLE.        
1140        */  
1141  
1142        buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;  
1143        dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;  
1144        len = dma_addr - buf_addr;  
1145        if (len > iadev->rx_buf_sz) {
1146           printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
1147           atomic_inc(&vcc->stats->rx_err);
1148           goto out_free_desc;
1149        }
1150                  
1151        if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
1152           if (vcc->vci < 32)
1153              printk("Drop control packets\n");
 1154           goto out_free_desc;
1155        }
1156        skb_put(skb,len);  
1157        // pwang_test
1158        ATM_SKB(skb)->vcc = vcc;
1159        ATM_DESC(skb) = desc;        
1160        skb_queue_tail(&iadev->rx_dma_q, skb);  
1161
1162        /* Build the DLE structure */  
1163        wr_ptr = iadev->rx_dle_q.write;  
1164        wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
1165                len, PCI_DMA_FROMDEVICE);
1166        wr_ptr->local_pkt_addr = buf_addr;  
1167        wr_ptr->bytes = len;    /* We don't know this do we ?? */  
1168        wr_ptr->mode = DMA_INT_ENABLE;  
1169  
 1170        /* should take care of wrap-around here too. */
1171        if(++wr_ptr == iadev->rx_dle_q.end)
1172             wr_ptr = iadev->rx_dle_q.start;
1173        iadev->rx_dle_q.write = wr_ptr;  
1174        udelay(1);  
1175        /* Increment transaction counter */  
1176        writel(1, iadev->dma+IPHASE5575_RX_COUNTER);   
1177out:    return 0;  
1178out_free_desc:
1179        free_desc(dev, desc);
1180        goto out;
1181}  
1182  
1183static void rx_intr(struct atm_dev *dev)  
1184{  
1185  IADEV *iadev;  
1186  u_short status;  
1187  u_short state, i;  
1188  
1189  iadev = INPH_IA_DEV(dev);  
1190  status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;  
1191  IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
1192  if (status & RX_PKT_RCVD)  
1193  {  
1194        /* do something */  
 1195        /* Basically received an interrupt for a received packet.
 1196        A descriptor would have been written to the packet complete
 1197        queue. Get all the descriptors and set up DMA to move the
 1198        packets until the packet complete queue is empty.
1199        */  
1200        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1201        IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);) 
1202        while(!(state & PCQ_EMPTY))  
1203        {  
1204             rx_pkt(dev);  
1205             state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1206        }  
1207        iadev->rxing = 1;
1208  }  
1209  if (status & RX_FREEQ_EMPT)  
1210  {   
1211     if (iadev->rxing) {
1212        iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
1213        iadev->rx_tmp_jif = jiffies; 
1214        iadev->rxing = 0;
1215     } 
1216     else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
1217               ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
1218        for (i = 1; i <= iadev->num_rx_desc; i++)
1219               free_desc(dev, i);
1220printk("Test logic RUN!!!!\n");
1221        writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
1222        iadev->rxing = 1;
1223     }
1224     IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)  
1225  }  
1226
1227  if (status & RX_EXCP_RCVD)  
1228  {  
1229        /* probably need to handle the exception queue also. */  
1230        IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)  
1231        rx_excp_rcvd(dev);  
1232  }  
1233
1234
1235  if (status & RX_RAW_RCVD)  
1236  {  
 1237        /* need to handle the raw incoming cells. This depends on
 1238        whether we have programmed the card to receive raw cells or not.
 1239        Else ignore. */
1240        IF_EVENT(printk("Rx intr status:  RX_RAW_RCVD %08x\n", status);)  
1241  }  
1242}  
1243  
1244  
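/*
 * Receive DMA completion: walk the DLE ring from our read pointer up to the
 * adapter's current list address.  For each skb on rx_dma_q, free its
 * on-board descriptor, use the AAL5 CPCS trailer length to trim the padding,
 * and hand the packet up through vcc->push().  If receive interrupts were
 * masked because the free queue ran dry, unmask them again once descriptors
 * are available.
 */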
1245static void rx_dle_intr(struct atm_dev *dev)  
1246{  
1247  IADEV *iadev;  
1248  struct atm_vcc *vcc;   
1249  struct sk_buff *skb;  
1250  int desc;  
1251  u_short state;   
1252  struct dle *dle, *cur_dle;  
1253  u_int dle_lp;  
1254  int len;
1255  iadev = INPH_IA_DEV(dev);  
1256 
 1257  /* free all the dles done, that is just update our own dle read pointer
 1258        - do we really need to do this? Probably not. */
 1259  /* DMA is done, just get all the receive buffers from the rx dma queue
 1260        and push them up to the higher layer protocol. Also free the desc
 1261        associated with the buffer. */
1262  dle = iadev->rx_dle_q.read;  
1263  dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);  
1264  cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));  
1265  while(dle != cur_dle)  
1266  {  
1267      /* free the DMAed skb */  
1268      skb = skb_dequeue(&iadev->rx_dma_q);  
1269      if (!skb)  
1270         goto INCR_DLE;
1271      desc = ATM_DESC(skb);
1272      free_desc(dev, desc);  
1273               
1274      if (!(len = skb->len))
1275      {  
1276          printk("rx_dle_intr: skb len 0\n");  
1277          dev_kfree_skb_any(skb);  
1278      }  
1279      else  
1280      {  
1281          struct cpcs_trailer *trailer;
1282          u_short length;
1283          struct ia_vcc *ia_vcc;
1284
1285          pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr,
1286                len, PCI_DMA_FROMDEVICE);
1287          /* no VCC related housekeeping done as yet. lets see */  
1288          vcc = ATM_SKB(skb)->vcc;
1289          if (!vcc) {
1290              printk("IA: null vcc\n");  
1291              dev_kfree_skb_any(skb);
1292              goto INCR_DLE;
1293          }
1294          ia_vcc = INPH_IA_VCC(vcc);
1295          if (ia_vcc == NULL)
1296          {
1297             atomic_inc(&vcc->stats->rx_err);
1298             dev_kfree_skb_any(skb);
1299             atm_return(vcc, atm_guess_pdu2truesize(len));
1300             goto INCR_DLE;
1301           }
1302          // get real pkt length  pwang_test
1303          trailer = (struct cpcs_trailer*)((u_char *)skb->data +
1304                                 skb->len - sizeof(*trailer));
1305          length = swap_byte_order(trailer->length);
1306          if ((length > iadev->rx_buf_sz) || (length > 
1307                              (skb->len - sizeof(struct cpcs_trailer))))
1308          {
1309             atomic_inc(&vcc->stats->rx_err);
1310             IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)", 
1311                                                            length, skb->len);)
1312             dev_kfree_skb_any(skb);
1313             atm_return(vcc, atm_guess_pdu2truesize(len));
1314             goto INCR_DLE;
1315          }
1316          skb_trim(skb, length);
1317          
1318          /* Display the packet */  
1319          IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);  
1320          xdump(skb->data, skb->len, "RX: ");
1321          printk("\n");)
1322
1323          IF_RX(printk("rx_dle_intr: skb push");)  
1324          vcc->push(vcc,skb);  
1325          atomic_inc(&vcc->stats->rx);
1326          iadev->rx_pkt_cnt++;
1327      }  
1328INCR_DLE:
1329      if (++dle == iadev->rx_dle_q.end)  
1330          dle = iadev->rx_dle_q.start;  
1331  }  
1332  iadev->rx_dle_q.read = dle;  
1333  
1334  /* if the interrupts are masked because there were no free desc available,  
1335                unmask them now. */ 
1336  if (!iadev->rxing) {
1337     state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1338     if (!(state & FREEQ_EMPTY)) {
1339        state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
1340        writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
1341                                      iadev->reass_reg+REASS_MASK_REG);
1342        iadev->rxing++; 
1343     }
1344  }
1345}  
1346  
1347  
1348static int open_rx(struct atm_vcc *vcc)  
1349{  
1350        IADEV *iadev;  
1351        u_short __iomem *vc_table;  
1352        u_short __iomem *reass_ptr;  
1353        IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
1354
1355        if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;    
1356        iadev = INPH_IA_DEV(vcc->dev);  
1357        if (vcc->qos.rxtp.traffic_class == ATM_ABR) {  
1358           if (iadev->phy_type & FE_25MBIT_PHY) {
 1359               printk("IA: ABR not supported\n");
1360               return -EINVAL; 
1361           }
1362        }
1363        /* Make only this VCI in the vc table valid and let all   
1364                others be invalid entries */  
1365        vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
1366        vc_table += vcc->vci;
1367        /* mask the last 6 bits and OR it with 3 for 1K VCs */  
1368
1369        *vc_table = vcc->vci << 6;
1370        /* Also keep a list of open rx vcs so that we can attach them with  
1371                incoming PDUs later. */  
1372        if ((vcc->qos.rxtp.traffic_class == ATM_ABR) || 
1373                                (vcc->qos.txtp.traffic_class == ATM_ABR))  
1374        {  
1375                srv_cls_param_t srv_p;
1376                init_abr_vc(iadev, &srv_p);
1377                ia_open_abr_vc(iadev, &srv_p, vcc, 0);
1378        } 
1379        else {  /* for UBR  later may need to add CBR logic */
1380                reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
1381                reass_ptr += vcc->vci;
1382                *reass_ptr = NO_AAL5_PKT;
1383        }
1384        
1385        if (iadev->rx_open[vcc->vci])  
1386                printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",  
1387                        vcc->dev->number, vcc->vci);  
1388        iadev->rx_open[vcc->vci] = vcc;  
1389        return 0;  
1390}  
1391  
1392static int rx_init(struct atm_dev *dev)  
1393{  
1394        IADEV *iadev;  
1395        struct rx_buf_desc __iomem *buf_desc_ptr;  
1396        unsigned long rx_pkt_start = 0;  
1397        void *dle_addr;  
1398        struct abr_vc_table  *abr_vc_table; 
1399        u16 *vc_table;  
1400        u16 *reass_table;  
1401        int i,j, vcsize_sel;  
1402        u_short freeq_st_adr;  
1403        u_short *freeq_start;  
1404  
1405        iadev = INPH_IA_DEV(dev);  
1406  //    spin_lock_init(&iadev->rx_lock); 
1407  
1408        /* Allocate 4k bytes - more aligned than needed (4k boundary) */
1409        dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1410                                        &iadev->rx_dle_dma);  
1411        if (!dle_addr)  {  
1412                printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1413                goto err_out;
1414        }
1415        iadev->rx_dle_q.start = (struct dle *)dle_addr;
1416        iadev->rx_dle_q.read = iadev->rx_dle_q.start;  
1417        iadev->rx_dle_q.write = iadev->rx_dle_q.start;  
1418        iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1419        /* the end of the dle q points to the entry after the last  
1420        DLE that can be used. */  
1421  
1422        /* write the upper 20 bits of the start address to rx list address register */  
1423        /* We know this is 32bit bus addressed so the following is safe */
1424        writel(iadev->rx_dle_dma & 0xfffff000,
1425               iadev->dma + IPHASE5575_RX_LIST_ADDR);  
1426        IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
1427                      iadev->dma+IPHASE5575_TX_LIST_ADDR,
1428                      *(u32*)(iadev->dma+IPHASE5575_TX_LIST_ADDR));  
1429        printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
1430                      iadev->dma+IPHASE5575_RX_LIST_ADDR,
1431                      *(u32*)(iadev->dma+IPHASE5575_RX_LIST_ADDR));)  
1432  
1433        writew(0xffff, iadev->reass_reg+REASS_MASK_REG);  
1434        writew(0, iadev->reass_reg+MODE_REG);  
1435        writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);  
1436  
1437        /* Receive side control memory map  
1438           -------------------------------  
1439  
1440                Buffer descr    0x0000 (736 - 23K)  
1441                VP Table        0x5c00 (256 - 512)  
1442                Except q        0x5e00 (128 - 512)  
1443                Free buffer q   0x6000 (1K - 2K)  
1444                Packet comp q   0x6800 (1K - 2K)  
1445                Reass Table     0x7000 (1K - 2K)  
1446                VC Table        0x7800 (1K - 2K)  
1447                ABR VC Table    0x8000 (1K - 32K)  
1448        */  
1449          
1450        /* Base address for Buffer Descriptor Table */  
1451        writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);  
1452        /* Set the buffer size register */  
1453        writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);  
1454  
1455        /* Initialize each entry in the Buffer Descriptor Table */  
1456        iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1457        buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1458        memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1459        buf_desc_ptr++;  
1460        rx_pkt_start = iadev->rx_pkt_ram;  
1461        for(i=1; i<=iadev->num_rx_desc; i++)  
1462        {  
1463                memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1464                buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;  
1465                buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;  
1466                buf_desc_ptr++;           
1467                rx_pkt_start += iadev->rx_buf_sz;  
1468        }  
1469        IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
1470        i = FREE_BUF_DESC_Q*iadev->memSize; 
1471        writew(i >> 16,  iadev->reass_reg+REASS_QUEUE_BASE); 
1472        writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1473        writew(i+iadev->num_rx_desc*sizeof(u_short), 
1474                                         iadev->reass_reg+FREEQ_ED_ADR);
1475        writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1476        writew(i+iadev->num_rx_desc*sizeof(u_short), 
1477                                        iadev->reass_reg+FREEQ_WR_PTR);    
1478        /* Fill the FREEQ with all the free descriptors. */  
1479        freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);  
1480        freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);  
1481        for(i=1; i<=iadev->num_rx_desc; i++)  
1482        {  
1483                *freeq_start = (u_short)i;  
1484                freeq_start++;  
1485        }  
1486        IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
1487        /* Packet Complete Queue */
1488        i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1489        writew(i, iadev->reass_reg+PCQ_ST_ADR);
1490        writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1491        writew(i, iadev->reass_reg+PCQ_RD_PTR);
1492        writew(i, iadev->reass_reg+PCQ_WR_PTR);
1493
1494        /* Exception Queue */
1495        i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1496        writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1497        writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q), 
1498                                             iadev->reass_reg+EXCP_Q_ED_ADR);
1499        writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1500        writew(i, iadev->reass_reg+EXCP_Q_WR_PTR); 
1501 
1502        /* Load local copy of FREEQ and PCQ ptrs */
1503        iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1504        iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1505        iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1506        iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1507        iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1508        iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1509        iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1510        iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1511        
1512        IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x", 
1513              iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd, 
1514              iadev->rfL.pcq_wr);)                
1515        /* Note: no VP Table is programmed here - VC reassembly only. */  
1516        /* VP Table */  
1517        /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */  
1518        /* To initialize the VP Table for invalid VPIs, the whole  
1519                table could be written with all 1s (or 0x000f) across  
1520                  its memory space.  
1521        */  
1522  
1523        /* This seems to work and looks right to me too !!! */  
1524        i =  REASS_TABLE * iadev->memSize;
1525        writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);   
1526        /* initialize every Reassembly table entry to NO_AAL5_PKT */  
1527        reass_table = (u16 *)(iadev->reass_ram+i);  
1528        j = REASS_TABLE_SZ * iadev->memSize;
1529        for(i=0; i < j; i++)  
1530                *reass_table++ = NO_AAL5_PKT;  
1531       i = 8*1024;
1532       vcsize_sel =  0;
1533       while (i != iadev->num_vc) {
1534          i /= 2;
1535          vcsize_sel++;
1536       }
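           /* vcsize_sel ends up as log2(8192 / num_vc): 0 for an 8K-VC
              configuration, 3 for a 1K-VC board.  It is OR-ed into the low
              bits of the VC_LKUP_BASE value written below, presumably as the
              VC table size select. */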
1537       i = RX_VC_TABLE * iadev->memSize;
1538       writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1539       vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);  
1540        j = RX_VC_TABLE_SZ * iadev->memSize;
1541        for(i = 0; i < j; i++)  
1542        {  
1543                /* shift the reassembly pointer by 3 plus the lower 3 bits   
1544                of the vc_lkup_base register (= 3 for 1K VCs); the last   
1545                byte of each entry carries those low 3 bits.   
1546                To be programmed properly later.  
1547                */  
1548                *vc_table = (i << 6) | 15;      /* for invalid VCI */  
1549                vc_table++;  
1550        }  
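            /* The (i << 6) | 15 pattern written above is the same "invalid
               VCI" value that ia_close() later restores into this table when
               a connection is torn down. */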
1551        /* ABR VC table */
1552        i =  ABR_VC_TABLE * iadev->memSize;
1553        writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1554                   
1555        i = ABR_VC_TABLE * iadev->memSize;
1556        abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);  
1557        j = REASS_TABLE_SZ * iadev->memSize;
1558        memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
1559        for(i = 0; i < j; i++) {                
1560                abr_vc_table->rdf = 0x0003;
1561                abr_vc_table->air = 0x5eb1;
1562                abr_vc_table++;         
1563        }  
1564
1565        /* Initialize other registers */  
1566  
1567        /* VP Filter Register set for VC Reassembly only */  
1568        writew(0xff00, iadev->reass_reg+VP_FILTER);  
1569        writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1570        writew(0x1,  iadev->reass_reg+PROTOCOL_ID);
1571
1572        /* Packet Timeout Count  related Registers : 
1573           Set packet timeout to occur in about 3 seconds
1574           Set Packet Aging Interval count register to overflow in about 4 us
1575        */  
1576        writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
1577
1578        i = (j >> 6) & 0xFF;
1579        j += 2 * (j - 1);
1580        i |= ((j << 2) & 0xFF00);
1581        writew(i, iadev->reass_reg+TMOUT_RANGE);
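            /* Best-guess reading of the packing above: the low byte of
               TMOUT_RANGE comes from (j >> 6) and the high byte from j after
               it is roughly tripled (j += 2 * (j - 1)), where j still holds
               the reassembly-table entry count; the exact hardware meaning
               is in the (i)Chip documentation. */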
1582
1583        /* initialize the desc_tbl */
1584        for(i=0; i<iadev->num_tx_desc;i++)
1585            iadev->desc_tbl[i].timestamp = 0;
1586
1587        /* to clear the interrupt status register - read it */  
1588        readw(iadev->reass_reg+REASS_INTR_STATUS_REG);   
1589  
1590        /* Mask Register - clear it */  
1591        writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);  
1592  
1593        skb_queue_head_init(&iadev->rx_dma_q);  
1594        iadev->rx_free_desc_qhead = NULL;   
1595
1596        iadev->rx_open = kzalloc(iadev->num_vc * sizeof(*iadev->rx_open), GFP_KERNEL);
1597        if (!iadev->rx_open) {
1598                printk(KERN_ERR DEV_LABEL "(itf %d): couldn't allocate rx_open table\n",
1599                dev->number);  
1600                goto err_free_dle;
1601        }  
1602
1603        iadev->rxing = 1;
1604        iadev->rx_pkt_cnt = 0;
1605        /* Mode Register */  
1606        writew(R_ONLINE, iadev->reass_reg+MODE_REG);  
1607        return 0;  
1608
1609err_free_dle:
1610        pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1611                            iadev->rx_dle_dma);  
1612err_out:
1613        return -ENOMEM;
1614}  
1615  
1616
1617/*  
1618        The memory map suggested in appendix A and the coding for it.   
1619        Keeping it around just in case we change our mind later.  
1620  
1621                Buffer descr    0x0000 (128 - 4K)  
1622                UBR sched       0x1000 (1K - 4K)  
1623                UBR Wait q      0x2000 (1K - 4K)  
1624                Commn queues    0x3000 Packet Ready, Transmit comp(0x3100)  
1625                                        (128 - 256) each  
1626                extended VC     0x4000 (1K - 8K)  
1627                ABR sched       0x6000  and ABR wait queue (1K - 2K) each  
1628                CBR sched       0x7000 (as needed)  
1629                VC table        0x8000 (1K - 32K)  
1630*/  
1631  
1632static void tx_intr(struct atm_dev *dev)  
1633{  
1634        IADEV *iadev;  
1635        unsigned short status;  
1636        unsigned long flags;
1637
1638        iadev = INPH_IA_DEV(dev);  
1639  
1640        status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);  
1641        if (status & TRANSMIT_DONE){
1642
1643           IF_EVENT(printk("Transmit Done Intr logic run\n");)
1644           spin_lock_irqsave(&iadev->tx_lock, flags);
1645           ia_tx_poll(iadev);
1646           spin_unlock_irqrestore(&iadev->tx_lock, flags);
1647           writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1648           if (iadev->close_pending)  
1649               wake_up(&iadev->close_wait);
1650        }         
1651        if (status & TCQ_NOT_EMPTY)  
1652        {  
1653            IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)  
1654        }  
1655}  
1656  
1657static void tx_dle_intr(struct atm_dev *dev)
1658{
1659        IADEV *iadev;
1660        struct dle *dle, *cur_dle; 
1661        struct sk_buff *skb;
1662        struct atm_vcc *vcc;
1663        struct ia_vcc  *iavcc;
1664        u_int dle_lp;
1665        unsigned long flags;
1666
1667        iadev = INPH_IA_DEV(dev);
1668        spin_lock_irqsave(&iadev->tx_lock, flags);   
1669        dle = iadev->tx_dle_q.read;
1670        dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) & 
1671                                        (sizeof(struct dle)*DLE_ENTRIES - 1);
1672        cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
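            /* The hardware list pointer read above is a byte offset into the
               DLE ring; masking keeps it within sizeof(struct dle) * DLE_ENTRIES
               and the >> 4 turns it into a DLE index (which assumes a 16-byte
               struct dle, as the shift count implies). */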
1673        while (dle != cur_dle)
1674        {
1675            /* free the DMAed skb */ 
1676            skb = skb_dequeue(&iadev->tx_dma_q); 
1677            if (!skb) break;
1678
1679            /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
1680            if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
1681                pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len,
1682                                 PCI_DMA_TODEVICE);
1683            }
1684            vcc = ATM_SKB(skb)->vcc;
1685            if (!vcc) {
1686                  printk("tx_dle_intr: vcc is null\n");
1687                  spin_unlock_irqrestore(&iadev->tx_lock, flags);
1688                  dev_kfree_skb_any(skb);
1689
1690                  return;
1691            }
1692            iavcc = INPH_IA_VCC(vcc);
1693            if (!iavcc) {
1694                  printk("tx_dle_intr: iavcc is null\n");
1695                  spin_unlock_irqrestore(&iadev->tx_lock, flags);
1696                  dev_kfree_skb_any(skb);
1697                  return;
1698            }
1699            if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
1700               if ((vcc->pop) && (skb->len != 0))
1701               {     
1702                 vcc->pop(vcc, skb);
1703               } 
1704               else {
1705                 dev_kfree_skb_any(skb);
1706               }
1707            }
1708            else { /* Hold the rate-limited skb for flow control */
1709               IA_SKB_STATE(skb) |= IA_DLED;
1710               skb_queue_tail(&iavcc->txing_skb, skb);
1711            }
1712            IF_EVENT(printk("tx_dle_intr: enqueue skb = 0x%p\n", skb);)
1713            if (++dle == iadev->tx_dle_q.end)
1714                 dle = iadev->tx_dle_q.start;
1715        }
1716        iadev->tx_dle_q.read = dle;
1717        spin_unlock_irqrestore(&iadev->tx_lock, flags);
1718}
1719  
1720static int open_tx(struct atm_vcc *vcc)  
1721{  
1722        struct ia_vcc *ia_vcc;  
1723        IADEV *iadev;  
1724        struct main_vc *vc;  
1725        struct ext_vc *evc;  
1726        int ret;
1727        IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)  
1728        if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;  
1729        iadev = INPH_IA_DEV(vcc->dev);  
1730        
1731        if (iadev->phy_type & FE_25MBIT_PHY) {
1732           if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1733               printk("IA: ABR not supported\n");
1734               return -EINVAL; 
1735           }
1736          if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1737               printk("IA: CBR not supported\n");
1738               return -EINVAL; 
1739          }
1740        }
1741        ia_vcc =  INPH_IA_VCC(vcc);
1742        memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
1743        if (vcc->qos.txtp.max_sdu > 
1744                         (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1745           printk("IA: SDU size %d exceeds the configured SDU size %d\n",
1746                  vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
1747           vcc->dev_data = NULL;
1748           kfree(ia_vcc);
1749           return -EINVAL; 
1750        }
1751        ia_vcc->vc_desc_cnt = 0;
1752        ia_vcc->txing = 1;
1753
1754        /* find pcr */
1755        if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR) 
1756           vcc->qos.txtp.pcr = iadev->LineRate;
1757        else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1758           vcc->qos.txtp.pcr = iadev->LineRate;
1759        else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0)) 
1760           vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1761        if (vcc->qos.txtp.pcr > iadev->LineRate)
1762             vcc->qos.txtp.pcr = iadev->LineRate;
1763        ia_vcc->pcr = vcc->qos.txtp.pcr;
1764
1765        if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1766        else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1767        else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1768        else ia_vcc->ltimeout = 2700 * HZ  / ia_vcc->pcr;
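            /* ltimeout shrinks as the VC gets faster: e.g. on an OC-3 link
               (roughly 353,000 cells/s) any PCR above LineRate/6 gets the
               shortest HZ/10 timeout, while a VC at or below 170 cells/s may
               wait up to 16 seconds.  The OC-3 figure is illustrative only. */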
1769        if (ia_vcc->pcr < iadev->rate_limit)
1770           skb_queue_head_init (&ia_vcc->txing_skb);
1771        if (ia_vcc->pcr < iadev->rate_limit) {
1772           struct sock *sk = sk_atm(vcc);
1773
1774           if (vcc->qos.txtp.max_sdu != 0) {
1775               if (ia_vcc->pcr > 60000)
1776                  sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
1777               else if (ia_vcc->pcr > 2000)
1778                  sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
1779               else
1780                 sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
1781           }
1782           else
1783             sk->sk_sndbuf = 24576;
1784        }
1785           
1786        vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
1787        evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
1788        vc += vcc->vci;  
1789        evc += vcc->vci;  
1790        memset((caddr_t)vc, 0, sizeof(*vc));  
1791        memset((caddr_t)evc, 0, sizeof(*evc));  
1792          
1793        /* store the most significant 4 bits of vci as the last 4 bits   
1794                of first part of atm header.  
1795           store the last 12 bits of vci as first 12 bits of the second  
1796                part of the atm header.  
1797        */  
1798        evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;  
1799        evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;  
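            /* Worked example: for vci == 0x1234, atm_hdr1 becomes 0x0001
               (top 4 bits of the VCI) and atm_hdr2 becomes 0x2340 (low 12
               bits shifted into the upper 12 bits of the half-word). */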
1800 
1801        /* check the following for different traffic classes */  
1802        if (vcc->qos.txtp.traffic_class == ATM_UBR)  
1803        {  
1804                vc->type = UBR;  
1805                vc->status = CRC_APPEND;
1806                vc->acr = cellrate_to_float(iadev->LineRate);  
1807                if (vcc->qos.txtp.pcr > 0) 
1808                   vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);  
1809                IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n", 
1810                                             vcc->qos.txtp.max_pcr,vc->acr);)
1811        }  
1812        else if (vcc->qos.txtp.traffic_class == ATM_ABR)  
1813        {       srv_cls_param_t srv_p;
1814                IF_ABR(printk("Tx ABR VCC\n");)  
1815                init_abr_vc(iadev, &srv_p);
1816                if (vcc->qos.txtp.pcr > 0) 
1817                   srv_p.pcr = vcc->qos.txtp.pcr;
1818                if (vcc->qos.txtp.min_pcr > 0) {
1819                   int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1820                   if (tmpsum > iadev->LineRate)
1821                       return -EBUSY;
1822                   srv_p.mcr = vcc->qos.txtp.min_pcr;
1823                   iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1824                } 
1825                else srv_p.mcr = 0;
1826                if (vcc->qos.txtp.icr)
1827                   srv_p.icr = vcc->qos.txtp.icr;
1828                if (vcc->qos.txtp.tbe)
1829                   srv_p.tbe = vcc->qos.txtp.tbe;
1830                if (vcc->qos.txtp.frtt)
1831                   srv_p.frtt = vcc->qos.txtp.frtt;
1832                if (vcc->qos.txtp.rif)
1833                   srv_p.rif = vcc->qos.txtp.rif;
1834                if (vcc->qos.txtp.rdf)
1835                   srv_p.rdf = vcc->qos.txtp.rdf;
1836                if (vcc->qos.txtp.nrm_pres)
1837                   srv_p.nrm = vcc->qos.txtp.nrm;
1838                if (vcc->qos.txtp.trm_pres)
1839                   srv_p.trm = vcc->qos.txtp.trm;
1840                if (vcc->qos.txtp.adtf_pres)
1841                   srv_p.adtf = vcc->qos.txtp.adtf;
1842                if (vcc->qos.txtp.cdf_pres)
1843                   srv_p.cdf = vcc->qos.txtp.cdf;    
1844                if (srv_p.icr > srv_p.pcr)
1845                   srv_p.icr = srv_p.pcr;    
1846                IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d  mcr = %d\n", 
1847                                                      srv_p.pcr, srv_p.mcr);)
1848                ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1849        } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1850                if (iadev->phy_type & FE_25MBIT_PHY) {
1851                    printk("IA: CBR not supported\n");
1852                    return -EINVAL; 
1853                }
1854                if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1855                   IF_CBR(printk("PCR is not available\n");)
1856                   return -1;
1857                }
1858                vc->type = CBR;
1859                vc->status = CRC_APPEND;
1860                if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {     
1861                    return ret;
1862                }
1863       } 
1864        else  
1865           printk("iadev: traffic classes other than UBR, ABR and CBR are not supported\n"); 
1866        
1867        iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1868        IF_EVENT(printk("ia open_tx returning \n");)  
1869        return 0;  
1870}  
1871  
1872  
1873static int tx_init(struct atm_dev *dev)  
1874{  
1875        IADEV *iadev;  
1876        struct tx_buf_desc *buf_desc_ptr;
1877        unsigned int tx_pkt_start;  
1878        void *dle_addr;  
1879        int i;  
1880        u_short tcq_st_adr;  
1881        u_short *tcq_start;  
1882        u_short prq_st_adr;  
1883        u_short *prq_start;  
1884        struct main_vc *vc;  
1885        struct ext_vc *evc;   
1886        u_short tmp16;
1887        u32 vcsize_sel;
1888 
1889        iadev = INPH_IA_DEV(dev);  
1890        spin_lock_init(&iadev->tx_lock);
1891 
1892        IF_INIT(printk("Tx MASK REG: 0x%0x\n", 
1893                                readw(iadev->seg_reg+SEG_MASK_REG));)  
1894
1895        /* Allocate 4k (boundary aligned) bytes */
1896        dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1897                                        &iadev->tx_dle_dma);  
1898        if (!dle_addr)  {
1899                printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1900                goto err_out;
1901        }
1902        iadev->tx_dle_q.start = (struct dle*)dle_addr;  
1903        iadev->tx_dle_q.read = iadev->tx_dle_q.start;  
1904        iadev->tx_dle_q.write = iadev->tx_dle_q.start;  
1905        iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1906
1907        /* write the upper 20 bits of the start address to tx list address register */  
1908        writel(iadev->tx_dle_dma & 0xfffff000,
1909               iadev->dma + IPHASE5575_TX_LIST_ADDR);  
1910        writew(0xffff, iadev->seg_reg+SEG_MASK_REG);  
1911        writew(0, iadev->seg_reg+MODE_REG_0);  
1912        writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);  
1913        iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
1914        iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
1915        iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
1916  
1917        /*  
1918           Transmit side control memory map  
1919           --------------------------------    
1920         Buffer descr   0x0000 (128 - 4K)  
1921         Commn queues   0x1000  Transmit comp, Packet ready(0x1400)   
1922                                        (512 - 1K) each  
1923                                        TCQ - 4K, PRQ - 5K  
1924         CBR Table      0x1800 (as needed) - 6K  
1925         UBR Table      0x3000 (1K - 4K) - 12K  
1926         UBR Wait queue 0x4000 (1K - 4K) - 16K  
1927         ABR sched      0x5000  and ABR wait queue (1K - 2K) each  
1928                                ABR Tbl - 20K, ABR Wq - 22K   
1929         extended VC    0x6000 (1K - 8K) - 24K  
1930         VC Table       0x8000 (1K - 32K) - 32K  
1931          
1932        Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl  
1933        and Wait q, which can be allotted later.  
1934        */  
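            /* As on the receive side, these offsets scale with iadev->memSize;
               the MAIN_VC_TABLE, EXT_VC_TABLE and ABR_SCHED_TABLE address
               computations a few lines above already apply that factor. */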
1935     
1936        /* Buffer Descriptor Table Base address */  
1937        writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);  
1938  
1939        /* initialize each entry in the buffer descriptor table */  
1940        buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);  
1941        memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1942        buf_desc_ptr++;  
1943        tx_pkt_start = TX_PACKET_RAM;  
1944        for(i=1; i<=iadev->num_tx_desc; i++)  
1945        {  
1946                memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1947                buf_desc_ptr->desc_mode = AAL5;  
1948                buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;  
1949                buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;  
1950                buf_desc_ptr++;           
1951                tx_pkt_start += iadev->tx_buf_sz;  
1952        }  
1953        iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
1954        if (!iadev->tx_buf) {
1955            printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1956            goto err_free_dle;
1957        }
1958        for (i= 0; i< iadev->num_tx_desc; i++)
1959        {
1960            struct cpcs_trailer *cpcs;
1961 
1962            cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
1963            if(!cpcs) {                
1964                printk(KERN_ERR DEV_LABEL " couldn't get freepage\n"); 
1965                goto err_free_tx_bufs;
1966            }
1967            iadev->tx_buf[i].cpcs = cpcs;
1968            iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci,
1969                cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE);
1970        }
1971        iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
1972                                   sizeof(struct desc_tbl_t), GFP_KERNEL);
1973        if (!iadev->desc_tbl) {
1974                printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1975                goto err_free_all_tx_bufs;
1976        }
1977  
1978        /* Communication Queues base address */  
1979        i = TX_COMP_Q * iadev->memSize;
1980        writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);  
1981  
1982        /* Transmit Complete Queue */  
1983        writew(i, iadev->seg_reg+TCQ_ST_ADR);  
1984        writew(i, iadev->seg_reg+TCQ_RD_PTR);  
1985        writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR); 
1986        iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
1987        writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
1988                                              iadev->seg_reg+TCQ_ED_ADR); 
1989        /* Fill the TCQ with all the free descriptors. */  
1990        tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);  
1991        tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);  
1992        for(i=1; i<=iadev->num_tx_desc; i++)  
1993        {  
1994                *tcq_start = (u_short)i;  
1995                tcq_start++;  
1996        }  
1997  
1998        /* Packet Ready Queue */  
1999        i = PKT_RDY_Q * iadev->memSize; 
2000        writew(i, iadev->seg_reg+PRQ_ST_ADR);  
2001        writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
2002                                              iadev->seg_reg+PRQ_ED_ADR);
2003        writew(i, iadev->seg_reg+PRQ_RD_PTR);  
2004        writew(i, iadev->seg_reg+PRQ_WR_PTR);  
2005         
2006        /* Load local copy of PRQ and TCQ ptrs */
2007        iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
2008        iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
2009        iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
2010
2011        iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
2012        iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
2013        iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
2014
2015        /* Just for safety initializing the queue to have desc 1 always */  
2016        /* Fill the PRQ with all the free descriptors. */  
2017        prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);  
2018        prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);  
2019        for(i=1; i<=iadev->num_tx_desc; i++)  
2020        {  
2021                *prq_start = (u_short)0;        /* clear each entry */  
2022                prq_start++;  
2023        }  
2024        /* CBR Table */  
2025        IF_INIT(printk("Start CBR Init\n");)
2026#if 1  /* for 1K VC board, CBR_PTR_BASE is 0 */
2027        writew(0,iadev->seg_reg+CBR_PTR_BASE);
2028#else /* Charlie's logic is wrong ? */
2029        tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2030        IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
2031        writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2032#endif
2033
2034        IF_INIT(printk("value in register = 0x%x\n",
2035                                   readw(iadev->seg_reg+CBR_PTR_BASE));)
2036        tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2037        writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2038        IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2039                                        readw(iadev->seg_reg+CBR_TAB_BEG));)
2040        writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2041        tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2042        writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2043        IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
2044               iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2045        IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2046          readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2047          readw(iadev->seg_reg+CBR_TAB_END+1));)
2048
2049        /* Initialize the CBR Scheduling Table */
2050        memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize, 
2051                                                          0, iadev->num_vc*6); 
2052        iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2053        iadev->CbrEntryPt = 0;
2054        iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2055        iadev->NumEnabledCBR = 0;
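            /* Sizing note: CbrTotEntries is num_vc * 3, so a 1K-VC board gets
               3072 CBR slots and each slot is worth MAX_ATM_155 / 3072 cells/s
               of bandwidth (iadev->Granularity); a CBR VC later reserves a
               whole number of such slots (see NumCbrEntry in ia_close()). */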
2056
2057        /* UBR scheduling Table and wait queue */  
2058        /* initialize all bytes of UBR scheduler table and wait queue to 0   
2059                - SCHEDSZ is 1K (# of entries).  
2060                - UBR Table size is 4K  
2061                - UBR wait queue is 4K  
2062           since the table and wait queues are contiguous, all the bytes   
2063           can be initialized by one memset.
2064        */  
2065        
2066        vcsize_sel = 0;
2067        i = 8*1024;
2068        while (i != iadev->num_vc) {
2069          i /= 2;
2070          vcsize_sel++;
2071        }
2072 
2073        i = MAIN_VC_TABLE * iadev->memSize;
2074        writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
2075        i =  EXT_VC_TABLE * iadev->memSize;
2076        writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
2077        i = UBR_SCHED_TABLE * iadev->memSize;
2078        writew((i & 0xffff) >> 11,  iadev->seg_reg+UBR_SBPTR_BASE);
2079        i = UBR_WAIT_Q * iadev->memSize; 
2080        writew((i >> 7) & 0xffff,  iadev->seg_reg+UBRWQ_BASE);
2081        memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
2082                                                       0, iadev->num_vc*8);
2083        /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/  
2084        /* initialize all bytes of ABR scheduler table and wait queue to 0   
2085                - SCHEDSZ is 1K (# of entries).  
2086                - ABR Table size is 2K  
2087                - ABR wait queue is 2K  
2088           since the table and wait queues are contiguous, all the bytes   
2089           can be initialized by one memset.
2090        */  
2091        i = ABR_SCHED_TABLE * iadev->memSize;
2092        writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
2093        i = ABR_WAIT_Q * iadev->memSize;
2094        writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
2095 
2096        i = ABR_SCHED_TABLE*iadev->memSize;
2097        memset((caddr_t)(iadev->seg_ram+i),  0, iadev->num_vc*4);
2098        vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
2099        evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
2100        iadev->testTable = kmalloc(sizeof(*iadev->testTable) * iadev->num_vc, GFP_KERNEL); 
2101        if (!iadev->testTable) {
2102           printk(KERN_ERR DEV_LABEL " couldn't allocate testTable\n");
2103           goto err_free_desc_tbl;
2104        }
2105        for(i=0; i<iadev->num_vc; i++)  
2106        {  
2107                memset((caddr_t)vc, 0, sizeof(*vc));  
2108                memset((caddr_t)evc, 0, sizeof(*evc));  
2109                iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
2110                                                GFP_KERNEL);
2111                if (!iadev->testTable[i])
2112                        goto err_free_test_tables;
2113                iadev->testTable[i]->lastTime = 0;
2114                iadev->testTable[i]->fract = 0;
2115                iadev->testTable[i]->vc_status = VC_UBR;
2116                vc++;  
2117                evc++;  
2118        }  
2119  
2120        /* Other Initialization */  
2121          
2122        /* Max Rate Register */  
2123        if (iadev->phy_type & FE_25MBIT_PHY) {
2124           writew(RATE25, iadev->seg_reg+MAXRATE);  
2125           writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2126        }
2127        else {
2128           writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
2129           writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2130        }
2131        /* Set Idle Header Registers to be sure */  
2132        writew(0, iadev->seg_reg+IDLEHEADHI);  
2133        writew(0, iadev->seg_reg+IDLEHEADLO);  
2134  
2135        /* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
2136        writew(0xaa00, iadev->seg_reg+ABRUBR_ARB); 
2137
2138        iadev->close_pending = 0;
2139        init_waitqueue_head(&iadev->close_wait);
2140        init_waitqueue_head(&iadev->timeout_wait);
2141        skb_queue_head_init(&iadev->tx_dma_q);  
2142        ia_init_rtn_q(&iadev->tx_return_q);  
2143
2144        /* RM Cell Protocol ID and Message Type */  
2145        writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);  
2146        skb_queue_head_init (&iadev->tx_backlog);
2147  
2148        /* Mode Register 1 */  
2149        writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);  
2150  
2151        /* Mode Register 0 */  
2152        writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);  
2153  
2154        /* Interrupt Status Register - read to clear */  
2155        readw(iadev->seg_reg+SEG_INTR_STATUS_REG);  
2156  
2157        /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */  
2158        writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
2159        writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);  
2160        iadev->tx_pkt_cnt = 0;
2161        iadev->rate_limit = iadev->LineRate / 3;
2162  
2163        return 0;
2164
2165err_free_test_tables:
2166        while (--i >= 0)
2167                kfree(iadev->testTable[i]);
2168        kfree(iadev->testTable);
2169err_free_desc_tbl:
2170        kfree(iadev->desc_tbl);
2171err_free_all_tx_bufs:
2172        i = iadev->num_tx_desc;
2173err_free_tx_bufs:
2174        while (--i >= 0) {
2175                struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2176
2177                pci_unmap_single(iadev->pci, desc->dma_addr,
2178                        sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2179                kfree(desc->cpcs);
2180        }
2181        kfree(iadev->tx_buf);
2182err_free_dle:
2183        pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2184                            iadev->tx_dle_dma);  
2185err_out:
2186        return -ENOMEM;
2187}   
2188   
2189static irqreturn_t ia_int(int irq, void *dev_id)  
2190{  
2191   struct atm_dev *dev;  
2192   IADEV *iadev;  
2193   unsigned int status;  
2194   int handled = 0;
2195
2196   dev = dev_id;  
2197   iadev = INPH_IA_DEV(dev);  
2198   while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))  
2199   { 
2200        handled = 1;
2201        IF_EVENT(printk("ia_int: status = 0x%x\n", status);) 
2202        if (status & STAT_REASSINT)  
2203        {  
2204           /* do something */  
2205           IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);) 
2206           rx_intr(dev);  
2207        }  
2208        if (status & STAT_DLERINT)  
2209        {  
2210           /* Clear this bit by writing a 1 to it. */  
2211           *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLERINT;
2212           rx_dle_intr(dev);  
2213        }  
2214        if (status & STAT_SEGINT)  
2215        {  
2216           /* do something */ 
2217           IF_EVENT(printk("IA: tx_intr \n");) 
2218           tx_intr(dev);  
2219        }  
2220        if (status & STAT_DLETINT)  
2221        {  
2222           *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLETINT;  
2223           tx_dle_intr(dev);  
2224        }  
2225        if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))  
2226        {  
2227           if (status & STAT_FEINT) 
2228               IaFrontEndIntr(iadev);
2229        }  
2230   }
2231   return IRQ_RETVAL(handled);
2232}  
2233          
2234          
2235          
2236/*----------------------------- entries --------------------------------*/  
2237static int get_esi(struct atm_dev *dev)  
2238{  
2239        IADEV *iadev;  
2240        int i;  
2241        u32 mac1;  
2242        u16 mac2;  
2243          
2244        iadev = INPH_IA_DEV(dev);  
2245        mac1 = cpu_to_be32(le32_to_cpu(readl(  
2246                                iadev->reg+IPHASE5575_MAC1)));  
2247        mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));  
2248        IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)  
2249        for (i=0; i<MAC1_LEN; i++)  
2250                dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));  
2251          
2252        for (i=0; i<MAC2_LEN; i++)  
2253                dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));  
2254        return 0;  
2255}  
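
/*
 * Illustrative only (not used by the driver): a minimal sketch of the byte
 * packing done in get_esi() above, assuming MAC1_LEN == 4 and MAC2_LEN == 2
 * so that esi[0..3] come from mac1 (MSB first) and esi[4..5] from mac2.
 */
static inline void ia_pack_esi_example(u32 mac1, u16 mac2, unsigned char *esi)
{
        int i;

        for (i = 0; i < 4; i++)         /* mac1 -> esi[0..3], MSB first */
                esi[i] = mac1 >> (8 * (3 - i));
        for (i = 0; i < 2; i++)         /* mac2 -> esi[4..5], MSB first */
                esi[4 + i] = mac2 >> (8 * (1 - i));
}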
2256          
2257static int reset_sar(struct atm_dev *dev)  
2258{  
2259        IADEV *iadev;  
2260        int i, error = 1;  
2261        unsigned int pci[64];  
2262          
2263        iadev = INPH_IA_DEV(dev);  
2264        for(i=0; i<64; i++)  
2265          if ((error = pci_read_config_dword(iadev->pci,  
2266                                i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)  
2267              return error;  
2268        writel(0, iadev->reg+IPHASE5575_EXT_RESET);  
2269        for(i=0; i<64; i++)  
2270          if ((error = pci_write_config_dword(iadev->pci,  
2271                                        i*4, pci[i])) != PCIBIOS_SUCCESSFUL)  
2272            return error;  
2273        udelay(5);  
2274        return 0;  
2275}  
2276          
2277          
2278static int __devinit ia_init(struct atm_dev *dev)
2279{  
2280        IADEV *iadev;  
2281        unsigned long real_base;
2282        void __iomem *base;
2283        unsigned short command;  
2284        int error, i; 
2285          
2286        /* The device has been identified and registered. Now we read   
2287           necessary configuration info like memory base address,   
2288           interrupt number etc */  
2289          
2290        IF_INIT(printk(">ia_init\n");)  
2291        dev->ci_range.vpi_bits = 0;  
2292        dev->ci_range.vci_bits = NR_VCI_LD;  
2293
2294        iadev = INPH_IA_DEV(dev);  
2295        real_base = pci_resource_start (iadev->pci, 0);
2296        iadev->irq = iadev->pci->irq;
2297                  
2298        error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
2299        if (error) {
2300                printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",  
2301                                dev->number,error);  
2302                return -EINVAL;  
2303        }  
2304        IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",  
2305                        dev->number, iadev->pci->revision, real_base, iadev->irq);)
2306          
2307        /* find mapping size of board */  
2308          
2309        iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
2310
2311        if (iadev->pci_map_size == 0x100000){
2312          iadev->num_vc = 4096;
2313          dev->ci_range.vci_bits = NR_VCI_4K_LD;  
2314          iadev->memSize = 4;
2315        }
2316        else if (iadev->pci_map_size == 0x40000) {
2317          iadev->num_vc = 1024;
2318          iadev->memSize = 1;
2319        }
2320        else {
2321           printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
2322           return -EINVAL;
2323        }
2324        IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)  
2325          
2326        /* enable bus mastering */
2327        pci_set_master(iadev->pci);
2328
2329        /*  
2330         * Delay at least 1us before doing any mem accesses (how 'bout 10?)  
2331         */  
2332        udelay(10);  
2333          
2334        /* mapping the physical address to a virtual address in address space */  
2335        base = ioremap(real_base,iadev->pci_map_size);  /* ioremap is not resolved ??? */  
2336          
2337        if (!base)  
2338        {  
2339                printk(DEV_LABEL " (itf %d): can't set up page mapping\n",  
2340                            dev->number);  
2341                return -ENOMEM;  
2342        }  
2343        IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",  
2344                        dev->number, iadev->pci->revision, base, iadev->irq);)
2345          
2346        /* filling the iphase dev structure */  
2347        iadev->mem = iadev->pci_map_size /2;  
2348        iadev->real_base = real_base;  
2349        iadev->base = base;  
2350                  
2351        /* Bus Interface Control Registers */  
2352        iadev->reg = base + REG_BASE;
2353        /* Segmentation Control Registers */  
2354        iadev->seg_reg = base + SEG_BASE;
2355        /* Reassembly Control Registers */  
2356        iadev->reass_reg = base + REASS_BASE;  
2357        /* Front end/ DMA control registers */  
2358        iadev->phy = base + PHY_BASE;  
2359        iadev->dma = base + PHY_BASE;  
2360        /* RAM - Segmentation RAm and Reassembly RAM */  
2361        iadev->ram = base + ACTUAL_RAM_BASE;  
2362        iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;  
2363        iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;  
2364  
2365        /* lets print out the above */  
2366        IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n", 
2367          iadev->reg,iadev->seg_reg,iadev->reass_reg, 
2368          iadev->phy, iadev->ram, iadev->seg_ram, 
2369          iadev->reass_ram);) 
2370          
2371        /* lets try reading the MAC address */  
2372        error = get_esi(dev);  
2373        if (error) {
2374          iounmap(iadev->base);
2375          return error;  
2376        }
2377        printk("IA: ");
2378        for (i=0; i < ESI_LEN; i++)  
2379                printk("%s%02X",i ? "-" : "",dev->esi[i]);  
2380        printk("\n");  
2381  
2382        /* reset SAR */  
2383        if (reset_sar(dev)) {
2384           iounmap(iadev->base);
2385           printk("IA: reset SAR fail, please try again\n");
2386           return 1;
2387        }
2388        return 0;  
2389}  
2390
2391static void ia_update_stats(IADEV *iadev) {
2392    if (!iadev->carrier_detect)
2393        return;
2394    iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2395    iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2396    iadev->drop_rxpkt +=  readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2397    iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2398    iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2399    iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2400    return;
2401}
2402  
2403static void ia_led_timer(unsigned long arg) {
2404        unsigned long flags;
2405        static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2406        u_char i;
2407        static u32 ctrl_reg; 
2408        for (i = 0; i < iadev_count; i++) {
2409           if (ia_dev[i]) {
2410              ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2411              if (blinking[i] == 0) {
2412                 blinking[i]++;
2413                 ctrl_reg &= (~CTRL_LED);
2414                 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2415                 ia_update_stats(ia_dev[i]);
2416              }
2417              else {
2418                 blinking[i] = 0;
2419                 ctrl_reg |= CTRL_LED;
2420                 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2421                 spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2422                 if (ia_dev[i]->close_pending)  
2423                    wake_up(&ia_dev[i]->close_wait);
2424                 ia_tx_poll(ia_dev[i]);
2425                 spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2426              }
2427           }
2428        }
2429        mod_timer(&ia_timer, jiffies + HZ / 4);
2430        return;
2431}
2432
2433static void ia_phy_put(struct atm_dev *dev, unsigned char value,   
2434        unsigned long addr)  
2435{  
2436        writel(value, INPH_IA_DEV(dev)->phy+addr);  
2437}  
2438  
2439static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)  
2440{  
2441        return readl(INPH_IA_DEV(dev)->phy+addr);  
2442}  
2443
2444static void ia_free_tx(IADEV *iadev)
2445{
2446        int i;
2447
2448        kfree(iadev->desc_tbl);
2449        for (i = 0; i < iadev->num_vc; i++)
2450                kfree(iadev->testTable[i]);
2451        kfree(iadev->testTable);
2452        for (i = 0; i < iadev->num_tx_desc; i++) {
2453                struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2454
2455                pci_unmap_single(iadev->pci, desc->dma_addr,
2456                        sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2457                kfree(desc->cpcs);
2458        }
2459        kfree(iadev->tx_buf);
2460        pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2461                            iadev->tx_dle_dma);  
2462}
2463
2464static void ia_free_rx(IADEV *iadev)
2465{
2466        kfree(iadev->rx_open);
2467        pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2468                          iadev->rx_dle_dma);  
2469}
2470
2471static int __devinit ia_start(struct atm_dev *dev)
2472{  
2473        IADEV *iadev;  
2474        int error;  
2475        unsigned char phy;  
2476        u32 ctrl_reg;  
2477        IF_EVENT(printk(">ia_start\n");)  
2478        iadev = INPH_IA_DEV(dev);  
2479        if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
2480                printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",  
2481                    dev->number, iadev->irq);  
2482                error = -EAGAIN;
2483                goto err_out;
2484        }  
2485        /* @@@ should release IRQ on error */  
2486        /* enabling memory + master */  
2487        if ((error = pci_write_config_word(iadev->pci,   
2488                                PCI_COMMAND,   
2489                                PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))   
2490        {  
2491                printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"  
2492                    "master (0x%x)\n",dev->number, error);  
2493                error = -EIO;  
2494                goto err_free_irq;
2495        }  
2496        udelay(10);  
2497  
2498        /* Maybe we should reset the front end, initialize Bus Interface Control   
2499                Registers and see. */  
2500  
2501        IF_INIT(printk("Bus ctrl reg: %08x\n", 
2502                            readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2503        ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2504        ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))  
2505                        | CTRL_B8  
2506                        | CTRL_B16  
2507                        | CTRL_B32  
2508                        | CTRL_B48  
2509                        | CTRL_B64  
2510                        | CTRL_B128  
2511                        | CTRL_ERRMASK  
2512                        | CTRL_DLETMASK         /* should be removed later */  
2513                        | CTRL_DLERMASK  
2514                        | CTRL_SEGMASK  
2515                        | CTRL_REASSMASK          
2516                        | CTRL_FEMASK  
2517                        | CTRL_CSPREEMPT;  
2518  
2519       writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2520  
2521        IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2522                           readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));  
2523           printk("Bus status reg after init: %08x\n", 
2524                            readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)  
2525    
2526        ia_hw_type(iadev); 
2527        error = tx_init(dev);  
2528        if (error)
2529                goto err_free_irq;
2530        error = rx_init(dev);  
2531        if (error)
2532                goto err_free_tx;
2533  
2534        ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2535        writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2536        IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2537                               readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2538        phy = 0; /* resolve compiler complaint */
2539        IF_INIT ( 
2540        if ((phy=ia_phy_get(dev,0)) == 0x30)  
2541                printk("IA: pm5346,rev.%d\n",phy&0x0f);  
2542        else  
2543                printk("IA: utopia,rev.%0x\n",phy);) 
2544
2545        if (iadev->phy_type &  FE_25MBIT_PHY)
2546           ia_mb25_init(iadev);
2547        else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
2548           ia_suni_pm7345_init(iadev);
2549        else {
2550                error = suni_init(dev);
2551                if (error)
2552                        goto err_free_rx;
2553                if (dev->phy->start) {
2554                        error = dev->phy->start(dev);
2555                        if (error)
2556                                goto err_free_rx;
2557                }
2558                /* Get iadev->carrier_detect status */
2559                IaFrontEndIntr(iadev);
2560        }
2561        return 0;
2562
2563err_free_rx:
2564        ia_free_rx(iadev);
2565err_free_tx:
2566        ia_free_tx(iadev);
2567err_free_irq:
2568        free_irq(iadev->irq, dev);  
2569err_out:
2570        return error;
2571}  
2572  
2573static void ia_close(struct atm_vcc *vcc)  
2574{
2575        DEFINE_WAIT(wait);
2576        u16 *vc_table;
2577        IADEV *iadev;
2578        struct ia_vcc *ia_vcc;
2579        struct sk_buff *skb = NULL;
2580        struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
2581        unsigned long closetime, flags;
2582
2583        iadev = INPH_IA_DEV(vcc->dev);
2584        ia_vcc = INPH_IA_VCC(vcc);
2585        if (!ia_vcc) return;  
2586
2587        IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n", 
2588                                              ia_vcc->vc_desc_cnt,vcc->vci);)
2589        clear_bit(ATM_VF_READY,&vcc->flags);
2590        skb_queue_head_init (&tmp_tx_backlog);
2591        skb_queue_head_init (&tmp_vcc_backlog); 
2592        if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2593           iadev->close_pending++;
2594           prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
2595           schedule_timeout(50);
2596           finish_wait(&iadev->timeout_wait, &wait);
2597           spin_lock_irqsave(&iadev->tx_lock, flags); 
2598           while((skb = skb_dequeue(&iadev->tx_backlog))) {
2599              if (ATM_SKB(skb)->vcc == vcc){ 
2600                 if (vcc->pop) vcc->pop(vcc, skb);
2601                 else dev_kfree_skb_any(skb);
2602              }
2603              else 
2604                 skb_queue_tail(&tmp_tx_backlog, skb);
2605           } 
2606           while((skb = skb_dequeue(&tmp_tx_backlog))) 
2607             skb_queue_tail(&iadev->tx_backlog, skb);
2608           IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);) 
2609           closetime = 300000 / ia_vcc->pcr;
2610           if (closetime == 0)
2611              closetime = 1;
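               /* The drain timeout (in jiffies for wait_event_timeout below)
                  scales inversely with the VC's PCR, so slower VCs get longer
                  to flush their outstanding descriptors; 300000 appears to be
                  an empirically chosen constant. */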
2612           spin_unlock_irqrestore(&iadev->tx_lock, flags);
2613           wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
2614           spin_lock_irqsave(&iadev->tx_lock, flags);
2615           iadev->close_pending--;
2616           iadev->testTable[vcc->vci]->lastTime = 0;
2617           iadev->testTable[vcc->vci]->fract = 0; 
2618           iadev->testTable[vcc->vci]->vc_status = VC_UBR; 
2619           if (vcc->qos.txtp.traffic_class == ATM_ABR) {
2620              if (vcc->qos.txtp.min_pcr > 0)
2621                 iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
2622           }
2623           if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2624              ia_vcc = INPH_IA_VCC(vcc); 
2625              iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
2626              ia_cbrVc_close (vcc);
2627           }
2628           spin_unlock_irqrestore(&iadev->tx_lock, flags);
2629        }
2630        
2631        if (vcc->qos.rxtp.traffic_class != ATM_NONE) {   
2632           // reset reass table
2633           vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
2634           vc_table += vcc->vci; 
2635           *vc_table = NO_AAL5_PKT;
2636           // reset vc table
2637           vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
2638           vc_table += vcc->vci;
2639           *vc_table = (vcc->vci << 6) | 15;
2640           if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
2641              struct abr_vc_table __iomem *abr_vc_table = 
2642                                (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
2643              abr_vc_table +=  vcc->vci;
2644              abr_vc_table->rdf = 0x0003;
2645              abr_vc_table->air = 0x5eb1;
2646           }                                 
2647           // Drain the packets
2648           rx_dle_intr(vcc->dev); 
2649           iadev->rx_open[vcc->vci] = NULL;
2650        }
2651        kfree(INPH_IA_VCC(vcc));  
2652        ia_vcc = NULL;
2653        vcc->dev_data = NULL;
2654        clear_bit(ATM_VF_ADDR,&vcc->flags);
2655        return;        
2656}  
2657  
2658static int ia_open(struct atm_vcc *vcc)
2659{  
2660        struct ia_vcc *ia_vcc;  
2661        int error;  
2662        if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))  
2663        {  
2664                IF_EVENT(printk("ia: not partially allocated resources\n");)  
2665                vcc->dev_data = NULL;
2666        }  
2667        if (vcc->vci != ATM_VCI_UNSPEC && vcc->vpi != ATM_VPI_UNSPEC)  
2668        {  
2669                IF_EVENT(printk("iphase open: unspec part\n");)  
2670                set_bit(ATM_VF_ADDR,&vcc->flags);
2671        }  
2672        if (vcc->qos.aal != ATM_AAL5)  
2673                return -EINVAL;  
2674        IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n", 
2675                                 vcc->dev->number, vcc->vpi, vcc->vci);)  
2676  
2677        /* Device dependent initialization */  
2678        ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);  
2679        if (!ia_vcc) return -ENOMEM;  
2680        vcc->dev_data = ia_vcc;
2681  
2682        if ((error = open_rx(vcc)))  
2683        {  
2684                IF_EVENT(printk("iadev: error in open_rx, closing\n");)  
2685                ia_close(vcc);  
2686                return error;  
2687        }  
2688  
2689        if ((error = open_tx(vcc)))  
2690        {  
2691                IF_EVENT(printk("iadev: error in open_tx, closing\n");)  
2692                ia_close(vcc);  
2693                return error;  
2694        }  
2695  
2696        set_bit(ATM_VF_READY,&vcc->flags);
2697
2698#if 0
2699        {
2700           static u8 first = 1; 
2701           if (first) {
2702              ia_timer.expires = jiffies + 3*HZ;
2703              add_timer(&ia_timer);
2704              first = 0;
2705           }           
2706        }
2707#endif
2708        IF_EVENT(printk("ia open returning\n");)  
2709        return 0;  
2710}  
2711  
2712static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)  
2713{  
2714        IF_EVENT(printk(">ia_change_qos\n");)  
2715        return 0;  
2716}  
2717  
2718static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)  
2719{  
2720   IA_CMDBUF ia_cmds;
2721   IADEV *iadev;
2722   int i, board;
2723   u16 __user *tmps;
2724   IF_EVENT(printk(">ia_ioctl\n");)  
2725   if (cmd != IA_CMD) {
2726      if (!dev->phy->ioctl) return -EINVAL;
2727      return dev->phy->ioctl(dev,cmd,arg);
2728   }
2729   if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT; 
2730   board = ia_cmds.status;
2731   if ((board < 0) || (board > iadev_count))
2732         board = 0;    
2733   iadev = ia_dev[board];
2734   switch (ia_cmds.cmd) {
2735   case MEMDUMP:
2736   {
2737        switch (ia_cmds.sub_cmd) {
2738          case MEMDUMP_DEV:     
2739             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2740             if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
2741                return -EFAULT;
2742             ia_cmds.status = 0;
2743             break;
2744          case MEMDUMP_SEGREG:
2745             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2746             tmps = (u16 __user *)ia_cmds.buf;
2747             for(i=0; i<0x80; i+=2, tmps++)
2748                if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2749             ia_cmds.status = 0;
2750             ia_cmds.len = 0x80;
2751             break;
2752          case MEMDUMP_REASSREG:
2753             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2754             tmps = (u16 __user *)ia_cmds.buf;
2755             for(i=0; i<0x80; i+=2, tmps++)
2756                if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2757             ia_cmds.status = 0;
2758             ia_cmds.len = 0x80;
2759             break;
2760          case MEMDUMP_FFL:
2761          {  
2762             ia_regs_t       *regs_local;
2763             ffredn_t        *ffL;
2764             rfredn_t        *rfL;
2765                     
2766             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2767             regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2768             if (!regs_local) return -ENOMEM;
2769             ffL = &regs_local->ffredn;
2770             rfL = &regs_local->rfredn;
2771             /* Copy real rfred registers into the local copy */
2772             for (i=0; i<(sizeof (rfredn_t))/4; i++)
2773                ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2774             /* Copy real ffred registers into the local copy */
2775             for (i=0; i<(sizeof (ffredn_t))/4; i++)
2776                ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2777
2778             if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2779                kfree(regs_local);
2780                return -EFAULT;
2781             }
2782             kfree(regs_local);
2783             printk("Board %d registers dumped\n", board);
2784             ia_cmds.status = 0;                  
2785         }      
2786             break;        
2787         case READ_REG:
2788         {  
2789             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2790             desc_dbg(iadev); 
2791             ia_cmds.status = 0; 
2792         }
2793             break;
2794         case 0x6:
2795         {  
2796             ia_cmds.status = 0; 
2797             printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
2798             printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
2799         }
2800             break;
2801         case 0x8:
2802         {
2803             struct k_sonet_stats *stats;
2804             stats = &PRIV(_ia_dev[board])->sonet_stats;
2805             printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2806             printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
2807             printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
2808             printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
2809             printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
2810             printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
2811             printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2812             printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
2813             printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
2814         }
2815            ia_cmds.status = 0;
2816            break;
2817         case 0x9:
2818            if (!capable(CAP_NET_ADMIN)) return -EPERM;
2819            for (i = 1; i <= iadev->num_rx_desc; i++)
2820               free_desc(_ia_dev[board], i);
2821            writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD), 
2822                                            iadev->reass_reg+REASS_MASK_REG);
2823            iadev->rxing = 1;
2824            
2825            ia_cmds.status = 0;
2826            break;
2827
2828         case 0xb:
2829            if (!capable(CAP_NET_ADMIN)) return -EPERM;
2830            IaFrontEndIntr(iadev);
2831            break;
2832         case 0xa:
2833            if (!capable(CAP_NET_ADMIN)) return -EPERM;
2834         {  
2835             ia_cmds.status = 0; 
2836             IADebugFlag = ia_cmds.maddr;
2837             printk("New debug option loaded\n");
2838         }
2839             break;
2840         default:
2841             ia_cmds.status = 0;
2842             break;
2843      } 
2844   }
2845      break;
2846   default:
2847      break;
2848
2849   }    
2850   return 0;  
2851}  
2852  
2853static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,   
2854        void __user *optval, int optlen)  
2855{  
2856        IF_EVENT(printk(">ia_getsockopt\n");)  
2857        return -EINVAL;  
2858}  
2859  
2860static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,   
2861        void __user *optval, unsigned int optlen)  
2862{  
2863        IF_EVENT(printk(">ia_setsockopt\n");)  
2864        return -EINVAL;  
2865}  
2866  
2867static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2868        IADEV *iadev;
2869        struct dle *wr_ptr;
2870        struct tx_buf_desc __iomem *buf_desc_ptr;
2871        int desc;
2872        int comp_code;
2873        int total_len;
2874        struct cpcs_trailer *trailer;
2875        struct ia_vcc *iavcc;
2876
2877        iadev = INPH_IA_DEV(vcc->dev);  
2878        iavcc = INPH_IA_VCC(vcc);
2879        if (!iavcc->txing) {
2880           printk("discard packet on closed VC\n");
2881           if (vcc->pop)
2882                vcc->pop(vcc, skb);
2883           else
2884                dev_kfree_skb_any(skb);
2885           return 0;
2886        }
2887
2888        if (skb->len > iadev->tx_buf_sz - 8) {
2889           printk("Transmit size over tx buffer size\n");
2890           if (vcc->pop)
2891                 vcc->pop(vcc, skb);
2892           else
2893                 dev_kfree_skb_any(skb);
2894          return 0;
2895        }
2896        if ((unsigned long)skb->data & 3) {
2897           printk("Misaligned SKB\n");
2898           if (vcc->pop)
2899                 vcc->pop(vcc, skb);
2900           else
2901                 dev_kfree_skb_any(skb);
2902           return 0;
2903        }       
2904        /* Get a descriptor number from our free descriptor queue.
2905           The descriptor number is taken from the TCQ, which is used
2906           here as a free-buffer queue: it is initialized with all the
2907           descriptors and is therefore initially full.
2908        */
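            /*
             * get_desc() hands back 0xffff when no descriptor can be
             * issued for this VC right now; otherwise the recycled
             * descriptor's completion code sits in bits 15:13 and the
             * descriptor number in bits 12:0, split apart below.
             */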
2909        desc = get_desc (iadev, iavcc);
2910        if (desc == 0xffff) 
2911            return 1;
2912        comp_code = desc >> 13;  
2913        desc &= 0x1fff;  
2914  
2915        if ((desc == 0) || (desc > iadev->num_tx_desc))  
2916        {  
2917                IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);) 
2918                atomic_inc(&vcc->stats->tx);
2919                if (vcc->pop)   
2920                    vcc->pop(vcc, skb);   
2921                else  
2922                    dev_kfree_skb_any(skb);
2923                return 0;   /* return SUCCESS */
2924        }  
2925  
2926        if (comp_code)  
2927        {  
2928            IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n", 
2929                                                            desc, comp_code);)  
2930        }  
2931       
2932        /* remember the desc and vcc mapping */
2933        iavcc->vc_desc_cnt++;
2934        iadev->desc_tbl[desc-1].iavcc = iavcc;
2935        iadev->desc_tbl[desc-1].txskb = skb;
2936        IA_SKB_STATE(skb) = 0;
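            /*
             * desc_tbl maps the in-flight descriptor back to its VC and
             * skb so the transmit-completion path can pop and free the
             * right buffer when the adapter reports the descriptor done.
             */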
2937
2938        iadev->ffL.tcq_rd += 2;
2939        if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2940                iadev->ffL.tcq_rd  = iadev->ffL.tcq_st;
2941        writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
2942  
2943        /* Put the descriptor number in the packet ready queue  
2944                and put the updated write pointer in the DLE field   
2945        */   
2946        *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc; 
2947
2948        iadev->ffL.prq_wr += 2;
2949        if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2950                iadev->ffL.prq_wr = iadev->ffL.prq_st;
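            /*
             * tcq_rd and prq_wr are byte offsets into segmentation RAM,
             * advanced by 2 per 16-bit entry and wrapped from the end of
             * each ring back to its start.  The new TCQ read pointer was
             * written to TCQ_RD_PTR above; the new PRQ write pointer is
             * not written to a register here, it travels in the trailer
             * DLE's prq_wr_ptr_data field further down.
             */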
2951          
2952        /* Figure out the exact length of the packet and the padding
2953           required to make it aligned on a 48-byte boundary.  */
2954        total_len = skb->len + sizeof(struct cpcs_trailer);  
2955        total_len = ((total_len + 47) / 48) * 48;
2956        IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)  
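            /*
             * AAL5 carries 48 payload bytes per cell, so the SDU plus the
             * 8-byte CPCS trailer is rounded up to a multiple of 48.  For
             * example, a 100-byte skb gives 100 + 8 = 108, padded to 144
             * (three cells), leaving 36 pad bytes ahead of the trailer.
             */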
2957 
2958        /* Put the packet in a tx buffer */   
2959        trailer = iadev->tx_buf[desc-1].cpcs;
2960        IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
2961                  skb, skb->data, skb->len, desc);)
2962        trailer->control = 0; 
2963        /*big endian*/ 
2964        trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
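            /* The shift/mask pair above is a 16-bit byte swap, i.e. the
               CPCS length is stored in network byte order; on a
               little-endian host this is equivalent to htons(skb->len). */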
2965        trailer->crc32 = 0;     /* not needed - dummy bytes */  
2966
2967        /* Display the packet */  
2968        IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n", 
2969                                                        skb->len, tcnter++);  
2970        xdump(skb->data, skb->len, "TX: ");
2971        printk("\n");)
2972
2973        /* Build the buffer descriptor */  
2974        buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
2975        buf_desc_ptr += desc;   /* points to the corresponding entry */  
2976        buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;   
2977        /* Huh ? p.115 of users guide describes this as a read-only register */
2978        writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2979        buf_desc_ptr->vc_index = vcc->vci;
2980        buf_desc_ptr->bytes = total_len;  
2981
2982        if (vcc->qos.txtp.traffic_class == ATM_ABR)  
2983           clear_lockup (vcc, iadev);
2984
2985        /* Build the DLE structure */  
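            /*
             * Two DLEs are queued per packet: the first DMAs the skb
             * payload from host memory into the on-board transmit buffer,
             * the second DMAs the 8-byte CPCS trailer into the tail of
             * that buffer.  The second DLE carries DMA_INT_ENABLE and the
             * updated PRQ write pointer, presumably so the SAR only sees
             * the packet once both transfers have finished.
             */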
2986        wr_ptr = iadev->tx_dle_q.write;  
2987        memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));  
2988        wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
2989                skb->len, PCI_DMA_TODEVICE);
2990        wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) | 
2991                                                  buf_desc_ptr->buf_start_lo;  
2992        /* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
2993        wr_ptr->bytes = skb->len;  
2994
2995        /* hw bug - DLE byte counts of 0x2d, 0x2e, 0x2f cause DMA lockup; pad anything in the 0x2c-0x2f bucket up to 0x30 */
2996        if ((wr_ptr->bytes >> 2) == 0xb)
2997           wr_ptr->bytes = 0x30;
2998
2999        wr_ptr->mode = TX_DLE_PSI; 
3000        wr_ptr->prq_wr_ptr_data = 0;
3001  
3002        /* end is not to be used for the DLE q */  
3003        if (++wr_ptr == iadev->tx_dle_q.end)  
3004                wr_ptr = iadev->tx_dle_q.start;  
3005        
3006        /* Build trailer dle */
3007        wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3008        wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) | 
3009          buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3010
3011        wr_ptr->bytes = sizeof(struct cpcs_trailer);
3012        wr_ptr->mode = DMA_INT_ENABLE; 
3013        wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
3014        
3015        /* end is not to be used for the DLE q */
3016        if (++wr_ptr == iadev->tx_dle_q.end)  
3017                wr_ptr = iadev->tx_dle_q.start;
3018
3019        iadev->tx_dle_q.write = wr_ptr;  
3020        ATM_DESC(skb) = vcc->vci;
3021        skb_queue_tail(&iadev->tx_dma_q, skb);
3022
3023        atomic_inc(&vcc->stats->tx);
3024        iadev->tx_pkt_cnt++;
3025        /* Bump the transaction counter: two DLEs (payload + trailer) were just queued */
3026        writel(2, iadev->dma+IPHASE5575_TX_COUNTER);  
3027        
3028#if 0        
3029        /* add flow control logic */ 
3030        if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3031          if (iavcc->vc_desc_cnt > 10) {
3032             vcc->tx_quota =  vcc->tx_quota * 3 / 4;
3033            printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3034              iavcc->flow_inc = -1;
3035              iavcc->saved_tx_quota = vcc->tx_quota;
3036           } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3037             // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3038             printk("Tx2:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); 
3039              iavcc->flow_inc = 0;
3040           }
3041        }
3042#endif
3043        IF_TX(printk("ia send done\n");)  
3044        return 0;  
3045}  
3046
3047static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3048{
3049        IADEV *iadev; 
3050        unsigned long flags;
3051
3052        iadev = INPH_IA_DEV(vcc->dev);
3053        if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3054        {
3055            if (!skb)
3056                printk(KERN_CRIT "null skb in ia_send\n");
3057            else dev_kfree_skb_any(skb);
3058            return -EINVAL;
3059        }                         
3060        spin_lock_irqsave(&iadev->tx_lock, flags); 
3061        if (!test_bit(ATM_VF_READY,&vcc->flags)){ 
3062            dev_kfree_skb_any(skb);
3063            spin_unlock_irqrestore(&iadev->tx_lock, flags);
3064            return -EINVAL; 
3065        }
3066        ATM_SKB(skb)->vcc = vcc;
3067 
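            /*
             * Preserve ordering: if earlier packets are already waiting in
             * tx_backlog, queue behind them; otherwise try to transmit
             * immediately and fall back to the backlog when ia_pkt_tx()
             * cannot get a descriptor (non-zero return).
             */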
3068        if (skb_peek(&iadev->tx_backlog)) {
3069           skb_queue_tail(&iadev->tx_backlog, skb);
3070        }
3071        else {
3072           if (ia_pkt_tx (vcc, skb)) {
3073              skb_queue_tail(&iadev->tx_backlog, skb);
3074           }
3075        }
3076        spin_unlock_irqrestore(&iadev->tx_lock, flags);
3077        return 0;
3078
3079}
3080
3081static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3082{ 
3083  int   left = *pos, n;   
3084  char  *tmpPtr;
3085  IADEV *iadev = INPH_IA_DEV(dev);
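      /*
       * The ATM proc code calls this repeatedly with an increasing *pos:
       * *pos == 0 produces the board-type line, *pos == 1 the statistics
       * block, and the final return of 0 terminates the output.
       */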
3086  if(!left--) {
3087     if (iadev->phy_type == FE_25MBIT_PHY) {
3088       n = sprintf(page, "  Board Type         :  Iphase5525-1KVC-128K\n");
3089       return n;
3090     }
3091     if (iadev->phy_type == FE_DS3_PHY)
3092        n = sprintf(page, "  Board Type         :  Iphase-ATM-DS3");
3093     else if (iadev->phy_type == FE_E3_PHY)
3094        n = sprintf(page, "  Board Type         :  Iphase-ATM-E3");
3095     else if (iadev->phy_type == FE_UTP_OPTION)
3096         n = sprintf(page, "  Board Type         :  Iphase-ATM-UTP155"); 
3097     else
3098        n = sprintf(page, "  Board Type         :  Iphase-ATM-OC3");
3099     tmpPtr = page + n;
3100     if (iadev->pci_map_size == 0x40000)
3101        n += sprintf(tmpPtr, "-1KVC-");
3102     else
3103        n += sprintf(tmpPtr, "-4KVC-");  
3104     tmpPtr = page + n; 
3105     if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3106        n += sprintf(tmpPtr, "1M  \n");
3107     else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3108        n += sprintf(tmpPtr, "512K\n");
3109     else
3110       n += sprintf(tmpPtr, "128K\n");
3111     return n;
3112  }
3113  if (!left) {
3114     return  sprintf(page, "  Number of Tx Buffer:  %u\n"
3115                           "  Size of Tx Buffer  :  %u\n"
3116                           "  Number of Rx Buffer:  %u\n"
3117                           "  Size of Rx Buffer  :  %u\n"
3118                           "  Packets Received   :  %u\n"
3119                           "  Packets Transmitted:  %u\n"
3120                           "  Cells Received     :  %u\n"
3121                           "  Cells Transmitted  :  %u\n"
3122                           "  Board Dropped Cells:  %u\n"
3123                           "  Board Dropped Pkts :  %u\n",
3124                           iadev->num_tx_desc,  iadev->tx_buf_sz,
3125                           iadev->num_rx_desc,  iadev->rx_buf_sz,
3126                           iadev->rx_pkt_cnt,   iadev->tx_pkt_cnt,
3127                           iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3128                           iadev->drop_rxcell, iadev->drop_rxpkt);                        
3129  }
3130  return 0;
3131}
3132  
3133static const struct atmdev_ops ops = {  
3134        .open           = ia_open,  
3135        .close          = ia_close,  
3136        .ioctl          = ia_ioctl,  
3137        .getsockopt     = ia_getsockopt,  
3138        .setsockopt     = ia_setsockopt,  
3139        .send           = ia_send,  
3140        .phy_put        = ia_phy_put,  
3141        .phy_get        = ia_phy_get,  
3142        .change_qos     = ia_change_qos,  
3143        .proc_read      = ia_proc_read,
3144        .owner          = THIS_MODULE,
3145};  
3146          
3147static int __devinit ia_init_one(struct pci_dev *pdev,
3148                                 const struct pci_device_id *ent)
3149{  
3150        struct atm_dev *dev;  
3151        IADEV *iadev;  
3152        int ret;
3153
3154        iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
3155        if (!iadev) {
3156                ret = -ENOMEM;
3157                goto err_out;
3158        }
3159
3160        iadev->pci = pdev;
3161
3162        IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3163                pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3164        if (pci_enable_device(pdev)) {
3165                ret = -ENODEV;
3166                goto err_out_free_iadev;
3167        }
3168        dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL);
3169        if (!dev) {
3170                ret = -ENOMEM;
3171                goto err_out_disable_dev;
3172        }
3173        dev->dev_data = iadev;
3174        IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
3175        IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
3176                iadev->LineRate);)
3177
3178        pci_set_drvdata(pdev, dev);
3179
3180        ia_dev[iadev_count] = iadev;
3181        _ia_dev[iadev_count] = dev;
3182        iadev_count++;
3183        if (ia_init(dev) || ia_start(dev)) {  
3184                IF_INIT(printk("IA register failed!\n");)
3185                iadev_count--;
3186                ia_dev[iadev_count] = NULL;
3187                _ia_dev[iadev_count] = NULL;
3188                ret = -EINVAL;
3189                goto err_out_deregister_dev;
3190        }
3191        IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3192
3193        iadev->next_board = ia_boards;  
3194        ia_boards = dev;  
3195
3196        return 0;
3197
3198err_out_deregister_dev:
3199        atm_dev_deregister(dev);  
3200err_out_disable_dev:
3201        pci_disable_device(pdev);
3202err_out_free_iadev:
3203        kfree(iadev);
3204err_out:
3205        return ret;
3206}
3207
3208static void __devexit ia_remove_one(struct pci_dev *pdev)
3209{
3210        struct atm_dev *dev = pci_get_drvdata(pdev);
3211        IADEV *iadev = INPH_IA_DEV(dev);
3212
3213        /* Disable phy interrupts */
3214        ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
3215                                   SUNI_RSOP_CIE);
3216        udelay(1);
3217
3218        if (dev->phy && dev->phy->stop)
3219                dev->phy->stop(dev);
3220
3221        /* De-register device */  
3222        free_irq(iadev->irq, dev);
3223        iadev_count--;
3224        ia_dev[iadev_count] = NULL;
3225        _ia_dev[iadev_count] = NULL;
3226        IF_EVENT(printk("deregistering iadev at (itf:%d)\n", dev->number);)
3227        atm_dev_deregister(dev);
3228
3229        iounmap(iadev->base);  
3230        pci_disable_device(pdev);
3231
3232        ia_free_rx(iadev);
3233        ia_free_tx(iadev);
3234
3235        kfree(iadev);
3236}
3237
3238static struct pci_device_id ia_pci_tbl[] = {
3239        { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3240        { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3241        { 0,}
3242};
3243MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3244
3245static struct pci_driver ia_driver = {
3246        .name =         DEV_LABEL,
3247        .id_table =     ia_pci_tbl,
3248        .probe =        ia_init_one,
3249        .remove =       __devexit_p(ia_remove_one),
3250};
3251
3252static int __init ia_module_init(void)
3253{
3254        int ret;
3255
3256        ret = pci_register_driver(&ia_driver);
3257        if (ret >= 0) {
3258                ia_timer.expires = jiffies + 3*HZ;
3259                add_timer(&ia_timer); 
3260        } else
3261                printk(KERN_ERR DEV_LABEL ": driver registration failed\n");
3262        return ret;
3263}
3264
3265static void __exit ia_module_exit(void)
3266{
3267        pci_unregister_driver(&ia_driver);
3268
3269        del_timer(&ia_timer);
3270}
3271
3272module_init(ia_module_init);
3273module_exit(ia_module_exit);
3274