linux/drivers/atm/iphase.c
   1/******************************************************************************
   2         iphase.c: Device driver for Interphase ATM PCI adapter cards 
   3                    Author: Peter Wang  <pwang@iphase.com>            
   4                   Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
   5                   Interphase Corporation  <www.iphase.com>           
   6                               Version: 1.0                           
   7*******************************************************************************
   8      
   9      This software may be used and distributed according to the terms
  10      of the GNU General Public License (GPL), incorporated herein by reference.
  11      Drivers based on this skeleton fall under the GPL and must retain
  12      the authorship (implicit copyright) notice.
  13
  14      This program is distributed in the hope that it will be useful, but
  15      WITHOUT ANY WARRANTY; without even the implied warranty of
  16      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17      General Public License for more details.
  18      
  19      Modified from an incomplete driver for Interphase 5575 1KVC 1M card which 
  20      was originally written by Monalisa Agrawal at UNH. Now this driver 
   21      supports a variety of variants of the Interphase ATM PCI (i)Chip adapter 
  22      card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM) 
  23      in terms of PHY type, the size of control memory and the size of 
   24      packet memory. The following is the change log and history:
  25     
   26          Bugfix Mona's UBR driver.
   27          Modify the basic memory allocation and DMA logic.
  28          Port the driver to the latest kernel from 2.0.46.
   29          Complete the ABR logic of the driver, and add the ABR work-
   30              around for the hardware anomalies.
  31          Add the CBR support.
   32          Add the flow control logic to the driver to allow rate-limited VCs.
  33          Add 4K VC support to the board with 512K control memory.
  34          Add the support of all the variants of the Interphase ATM PCI 
  35          (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
  36          (25M UTP25) and x531 (DS3 and E3).
  37          Add SMP support.
  38
  39      Support and updates available at: ftp://ftp.iphase.com/pub/atm
  40
  41*******************************************************************************/
  42
  43#include <linux/module.h>  
  44#include <linux/kernel.h>  
  45#include <linux/mm.h>  
  46#include <linux/pci.h>  
  47#include <linux/errno.h>  
  48#include <linux/atm.h>  
  49#include <linux/atmdev.h>  
  50#include <linux/sonet.h>  
  51#include <linux/skbuff.h>  
  52#include <linux/time.h>  
  53#include <linux/delay.h>  
  54#include <linux/uio.h>  
  55#include <linux/init.h>  
  56#include <linux/interrupt.h>
  57#include <linux/wait.h>
  58#include <linux/slab.h>
  59#include <asm/io.h>  
  60#include <linux/atomic.h>
  61#include <asm/uaccess.h>  
  62#include <asm/string.h>  
  63#include <asm/byteorder.h>  
  64#include <linux/vmalloc.h>
  65#include <linux/jiffies.h>
  66#include "iphase.h"               
  67#include "suni.h"                 
  68#define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
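/* For example, swap_byte_order(0x1234) == 0x3412; it swaps the two bytes of
 * the 16-bit length field read from the AAL5 CPCS trailer in adapter memory. */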
  69
  70#define PRIV(dev) ((struct suni_priv *) dev->phy_data)
  71
  72static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
  73static void desc_dbg(IADEV *iadev);
  74
  75static IADEV *ia_dev[8];
  76static struct atm_dev *_ia_dev[8];
  77static int iadev_count;
  78static void ia_led_timer(unsigned long arg);
  79static DEFINE_TIMER(ia_timer, ia_led_timer, 0, 0);
  80static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
  81static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
  82static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
  83            |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0; 
  84
  85module_param(IA_TX_BUF, int, 0);
  86module_param(IA_TX_BUF_SZ, int, 0);
  87module_param(IA_RX_BUF, int, 0);
  88module_param(IA_RX_BUF_SZ, int, 0);
  89module_param(IADebugFlag, uint, 0644);
  90
  91MODULE_LICENSE("GPL");
  92
  93/**************************** IA_LIB **********************************/
  94
  95static void ia_init_rtn_q (IARTN_Q *que) 
  96{ 
  97   que->next = NULL; 
  98   que->tail = NULL; 
  99}
 100
 101static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data) 
 102{
 103   data->next = NULL;
 104   if (que->next == NULL) 
 105      que->next = que->tail = data;
 106   else {
 107      data->next = que->next;
 108      que->next = data;
 109   } 
 110   return;
 111}
 112
 113static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
 114   IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
 115   if (!entry)
 116      return -ENOMEM;
 117   entry->data = data;
 118   entry->next = NULL;
 119   if (que->next == NULL) 
 120      que->next = que->tail = entry;
 121   else {
 122      que->tail->next = entry;
 123      que->tail = que->tail->next;
 124   }      
 125   return 1;
 126}
 127
 128static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
 129   IARTN_Q *tmpdata;
 130   if (que->next == NULL)
 131      return NULL;
 132   tmpdata = que->next;
 133   if ( que->next == que->tail)  
 134      que->next = que->tail = NULL;
 135   else 
 136      que->next = que->next->next;
 137   return tmpdata;
 138}
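/*
 * The three helpers above maintain the driver's tx_return_q: ia_hack_tcq()
 * queues completed descriptors at the tail with ia_enque_rtn_q(), ia_tx_poll()
 * drains them from the head with ia_deque_rtn_q(), and ia_enque_head_rtn_q()
 * puts an entry back at the front when its skb has not been found yet.
 */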
 139
 140static void ia_hack_tcq(IADEV *dev) {
 141
 142  u_short               desc1;
 143  u_short               tcq_wr;
 144  struct ia_vcc         *iavcc_r = NULL; 
 145
 146  tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
 147  while (dev->host_tcq_wr != tcq_wr) {
 148     desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
 149     if (!desc1) ;
 150     else if (!dev->desc_tbl[desc1 -1].timestamp) {
 151        IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
 152        *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
 153     }                                 
 154     else if (dev->desc_tbl[desc1 -1].timestamp) {
 155        if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) { 
 156           printk("IA: Fatal err in get_desc\n");
 157           continue;
 158        }
 159        iavcc_r->vc_desc_cnt--;
 160        dev->desc_tbl[desc1 -1].timestamp = 0;
 161        IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
 162                                   dev->desc_tbl[desc1 -1].txskb, desc1);)
 163        if (iavcc_r->pcr < dev->rate_limit) {
 164           IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
 165           if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
 166              printk("ia_hack_tcq: No memory available\n");
 167        } 
 168        dev->desc_tbl[desc1 -1].iavcc = NULL;
 169        dev->desc_tbl[desc1 -1].txskb = NULL;
 170     }
 171     dev->host_tcq_wr += 2;
 172     if (dev->host_tcq_wr > dev->ffL.tcq_ed) 
 173        dev->host_tcq_wr = dev->ffL.tcq_st;
 174  }
 175} /* ia_hack_tcq */
 176
 177static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
 178  u_short               desc_num, i;
 179  struct sk_buff        *skb;
 180  struct ia_vcc         *iavcc_r = NULL; 
 181  unsigned long delta;
 182  static unsigned long timer = 0;
 183  int ltimeout;
 184
 185  ia_hack_tcq (dev);
 186  if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
 187     timer = jiffies; 
 188     i=0;
 189     while (i < dev->num_tx_desc) {
 190        if (!dev->desc_tbl[i].timestamp) {
 191           i++;
 192           continue;
 193        }
 194        ltimeout = dev->desc_tbl[i].iavcc->ltimeout; 
 195        delta = jiffies - dev->desc_tbl[i].timestamp;
 196        if (delta >= ltimeout) {
 197           IF_ABR(printk("RECOVER run!! desc_tbl %d = %d  delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
 198           if (dev->ffL.tcq_rd == dev->ffL.tcq_st) 
 199              dev->ffL.tcq_rd =  dev->ffL.tcq_ed;
 200           else 
 201              dev->ffL.tcq_rd -= 2;
 202           *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
 203           if (!(skb = dev->desc_tbl[i].txskb) || 
 204                          !(iavcc_r = dev->desc_tbl[i].iavcc))
 205              printk("Fatal err, desc table vcc or skb is NULL\n");
 206           else 
 207              iavcc_r->vc_desc_cnt--;
 208           dev->desc_tbl[i].timestamp = 0;
 209           dev->desc_tbl[i].iavcc = NULL;
 210           dev->desc_tbl[i].txskb = NULL;
 211        }
 212        i++;
 213     } /* while */
 214  }
 215  if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
 216     return 0xFFFF;
 217    
 218  /* Get the next available descriptor number from TCQ */
 219  desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
 220
 221  while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
 222     dev->ffL.tcq_rd += 2;
 223     if (dev->ffL.tcq_rd > dev->ffL.tcq_ed) 
 224        dev->ffL.tcq_rd = dev->ffL.tcq_st;
 225     if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
 226        return 0xFFFF; 
 227     desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
 228  }
 229
 230  /* get system time */
 231  dev->desc_tbl[desc_num -1].timestamp = jiffies;
 232  return desc_num;
 233}
 234
 235static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
 236  u_char                foundLockUp;
 237  vcstatus_t            *vcstatus;
 238  u_short               *shd_tbl;
 239  u_short               tempCellSlot, tempFract;
 240  struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
 241  struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
 242  u_int  i;
 243
 244  if (vcc->qos.txtp.traffic_class == ATM_ABR) {
 245     vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
 246     vcstatus->cnt++;
 247     foundLockUp = 0;
 248     if( vcstatus->cnt == 0x05 ) {
 249        abr_vc += vcc->vci;
 250        eabr_vc += vcc->vci;
 251        if( eabr_vc->last_desc ) {
 252           if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
 253              /* Wait for 10 Micro sec */
 254              udelay(10);
 255              if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
 256                 foundLockUp = 1;
 257           }
 258           else {
 259              tempCellSlot = abr_vc->last_cell_slot;
 260              tempFract    = abr_vc->fraction;
 261              if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
 262                         && (tempFract == dev->testTable[vcc->vci]->fract))
 263                 foundLockUp = 1;                   
 264              dev->testTable[vcc->vci]->lastTime = tempCellSlot;   
 265              dev->testTable[vcc->vci]->fract = tempFract; 
 266           }        
 267        } /* last descriptor */            
 268        vcstatus->cnt = 0;      
 269     } /* vcstatus->cnt */
 270        
 271     if (foundLockUp) {
 272        IF_ABR(printk("LOCK UP found\n");) 
 273        writew(0xFFFD, dev->seg_reg+MODE_REG_0);
 274        /* Wait for 10 Micro sec */
 275        udelay(10); 
 276        abr_vc->status &= 0xFFF8;
 277        abr_vc->status |= 0x0001;  /* state is idle */
 278        shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;                
 279        for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
 280        if (i < dev->num_vc)
 281           shd_tbl[i] = vcc->vci;
 282        else
 283           IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
 284        writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
 285        writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
 286        writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);       
 287        vcstatus->cnt = 0;
 288     } /* foundLockUp */
 289
 290  } /* if an ABR VC */
 291
 292
 293}
 294 
 295/*
 296** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
 297**
 298**  +----+----+------------------+-------------------------------+
 299**  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
 300**  +----+----+------------------+-------------------------------+
 301** 
 302**    R = reserved (written as 0)
 303**    NZ = 0 if 0 cells/sec; 1 otherwise
 304**
 305**    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
 306*/
 307static u16
 308cellrate_to_float(u32 cr)
 309{
 310
 311#define NZ              0x4000
 312#define M_BITS          9               /* Number of bits in mantissa */
 313#define E_BITS          5               /* Number of bits in exponent */
 314#define M_MASK          0x1ff           
 315#define E_MASK          0x1f
 316  u16   flot;
 317  u32   tmp = cr & 0x00ffffff;
 318  int   i   = 0;
 319  if (cr == 0)
 320     return 0;
 321  while (tmp != 1) {
 322     tmp >>= 1;
 323     i++;
 324  }
 325  if (i == M_BITS)
 326     flot = NZ | (i << M_BITS) | (cr & M_MASK);
 327  else if (i < M_BITS)
 328     flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
 329  else
 330     flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
 331  return flot;
 332}
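/*
 * Illustrative only (a hand-worked instance of the encoding above, not used by
 * the driver): 1000 cells/sec has its highest set bit at position 9, so the
 * exponent is 9 and the mantissa is 1000 & 0x1ff = 488, giving
 * NZ | (9 << 9) | 488 = 0x53e8, i.e. 1.953125 * 2^9 = 1000 cells/sec.
 */
#if 0
static void cellrate_to_float_example(void)
{
  u16 flot = cellrate_to_float(1000);
  BUG_ON(flot != 0x53e8);
}
#endif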
 333
 334#if 0
 335/*
 336** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
 337*/
 338static u32
 339float_to_cellrate(u16 rate)
 340{
 341  u32   exp, mantissa, cps;
 342  if ((rate & NZ) == 0)
 343     return 0;
 344  exp = (rate >> M_BITS) & E_MASK;
 345  mantissa = rate & M_MASK;
 346  if (exp == 0)
 347     return 1;
 348  cps = (1 << M_BITS) | mantissa;
 349  if (exp == M_BITS)
 350     cps = cps;
 351  else if (exp > M_BITS)
 352     cps <<= (exp - M_BITS);
 353  else
 354     cps >>= (M_BITS - exp);
 355  return cps;
 356}
 357#endif 
 358
 359static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
 360  srv_p->class_type = ATM_ABR;
 361  srv_p->pcr        = dev->LineRate;
 362  srv_p->mcr        = 0;
 363  srv_p->icr        = 0x055cb7;
 364  srv_p->tbe        = 0xffffff;
 365  srv_p->frtt       = 0x3a;
 366  srv_p->rif        = 0xf;
 367  srv_p->rdf        = 0xb;
 368  srv_p->nrm        = 0x4;
 369  srv_p->trm        = 0x7;
 370  srv_p->cdf        = 0x3;
 371  srv_p->adtf       = 50;
 372}
 373
 374static int
 375ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p, 
 376                                                struct atm_vcc *vcc, u8 flag)
 377{
 378  f_vc_abr_entry  *f_abr_vc;
 379  r_vc_abr_entry  *r_abr_vc;
 380  u32           icr;
 381  u8            trm, nrm, crm;
 382  u16           adtf, air, *ptr16;      
 383  f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
 384  f_abr_vc += vcc->vci;       
 385  switch (flag) {
 386     case 1: /* FFRED initialization */
 387#if 0  /* sanity check */
 388       if (srv_p->pcr == 0)
 389          return INVALID_PCR;
 390       if (srv_p->pcr > dev->LineRate)
 391          srv_p->pcr = dev->LineRate;
 392       if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
 393          return MCR_UNAVAILABLE;
 394       if (srv_p->mcr > srv_p->pcr)
 395          return INVALID_MCR;
 396       if (!(srv_p->icr))
 397          srv_p->icr = srv_p->pcr;
 398       if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
 399          return INVALID_ICR;
 400       if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
 401          return INVALID_TBE;
 402       if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
 403          return INVALID_FRTT;
 404       if (srv_p->nrm > MAX_NRM)
 405          return INVALID_NRM;
 406       if (srv_p->trm > MAX_TRM)
 407          return INVALID_TRM;
 408       if (srv_p->adtf > MAX_ADTF)
 409          return INVALID_ADTF;
 410       else if (srv_p->adtf == 0)
 411          srv_p->adtf = 1;
 412       if (srv_p->cdf > MAX_CDF)
 413          return INVALID_CDF;
 414       if (srv_p->rif > MAX_RIF)
 415          return INVALID_RIF;
 416       if (srv_p->rdf > MAX_RDF)
 417          return INVALID_RDF;
 418#endif
 419       memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
 420       f_abr_vc->f_vc_type = ABR;
 421       nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
  422                                  /* i.e. 2**n = 2 << (n-1) */
 423       f_abr_vc->f_nrm = nrm << 8 | nrm;
 424       trm = 100000/(2 << (16 - srv_p->trm));
 425       if ( trm == 0) trm = 1;
 426       f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
 427       crm = srv_p->tbe / nrm;
 428       if (crm == 0) crm = 1;
 429       f_abr_vc->f_crm = crm & 0xff;
 430       f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
 431       icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
 432                                ((srv_p->tbe/srv_p->frtt)*1000000) :
 433                                (1000000/(srv_p->frtt/srv_p->tbe)));
 434       f_abr_vc->f_icr = cellrate_to_float(icr);
 435       adtf = (10000 * srv_p->adtf)/8192;
 436       if (adtf == 0) adtf = 1; 
 437       f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
 438       f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
 439       f_abr_vc->f_acr = f_abr_vc->f_icr;
 440       f_abr_vc->f_status = 0x0042;
 441       break;
 442    case 0: /* RFRED initialization */  
 443       ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize); 
 444       *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
 445       r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
 446       r_abr_vc += vcc->vci;
 447       r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
 448       air = srv_p->pcr << (15 - srv_p->rif);
 449       if (air == 0) air = 1;
 450       r_abr_vc->r_air = cellrate_to_float(air);
 451       dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
 452       dev->sum_mcr        += srv_p->mcr;
 453       dev->n_abr++;
 454       break;
 455    default:
 456       break;
 457  }
 458  return        0;
 459}
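/*
 * Worked example (hand-computed, using the defaults filled in by init_abr_vc()
 * above): srv_p->nrm = 4 gives nrm = 2 << 4 = 32, i.e. 2**(4+1) as noted in the
 * comment above; srv_p->trm = 7 gives trm = 100000/(2 << (16 - 7)) = 97.
 */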
 460static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
 461   u32 rateLow=0, rateHigh, rate;
 462   int entries;
 463   struct ia_vcc *ia_vcc;
 464
 465   int   idealSlot =0, testSlot, toBeAssigned, inc;
 466   u32   spacing;
 467   u16  *SchedTbl, *TstSchedTbl;
 468   u16  cbrVC, vcIndex;
 469   u32   fracSlot    = 0;
 470   u32   sp_mod      = 0;
 471   u32   sp_mod2     = 0;
 472
 473   /* IpAdjustTrafficParams */
 474   if (vcc->qos.txtp.max_pcr <= 0) {
 475      IF_ERR(printk("PCR for CBR not defined\n");)
 476      return -1;
 477   }
 478   rate = vcc->qos.txtp.max_pcr;
 479   entries = rate / dev->Granularity;
 480   IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
 481                                entries, rate, dev->Granularity);)
 482   if (entries < 1)
 483      IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");) 
 484   rateLow  =  entries * dev->Granularity;
 485   rateHigh = (entries + 1) * dev->Granularity;
 486   if (3*(rate - rateLow) > (rateHigh - rate))
 487      entries++;
 488   if (entries > dev->CbrRemEntries) {
 489      IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
 490      IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
 491                                       entries, dev->CbrRemEntries);)
 492      return -EBUSY;
 493   }   
 494
 495   ia_vcc = INPH_IA_VCC(vcc);
 496   ia_vcc->NumCbrEntry = entries; 
 497   dev->sum_mcr += entries * dev->Granularity; 
 498   /* IaFFrednInsertCbrSched */
 499   // Starting at an arbitrary location, place the entries into the table
 500   // as smoothly as possible
 501   cbrVC   = 0;
 502   spacing = dev->CbrTotEntries / entries;
 503   sp_mod  = dev->CbrTotEntries % entries; // get modulo
 504   toBeAssigned = entries;
 505   fracSlot = 0;
 506   vcIndex  = vcc->vci;
 507   IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
 508   while (toBeAssigned)
 509   {
 510      // If this is the first time, start the table loading for this connection
 511      // as close to entryPoint as possible.
 512      if (toBeAssigned == entries)
 513      {
 514         idealSlot = dev->CbrEntryPt;
 515         dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
 516         if (dev->CbrEntryPt >= dev->CbrTotEntries) 
 517            dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
 518      } else {
 519         idealSlot += (u32)(spacing + fracSlot); // Point to the next location
 520         // in the table that would be  smoothest
 521         fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
 522         sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
 523      }
 524      if (idealSlot >= (int)dev->CbrTotEntries) 
 525         idealSlot -= dev->CbrTotEntries;  
 526      // Continuously check around this ideal value until a null
 527      // location is encountered.
 528      SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize); 
 529      inc = 0;
 530      testSlot = idealSlot;
 531      TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
 532      IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
 533                                testSlot, TstSchedTbl,toBeAssigned);)
 534      memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
 535      while (cbrVC)  // If another VC at this location, we have to keep looking
 536      {
 537          inc++;
 538          testSlot = idealSlot - inc;
 539          if (testSlot < 0) { // Wrap if necessary
 540             testSlot += dev->CbrTotEntries;
 541             IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
 542                                                       SchedTbl,testSlot);)
 543          }
 544          TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
 545          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC)); 
 546          if (!cbrVC)
 547             break;
 548          testSlot = idealSlot + inc;
 549          if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
 550             testSlot -= dev->CbrTotEntries;
 551             IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
 552             IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n", 
 553                                            testSlot, toBeAssigned);)
 554          } 
 555          // set table index and read in value
 556          TstSchedTbl = (u16*)(SchedTbl + testSlot);
 557          IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
 558                          TstSchedTbl,cbrVC,inc);)
 559          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
 560       } /* while */
 561       // Move this VCI number into this location of the CBR Sched table.
 562       memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
 563       dev->CbrRemEntries--;
 564       toBeAssigned--;
 565   } /* while */ 
 566
 567   /* IaFFrednCbrEnable */
 568   dev->NumEnabledCBR++;
 569   if (dev->NumEnabledCBR == 1) {
 570       writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
 571       IF_CBR(printk("CBR is enabled\n");)
 572   }
 573   return 0;
 574}
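/*
 * Illustrative spacing example (assumed numbers, not taken from a real board):
 * with CbrTotEntries = 512 and entries = 3, spacing = 170 and sp_mod = 2, so
 * the three entries land roughly 512/3 slots apart (slots 0, 170 and 340 when
 * CbrEntryPt starts at 0), with fracSlot/sp_mod2 accumulating the remainder.
 */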
 575static void ia_cbrVc_close (struct atm_vcc *vcc) {
 576   IADEV *iadev;
 577   u16 *SchedTbl, NullVci = 0;
 578   u32 i, NumFound;
 579
 580   iadev = INPH_IA_DEV(vcc->dev);
 581   iadev->NumEnabledCBR--;
 582   SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
 583   if (iadev->NumEnabledCBR == 0) {
 584      writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
 585      IF_CBR (printk("CBR support disabled\n");)
 586   }
 587   NumFound = 0;
 588   for (i=0; i < iadev->CbrTotEntries; i++)
 589   {
 590      if (*SchedTbl == vcc->vci) {
 591         iadev->CbrRemEntries++;
 592         *SchedTbl = NullVci;
 593         IF_CBR(NumFound++;)
 594      }
 595      SchedTbl++;   
 596   } 
 597   IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
 598}
 599
 600static int ia_avail_descs(IADEV *iadev) {
 601   int tmp = 0;
 602   ia_hack_tcq(iadev);
 603   if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
 604      tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
 605   else
 606      tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
 607                   iadev->ffL.tcq_st) / 2;
 608   return tmp;
 609}    
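/*
 * Example with hypothetical queue pointers: if tcq_st = 0x100, tcq_ed = 0x1fe,
 * tcq_rd = 0x1f0 and host_tcq_wr = 0x110 (the write pointer has wrapped), then
 * tmp = (0x1fe - 0x1f0 + 2 + 0x110 - 0x100) / 2 = 16 free descriptors.
 */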
 610
 611static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
 612
 613static int ia_que_tx (IADEV *iadev) { 
 614   struct sk_buff *skb;
 615   int num_desc;
 616   struct atm_vcc *vcc;
 617   num_desc = ia_avail_descs(iadev);
 618
 619   while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
 620      if (!(vcc = ATM_SKB(skb)->vcc)) {
 621         dev_kfree_skb_any(skb);
 622         printk("ia_que_tx: Null vcc\n");
 623         break;
 624      }
 625      if (!test_bit(ATM_VF_READY,&vcc->flags)) {
 626         dev_kfree_skb_any(skb);
 627         printk("Free the SKB on closed vci %d \n", vcc->vci);
 628         break;
 629      }
 630      if (ia_pkt_tx (vcc, skb)) {
 631         skb_queue_head(&iadev->tx_backlog, skb);
 632      }
 633      num_desc--;
 634   }
 635   return 0;
 636}
 637
 638static void ia_tx_poll (IADEV *iadev) {
 639   struct atm_vcc *vcc = NULL;
 640   struct sk_buff *skb = NULL, *skb1 = NULL;
 641   struct ia_vcc *iavcc;
 642   IARTN_Q *  rtne;
 643
 644   ia_hack_tcq(iadev);
 645   while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
 646       skb = rtne->data.txskb;
 647       if (!skb) {
 648           printk("ia_tx_poll: skb is null\n");
 649           goto out;
 650       }
 651       vcc = ATM_SKB(skb)->vcc;
 652       if (!vcc) {
 653           printk("ia_tx_poll: vcc is null\n");
 654           dev_kfree_skb_any(skb);
 655           goto out;
 656       }
 657
 658       iavcc = INPH_IA_VCC(vcc);
 659       if (!iavcc) {
 660           printk("ia_tx_poll: iavcc is null\n");
 661           dev_kfree_skb_any(skb);
 662           goto out;
 663       }
 664
 665       skb1 = skb_dequeue(&iavcc->txing_skb);
 666       while (skb1 && (skb1 != skb)) {
 667          if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
 668             printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
 669          }
 670          IF_ERR(printk("Release the SKB not match\n");)
 671          if ((vcc->pop) && (skb1->len != 0))
 672          {
 673             vcc->pop(vcc, skb1);
 674             IF_EVENT(printk("Tansmit Done - skb 0x%lx return\n",
 675                                                          (long)skb1);)
 676          }
 677          else 
 678             dev_kfree_skb_any(skb1);
 679          skb1 = skb_dequeue(&iavcc->txing_skb);
 680       }                                                        
 681       if (!skb1) {
 682          IF_EVENT(printk("IA: Vci %d - skb not found requed\n",vcc->vci);)
 683          ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
 684          break;
 685       }
 686       if ((vcc->pop) && (skb->len != 0))
 687       {
 688          vcc->pop(vcc, skb);
 689          IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
 690       }
 691       else 
 692          dev_kfree_skb_any(skb);
 693       kfree(rtne);
 694    }
 695    ia_que_tx(iadev);
 696out:
 697    return;
 698}
 699#if 0
 700static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
 701{
 702        u32     t;
 703        int     i;
 704        /*
 705         * Issue a command to enable writes to the NOVRAM
 706         */
 707        NVRAM_CMD (EXTEND + EWEN);
 708        NVRAM_CLR_CE;
 709        /*
 710         * issue the write command
 711         */
 712        NVRAM_CMD(IAWRITE + addr);
 713        /* 
 714         * Send the data, starting with D15, then D14, and so on for 16 bits
 715         */
 716        for (i=15; i>=0; i--) {
 717                NVRAM_CLKOUT (val & 0x8000);
 718                val <<= 1;
 719        }
 720        NVRAM_CLR_CE;
 721        CFG_OR(NVCE);
 722        t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
 723        while (!(t & NVDO))
 724                t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
 725
 726        NVRAM_CLR_CE;
 727        /*
 728         * disable writes again
 729         */
 730        NVRAM_CMD(EXTEND + EWDS)
 731        NVRAM_CLR_CE;
 732        CFG_AND(~NVDI);
 733}
 734#endif
 735
 736static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
 737{
 738        u_short val;
 739        u32     t;
 740        int     i;
 741        /*
 742         * Read the first bit that was clocked with the falling edge of the
  743         * last command data clock
 744         */
 745        NVRAM_CMD(IAREAD + addr);
 746        /*
 747         * Now read the rest of the bits, the next bit read is D14, then D13,
 748         * and so on.
 749         */
 750        val = 0;
 751        for (i=15; i>=0; i--) {
 752                NVRAM_CLKIN(t);
 753                val |= (t << i);
 754        }
 755        NVRAM_CLR_CE;
 756        CFG_AND(~NVDI);
 757        return val;
 758}
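/*
 * ia_hw_type() below uses this routine to read word 25 of the NOVRAM, from
 * which it decodes the on-board memory size (MEM_SIZE_MASK) and the
 * front-end/PHY type (FE_MASK).
 */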
 759
 760static void ia_hw_type(IADEV *iadev) {
 761   u_short memType = ia_eeprom_get(iadev, 25);   
 762   iadev->memType = memType;
 763   if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
 764      iadev->num_tx_desc = IA_TX_BUF;
 765      iadev->tx_buf_sz = IA_TX_BUF_SZ;
 766      iadev->num_rx_desc = IA_RX_BUF;
 767      iadev->rx_buf_sz = IA_RX_BUF_SZ; 
 768   } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
 769      if (IA_TX_BUF == DFL_TX_BUFFERS)
 770        iadev->num_tx_desc = IA_TX_BUF / 2;
 771      else 
 772        iadev->num_tx_desc = IA_TX_BUF;
 773      iadev->tx_buf_sz = IA_TX_BUF_SZ;
 774      if (IA_RX_BUF == DFL_RX_BUFFERS)
 775        iadev->num_rx_desc = IA_RX_BUF / 2;
 776      else
 777        iadev->num_rx_desc = IA_RX_BUF;
 778      iadev->rx_buf_sz = IA_RX_BUF_SZ;
 779   }
 780   else {
 781      if (IA_TX_BUF == DFL_TX_BUFFERS) 
 782        iadev->num_tx_desc = IA_TX_BUF / 8;
 783      else
 784        iadev->num_tx_desc = IA_TX_BUF;
 785      iadev->tx_buf_sz = IA_TX_BUF_SZ;
 786      if (IA_RX_BUF == DFL_RX_BUFFERS)
 787        iadev->num_rx_desc = IA_RX_BUF / 8;
 788      else
 789        iadev->num_rx_desc = IA_RX_BUF;
 790      iadev->rx_buf_sz = IA_RX_BUF_SZ; 
 791   } 
 792   iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz); 
 793   IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
 794         iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
 795         iadev->rx_buf_sz, iadev->rx_pkt_ram);)
 796
 797#if 0
  798   if ((memType & FE_MASK) == FE_SINGLE_MODE)
  799      iadev->phy_type = PHY_OC3C_S;
  800   else if ((memType & FE_MASK) == FE_UTP_OPTION)
  801      iadev->phy_type = PHY_UTP155;
  802   else
  803      iadev->phy_type = PHY_OC3C_M;
 804#endif
 805   
 806   iadev->phy_type = memType & FE_MASK;
 807   IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n", 
 808                                         memType,iadev->phy_type);)
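   /*
    * LineRate below is the PHY bit rate converted to cells/sec:
    * (bits/sec / 8) bytes/sec, scaled by 26/27 (presumably an allowance for
    * physical-layer framing overhead) and divided by 53 bytes per cell.
    * For the 25.6 Mbit PHY this works out to
    * ((25600000/8)*26)/(27*53) = 58141 cells/sec.
    */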
 809   if (iadev->phy_type == FE_25MBIT_PHY) 
 810      iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
 811   else if (iadev->phy_type == FE_DS3_PHY)
 812      iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
 813   else if (iadev->phy_type == FE_E3_PHY) 
 814      iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
 815   else
 816       iadev->LineRate = (u32)(ATM_OC3_PCR);
 817   IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
 818
 819}
 820
 821static u32 ia_phy_read32(struct iadev_priv *ia, unsigned int reg)
 822{
 823        return readl(ia->phy + (reg >> 2));
 824}
 825
 826static void ia_phy_write32(struct iadev_priv *ia, unsigned int reg, u32 val)
 827{
 828        writel(val, ia->phy + (reg >> 2));
 829}
 830
 831static void ia_frontend_intr(struct iadev_priv *iadev)
 832{
 833        u32 status;
 834
 835        if (iadev->phy_type & FE_25MBIT_PHY) {
 836                status = ia_phy_read32(iadev, MB25_INTR_STATUS);
 837                iadev->carrier_detect = (status & MB25_IS_GSB) ? 1 : 0;
 838        } else if (iadev->phy_type & FE_DS3_PHY) {
 839                ia_phy_read32(iadev, SUNI_DS3_FRM_INTR_STAT);
 840                status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
 841                iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
 842        } else if (iadev->phy_type & FE_E3_PHY) {
 843                ia_phy_read32(iadev, SUNI_E3_FRM_MAINT_INTR_IND);
 844                status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
 845                iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
 846        } else {
 847                status = ia_phy_read32(iadev, SUNI_RSOP_STATUS);
 848                iadev->carrier_detect = (status & SUNI_LOSV) ? 0 : 1;
 849        }
 850
 851        printk(KERN_INFO "IA: SUNI carrier %s\n",
 852                iadev->carrier_detect ? "detected" : "lost signal");
 853}
 854
 855static void ia_mb25_init(struct iadev_priv *iadev)
 856{
 857#if 0
 858   mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
 859#endif
 860        ia_phy_write32(iadev, MB25_MASTER_CTRL, MB25_MC_DRIC | MB25_MC_DREC);
 861        ia_phy_write32(iadev, MB25_DIAG_CONTROL, 0);
 862
 863        iadev->carrier_detect =
 864                (ia_phy_read32(iadev, MB25_INTR_STATUS) & MB25_IS_GSB) ? 1 : 0;
 865}
 866
 867struct ia_reg {
 868        u16 reg;
 869        u16 val;
 870};
 871
 872static void ia_phy_write(struct iadev_priv *iadev,
 873                         const struct ia_reg *regs, int len)
 874{
 875        while (len--) {
 876                ia_phy_write32(iadev, regs->reg, regs->val);
 877                regs++;
 878        }
 879}
 880
 881static void ia_suni_pm7345_init_ds3(struct iadev_priv *iadev)
 882{
 883        static const struct ia_reg suni_ds3_init [] = {
 884                { SUNI_DS3_FRM_INTR_ENBL,       0x17 },
 885                { SUNI_DS3_FRM_CFG,             0x01 },
 886                { SUNI_DS3_TRAN_CFG,            0x01 },
 887                { SUNI_CONFIG,                  0 },
 888                { SUNI_SPLR_CFG,                0 },
 889                { SUNI_SPLT_CFG,                0 }
 890        };
 891        u32 status;
 892
 893        status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
 894        iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
 895
 896        ia_phy_write(iadev, suni_ds3_init, ARRAY_SIZE(suni_ds3_init));
 897}
 898
 899static void ia_suni_pm7345_init_e3(struct iadev_priv *iadev)
 900{
 901        static const struct ia_reg suni_e3_init [] = {
 902                { SUNI_E3_FRM_FRAM_OPTIONS,             0x04 },
 903                { SUNI_E3_FRM_MAINT_OPTIONS,            0x20 },
 904                { SUNI_E3_FRM_FRAM_INTR_ENBL,           0x1d },
 905                { SUNI_E3_FRM_MAINT_INTR_ENBL,          0x30 },
 906                { SUNI_E3_TRAN_STAT_DIAG_OPTIONS,       0 },
 907                { SUNI_E3_TRAN_FRAM_OPTIONS,            0x01 },
 908                { SUNI_CONFIG,                          SUNI_PM7345_E3ENBL },
 909                { SUNI_SPLR_CFG,                        0x41 },
 910                { SUNI_SPLT_CFG,                        0x41 }
 911        };
 912        u32 status;
 913
 914        status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
 915        iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
 916        ia_phy_write(iadev, suni_e3_init, ARRAY_SIZE(suni_e3_init));
 917}
 918
 919static void ia_suni_pm7345_init(struct iadev_priv *iadev)
 920{
 921        static const struct ia_reg suni_init [] = {
 922                /* Enable RSOP loss of signal interrupt. */
 923                { SUNI_INTR_ENBL,               0x28 },
 924                /* Clear error counters. */
 925                { SUNI_ID_RESET,                0 },
 926                /* Clear "PMCTST" in master test register. */
 927                { SUNI_MASTER_TEST,             0 },
 928
 929                { SUNI_RXCP_CTRL,               0x2c },
 930                { SUNI_RXCP_FCTRL,              0x81 },
 931
 932                { SUNI_RXCP_IDLE_PAT_H1,        0 },
 933                { SUNI_RXCP_IDLE_PAT_H2,        0 },
 934                { SUNI_RXCP_IDLE_PAT_H3,        0 },
 935                { SUNI_RXCP_IDLE_PAT_H4,        0x01 },
 936
 937                { SUNI_RXCP_IDLE_MASK_H1,       0xff },
 938                { SUNI_RXCP_IDLE_MASK_H2,       0xff },
 939                { SUNI_RXCP_IDLE_MASK_H3,       0xff },
 940                { SUNI_RXCP_IDLE_MASK_H4,       0xfe },
 941
 942                { SUNI_RXCP_CELL_PAT_H1,        0 },
 943                { SUNI_RXCP_CELL_PAT_H2,        0 },
 944                { SUNI_RXCP_CELL_PAT_H3,        0 },
 945                { SUNI_RXCP_CELL_PAT_H4,        0x01 },
 946
 947                { SUNI_RXCP_CELL_MASK_H1,       0xff },
 948                { SUNI_RXCP_CELL_MASK_H2,       0xff },
 949                { SUNI_RXCP_CELL_MASK_H3,       0xff },
 950                { SUNI_RXCP_CELL_MASK_H4,       0xff },
 951
 952                { SUNI_TXCP_CTRL,               0xa4 },
 953                { SUNI_TXCP_INTR_EN_STS,        0x10 },
 954                { SUNI_TXCP_IDLE_PAT_H5,        0x55 }
 955        };
 956
 957        if (iadev->phy_type & FE_DS3_PHY)
 958                ia_suni_pm7345_init_ds3(iadev);
 959        else
 960                ia_suni_pm7345_init_e3(iadev);
 961
 962        ia_phy_write(iadev, suni_init, ARRAY_SIZE(suni_init));
 963
 964        ia_phy_write32(iadev, SUNI_CONFIG, ia_phy_read32(iadev, SUNI_CONFIG) &
 965                ~(SUNI_PM7345_LLB | SUNI_PM7345_CLB |
 966                  SUNI_PM7345_DLB | SUNI_PM7345_PLB));
 967#ifdef __SNMP__
 968   suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
 969#endif /* __SNMP__ */
 970   return;
 971}
 972
 973
 974/***************************** IA_LIB END *****************************/
 975    
 976#ifdef CONFIG_ATM_IA_DEBUG
 977static int tcnter = 0;
 978static void xdump( u_char*  cp, int  length, char*  prefix )
 979{
 980    int col, count;
 981    u_char prntBuf[120];
 982    u_char*  pBuf = prntBuf;
 983    count = 0;
 984    while(count < length){
 985        pBuf += sprintf( pBuf, "%s", prefix );
 986        for(col = 0;count + col < length && col < 16; col++){
 987            if (col != 0 && (col % 4) == 0)
 988                pBuf += sprintf( pBuf, " " );
 989            pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
 990        }
 991        while(col++ < 16){      /* pad end of buffer with blanks */
 992            if ((col % 4) == 0)
 993                sprintf( pBuf, " " );
 994            pBuf += sprintf( pBuf, "   " );
 995        }
 996        pBuf += sprintf( pBuf, "  " );
 997        for(col = 0;count + col < length && col < 16; col++){
 998            if (isprint((int)cp[count + col]))
 999                pBuf += sprintf( pBuf, "%c", cp[count + col] );
1000            else
1001                pBuf += sprintf( pBuf, "." );
 1002        }
1003        printk("%s\n", prntBuf);
1004        count += col;
1005        pBuf = prntBuf;
1006    }
1007
1008}  /* close xdump(... */
1009#endif /* CONFIG_ATM_IA_DEBUG */
1010
1011  
1012static struct atm_dev *ia_boards = NULL;  
1013  
1014#define ACTUAL_RAM_BASE \
1015        RAM_BASE*((iadev->mem)/(128 * 1024))  
1016#define ACTUAL_SEG_RAM_BASE \
1017        IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
1018#define ACTUAL_REASS_RAM_BASE \
1019        IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
1020  
1021  
1022/*-- some utilities and memory allocation stuff will come here -------------*/  
1023  
1024static void desc_dbg(IADEV *iadev) {
1025
1026  u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
1027  u32 i;
1028  void __iomem *tmp;
1029  // regval = readl((u32)ia_cmds->maddr);
1030  tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
1031  printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
1032                     tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
1033                     readw(iadev->seg_ram+tcq_wr_ptr-2));
1034  printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n",  iadev->host_tcq_wr, 
1035                   iadev->ffL.tcq_rd);
1036  tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
1037  tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
1038  printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
1039  i = 0;
1040  while (tcq_st_ptr != tcq_ed_ptr) {
1041      tmp = iadev->seg_ram+tcq_st_ptr;
1042      printk("TCQ slot %d desc = %d  Addr = %p\n", i++, readw(tmp), tmp);
1043      tcq_st_ptr += 2;
1044  }
1045  for(i=0; i <iadev->num_tx_desc; i++)
1046      printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
1047} 
1048  
1049  
1050/*----------------------------- Receiving side stuff --------------------------*/  
1051 
1052static void rx_excp_rcvd(struct atm_dev *dev)  
1053{  
 1054#if 0 /* closing the receiving side will cause too many excp int */  
1055  IADEV *iadev;  
1056  u_short state;  
1057  u_short excpq_rd_ptr;  
1058  //u_short *ptr;  
1059  int vci, error = 1;  
1060  iadev = INPH_IA_DEV(dev);  
1061  state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1062  while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)  
1063  { printk("state = %x \n", state); 
1064        excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;  
1065 printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr); 
1066        if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
1067            IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
1068        // TODO: update exception stat
1069        vci = readw(iadev->reass_ram+excpq_rd_ptr);  
1070        error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;  
1071        // pwang_test
1072        excpq_rd_ptr += 4;  
1073        if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))  
1074            excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
1075        writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);  
1076        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1077  }  
1078#endif
1079}  
1080  
1081static void free_desc(struct atm_dev *dev, int desc)  
1082{  
1083        IADEV *iadev;  
1084        iadev = INPH_IA_DEV(dev);  
1085        writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr); 
1086        iadev->rfL.fdq_wr +=2;
1087        if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1088                iadev->rfL.fdq_wr =  iadev->rfL.fdq_st;  
1089        writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);  
1090}  
1091  
1092  
1093static int rx_pkt(struct atm_dev *dev)  
1094{  
1095        IADEV *iadev;  
1096        struct atm_vcc *vcc;  
1097        unsigned short status;  
1098        struct rx_buf_desc __iomem *buf_desc_ptr;  
1099        int desc;   
1100        struct dle* wr_ptr;  
1101        int len;  
1102        struct sk_buff *skb;  
1103        u_int buf_addr, dma_addr;  
1104
1105        iadev = INPH_IA_DEV(dev);  
1106        if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff)) 
1107        {  
1108            printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);  
1109            return -EINVAL;  
1110        }  
1111        /* mask 1st 3 bits to get the actual descno. */  
1112        desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;  
1113        IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n", 
1114                                    iadev->reass_ram, iadev->rfL.pcq_rd, desc);
1115              printk(" pcq_wr_ptr = 0x%x\n",
1116                               readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
 1117        /* update the read pointer - maybe we should do this at the end */  
1118        if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed) 
1119                iadev->rfL.pcq_rd = iadev->rfL.pcq_st;  
1120        else  
1121                iadev->rfL.pcq_rd += 2;
1122        writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);  
1123  
1124        /* get the buffer desc entry.  
1125                update stuff. - doesn't seem to be any update necessary  
1126        */  
1127        buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1128        /* make the ptr point to the corresponding buffer desc entry */  
1129        buf_desc_ptr += desc;     
1130        if (!desc || (desc > iadev->num_rx_desc) || 
1131                      ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) { 
1132            free_desc(dev, desc);
1133            IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
1134            return -1;
1135        }
1136        vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];  
1137        if (!vcc)  
1138        {      
1139                free_desc(dev, desc); 
1140                printk("IA: null vcc, drop PDU\n");  
1141                return -1;  
1142        }  
1143          
1144  
1145        /* might want to check the status bits for errors */  
1146        status = (u_short) (buf_desc_ptr->desc_mode);  
1147        if (status & (RX_CER | RX_PTE | RX_OFL))  
1148        {  
1149                atomic_inc(&vcc->stats->rx_err);
1150                IF_ERR(printk("IA: bad packet, dropping it");)  
1151                if (status & RX_CER) { 
1152                    IF_ERR(printk(" cause: packet CRC error\n");)
1153                }
1154                else if (status & RX_PTE) {
1155                    IF_ERR(printk(" cause: packet time out\n");)
1156                }
1157                else {
1158                    IF_ERR(printk(" cause: buffer overflow\n");)
1159                }
1160                goto out_free_desc;
1161        }  
1162  
1163        /*  
1164                build DLE.        
1165        */  
1166  
1167        buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;  
1168        dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;  
1169        len = dma_addr - buf_addr;  
1170        if (len > iadev->rx_buf_sz) {
1171           printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
1172           atomic_inc(&vcc->stats->rx_err);
1173           goto out_free_desc;
1174        }
1175                  
1176        if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
1177           if (vcc->vci < 32)
1178              printk("Drop control packets\n");
1179           goto out_free_desc;
1180        }
1181        skb_put(skb,len);  
1182        // pwang_test
1183        ATM_SKB(skb)->vcc = vcc;
1184        ATM_DESC(skb) = desc;        
1185        skb_queue_tail(&iadev->rx_dma_q, skb);  
1186
1187        /* Build the DLE structure */  
1188        wr_ptr = iadev->rx_dle_q.write;  
1189        wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
1190                                              len, DMA_FROM_DEVICE);
1191        wr_ptr->local_pkt_addr = buf_addr;  
1192        wr_ptr->bytes = len;    /* We don't know this do we ?? */  
1193        wr_ptr->mode = DMA_INT_ENABLE;  
1194  
 1195        /* should take care of wrap around here too. */  
1196        if(++wr_ptr == iadev->rx_dle_q.end)
1197             wr_ptr = iadev->rx_dle_q.start;
1198        iadev->rx_dle_q.write = wr_ptr;  
1199        udelay(1);  
1200        /* Increment transaction counter */  
1201        writel(1, iadev->dma+IPHASE5575_RX_COUNTER);   
1202out:    return 0;  
1203out_free_desc:
1204        free_desc(dev, desc);
1205        goto out;
1206}  
1207  
1208static void rx_intr(struct atm_dev *dev)  
1209{  
1210  IADEV *iadev;  
1211  u_short status;  
1212  u_short state, i;  
1213  
1214  iadev = INPH_IA_DEV(dev);  
1215  status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;  
1216  IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
1217  if (status & RX_PKT_RCVD)  
1218  {  
1219        /* do something */  
 1220        /* Basically we received an interrupt for a received packet.  
 1221        A descriptor would have been written to the packet complete   
 1222        queue. Get all the descriptors and set up DMA to move the   
 1223        packets until the packet complete queue is empty.  
1224        */  
1225        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1226        IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);) 
1227        while(!(state & PCQ_EMPTY))  
1228        {  
1229             rx_pkt(dev);  
1230             state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1231        }  
1232        iadev->rxing = 1;
1233  }  
1234  if (status & RX_FREEQ_EMPT)  
1235  {   
1236     if (iadev->rxing) {
1237        iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
1238        iadev->rx_tmp_jif = jiffies; 
1239        iadev->rxing = 0;
1240     } 
1241     else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
1242               ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
1243        for (i = 1; i <= iadev->num_rx_desc; i++)
1244               free_desc(dev, i);
1245printk("Test logic RUN!!!!\n");
1246        writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
1247        iadev->rxing = 1;
1248     }
1249     IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)  
1250  }  
1251
1252  if (status & RX_EXCP_RCVD)  
1253  {  
1254        /* probably need to handle the exception queue also. */  
1255        IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)  
1256        rx_excp_rcvd(dev);  
1257  }  
1258
1259
1260  if (status & RX_RAW_RCVD)  
1261  {  
 1262        /* need to handle the raw incoming cells. This depends on   
 1263        whether we have programmed the card to receive the raw cells or not;  
 1264        else ignore. */  
1265        IF_EVENT(printk("Rx intr status:  RX_RAW_RCVD %08x\n", status);)  
1266  }  
1267}  
1268  
1269  
1270static void rx_dle_intr(struct atm_dev *dev)  
1271{  
1272  IADEV *iadev;  
1273  struct atm_vcc *vcc;   
1274  struct sk_buff *skb;  
1275  int desc;  
1276  u_short state;   
1277  struct dle *dle, *cur_dle;  
1278  u_int dle_lp;  
1279  int len;
1280  iadev = INPH_IA_DEV(dev);  
1281 
1282  /* free all the dles done, that is just update our own dle read pointer   
1283        - do we really need to do this. Think not. */  
 1284  /* DMA is done, just get all the receive buffers from the rx dma queue  
1285        and push them up to the higher layer protocol. Also free the desc  
1286        associated with the buffer. */  
1287  dle = iadev->rx_dle_q.read;  
1288  dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);  
1289  cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));  
1290  while(dle != cur_dle)  
1291  {  
1292      /* free the DMAed skb */  
1293      skb = skb_dequeue(&iadev->rx_dma_q);  
1294      if (!skb)  
1295         goto INCR_DLE;
1296      desc = ATM_DESC(skb);
1297      free_desc(dev, desc);  
1298               
1299      if (!(len = skb->len))
1300      {  
1301          printk("rx_dle_intr: skb len 0\n");  
1302          dev_kfree_skb_any(skb);  
1303      }  
1304      else  
1305      {  
1306          struct cpcs_trailer *trailer;
1307          u_short length;
1308          struct ia_vcc *ia_vcc;
1309
1310          dma_unmap_single(&iadev->pci->dev, iadev->rx_dle_q.write->sys_pkt_addr,
1311                           len, DMA_FROM_DEVICE);
1312          /* no VCC related housekeeping done as yet. lets see */  
1313          vcc = ATM_SKB(skb)->vcc;
1314          if (!vcc) {
1315              printk("IA: null vcc\n");  
1316              dev_kfree_skb_any(skb);
1317              goto INCR_DLE;
1318          }
1319          ia_vcc = INPH_IA_VCC(vcc);
1320          if (ia_vcc == NULL)
1321          {
1322             atomic_inc(&vcc->stats->rx_err);
1323             atm_return(vcc, skb->truesize);
1324             dev_kfree_skb_any(skb);
1325             goto INCR_DLE;
1326           }
1327          // get real pkt length  pwang_test
1328          trailer = (struct cpcs_trailer*)((u_char *)skb->data +
1329                                 skb->len - sizeof(*trailer));
1330          length = swap_byte_order(trailer->length);
1331          if ((length > iadev->rx_buf_sz) || (length > 
1332                              (skb->len - sizeof(struct cpcs_trailer))))
1333          {
1334             atomic_inc(&vcc->stats->rx_err);
1335             IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)", 
1336                                                            length, skb->len);)
1337             atm_return(vcc, skb->truesize);
1338             dev_kfree_skb_any(skb);
1339             goto INCR_DLE;
1340          }
1341          skb_trim(skb, length);
1342          
1343          /* Display the packet */  
1344          IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);  
1345          xdump(skb->data, skb->len, "RX: ");
1346          printk("\n");)
1347
1348          IF_RX(printk("rx_dle_intr: skb push");)  
1349          vcc->push(vcc,skb);  
1350          atomic_inc(&vcc->stats->rx);
1351          iadev->rx_pkt_cnt++;
1352      }  
1353INCR_DLE:
1354      if (++dle == iadev->rx_dle_q.end)  
1355          dle = iadev->rx_dle_q.start;  
1356  }  
1357  iadev->rx_dle_q.read = dle;  
1358  
1359  /* if the interrupts are masked because there were no free desc available,  
1360                unmask them now. */ 
1361  if (!iadev->rxing) {
1362     state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1363     if (!(state & FREEQ_EMPTY)) {
1364        state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
1365        writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
1366                                      iadev->reass_reg+REASS_MASK_REG);
1367        iadev->rxing++; 
1368     }
1369  }
1370}  
1371  
1372  
1373static int open_rx(struct atm_vcc *vcc)  
1374{  
1375        IADEV *iadev;  
1376        u_short __iomem *vc_table;  
1377        u_short __iomem *reass_ptr;  
1378        IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
1379
1380        if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;    
1381        iadev = INPH_IA_DEV(vcc->dev);  
1382        if (vcc->qos.rxtp.traffic_class == ATM_ABR) {  
1383           if (iadev->phy_type & FE_25MBIT_PHY) {
1384               printk("IA:  ABR not support\n");
1385               return -EINVAL; 
1386           }
1387        }
1388        /* Make only this VCI in the vc table valid and let all   
1389                others be invalid entries */  
1390        vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
1391        vc_table += vcc->vci;
1392        /* mask the last 6 bits and OR it with 3 for 1K VCs */  
1393
1394        *vc_table = vcc->vci << 6;
1395        /* Also keep a list of open rx vcs so that we can attach them with  
1396                incoming PDUs later. */  
1397        if ((vcc->qos.rxtp.traffic_class == ATM_ABR) || 
1398                                (vcc->qos.txtp.traffic_class == ATM_ABR))  
1399        {  
1400                srv_cls_param_t srv_p;
1401                init_abr_vc(iadev, &srv_p);
1402                ia_open_abr_vc(iadev, &srv_p, vcc, 0);
1403        } 
1404        else {  /* for UBR  later may need to add CBR logic */
1405                reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
1406                reass_ptr += vcc->vci;
1407                *reass_ptr = NO_AAL5_PKT;
1408        }
1409        
1410        if (iadev->rx_open[vcc->vci])  
1411                printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",  
1412                        vcc->dev->number, vcc->vci);  
1413        iadev->rx_open[vcc->vci] = vcc;  
1414        return 0;  
1415}  
1416  
1417static int rx_init(struct atm_dev *dev)  
1418{  
1419        IADEV *iadev;  
1420        struct rx_buf_desc __iomem *buf_desc_ptr;  
1421        unsigned long rx_pkt_start = 0;  
1422        void *dle_addr;  
1423        struct abr_vc_table  *abr_vc_table; 
1424        u16 *vc_table;  
1425        u16 *reass_table;  
1426        int i,j, vcsize_sel;  
1427        u_short freeq_st_adr;  
1428        u_short *freeq_start;  
1429  
1430        iadev = INPH_IA_DEV(dev);  
1431  //    spin_lock_init(&iadev->rx_lock); 
1432  
1433        /* Allocate 4k bytes - more aligned than needed (4k boundary) */
1434        dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
1435                                      &iadev->rx_dle_dma, GFP_KERNEL);
1436        if (!dle_addr)  {  
1437                printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1438                goto err_out;
1439        }
1440        iadev->rx_dle_q.start = (struct dle *)dle_addr;
1441        iadev->rx_dle_q.read = iadev->rx_dle_q.start;  
1442        iadev->rx_dle_q.write = iadev->rx_dle_q.start;  
1443        iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1444        /* the end of the dle q points to the entry after the last  
1445        DLE that can be used. */  
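            /*
             * The rx DLE list is a ring of DLE_ENTRIES fixed-size descriptors:
             * the read/write pointers walk through it and, as the DLE interrupt
             * handlers do (see tx_dle_intr() below), wrap back to .start when
             * they reach .end, which points one entry past the last usable DLE.
             */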
1446  
1447        /* write the upper 20 bits of the start address to rx list address register */  
1448        /* We know this is 32bit bus addressed so the following is safe */
1449        writel(iadev->rx_dle_dma & 0xfffff000,
1450               iadev->dma + IPHASE5575_RX_LIST_ADDR);  
1451        IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
1452                      iadev->dma+IPHASE5575_TX_LIST_ADDR,
1453                      readl(iadev->dma + IPHASE5575_TX_LIST_ADDR));
1454        printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
1455                      iadev->dma+IPHASE5575_RX_LIST_ADDR,
1456                      readl(iadev->dma + IPHASE5575_RX_LIST_ADDR));)
1457  
1458        writew(0xffff, iadev->reass_reg+REASS_MASK_REG);  
1459        writew(0, iadev->reass_reg+MODE_REG);  
1460        writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);  
1461  
1462        /* Receive side control memory map  
1463           -------------------------------  
1464  
1465                Buffer descr    0x0000 (736 - 23K)  
1466                VP Table        0x5c00 (256 - 512)  
1467                Except q        0x5e00 (128 - 512)  
1468                Free buffer q   0x6000 (1K - 2K)  
1469                Packet comp q   0x6800 (1K - 2K)  
1470                Reass Table     0x7000 (1K - 2K)  
1471                VC Table        0x7800 (1K - 2K)  
1472                ABR VC Table    0x8000 (1K - 32K)  
1473        */  
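            /*
             * The offsets above appear to correspond to the 1K-VC board
             * (iadev->memSize == 1, see ia_init()); each base used below is
             * scaled by iadev->memSize, so the same layout simply grows
             * proportionally on the 4K-VC boards.
             */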
1474          
1475        /* Base address for Buffer Descriptor Table */  
1476        writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);  
1477        /* Set the buffer size register */  
1478        writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);  
1479  
1480        /* Initialize each entry in the Buffer Descriptor Table */  
1481        iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1482        buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1483        memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1484        buf_desc_ptr++;  
1485        rx_pkt_start = iadev->rx_pkt_ram;  
1486        for(i=1; i<=iadev->num_rx_desc; i++)  
1487        {  
1488                memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1489                buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;  
1490                buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;  
1491                buf_desc_ptr++;           
1492                rx_pkt_start += iadev->rx_buf_sz;  
1493        }  
1494        IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
1495        i = FREE_BUF_DESC_Q*iadev->memSize; 
1496        writew(i >> 16,  iadev->reass_reg+REASS_QUEUE_BASE); 
1497        writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1498        writew(i+iadev->num_rx_desc*sizeof(u_short), 
1499                                         iadev->reass_reg+FREEQ_ED_ADR);
1500        writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1501        writew(i+iadev->num_rx_desc*sizeof(u_short), 
1502                                        iadev->reass_reg+FREEQ_WR_PTR);    
1503        /* Fill the FREEQ with all the free descriptors. */  
1504        freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);  
1505        freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);  
1506        for(i=1; i<=iadev->num_rx_desc; i++)  
1507        {  
1508                *freeq_start = (u_short)i;  
1509                freeq_start++;  
1510        }  
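            /* Note: free-queue entries and buffer descriptors are 1-based;
               descriptor 0 is zeroed above but never placed on the free queue. */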
1511        IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
1512        /* Packet Complete Queue */
1513        i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1514        writew(i, iadev->reass_reg+PCQ_ST_ADR);
1515        writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1516        writew(i, iadev->reass_reg+PCQ_RD_PTR);
1517        writew(i, iadev->reass_reg+PCQ_WR_PTR);
1518
1519        /* Exception Queue */
1520        i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1521        writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1522        writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q), 
1523                                             iadev->reass_reg+EXCP_Q_ED_ADR);
1524        writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1525        writew(i, iadev->reass_reg+EXCP_Q_WR_PTR); 
1526 
1527        /* Load local copy of FREEQ and PCQ ptrs */
1528        iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1529        iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1530        iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1531        iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1532        iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1533        iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1534        iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1535        iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1536        
1537        IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x", 
1538              iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd, 
1539              iadev->rfL.pcq_wr);)                
1540        /* just for check - no VP TBL */  
1541        /* VP Table */  
1542        /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */  
1543        /* initialize VP Table for invalid VPIs  
1544                - I guess we can write all 1s or 0x000f in the entire memory  
1545                  space or something similar.  
1546        */  
1547  
1548        /* This seems to work and looks right to me too !!! */  
1549        i =  REASS_TABLE * iadev->memSize;
1550        writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);   
1551        /* initialize each Reassembly table entry to NO_AAL5_PKT (no packet in progress) */  
1552        reass_table = (u16 *)(iadev->reass_ram+i);  
1553        j = REASS_TABLE_SZ * iadev->memSize;
1554        for(i=0; i < j; i++)  
1555                *reass_table++ = NO_AAL5_PKT;  
1556       i = 8*1024;
1557       vcsize_sel =  0;
1558       while (i != iadev->num_vc) {
1559          i /= 2;
1560          vcsize_sel++;
1561       }
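           /*
            * vcsize_sel encodes the VC-table size for the low bits of the
            * VC_LKUP_BASE register: start from 8K and halve until the count
            * matches num_vc, e.g. num_vc == 4096 -> vcsize_sel 1 and
            * num_vc == 1024 -> vcsize_sel 3.
            */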
1562       i = RX_VC_TABLE * iadev->memSize;
1563       writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1564       vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);  
1565        j = RX_VC_TABLE_SZ * iadev->memSize;
1566        for(i = 0; i < j; i++)  
1567        {  
1568                /* shift the reassembly pointer by 3 + lower 3 bits of   
1569                vc_lkup_base register (=3 for 1K VCs) and the last byte   
1570                is those low 3 bits.   
1571                Shall program this later.  
1572                */  
1573                *vc_table = (i << 6) | 15;      /* for invalid VCI */  
1574                vc_table++;  
1575        }  
1576        /* ABR VC table */
1577        i =  ABR_VC_TABLE * iadev->memSize;
1578        writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1579                   
1580        i = ABR_VC_TABLE * iadev->memSize;
1581        abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);  
1582        j = REASS_TABLE_SZ * iadev->memSize;
1583        memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
1584        for(i = 0; i < j; i++) {                
1585                abr_vc_table->rdf = 0x0003;
1586                abr_vc_table->air = 0x5eb1;
1587                abr_vc_table++;         
1588        }  
1589
1590        /* Initialize other registers */  
1591  
1592        /* VP Filter Register set for VC Reassembly only */  
1593        writew(0xff00, iadev->reass_reg+VP_FILTER);  
1594        writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1595        writew(0x1,  iadev->reass_reg+PROTOCOL_ID);
1596
1597        /* Packet Timeout Count  related Registers : 
1598           Set packet timeout to occur in about 3 seconds
1599           Set Packet Aging Interval count register to overflow in about 4 us
1600        */  
1601        writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
1602
1603        i = (j >> 6) & 0xFF;
1604        j += 2 * (j - 1);
1605        i |= ((j << 2) & 0xFF00);
1606        writew(i, iadev->reass_reg+TMOUT_RANGE);
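            /*
             * TMOUT_RANGE packs two fields derived from the reassembly-table
             * size j: the low byte is j >> 6 and the high byte comes from
             * (3*j - 2) << 2, which together appear to set the packet-aging
             * range described in the comment above.
             */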
1607
1608        /* initialize the desc_tbl */
1609        for(i=0; i<iadev->num_tx_desc;i++)
1610            iadev->desc_tbl[i].timestamp = 0;
1611
1612        /* to clear the interrupt status register - read it */  
1613        readw(iadev->reass_reg+REASS_INTR_STATUS_REG);   
1614  
1615        /* Mask Register - clear it */  
1616        writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);  
1617  
1618        skb_queue_head_init(&iadev->rx_dma_q);  
1619        iadev->rx_free_desc_qhead = NULL;   
1620
1621        iadev->rx_open = kzalloc(4 * iadev->num_vc, GFP_KERNEL);
1622        if (!iadev->rx_open) {
1623                printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
1624                dev->number);  
1625                goto err_free_dle;
1626        }  
1627
1628        iadev->rxing = 1;
1629        iadev->rx_pkt_cnt = 0;
1630        /* Mode Register */  
1631        writew(R_ONLINE, iadev->reass_reg+MODE_REG);  
1632        return 0;  
1633
1634err_free_dle:
1635        dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1636                          iadev->rx_dle_dma);
1637err_out:
1638        return -ENOMEM;
1639}  
1640  
1641
1642/*  
1643        The memory map suggested in appendix A and the coding for it.   
1644        Keeping it around just in case we change our mind later.  
1645  
1646                Buffer descr    0x0000 (128 - 4K)  
1647                UBR sched       0x1000 (1K - 4K)  
1648                UBR Wait q      0x2000 (1K - 4K)  
1649                Commn queues    0x3000 Packet Ready, Transmit comp(0x3100)  
1650                                        (128 - 256) each  
1651                extended VC     0x4000 (1K - 8K)  
1652                ABR sched       0x6000  and ABR wait queue (1K - 2K) each  
1653                CBR sched       0x7000 (as needed)  
1654                VC table        0x8000 (1K - 32K)  
1655*/  
1656  
1657static void tx_intr(struct atm_dev *dev)  
1658{  
1659        IADEV *iadev;  
1660        unsigned short status;  
1661        unsigned long flags;
1662
1663        iadev = INPH_IA_DEV(dev);  
1664  
1665        status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);  
1666        if (status & TRANSMIT_DONE){
1667
1668           IF_EVENT(printk("Transmit Done Intr logic run\n");)
1669           spin_lock_irqsave(&iadev->tx_lock, flags);
1670           ia_tx_poll(iadev);
1671           spin_unlock_irqrestore(&iadev->tx_lock, flags);
1672           writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1673           if (iadev->close_pending)  
1674               wake_up(&iadev->close_wait);
1675        }         
1676        if (status & TCQ_NOT_EMPTY)  
1677        {  
1678            IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)  
1679        }  
1680}  
1681  
1682static void tx_dle_intr(struct atm_dev *dev)
1683{
1684        IADEV *iadev;
1685        struct dle *dle, *cur_dle; 
1686        struct sk_buff *skb;
1687        struct atm_vcc *vcc;
1688        struct ia_vcc  *iavcc;
1689        u_int dle_lp;
1690        unsigned long flags;
1691
1692        iadev = INPH_IA_DEV(dev);
1693        spin_lock_irqsave(&iadev->tx_lock, flags);   
1694        dle = iadev->tx_dle_q.read;
1695        dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) & 
1696                                        (sizeof(struct dle)*DLE_ENTRIES - 1);
1697        cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
1698        while (dle != cur_dle)
1699        {
1700            /* free the DMAed skb */ 
1701            skb = skb_dequeue(&iadev->tx_dma_q); 
1702            if (!skb) break;
1703
1704            /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
1705            if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
1706                dma_unmap_single(&iadev->pci->dev, dle->sys_pkt_addr, skb->len,
1707                                 DMA_TO_DEVICE);
1708            }
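                /* ia_pkt_tx() queues two DLEs per packet (skb data followed by
                   the CPCS trailer); only the first one carries the per-packet
                   skb mapping, hence the modulo test above before unmapping. */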
1709            vcc = ATM_SKB(skb)->vcc;
1710            if (!vcc) {
1711                  printk("tx_dle_intr: vcc is null\n");
1712                  spin_unlock_irqrestore(&iadev->tx_lock, flags);
1713                  dev_kfree_skb_any(skb);
1714
1715                  return;
1716            }
1717            iavcc = INPH_IA_VCC(vcc);
1718            if (!iavcc) {
1719                  printk("tx_dle_intr: iavcc is null\n");
1720                  spin_unlock_irqrestore(&iadev->tx_lock, flags);
1721                  dev_kfree_skb_any(skb);
1722                  return;
1723            }
1724            if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
1725               if ((vcc->pop) && (skb->len != 0))
1726               {     
1727                 vcc->pop(vcc, skb);
1728               } 
1729               else {
1730                 dev_kfree_skb_any(skb);
1731               }
1732            }
1733            else { /* Hold the rate-limited skb for flow control */
1734               IA_SKB_STATE(skb) |= IA_DLED;
1735               skb_queue_tail(&iavcc->txing_skb, skb);
1736            }
1737            IF_EVENT(printk("tx_dle_intr: enqueue skb = 0x%p \n", skb);)
1738            if (++dle == iadev->tx_dle_q.end)
1739                 dle = iadev->tx_dle_q.start;
1740        }
1741        iadev->tx_dle_q.read = dle;
1742        spin_unlock_irqrestore(&iadev->tx_lock, flags);
1743}
1744  
1745static int open_tx(struct atm_vcc *vcc)  
1746{  
1747        struct ia_vcc *ia_vcc;  
1748        IADEV *iadev;  
1749        struct main_vc *vc;  
1750        struct ext_vc *evc;  
1751        int ret;
1752        IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)  
1753        if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;  
1754        iadev = INPH_IA_DEV(vcc->dev);  
1755        
1756        if (iadev->phy_type & FE_25MBIT_PHY) {
1757           if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1758               printk("IA:  ABR not supported\n");
1759               return -EINVAL; 
1760           }
1761          if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1762               printk("IA:  CBR not supported\n");
1763               return -EINVAL; 
1764          }
1765        }
1766        ia_vcc =  INPH_IA_VCC(vcc);
1767        memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
1768        if (vcc->qos.txtp.max_sdu > 
1769                         (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1770           printk("IA:  SDU size %d exceeds the configured buffer size %d\n",
1771                  vcc->qos.txtp.max_sdu, iadev->tx_buf_sz);
1772           vcc->dev_data = NULL;
1773           kfree(ia_vcc);
1774           return -EINVAL; 
1775        }
1776        ia_vcc->vc_desc_cnt = 0;
1777        ia_vcc->txing = 1;
1778
1779        /* find pcr */
1780        if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR) 
1781           vcc->qos.txtp.pcr = iadev->LineRate;
1782        else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1783           vcc->qos.txtp.pcr = iadev->LineRate;
1784        else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0)) 
1785           vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1786        if (vcc->qos.txtp.pcr > iadev->LineRate)
1787             vcc->qos.txtp.pcr = iadev->LineRate;
1788        ia_vcc->pcr = vcc->qos.txtp.pcr;
1789
1790        if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1791        else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1792        else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1793        else ia_vcc->ltimeout = 2700 * HZ  / ia_vcc->pcr;
1794        if (ia_vcc->pcr < iadev->rate_limit)
1795           skb_queue_head_init (&ia_vcc->txing_skb);
1796        if (ia_vcc->pcr < iadev->rate_limit) {
1797           struct sock *sk = sk_atm(vcc);
1798
1799           if (vcc->qos.txtp.max_sdu != 0) {
1800               if (ia_vcc->pcr > 60000)
1801                  sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
1802               else if (ia_vcc->pcr > 2000)
1803                  sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
1804               else
1805                 sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
1806           }
1807           else
1808             sk->sk_sndbuf = 24576;
1809        }
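            /*
             * Worked example of the flow-control sizing above: a VC with
             * pcr 3000 (below iadev->rate_limit, so this block runs) and
             * max_sdu 1500 gets sk_sndbuf = 1500 * 4 = 6000, so the socket
             * layer throttles the sender instead of letting packets pile up
             * on the rate-limited VC.
             */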
1810           
1811        vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
1812        evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
1813        vc += vcc->vci;  
1814        evc += vcc->vci;  
1815        memset((caddr_t)vc, 0, sizeof(*vc));  
1816        memset((caddr_t)evc, 0, sizeof(*evc));  
1817          
1818        /* store the most significant 4 bits of vci as the last 4 bits   
1819                of first part of atm header.  
1820           store the last 12 bits of vci as first 12 bits of the second  
1821                part of the atm header.  
1822        */  
1823        evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;  
1824        evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;  
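            /* Example of the split described above: VCI 0x1234 gives
               atm_hdr1 = 0x0001 and atm_hdr2 = 0x2340. */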
1825 
1826        /* check the following for different traffic classes */  
1827        if (vcc->qos.txtp.traffic_class == ATM_UBR)  
1828        {  
1829                vc->type = UBR;  
1830                vc->status = CRC_APPEND;
1831                vc->acr = cellrate_to_float(iadev->LineRate);  
1832                if (vcc->qos.txtp.pcr > 0) 
1833                   vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);  
1834                IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n", 
1835                                             vcc->qos.txtp.max_pcr,vc->acr);)
1836        }  
1837        else if (vcc->qos.txtp.traffic_class == ATM_ABR)  
1838        {       srv_cls_param_t srv_p;
1839                IF_ABR(printk("Tx ABR VCC\n");)  
1840                init_abr_vc(iadev, &srv_p);
1841                if (vcc->qos.txtp.pcr > 0) 
1842                   srv_p.pcr = vcc->qos.txtp.pcr;
1843                if (vcc->qos.txtp.min_pcr > 0) {
1844                   int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1845                   if (tmpsum > iadev->LineRate)
1846                       return -EBUSY;
1847                   srv_p.mcr = vcc->qos.txtp.min_pcr;
1848                   iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1849                } 
1850                else srv_p.mcr = 0;
1851                if (vcc->qos.txtp.icr)
1852                   srv_p.icr = vcc->qos.txtp.icr;
1853                if (vcc->qos.txtp.tbe)
1854                   srv_p.tbe = vcc->qos.txtp.tbe;
1855                if (vcc->qos.txtp.frtt)
1856                   srv_p.frtt = vcc->qos.txtp.frtt;
1857                if (vcc->qos.txtp.rif)
1858                   srv_p.rif = vcc->qos.txtp.rif;
1859                if (vcc->qos.txtp.rdf)
1860                   srv_p.rdf = vcc->qos.txtp.rdf;
1861                if (vcc->qos.txtp.nrm_pres)
1862                   srv_p.nrm = vcc->qos.txtp.nrm;
1863                if (vcc->qos.txtp.trm_pres)
1864                   srv_p.trm = vcc->qos.txtp.trm;
1865                if (vcc->qos.txtp.adtf_pres)
1866                   srv_p.adtf = vcc->qos.txtp.adtf;
1867                if (vcc->qos.txtp.cdf_pres)
1868                   srv_p.cdf = vcc->qos.txtp.cdf;    
1869                if (srv_p.icr > srv_p.pcr)
1870                   srv_p.icr = srv_p.pcr;    
1871                IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d  mcr = %d\n", 
1872                                                      srv_p.pcr, srv_p.mcr);)
1873                ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1874        } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1875                if (iadev->phy_type & FE_25MBIT_PHY) {
1876                    printk("IA:  CBR not supported\n");
1877                    return -EINVAL; 
1878                }
1879                if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1880                   IF_CBR(printk("PCR is not available\n");)
1881                   return -1;
1882                }
1883                vc->type = CBR;
1884                vc->status = CRC_APPEND;
1885                if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {     
1886                    return ret;
1887                }
1888       } 
1889        else  
1890           printk("iadev:  Non UBR, ABR and CBR traffic not supported\n"); 
1891        
1892        iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1893        IF_EVENT(printk("ia open_tx returning \n");)  
1894        return 0;  
1895}  
1896  
1897  
1898static int tx_init(struct atm_dev *dev)  
1899{  
1900        IADEV *iadev;  
1901        struct tx_buf_desc *buf_desc_ptr;
1902        unsigned int tx_pkt_start;  
1903        void *dle_addr;  
1904        int i;  
1905        u_short tcq_st_adr;  
1906        u_short *tcq_start;  
1907        u_short prq_st_adr;  
1908        u_short *prq_start;  
1909        struct main_vc *vc;  
1910        struct ext_vc *evc;   
1911        u_short tmp16;
1912        u32 vcsize_sel;
1913 
1914        iadev = INPH_IA_DEV(dev);  
1915        spin_lock_init(&iadev->tx_lock);
1916 
1917        IF_INIT(printk("Tx MASK REG: 0x%0x\n", 
1918                                readw(iadev->seg_reg+SEG_MASK_REG));)  
1919
1920        /* Allocate 4k (boundary aligned) bytes */
1921        dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
1922                                      &iadev->tx_dle_dma, GFP_KERNEL);
1923        if (!dle_addr)  {
1924                printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1925                goto err_out;
1926        }
1927        iadev->tx_dle_q.start = (struct dle*)dle_addr;  
1928        iadev->tx_dle_q.read = iadev->tx_dle_q.start;  
1929        iadev->tx_dle_q.write = iadev->tx_dle_q.start;  
1930        iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1931
1932        /* write the upper 20 bits of the start address to tx list address register */  
1933        writel(iadev->tx_dle_dma & 0xfffff000,
1934               iadev->dma + IPHASE5575_TX_LIST_ADDR);  
1935        writew(0xffff, iadev->seg_reg+SEG_MASK_REG);  
1936        writew(0, iadev->seg_reg+MODE_REG_0);  
1937        writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);  
1938        iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
1939        iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
1940        iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
1941  
1942        /*  
1943           Transmit side control memory map  
1944           --------------------------------    
1945         Buffer descr   0x0000 (128 - 4K)  
1946         Commn queues   0x1000  Transmit comp, Packet ready(0x1400)   
1947                                        (512 - 1K) each  
1948                                        TCQ - 4K, PRQ - 5K  
1949         CBR Table      0x1800 (as needed) - 6K  
1950         UBR Table      0x3000 (1K - 4K) - 12K  
1951         UBR Wait queue 0x4000 (1K - 4K) - 16K  
1952         ABR sched      0x5000  and ABR wait queue (1K - 2K) each  
1953                                ABR Tbl - 20K, ABR Wq - 22K   
1954         extended VC    0x6000 (1K - 8K) - 24K  
1955         VC Table       0x8000 (1K - 32K) - 32K  
1956          
1957        Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl  
1958        and Wait q, which can be allotted later.  
1959        */  
1960     
1961        /* Buffer Descriptor Table Base address */  
1962        writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);  
1963  
1964        /* initialize each entry in the buffer descriptor table */  
1965        buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);  
1966        memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1967        buf_desc_ptr++;  
1968        tx_pkt_start = TX_PACKET_RAM;  
1969        for(i=1; i<=iadev->num_tx_desc; i++)  
1970        {  
1971                memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1972                buf_desc_ptr->desc_mode = AAL5;  
1973                buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;  
1974                buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;  
1975                buf_desc_ptr++;           
1976                tx_pkt_start += iadev->tx_buf_sz;  
1977        }  
1978        iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
1979        if (!iadev->tx_buf) {
1980            printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1981            goto err_free_dle;
1982        }
1983        for (i= 0; i< iadev->num_tx_desc; i++)
1984        {
1985            struct cpcs_trailer *cpcs;
1986 
1987            cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
1988            if(!cpcs) {                
1989                printk(KERN_ERR DEV_LABEL " couldn't get freepage\n"); 
1990                goto err_free_tx_bufs;
1991            }
1992            iadev->tx_buf[i].cpcs = cpcs;
1993            iadev->tx_buf[i].dma_addr = dma_map_single(&iadev->pci->dev,
1994                                                       cpcs,
1995                                                       sizeof(*cpcs),
1996                                                       DMA_TO_DEVICE);
1997        }
1998        iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
1999                                   sizeof(struct desc_tbl_t), GFP_KERNEL);
2000        if (!iadev->desc_tbl) {
2001                printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
2002                goto err_free_all_tx_bufs;
2003        }
2004  
2005        /* Communication Queues base address */  
2006        i = TX_COMP_Q * iadev->memSize;
2007        writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);  
2008  
2009        /* Transmit Complete Queue */  
2010        writew(i, iadev->seg_reg+TCQ_ST_ADR);  
2011        writew(i, iadev->seg_reg+TCQ_RD_PTR);  
2012        writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR); 
2013        iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
2014        writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
2015                                              iadev->seg_reg+TCQ_ED_ADR); 
2016        /* Fill the TCQ with all the free descriptors. */  
2017        tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);  
2018        tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);  
2019        for(i=1; i<=iadev->num_tx_desc; i++)  
2020        {  
2021                *tcq_start = (u_short)i;  
2022                tcq_start++;  
2023        }  
2024  
2025        /* Packet Ready Queue */  
2026        i = PKT_RDY_Q * iadev->memSize; 
2027        writew(i, iadev->seg_reg+PRQ_ST_ADR);  
2028        writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
2029                                              iadev->seg_reg+PRQ_ED_ADR);
2030        writew(i, iadev->seg_reg+PRQ_RD_PTR);  
2031        writew(i, iadev->seg_reg+PRQ_WR_PTR);  
2032         
2033        /* Load local copy of PRQ and TCQ ptrs */
2034        iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
2035        iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
2036        iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
2037
2038        iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
2039        iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
2040        iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
2041
2042        /* Just for safety initializing the queue to have desc 1 always */  
2043        /* Fill the PRQ with all the free descriptors. */  
2044        prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);  
2045        prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);  
2046        for(i=1; i<=iadev->num_tx_desc; i++)  
2047        {  
2048                *prq_start = (u_short)0;        /* desc 1 in all entries */  
2049                prq_start++;  
2050        }  
2051        /* CBR Table */  
2052        IF_INIT(printk("Start CBR Init\n");)
2053#if 1  /* for 1K VC board, CBR_PTR_BASE is 0 */
2054        writew(0,iadev->seg_reg+CBR_PTR_BASE);
2055#else /* Charlie's logic is wrong ? */
2056        tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2057        IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
2058        writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2059#endif
2060
2061        IF_INIT(printk("value in register = 0x%x\n",
2062                                   readw(iadev->seg_reg+CBR_PTR_BASE));)
2063        tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2064        writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2065        IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2066                                        readw(iadev->seg_reg+CBR_TAB_BEG));)
2067        writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2068        tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2069        writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2070        IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
2071               iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2072        IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2073          readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2074          readw(iadev->seg_reg+CBR_TAB_END+1));)
2075
2076        /* Initialize the CBR Scheduling Table */
2077        memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize, 
2078                                                          0, iadev->num_vc*6); 
2079        iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2080        iadev->CbrEntryPt = 0;
2081        iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2082        iadev->NumEnabledCBR = 0;
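            /*
             * CBR bookkeeping: the schedule holds num_vc * 3 slots (num_vc * 6
             * bytes of 16-bit entries), so a 1K-VC board has 3072 slots and
             * each slot corresponds to MAX_ATM_155 / 3072 cells/s of CBR
             * bandwidth (iadev->Granularity).
             */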
2083
2084        /* UBR scheduling Table and wait queue */  
2085        /* initialize all bytes of UBR scheduler table and wait queue to 0   
2086                - SCHEDSZ is 1K (# of entries).  
2087                - UBR Table size is 4K  
2088                - UBR wait queue is 4K  
2089           since the table and wait queues are contiguous, all the bytes   
2090           can be initialized by one memset.
2091        */  
2092        
2093        vcsize_sel = 0;
2094        i = 8*1024;
2095        while (i != iadev->num_vc) {
2096          i /= 2;
2097          vcsize_sel++;
2098        }
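            /* Same size encoding as in rx_init(): 8K halved down to num_vc,
               so a 1K-VC board ends up with vcsize_sel == 3. */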
2099 
2100        i = MAIN_VC_TABLE * iadev->memSize;
2101        writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
2102        i =  EXT_VC_TABLE * iadev->memSize;
2103        writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
2104        i = UBR_SCHED_TABLE * iadev->memSize;
2105        writew((i & 0xffff) >> 11,  iadev->seg_reg+UBR_SBPTR_BASE);
2106        i = UBR_WAIT_Q * iadev->memSize; 
2107        writew((i >> 7) & 0xffff,  iadev->seg_reg+UBRWQ_BASE);
2108        memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
2109                                                       0, iadev->num_vc*8);
2110        /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/  
2111        /* initialize all bytes of ABR scheduler table and wait queue to 0   
2112                - SCHEDSZ is 1K (# of entries).  
2113                - ABR Table size is 2K  
2114                - ABR wait queue is 2K  
2115           since the table and wait queues are contiguous, all the bytes   
2116           can be initialized by one memset.
2117        */  
2118        i = ABR_SCHED_TABLE * iadev->memSize;
2119        writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
2120        i = ABR_WAIT_Q * iadev->memSize;
2121        writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
2122 
2123        i = ABR_SCHED_TABLE*iadev->memSize;
2124        memset((caddr_t)(iadev->seg_ram+i),  0, iadev->num_vc*4);
2125        vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
2126        evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
2127        iadev->testTable = kmalloc(sizeof(long)*iadev->num_vc, GFP_KERNEL); 
2128        if (!iadev->testTable) {
2129           printk("Get freepage  failed\n");
2130           goto err_free_desc_tbl;
2131        }
2132        for(i=0; i<iadev->num_vc; i++)  
2133        {  
2134                memset((caddr_t)vc, 0, sizeof(*vc));  
2135                memset((caddr_t)evc, 0, sizeof(*evc));  
2136                iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
2137                                                GFP_KERNEL);
2138                if (!iadev->testTable[i])
2139                        goto err_free_test_tables;
2140                iadev->testTable[i]->lastTime = 0;
2141                iadev->testTable[i]->fract = 0;
2142                iadev->testTable[i]->vc_status = VC_UBR;
2143                vc++;  
2144                evc++;  
2145        }  
2146  
2147        /* Other Initialization */  
2148          
2149        /* Max Rate Register */  
2150        if (iadev->phy_type & FE_25MBIT_PHY) {
2151           writew(RATE25, iadev->seg_reg+MAXRATE);  
2152           writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2153        }
2154        else {
2155           writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
2156           writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2157        }
2158        /* Set Idle Header Registers to be sure */  
2159        writew(0, iadev->seg_reg+IDLEHEADHI);  
2160        writew(0, iadev->seg_reg+IDLEHEADLO);  
2161  
2162        /* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
2163        writew(0xaa00, iadev->seg_reg+ABRUBR_ARB); 
2164
2165        iadev->close_pending = 0;
2166        init_waitqueue_head(&iadev->close_wait);
2167        init_waitqueue_head(&iadev->timeout_wait);
2168        skb_queue_head_init(&iadev->tx_dma_q);  
2169        ia_init_rtn_q(&iadev->tx_return_q);  
2170
2171        /* RM Cell Protocol ID and Message Type */  
2172        writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);  
2173        skb_queue_head_init (&iadev->tx_backlog);
2174  
2175        /* Mode Register 1 */  
2176        writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);  
2177  
2178        /* Mode Register 0 */  
2179        writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);  
2180  
2181        /* Interrupt Status Register - read to clear */  
2182        readw(iadev->seg_reg+SEG_INTR_STATUS_REG);  
2183  
2184        /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */  
2185        writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
2186        writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);  
2187        iadev->tx_pkt_cnt = 0;
2188        iadev->rate_limit = iadev->LineRate / 3;
2189  
2190        return 0;
2191
2192err_free_test_tables:
2193        while (--i >= 0)
2194                kfree(iadev->testTable[i]);
2195        kfree(iadev->testTable);
2196err_free_desc_tbl:
2197        kfree(iadev->desc_tbl);
2198err_free_all_tx_bufs:
2199        i = iadev->num_tx_desc;
2200err_free_tx_bufs:
2201        while (--i >= 0) {
2202                struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2203
2204                dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
2205                                 sizeof(*desc->cpcs), DMA_TO_DEVICE);
2206                kfree(desc->cpcs);
2207        }
2208        kfree(iadev->tx_buf);
2209err_free_dle:
2210        dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2211                          iadev->tx_dle_dma);
2212err_out:
2213        return -ENOMEM;
2214}   
2215   
2216static irqreturn_t ia_int(int irq, void *dev_id)  
2217{  
2218   struct atm_dev *dev;  
2219   IADEV *iadev;  
2220   unsigned int status;  
2221   int handled = 0;
2222
2223   dev = dev_id;  
2224   iadev = INPH_IA_DEV(dev);  
2225   while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))  
2226   { 
2227        handled = 1;
2228        IF_EVENT(printk("ia_int: status = 0x%x\n", status);) 
2229        if (status & STAT_REASSINT)  
2230        {  
2231           /* do something */  
2232           IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);) 
2233           rx_intr(dev);  
2234        }  
2235        if (status & STAT_DLERINT)  
2236        {  
2237           /* Clear this bit by writing a 1 to it. */  
2238           writel(STAT_DLERINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
2239           rx_dle_intr(dev);  
2240        }  
2241        if (status & STAT_SEGINT)  
2242        {  
2243           /* do something */ 
2244           IF_EVENT(printk("IA: tx_intr \n");) 
2245           tx_intr(dev);  
2246        }  
2247        if (status & STAT_DLETINT)  
2248        {  
2249           writel(STAT_DLETINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
2250           tx_dle_intr(dev);  
2251        }  
2252        if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))  
2253        {  
2254           if (status & STAT_FEINT) 
2255               ia_frontend_intr(iadev);
2256        }  
2257   }
2258   return IRQ_RETVAL(handled);
2259}  
2260          
2261          
2262          
2263/*----------------------------- entries --------------------------------*/  
2264static int get_esi(struct atm_dev *dev)  
2265{  
2266        IADEV *iadev;  
2267        int i;  
2268        u32 mac1;  
2269        u16 mac2;  
2270          
2271        iadev = INPH_IA_DEV(dev);  
2272        mac1 = cpu_to_be32(le32_to_cpu(readl(  
2273                                iadev->reg+IPHASE5575_MAC1)));  
2274        mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));  
2275        IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)  
2276        for (i=0; i<MAC1_LEN; i++)  
2277                dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));  
2278          
2279        for (i=0; i<MAC2_LEN; i++)  
2280                dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));  
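            /* The ESI is assembled most-significant byte first: MAC1 supplies
               the first MAC1_LEN bytes and MAC2 the remaining MAC2_LEN bytes,
               matching the 0x%08x%04x debug format above. */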
2281        return 0;  
2282}  
2283          
2284static int reset_sar(struct atm_dev *dev)  
2285{  
2286        IADEV *iadev;  
2287        int i, error = 1;  
2288        unsigned int pci[64];  
2289          
2290        iadev = INPH_IA_DEV(dev);  
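            /* Save all 64 dwords of PCI config space, write the external reset
               register (which appears to clobber config state), then restore
               the saved copy and let the SAR settle for a few microseconds. */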
2291        for(i=0; i<64; i++)  
2292          if ((error = pci_read_config_dword(iadev->pci,  
2293                                i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)  
2294              return error;  
2295        writel(0, iadev->reg+IPHASE5575_EXT_RESET);  
2296        for(i=0; i<64; i++)  
2297          if ((error = pci_write_config_dword(iadev->pci,  
2298                                        i*4, pci[i])) != PCIBIOS_SUCCESSFUL)  
2299            return error;  
2300        udelay(5);  
2301        return 0;  
2302}  
2303          
2304          
2305static int ia_init(struct atm_dev *dev)
2306{  
2307        IADEV *iadev;  
2308        unsigned long real_base;
2309        void __iomem *base;
2310        unsigned short command;  
2311        int error, i; 
2312          
2313        /* The device has been identified and registered. Now we read   
2314           necessary configuration info like memory base address,   
2315           interrupt number etc */  
2316          
2317        IF_INIT(printk(">ia_init\n");)  
2318        dev->ci_range.vpi_bits = 0;  
2319        dev->ci_range.vci_bits = NR_VCI_LD;  
2320
2321        iadev = INPH_IA_DEV(dev);  
2322        real_base = pci_resource_start (iadev->pci, 0);
2323        iadev->irq = iadev->pci->irq;
2324                  
2325        error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
2326        if (error) {
2327                printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",  
2328                                dev->number,error);  
2329                return -EINVAL;  
2330        }  
2331        IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",  
2332                        dev->number, iadev->pci->revision, real_base, iadev->irq);)
2333          
2334        /* find mapping size of board */  
2335          
2336        iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
2337
2338        if (iadev->pci_map_size == 0x100000){
2339          iadev->num_vc = 4096;
2340          dev->ci_range.vci_bits = NR_VCI_4K_LD;  
2341          iadev->memSize = 4;
2342        }
2343        else if (iadev->pci_map_size == 0x40000) {
2344          iadev->num_vc = 1024;
2345          iadev->memSize = 1;
2346        }
2347        else {
2348           printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
2349           return -EINVAL;
2350        }
2351        IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)  
2352          
2353        /* enable bus mastering */
2354        pci_set_master(iadev->pci);
2355
2356        /*  
2357         * Delay at least 1us before doing any mem accesses (how 'bout 10?)  
2358         */  
2359        udelay(10);  
2360          
2361        /* mapping the physical address to a virtual address in address space */  
2362        base = ioremap(real_base, iadev->pci_map_size);
2363          
2364        if (!base)  
2365        {  
2366                printk(DEV_LABEL " (itf %d): can't set up page mapping\n",  
2367                            dev->number);  
2368                return -ENOMEM;
2369        }  
2370        IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",  
2371                        dev->number, iadev->pci->revision, base, iadev->irq);)
2372          
2373        /* filling the iphase dev structure */  
2374        iadev->mem = iadev->pci_map_size /2;  
2375        iadev->real_base = real_base;  
2376        iadev->base = base;  
2377                  
2378        /* Bus Interface Control Registers */  
2379        iadev->reg = base + REG_BASE;
2380        /* Segmentation Control Registers */  
2381        iadev->seg_reg = base + SEG_BASE;
2382        /* Reassembly Control Registers */  
2383        iadev->reass_reg = base + REASS_BASE;  
2384        /* Front end/ DMA control registers */  
2385        iadev->phy = base + PHY_BASE;  
2386        iadev->dma = base + PHY_BASE;  
2387        /* RAM - Segmentation RAM and Reassembly RAM */  
2388        iadev->ram = base + ACTUAL_RAM_BASE;  
2389        iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;  
2390        iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;  
2391  
2392        /* let's print out the above */  
2393        IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n", 
2394          iadev->reg,iadev->seg_reg,iadev->reass_reg, 
2395          iadev->phy, iadev->ram, iadev->seg_ram, 
2396          iadev->reass_ram);) 
2397          
2398        /* let's try reading the MAC address */  
2399        error = get_esi(dev);  
2400        if (error) {
2401          iounmap(iadev->base);
2402          return error;  
2403        }
2404        printk("IA: ");
2405        for (i=0; i < ESI_LEN; i++)  
2406                printk("%s%02X",i ? "-" : "",dev->esi[i]);  
2407        printk("\n");  
2408  
2409        /* reset SAR */  
2410        if (reset_sar(dev)) {
2411           iounmap(iadev->base);
2412           printk("IA: reset SAR failed, please try again\n");
2413           return 1;
2414        }
2415        return 0;  
2416}  
2417
2418static void ia_update_stats(IADEV *iadev) {
2419    if (!iadev->carrier_detect)
2420        return;
2421    iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2422    iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2423    iadev->drop_rxpkt +=  readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2424    iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2425    iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2426    iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2427    return;
2428}
2429  
2430static void ia_led_timer(unsigned long arg) {
2431        unsigned long flags;
2432        static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2433        u_char i;
2434        static u32 ctrl_reg; 
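            /* Runs every HZ/4 (see mod_timer below): each tick flips the
               adapter LED bit and alternates between refreshing the statistics
               counters and polling the transmit side / waking pending closes. */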
2435        for (i = 0; i < iadev_count; i++) {
2436           if (ia_dev[i]) {
2437              ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2438              if (blinking[i] == 0) {
2439                 blinking[i]++;
2440                 ctrl_reg &= (~CTRL_LED);
2441                 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2442                 ia_update_stats(ia_dev[i]);
2443              }
2444              else {
2445                 blinking[i] = 0;
2446                 ctrl_reg |= CTRL_LED;
2447                 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2448                 spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2449                 if (ia_dev[i]->close_pending)  
2450                    wake_up(&ia_dev[i]->close_wait);
2451                 ia_tx_poll(ia_dev[i]);
2452                 spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2453              }
2454           }
2455        }
2456        mod_timer(&ia_timer, jiffies + HZ / 4);
2457        return;
2458}
2459
2460static void ia_phy_put(struct atm_dev *dev, unsigned char value,   
2461        unsigned long addr)  
2462{  
2463        writel(value, INPH_IA_DEV(dev)->phy+addr);  
2464}  
2465  
2466static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)  
2467{  
2468        return readl(INPH_IA_DEV(dev)->phy+addr);  
2469}  
2470
2471static void ia_free_tx(IADEV *iadev)
2472{
2473        int i;
2474
2475        kfree(iadev->desc_tbl);
2476        for (i = 0; i < iadev->num_vc; i++)
2477                kfree(iadev->testTable[i]);
2478        kfree(iadev->testTable);
2479        for (i = 0; i < iadev->num_tx_desc; i++) {
2480                struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2481
2482                dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
2483                                 sizeof(*desc->cpcs), DMA_TO_DEVICE);
2484                kfree(desc->cpcs);
2485        }
2486        kfree(iadev->tx_buf);
2487        dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2488                          iadev->tx_dle_dma);
2489}
2490
2491static void ia_free_rx(IADEV *iadev)
2492{
2493        kfree(iadev->rx_open);
2494        dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2495                          iadev->rx_dle_dma);
2496}
2497
2498static int ia_start(struct atm_dev *dev)
2499{  
2500        IADEV *iadev;  
2501        int error;  
2502        unsigned char phy;  
2503        u32 ctrl_reg;  
2504        IF_EVENT(printk(">ia_start\n");)  
2505        iadev = INPH_IA_DEV(dev);  
2506        if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
2507                printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",  
2508                    dev->number, iadev->irq);  
2509                error = -EAGAIN;
2510                goto err_out;
2511        }  
2512        /* the error paths below release the IRQ via err_free_irq */  
2513        /* enabling memory + master */  
2514        if ((error = pci_write_config_word(iadev->pci,   
2515                                PCI_COMMAND,   
2516                                PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))   
2517        {  
2518                printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"  
2519                    "master (0x%x)\n",dev->number, error);  
2520                error = -EIO;  
2521                goto err_free_irq;
2522        }  
2523        udelay(10);  
2524  
2525        /* Maybe we should reset the front end, initialize Bus Interface Control   
2526                Registers and see. */  
2527  
2528        IF_INIT(printk("Bus ctrl reg: %08x\n", 
2529                            readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2530        ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2531        ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))  
2532                        | CTRL_B8  
2533                        | CTRL_B16  
2534                        | CTRL_B32  
2535                        | CTRL_B48  
2536                        | CTRL_B64  
2537                        | CTRL_B128  
2538                        | CTRL_ERRMASK  
2539                        | CTRL_DLETMASK         /* should be removed later */  
2540                        | CTRL_DLERMASK  
2541                        | CTRL_SEGMASK  
2542                        | CTRL_REASSMASK          
2543                        | CTRL_FEMASK  
2544                        | CTRL_CSPREEMPT;  
2545  
2546       writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2547  
2548        IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2549                           readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));  
2550           printk("Bus status reg after init: %08x\n", 
2551                            readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)  
2552    
2553        ia_hw_type(iadev); 
2554        error = tx_init(dev);  
2555        if (error)
2556                goto err_free_irq;
2557        error = rx_init(dev);  
2558        if (error)
2559                goto err_free_tx;
2560  
2561        ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2562        writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2563        IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2564                               readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2565        phy = 0; /* resolve compiler complaint */
2566        IF_INIT ( 
2567        if ((phy=ia_phy_get(dev,0)) == 0x30)  
2568                printk("IA: pm5346,rev.%d\n",phy&0x0f);  
2569        else  
2570                printk("IA: utopia,rev.%0x\n",phy);) 
2571
2572        if (iadev->phy_type &  FE_25MBIT_PHY)
2573           ia_mb25_init(iadev);
2574        else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
2575           ia_suni_pm7345_init(iadev);
2576        else {
2577                error = suni_init(dev);
2578                if (error)
2579                        goto err_free_rx;
2580                if (dev->phy->start) {
2581                        error = dev->phy->start(dev);
2582                        if (error)
2583                                goto err_free_rx;
2584                }
2585                /* Get iadev->carrier_detect status */
2586                ia_frontend_intr(iadev);
2587        }
2588        return 0;
2589
2590err_free_rx:
2591        ia_free_rx(iadev);
2592err_free_tx:
2593        ia_free_tx(iadev);
2594err_free_irq:
2595        free_irq(iadev->irq, dev);  
2596err_out:
2597        return error;
2598}  
2599  
2600static void ia_close(struct atm_vcc *vcc)  
2601{
2602        DEFINE_WAIT(wait);
2603        u16 *vc_table;
2604        IADEV *iadev;
2605        struct ia_vcc *ia_vcc;
2606        struct sk_buff *skb = NULL;
2607        struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
2608        unsigned long closetime, flags;
2609
2610        iadev = INPH_IA_DEV(vcc->dev);
2611        ia_vcc = INPH_IA_VCC(vcc);
2612        if (!ia_vcc) return;  
2613
2614        IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n", 
2615                                              ia_vcc->vc_desc_cnt,vcc->vci);)
2616        clear_bit(ATM_VF_READY,&vcc->flags);
2617        skb_queue_head_init (&tmp_tx_backlog);
2618        skb_queue_head_init (&tmp_vcc_backlog); 
2619        if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2620           iadev->close_pending++;
2621           prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
2622           schedule_timeout(msecs_to_jiffies(500));
2623           finish_wait(&iadev->timeout_wait, &wait);
2624           spin_lock_irqsave(&iadev->tx_lock, flags); 
2625           while((skb = skb_dequeue(&iadev->tx_backlog))) {
2626              if (ATM_SKB(skb)->vcc == vcc){ 
2627                 if (vcc->pop) vcc->pop(vcc, skb);
2628                 else dev_kfree_skb_any(skb);
2629              }
2630              else 
2631                 skb_queue_tail(&tmp_tx_backlog, skb);
2632           } 
2633           while((skb = skb_dequeue(&tmp_tx_backlog))) 
2634             skb_queue_tail(&iadev->tx_backlog, skb);
2635           IF_EVENT(printk("IA TX Done desc_cnt = %d\n", ia_vcc->vc_desc_cnt);) 
2636           closetime = 300000 / ia_vcc->pcr;
2637           if (closetime == 0)
2638              closetime = 1;
2639           spin_unlock_irqrestore(&iadev->tx_lock, flags);
2640           wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
2641           spin_lock_irqsave(&iadev->tx_lock, flags);
2642           iadev->close_pending--;
2643           iadev->testTable[vcc->vci]->lastTime = 0;
2644           iadev->testTable[vcc->vci]->fract = 0; 
2645           iadev->testTable[vcc->vci]->vc_status = VC_UBR; 
2646           if (vcc->qos.txtp.traffic_class == ATM_ABR) {
2647              if (vcc->qos.txtp.min_pcr > 0)
2648                 iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
2649           }
2650           if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2651              ia_vcc = INPH_IA_VCC(vcc); 
2652              iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
2653              ia_cbrVc_close (vcc);
2654           }
2655           spin_unlock_irqrestore(&iadev->tx_lock, flags);
2656        }
2657        
2658        if (vcc->qos.rxtp.traffic_class != ATM_NONE) {   
2659           // reset reass table
2660           vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
2661           vc_table += vcc->vci; 
2662           *vc_table = NO_AAL5_PKT;
2663           // reset vc table
2664           vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
2665           vc_table += vcc->vci;
2666           *vc_table = (vcc->vci << 6) | 15;
2667           if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
2668              struct abr_vc_table __iomem *abr_vc_table = 
2669                                (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
2670              abr_vc_table +=  vcc->vci;
2671              abr_vc_table->rdf = 0x0003;
2672              abr_vc_table->air = 0x5eb1;
2673           }                                 
2674           // Drain the packets
2675           rx_dle_intr(vcc->dev); 
2676           iadev->rx_open[vcc->vci] = NULL;
2677        }
2678        kfree(INPH_IA_VCC(vcc));  
2679        ia_vcc = NULL;
2680        vcc->dev_data = NULL;
2681        clear_bit(ATM_VF_ADDR,&vcc->flags);
2682        return;        
2683}  
2684  
2685static int ia_open(struct atm_vcc *vcc)
2686{  
2687        struct ia_vcc *ia_vcc;  
2688        int error;  
2689        if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))  
2690        {  
2691                IF_EVENT(printk("ia: not partially allocated resources\n");)  
2692                vcc->dev_data = NULL;
2693        }  
2694        if (vcc->vci != ATM_VCI_UNSPEC && vcc->vpi != ATM_VPI_UNSPEC)  
2695        {  
2696                IF_EVENT(printk("iphase open: unspec part\n");)  
2697                set_bit(ATM_VF_ADDR,&vcc->flags);
2698        }  
2699        if (vcc->qos.aal != ATM_AAL5)  
2700                return -EINVAL;  
2701        IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n", 
2702                                 vcc->dev->number, vcc->vpi, vcc->vci);)  
2703  
2704        /* Device dependent initialization */  
2705        ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);  
2706        if (!ia_vcc) return -ENOMEM;  
2707        vcc->dev_data = ia_vcc;
2708  
2709        if ((error = open_rx(vcc)))  
2710        {  
2711                IF_EVENT(printk("iadev: error in open_rx, closing\n");)  
2712                ia_close(vcc);  
2713                return error;  
2714        }  
2715  
2716        if ((error = open_tx(vcc)))  
2717        {  
2718                IF_EVENT(printk("iadev: error in open_tx, closing\n");)  
2719                ia_close(vcc);  
2720                return error;  
2721        }  
2722  
2723        set_bit(ATM_VF_READY,&vcc->flags);
2724
2725#if 0
2726        {
2727           static u8 first = 1; 
2728           if (first) {
2729              ia_timer.expires = jiffies + 3*HZ;
2730              add_timer(&ia_timer);
2731              first = 0;
2732           }           
2733        }
2734#endif
2735        IF_EVENT(printk("ia open returning\n");)  
2736        return 0;  
2737}  
2738  
2739static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)  
2740{  
2741        IF_EVENT(printk(">ia_change_qos\n");)  
2742        return 0;  
2743}  
2744  
2745static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)  
2746{  
2747   IA_CMDBUF ia_cmds;
2748   IADEV *iadev;
2749   int i, board;
2750   u16 __user *tmps;
2751   IF_EVENT(printk(">ia_ioctl\n");)  
2752   if (cmd != IA_CMD) {
2753      if (!dev->phy->ioctl) return -EINVAL;
2754      return dev->phy->ioctl(dev,cmd,arg);
2755   }
2756   if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT; 
2757   board = ia_cmds.status;
2758   if ((board < 0) || (board >= iadev_count))
2759         board = 0;    
2760   iadev = ia_dev[board];
2761   switch (ia_cmds.cmd) {
2762   case MEMDUMP:
2763   {
2764        switch (ia_cmds.sub_cmd) {
2765          case MEMDUMP_DEV:     
2766             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2767             if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
2768                return -EFAULT;
2769             ia_cmds.status = 0;
2770             break;
2771          case MEMDUMP_SEGREG:
2772             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2773             tmps = (u16 __user *)ia_cmds.buf;
2774             for(i=0; i<0x80; i+=2, tmps++)
2775                if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2776             ia_cmds.status = 0;
2777             ia_cmds.len = 0x80;
2778             break;
2779          case MEMDUMP_REASSREG:
2780             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2781             tmps = (u16 __user *)ia_cmds.buf;
2782             for(i=0; i<0x80; i+=2, tmps++)
2783                if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2784             ia_cmds.status = 0;
2785             ia_cmds.len = 0x80;
2786             break;
2787          case MEMDUMP_FFL:
2788          {  
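                 /* Note: each readl() below keeps only the low halfword
                  * (& 0xffff), so the segmentation/reassembly registers
                  * appear to be 16 bits wide even though they are copied
                  * into 32-bit slots of the local register image.
                  */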
2789             ia_regs_t       *regs_local;
2790             ffredn_t        *ffL;
2791             rfredn_t        *rfL;
2792                     
2793             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2794             regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2795             if (!regs_local) return -ENOMEM;
2796             ffL = &regs_local->ffredn;
2797             rfL = &regs_local->rfredn;
2798             /* Copy real rfred registers into the local copy */
2799             for (i=0; i<(sizeof (rfredn_t))/4; i++)
2800                ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2801             /* Copy real ffred registers into the local copy */
2802             for (i=0; i<(sizeof (ffredn_t))/4; i++)
2803                ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2804
2805             if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2806                kfree(regs_local);
2807                return -EFAULT;
2808             }
2809             kfree(regs_local);
2810             printk("Board %d registers dumped\n", board);
2811             ia_cmds.status = 0;                  
2812         }      
2813             break;        
2814         case READ_REG:
2815         {  
2816             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2817             desc_dbg(iadev); 
2818             ia_cmds.status = 0; 
2819         }
2820             break;
2821         case 0x6:
2822         {  
2823             ia_cmds.status = 0; 
2824             printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
2825             printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
2826         }
2827             break;
2828         case 0x8:
2829         {
2830             struct k_sonet_stats *stats;
2831             stats = &PRIV(_ia_dev[board])->sonet_stats;
2832             printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2833             printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
2834             printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
2835             printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
2836             printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
2837             printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
2838             printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2839             printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
2840             printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
2841         }
2842            ia_cmds.status = 0;
2843            break;
2844         case 0x9:
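                /* ioctl sub-command 0x9: put every rx descriptor back on the
                 * free queue, unmask the free-queue-empty and exception
                 * interrupts, and mark reassembly as running again.
                 */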
2845            if (!capable(CAP_NET_ADMIN)) return -EPERM;
2846            for (i = 1; i <= iadev->num_rx_desc; i++)
2847               free_desc(_ia_dev[board], i);
2848            writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD), 
2849                                            iadev->reass_reg+REASS_MASK_REG);
2850            iadev->rxing = 1;
2851            
2852            ia_cmds.status = 0;
2853            break;
2854
2855         case 0xb:
2856            if (!capable(CAP_NET_ADMIN)) return -EPERM;
2857            ia_frontend_intr(iadev);
2858            break;
2859         case 0xa:
2860            if (!capable(CAP_NET_ADMIN)) return -EPERM;
2861         {  
2862             ia_cmds.status = 0; 
2863             IADebugFlag = ia_cmds.maddr;
2864             printk("New debug option loaded\n");
2865         }
2866             break;
2867         default:
2868             ia_cmds.status = 0;
2869             break;
2870      } 
2871   }
2872      break;
2873   default:
2874      break;
2875
2876   }    
2877   return 0;  
2878}  
2879  
2880static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,   
2881        void __user *optval, int optlen)  
2882{  
2883        IF_EVENT(printk(">ia_getsockopt\n");)  
2884        return -EINVAL;  
2885}  
2886  
2887static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,   
2888        void __user *optval, unsigned int optlen)  
2889{  
2890        IF_EVENT(printk(">ia_setsockopt\n");)  
2891        return -EINVAL;  
2892}  
2893  
2894static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2895        IADEV *iadev;
2896        struct dle *wr_ptr;
2897        struct tx_buf_desc __iomem *buf_desc_ptr;
2898        int desc;
2899        int comp_code;
2900        int total_len;
2901        struct cpcs_trailer *trailer;
2902        struct ia_vcc *iavcc;
2903
2904        iadev = INPH_IA_DEV(vcc->dev);  
2905        iavcc = INPH_IA_VCC(vcc);
2906        if (!iavcc->txing) {
2907           printk("discard packet on closed VC\n");
2908           if (vcc->pop)
2909                vcc->pop(vcc, skb);
2910           else
2911                dev_kfree_skb_any(skb);
2912           return 0;
2913        }
2914
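            /* Sanity checks before grabbing a descriptor.  The 8 bytes
             * reserved below appear to correspond to sizeof(struct
             * cpcs_trailer), i.e. the AAL5 trailer appended further down;
             * ia_send() makes the equivalent check with the sizeof()
             * spelled out.
             */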
2915        if (skb->len > iadev->tx_buf_sz - 8) {
2916           printk("Transmit size over tx buffer size\n");
2917           if (vcc->pop)
2918                 vcc->pop(vcc, skb);
2919           else
2920                 dev_kfree_skb_any(skb);
2921           return 0;
2922        }
2923        if ((unsigned long)skb->data & 3) {
2924           printk("Misaligned SKB\n");
2925           if (vcc->pop)
2926                 vcc->pop(vcc, skb);
2927           else
2928                 dev_kfree_skb_any(skb);
2929           return 0;
2930        }       
2931        /* Get a descriptor number from our free descriptor queue.  
2932           The descriptor number is taken from the TCQ, which doubles as a  
2933           free buffer queue here: the TCQ is initialized with all the  
2934           descriptors and is therefore full to begin with.  
2935        */
2936        desc = get_desc (iadev, iavcc);
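            /* get_desc() hands back 0xffff when the TCQ holds no free
             * descriptor; returning 1 here makes ia_send() park the skb on
             * tx_backlog so it can be retried later.
             */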
2937        if (desc == 0xffff) 
2938            return 1;
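            /* The value read from the TCQ packs a completion code in bits
             * 15:13 and the descriptor number in bits 12:0, hence the shift
             * and mask below.
             */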
2939        comp_code = desc >> 13;  
2940        desc &= 0x1fff;  
2941  
2942        if ((desc == 0) || (desc > iadev->num_tx_desc))  
2943        {  
2944                IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);) 
2945                atomic_inc(&vcc->stats->tx);
2946                if (vcc->pop)   
2947                    vcc->pop(vcc, skb);   
2948                else  
2949                    dev_kfree_skb_any(skb);
2950                return 0;   /* return SUCCESS */
2951        }  
2952  
2953        if (comp_code)  
2954        {  
2955            IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n", 
2956                                                            desc, comp_code);)  
2957        }  
2958       
2959        /* remember the desc and vcc mapping */
2960        iavcc->vc_desc_cnt++;
2961        iadev->desc_tbl[desc-1].iavcc = iavcc;
2962        iadev->desc_tbl[desc-1].txskb = skb;
2963        IA_SKB_STATE(skb) = 0;
2964
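            /* Consume one 16-bit TCQ entry: bump the read pointer, wrap from
             * tcq_ed back to tcq_st at the end of the queue, and write the
             * new value to the TCQ read-pointer register.
             */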
2965        iadev->ffL.tcq_rd += 2;
2966        if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2967                iadev->ffL.tcq_rd  = iadev->ffL.tcq_st;
2968        writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
2969  
2970        /* Put the descriptor number in the packet ready queue  
2971                and put the updated write pointer in the DLE field   
2972        */   
2973        *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc; 
2974
2975        iadev->ffL.prq_wr += 2;
2976        if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2977                iadev->ffL.prq_wr = iadev->ffL.prq_st;
2978          
2979        /* Figure out the exact length of the packet and padding required to 
2980           make it  aligned on a 48 byte boundary.  */
2981        total_len = skb->len + sizeof(struct cpcs_trailer);  
2982        total_len = ((total_len + 47) / 48) * 48;
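            /* e.g. a 100-byte skb plus the 8-byte CPCS trailer gives 108
             * bytes, which rounds up to 144 (three 48-byte cell payloads).
             */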
2983        IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)  
2984 
2985        /* Put the packet in a tx buffer */   
2986        trailer = iadev->tx_buf[desc-1].cpcs;
2987        IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
2988                  skb, skb->data, skb->len, desc);)
2989        trailer->control = 0; 
2990        /*big endian*/ 
2991        trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
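            /* Open-coded 16-bit byte swap, the same operation as the
             * swap_byte_order() macro used in the commented-out line further
             * down: a length of 0x0234 is stored as 0x3402.
             */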
2992        trailer->crc32 = 0;     /* not needed - dummy bytes */  
2993
2994        /* Display the packet */  
2995        IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n", 
2996                                                        skb->len, tcnter++);  
2997        xdump(skb->data, skb->len, "TX: ");
2998        printk("\n");)
2999
3000        /* Build the buffer descriptor */  
3001        buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
3002        buf_desc_ptr += desc;   /* points to the corresponding entry */  
3003        buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;   
3004        /* Huh? p.115 of the user's guide describes this as a read-only register */
3005        writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
3006        buf_desc_ptr->vc_index = vcc->vci;
3007        buf_desc_ptr->bytes = total_len;  
3008
3009        if (vcc->qos.txtp.traffic_class == ATM_ABR)  
3010           clear_lockup (vcc, iadev);
3011
3012        /* Build the DLE structure */  
3013        wr_ptr = iadev->tx_dle_q.write;  
3014        memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));  
3015        wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
3016                                              skb->len, DMA_TO_DEVICE);
3017        wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) | 
3018                                                  buf_desc_ptr->buf_start_lo;  
3019        /* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
3020        wr_ptr->bytes = skb->len;  
3021
3022        /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
3023        if ((wr_ptr->bytes >> 2) == 0xb)
3024           wr_ptr->bytes = 0x30;
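            /* The test above matches any DLE byte count in the range
             * 0x2c-0x2f and rounds it up to 0x30 (48 bytes) to sidestep the
             * lockup noted above.
             */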
3025
3026        wr_ptr->mode = TX_DLE_PSI; 
3027        wr_ptr->prq_wr_ptr_data = 0;
3028  
3029        /* end is not to be used for the DLE q */  
3030        if (++wr_ptr == iadev->tx_dle_q.end)  
3031                wr_ptr = iadev->tx_dle_q.start;  
3032        
3033        /* Build trailer dle */
3034        wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3035        wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) | 
3036          buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3037
3038        wr_ptr->bytes = sizeof(struct cpcs_trailer);
3039        wr_ptr->mode = DMA_INT_ENABLE; 
3040        wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
3041        
3042        /* end is not to be used for the DLE q */
3043        if (++wr_ptr == iadev->tx_dle_q.end)  
3044                wr_ptr = iadev->tx_dle_q.start;
3045
3046        iadev->tx_dle_q.write = wr_ptr;  
3047        ATM_DESC(skb) = vcc->vci;
3048        skb_queue_tail(&iadev->tx_dma_q, skb);
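            /* The skb is tagged with its VCI and parked on tx_dma_q,
             * presumably so the tx DLE completion path can unmap and free
             * the buffer once the DMA has finished.
             */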
3049
3050        atomic_inc(&vcc->stats->tx);
3051        iadev->tx_pkt_cnt++;
3052        /* Increment transaction counter */  
3053        writel(2, iadev->dma+IPHASE5575_TX_COUNTER);  
3054        
3055#if 0        
3056        /* add flow control logic */ 
3057        if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3058          if (iavcc->vc_desc_cnt > 10) {
3059             vcc->tx_quota =  vcc->tx_quota * 3 / 4;
3060            printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3061              iavcc->flow_inc = -1;
3062              iavcc->saved_tx_quota = vcc->tx_quota;
3063           } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3064             // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3065             printk("Tx2:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); 
3066              iavcc->flow_inc = 0;
3067           }
3068        }
3069#endif
3070        IF_TX(printk("ia send done\n");)  
3071        return 0;  
3072}  
3073
3074static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3075{
3076        IADEV *iadev; 
3077        unsigned long flags;
3078
3079        iadev = INPH_IA_DEV(vcc->dev);
3080        if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3081        {
3082            if (!skb)
3083                printk(KERN_CRIT "null skb in ia_send\n");
3084            else dev_kfree_skb_any(skb);
3085            return -EINVAL;
3086        }                         
3087        spin_lock_irqsave(&iadev->tx_lock, flags); 
3088        if (!test_bit(ATM_VF_READY,&vcc->flags)){ 
3089            dev_kfree_skb_any(skb);
3090            spin_unlock_irqrestore(&iadev->tx_lock, flags);
3091            return -EINVAL; 
3092        }
3093        ATM_SKB(skb)->vcc = vcc;
3094 
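            /* Preserve FIFO ordering: if packets are already backlogged,
             * append behind them; otherwise try to transmit immediately and
             * fall back to the backlog when ia_pkt_tx() reports that no
             * descriptor is free.
             */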
3095        if (skb_peek(&iadev->tx_backlog)) {
3096           skb_queue_tail(&iadev->tx_backlog, skb);
3097        }
3098        else {
3099           if (ia_pkt_tx (vcc, skb)) {
3100              skb_queue_tail(&iadev->tx_backlog, skb);
3101           }
3102        }
3103        spin_unlock_irqrestore(&iadev->tx_lock, flags);
3104        return 0;
3105
3106}
3107
3108static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3109{ 
3110  int   left = *pos, n;   
3111  char  *tmpPtr;
3112  IADEV *iadev = INPH_IA_DEV(dev);
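      /* *pos 0 yields the board description line, *pos 1 the counters;
       * any larger offset returns 0 to signal end of output.
       */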
3113  if(!left--) {
3114     if (iadev->phy_type == FE_25MBIT_PHY) {
3115       n = sprintf(page, "  Board Type         :  Iphase5525-1KVC-128K\n");
3116       return n;
3117     }
3118     if (iadev->phy_type == FE_DS3_PHY)
3119        n = sprintf(page, "  Board Type         :  Iphase-ATM-DS3");
3120     else if (iadev->phy_type == FE_E3_PHY)
3121        n = sprintf(page, "  Board Type         :  Iphase-ATM-E3");
3122     else if (iadev->phy_type == FE_UTP_OPTION)
3123         n = sprintf(page, "  Board Type         :  Iphase-ATM-UTP155"); 
3124     else
3125        n = sprintf(page, "  Board Type         :  Iphase-ATM-OC3");
3126     tmpPtr = page + n;
3127     if (iadev->pci_map_size == 0x40000)
3128        n += sprintf(tmpPtr, "-1KVC-");
3129     else
3130        n += sprintf(tmpPtr, "-4KVC-");  
3131     tmpPtr = page + n; 
3132     if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3133        n += sprintf(tmpPtr, "1M  \n");
3134     else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3135        n += sprintf(tmpPtr, "512K\n");
3136     else
3137       n += sprintf(tmpPtr, "128K\n");
3138     return n;
3139  }
3140  if (!left) {
3141     return  sprintf(page, "  Number of Tx Buffer:  %u\n"
3142                           "  Size of Tx Buffer  :  %u\n"
3143                           "  Number of Rx Buffer:  %u\n"
3144                           "  Size of Rx Buffer  :  %u\n"
3145                           "  Packets Receiverd  :  %u\n"
3146                           "  Packets Transmitted:  %u\n"
3147                           "  Cells Received     :  %u\n"
3148                           "  Cells Transmitted  :  %u\n"
3149                           "  Board Dropped Cells:  %u\n"
3150                           "  Board Dropped Pkts :  %u\n",
3151                           iadev->num_tx_desc,  iadev->tx_buf_sz,
3152                           iadev->num_rx_desc,  iadev->rx_buf_sz,
3153                           iadev->rx_pkt_cnt,   iadev->tx_pkt_cnt,
3154                           iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3155                           iadev->drop_rxcell, iadev->drop_rxpkt);                        
3156  }
3157  return 0;
3158}
3159  
3160static const struct atmdev_ops ops = {  
3161        .open           = ia_open,  
3162        .close          = ia_close,  
3163        .ioctl          = ia_ioctl,  
3164        .getsockopt     = ia_getsockopt,  
3165        .setsockopt     = ia_setsockopt,  
3166        .send           = ia_send,  
3167        .phy_put        = ia_phy_put,  
3168        .phy_get        = ia_phy_get,  
3169        .change_qos     = ia_change_qos,  
3170        .proc_read      = ia_proc_read,
3171        .owner          = THIS_MODULE,
3172};  
3173          
3174static int ia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3175{  
3176        struct atm_dev *dev;  
3177        IADEV *iadev;  
3178        int ret;
3179
3180        iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
3181        if (!iadev) {
3182                ret = -ENOMEM;
3183                goto err_out;
3184        }
3185
3186        iadev->pci = pdev;
3187
3188        IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3189                pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3190        if (pci_enable_device(pdev)) {
3191                ret = -ENODEV;
3192                goto err_out_free_iadev;
3193        }
3194        dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL);
3195        if (!dev) {
3196                ret = -ENOMEM;
3197                goto err_out_disable_dev;
3198        }
3199        dev->dev_data = iadev;
3200        IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
3201        IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
3202                iadev->LineRate);)
3203
3204        pci_set_drvdata(pdev, dev);
3205
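            /* Record the board in the module-global tables; ia_ioctl() uses
             * ia_dev[]/_ia_dev[] to look the adapter up by board index.
             */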
3206        ia_dev[iadev_count] = iadev;
3207        _ia_dev[iadev_count] = dev;
3208        iadev_count++;
3209        if (ia_init(dev) || ia_start(dev)) {  
3210                IF_INIT(printk("IA register failed!\n");)
3211                iadev_count--;
3212                ia_dev[iadev_count] = NULL;
3213                _ia_dev[iadev_count] = NULL;
3214                ret = -EINVAL;
3215                goto err_out_deregister_dev;
3216        }
3217        IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3218
3219        iadev->next_board = ia_boards;  
3220        ia_boards = dev;  
3221
3222        return 0;
3223
3224err_out_deregister_dev:
3225        atm_dev_deregister(dev);  
3226err_out_disable_dev:
3227        pci_disable_device(pdev);
3228err_out_free_iadev:
3229        kfree(iadev);
3230err_out:
3231        return ret;
3232}
3233
3234static void ia_remove_one(struct pci_dev *pdev)
3235{
3236        struct atm_dev *dev = pci_get_drvdata(pdev);
3237        IADEV *iadev = INPH_IA_DEV(dev);
3238
3239        /* Disable phy interrupts */
3240        ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
3241                                   SUNI_RSOP_CIE);
3242        udelay(1);
3243
3244        if (dev->phy && dev->phy->stop)
3245                dev->phy->stop(dev);
3246
3247        /* De-register device */  
3248        free_irq(iadev->irq, dev);
3249        iadev_count--;
3250        ia_dev[iadev_count] = NULL;
3251        _ia_dev[iadev_count] = NULL;
3252        IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
3253        atm_dev_deregister(dev);
3254
3255        iounmap(iadev->base);  
3256        pci_disable_device(pdev);
3257
3258        ia_free_rx(iadev);
3259        ia_free_tx(iadev);
3260
3261        kfree(iadev);
3262}
3263
3264static struct pci_device_id ia_pci_tbl[] = {
3265        { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3266        { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3267        { 0,}
3268};
3269MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3270
3271static struct pci_driver ia_driver = {
3272        .name =         DEV_LABEL,
3273        .id_table =     ia_pci_tbl,
3274        .probe =        ia_init_one,
3275        .remove =       ia_remove_one,
3276};
3277
3278static int __init ia_module_init(void)
3279{
3280        int ret;
3281
3282        ret = pci_register_driver(&ia_driver);
3283        if (ret >= 0) {
3284                ia_timer.expires = jiffies + 3*HZ;
3285                add_timer(&ia_timer); 
3286        } else
3287                printk(KERN_ERR DEV_LABEL ": driver registration failed\n");  
3288        return ret;
3289}
3290
3291static void __exit ia_module_exit(void)
3292{
3293        pci_unregister_driver(&ia_driver);
3294
3295        del_timer(&ia_timer);
3296}
3297
3298module_init(ia_module_init);
3299module_exit(ia_module_exit);
3300