linux/drivers/atm/he.c
   1/*
   2
   3  he.c
   4
   5  ForeRunnerHE ATM Adapter driver for ATM on Linux
   6  Copyright (C) 1999-2001  Naval Research Laboratory
   7
   8  This library is free software; you can redistribute it and/or
   9  modify it under the terms of the GNU Lesser General Public
  10  License as published by the Free Software Foundation; either
  11  version 2.1 of the License, or (at your option) any later version.
  12
  13  This library is distributed in the hope that it will be useful,
  14  but WITHOUT ANY WARRANTY; without even the implied warranty of
  15  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  16  Lesser General Public License for more details.
  17
  18  You should have received a copy of the GNU Lesser General Public
  19  License along with this library; if not, write to the Free Software
  20  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  21
  22*/
  23
  24/*
  25
  26  he.c
  27
  28  ForeRunnerHE ATM Adapter driver for ATM on Linux
  29  Copyright (C) 1999-2001  Naval Research Laboratory
  30
  31  Permission to use, copy, modify and distribute this software and its
  32  documentation is hereby granted, provided that both the copyright
  33  notice and this permission notice appear in all copies of the software,
  34  derivative works or modified versions, and any portions thereof, and
  35  that both notices appear in supporting documentation.
  36
  37  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  38  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  39  RESULTING FROM THE USE OF THIS SOFTWARE.
  40
  41  This driver was written using the "Programmer's Reference Manual for
  42  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
  43
  44  AUTHORS:
  45        chas williams <chas@cmf.nrl.navy.mil>
  46        eric kinzie <ekinzie@cmf.nrl.navy.mil>
  47
  48  NOTES:
  49        4096 supported 'connections'
  50        group 0 is used for all traffic
  51        interrupt queue 0 is used for all interrupts
  52        aal0 support (based on work from ulrich.u.muller@nokia.com)
  53
  54 */
  55
  56#include <linux/module.h>
  57#include <linux/kernel.h>
  58#include <linux/skbuff.h>
  59#include <linux/pci.h>
  60#include <linux/errno.h>
  61#include <linux/types.h>
  62#include <linux/string.h>
  63#include <linux/delay.h>
  64#include <linux/init.h>
  65#include <linux/mm.h>
  66#include <linux/sched.h>
  67#include <linux/timer.h>
  68#include <linux/interrupt.h>
  69#include <linux/dma-mapping.h>
  70#include <linux/bitmap.h>
  71#include <linux/slab.h>
  72#include <asm/io.h>
  73#include <asm/byteorder.h>
  74#include <asm/uaccess.h>
  75
  76#include <linux/atmdev.h>
  77#include <linux/atm.h>
  78#include <linux/sonet.h>
  79
  80#undef USE_SCATTERGATHER
  81#undef USE_CHECKSUM_HW                  /* still confused about this */
  82/* #undef HE_DEBUG */
  83
  84#include "he.h"
  85#include "suni.h"
  86#include <linux/atm_he.h>
  87
  88#define hprintk(fmt,args...)    printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
  89
  90#ifdef HE_DEBUG
  91#define HPRINTK(fmt,args...)    printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
  92#else /* !HE_DEBUG */
  93#define HPRINTK(fmt,args...)    do { } while (0)
  94#endif /* HE_DEBUG */
  95
  96/* declarations */
  97
  98static int he_open(struct atm_vcc *vcc);
  99static void he_close(struct atm_vcc *vcc);
 100static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
 101static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
 102static irqreturn_t he_irq_handler(int irq, void *dev_id);
 103static void he_tasklet(unsigned long data);
 104static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
 105static int he_start(struct atm_dev *dev);
 106static void he_stop(struct he_dev *dev);
 107static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
 108static unsigned char he_phy_get(struct atm_dev *, unsigned long);
 109
 110static u8 read_prom_byte(struct he_dev *he_dev, int addr);
 111
 112/* globals */
 113
 114static struct he_dev *he_devs;
 115static bool disable64;
 116static short nvpibits = -1;
 117static short nvcibits = -1;
 118static short rx_skb_reserve = 16;
 119static bool irq_coalesce = 1;
 120static bool sdh = 0;
 121
 122/* Read from EEPROM = 0000 0011b */
 123static unsigned int readtab[] = {
 124        CS_HIGH | CLK_HIGH,
 125        CS_LOW | CLK_LOW,
 126        CLK_HIGH,               /* 0 */
 127        CLK_LOW,
 128        CLK_HIGH,               /* 0 */
 129        CLK_LOW,
 130        CLK_HIGH,               /* 0 */
 131        CLK_LOW,
 132        CLK_HIGH,               /* 0 */
 133        CLK_LOW,
 134        CLK_HIGH,               /* 0 */
 135        CLK_LOW,
 136        CLK_HIGH,               /* 0 */
 137        CLK_LOW | SI_HIGH,
 138        CLK_HIGH | SI_HIGH,     /* 1 */
 139        CLK_LOW | SI_HIGH,
 140        CLK_HIGH | SI_HIGH      /* 1 */
 141};     
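     /* each CLK_LOW/CLK_HIGH pair above clocks one opcode bit out on SI
      * (SI_HIGH == 1), so the six entries marked 0 followed by the two
      * marked 1 shift out 0000 0011b -- the read command named above */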
 142 
 143/* Clock to read from/write to the EEPROM */
 144static unsigned int clocktab[] = {
 145        CLK_LOW,
 146        CLK_HIGH,
 147        CLK_LOW,
 148        CLK_HIGH,
 149        CLK_LOW,
 150        CLK_HIGH,
 151        CLK_LOW,
 152        CLK_HIGH,
 153        CLK_LOW,
 154        CLK_HIGH,
 155        CLK_LOW,
 156        CLK_HIGH,
 157        CLK_LOW,
 158        CLK_HIGH,
 159        CLK_LOW,
 160        CLK_HIGH,
 161        CLK_LOW
 162};     
 163
 164static struct atmdev_ops he_ops =
 165{
 166        .open =         he_open,
 167        .close =        he_close,       
 168        .ioctl =        he_ioctl,       
 169        .send =         he_send,
 170        .phy_put =      he_phy_put,
 171        .phy_get =      he_phy_get,
 172        .proc_read =    he_proc_read,
 173        .owner =        THIS_MODULE
 174};
 175
 176#define he_writel(dev, val, reg)        do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
 177#define he_readl(dev, reg)              readl((dev)->membase + (reg))
 178
 179/* section 2.12 connection memory access */
 180
 181static __inline__ void
 182he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
 183                                                                unsigned flags)
 184{
 185        he_writel(he_dev, val, CON_DAT);
 186        (void) he_readl(he_dev, CON_DAT);               /* flush posted writes */
 187        he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
 188        while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
 189}
 190
 191#define he_writel_rcm(dev, val, reg)                            \
 192                        he_writel_internal(dev, val, reg, CON_CTL_RCM)
 193
 194#define he_writel_tcm(dev, val, reg)                            \
 195                        he_writel_internal(dev, val, reg, CON_CTL_TCM)
 196
 197#define he_writel_mbox(dev, val, reg)                           \
 198                        he_writel_internal(dev, val, reg, CON_CTL_MBOX)
 199
 200static unsigned
 201he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
 202{
 203        he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
 204        while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
 205        return he_readl(he_dev, CON_DAT);
 206}
 207
 208#define he_readl_rcm(dev, reg) \
 209                        he_readl_internal(dev, reg, CON_CTL_RCM)
 210
 211#define he_readl_tcm(dev, reg) \
 212                        he_readl_internal(dev, reg, CON_CTL_TCM)
 213
 214#define he_readl_mbox(dev, reg) \
 215                        he_readl_internal(dev, reg, CON_CTL_MBOX)
 216
 217
 218/* figure 2.2 connection id */
 219
 220#define he_mkcid(dev, vpi, vci)         (((vpi << (dev)->vcibits) | vci) & 0x1fff)
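     /* for example, with vcibits == 10, vpi 1 / vci 32 map to
      * cid == ((1 << 10) | 32) & 0x1fff == 0x420 */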
 221
 222/* 2.5.1 per connection transmit state registers */
 223
 224#define he_writel_tsr0(dev, val, cid) \
 225                he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
 226#define he_readl_tsr0(dev, cid) \
 227                he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
 228
 229#define he_writel_tsr1(dev, val, cid) \
 230                he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
 231
 232#define he_writel_tsr2(dev, val, cid) \
 233                he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
 234
 235#define he_writel_tsr3(dev, val, cid) \
 236                he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
 237
 238#define he_writel_tsr4(dev, val, cid) \
 239                he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
 240
 241        /* from page 2-20
 242         *
 243         * NOTE While the transmit connection is active, bits 23 through 0
 244         *      of this register must not be written by the host.  Byte
 245         *      enables should be used during normal operation when writing
 246         *      the most significant byte.
 247         */
 248
 249#define he_writel_tsr4_upper(dev, val, cid) \
 250                he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
 251                                                        CON_CTL_TCM \
 252                                                        | CON_BYTE_DISABLE_2 \
 253                                                        | CON_BYTE_DISABLE_1 \
 254                                                        | CON_BYTE_DISABLE_0)
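     /* the three CON_BYTE_DISABLE_* flags act as the byte enables called
      * for in the note above: bytes 0-2 of CON_DAT are masked so only the
      * most significant byte of TSR4 is actually written */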
 255
 256#define he_readl_tsr4(dev, cid) \
 257                he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
 258
 259#define he_writel_tsr5(dev, val, cid) \
 260                he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
 261
 262#define he_writel_tsr6(dev, val, cid) \
 263                he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
 264
 265#define he_writel_tsr7(dev, val, cid) \
 266                he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
 267
 268
 269#define he_writel_tsr8(dev, val, cid) \
 270                he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
 271
 272#define he_writel_tsr9(dev, val, cid) \
 273                he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
 274
 275#define he_writel_tsr10(dev, val, cid) \
 276                he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
 277
 278#define he_writel_tsr11(dev, val, cid) \
 279                he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
 280
 281
 282#define he_writel_tsr12(dev, val, cid) \
 283                he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
 284
 285#define he_writel_tsr13(dev, val, cid) \
 286                he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
 287
 288
 289#define he_writel_tsr14(dev, val, cid) \
 290                he_writel_tcm(dev, val, CONFIG_TSRD | cid)
 291
 292#define he_writel_tsr14_upper(dev, val, cid) \
 293                he_writel_internal(dev, val, CONFIG_TSRD | cid, \
 294                                                        CON_CTL_TCM \
 295                                                        | CON_BYTE_DISABLE_2 \
 296                                                        | CON_BYTE_DISABLE_1 \
 297                                                        | CON_BYTE_DISABLE_0)
 298
 299/* 2.7.1 per connection receive state registers */
 300
 301#define he_writel_rsr0(dev, val, cid) \
 302                he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
 303#define he_readl_rsr0(dev, cid) \
 304                he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
 305
 306#define he_writel_rsr1(dev, val, cid) \
 307                he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
 308
 309#define he_writel_rsr2(dev, val, cid) \
 310                he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
 311
 312#define he_writel_rsr3(dev, val, cid) \
 313                he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
 314
 315#define he_writel_rsr4(dev, val, cid) \
 316                he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
 317
 318#define he_writel_rsr5(dev, val, cid) \
 319                he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
 320
 321#define he_writel_rsr6(dev, val, cid) \
 322                he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
 323
 324#define he_writel_rsr7(dev, val, cid) \
 325                he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
 326
 327static __inline__ struct atm_vcc*
 328__find_vcc(struct he_dev *he_dev, unsigned cid)
 329{
 330        struct hlist_head *head;
 331        struct atm_vcc *vcc;
 332        struct sock *s;
 333        short vpi;
 334        int vci;
 335
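             /* split the cid back into its vpi/vci parts (the inverse of
              * he_mkcid() above) and look the vcc up in the global hash */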
 336        vpi = cid >> he_dev->vcibits;
 337        vci = cid & ((1 << he_dev->vcibits) - 1);
 338        head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
 339
 340        sk_for_each(s, head) {
 341                vcc = atm_sk(s);
 342                if (vcc->dev == he_dev->atm_dev &&
 343                    vcc->vci == vci && vcc->vpi == vpi &&
 344                    vcc->qos.rxtp.traffic_class != ATM_NONE) {
 345                                return vcc;
 346                }
 347        }
 348        return NULL;
 349}
 350
 351static int he_init_one(struct pci_dev *pci_dev,
 352                       const struct pci_device_id *pci_ent)
 353{
 354        struct atm_dev *atm_dev = NULL;
 355        struct he_dev *he_dev = NULL;
 356        int err = 0;
 357
 358        printk(KERN_INFO "ATM he driver\n");
 359
 360        if (pci_enable_device(pci_dev))
 361                return -EIO;
 362        if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) {
 363                printk(KERN_WARNING "he: no suitable dma available\n");
 364                err = -EIO;
 365                goto init_one_failure;
 366        }
 367
 368        atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL);
 369        if (!atm_dev) {
 370                err = -ENODEV;
 371                goto init_one_failure;
 372        }
 373        pci_set_drvdata(pci_dev, atm_dev);
 374
 375        he_dev = kzalloc(sizeof(struct he_dev),
 376                                                        GFP_KERNEL);
 377        if (!he_dev) {
 378                err = -ENOMEM;
 379                goto init_one_failure;
 380        }
 381        he_dev->pci_dev = pci_dev;
 382        he_dev->atm_dev = atm_dev;
 383        he_dev->atm_dev->dev_data = he_dev;
 384        atm_dev->dev_data = he_dev;
 385        he_dev->number = atm_dev->number;
 386        tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
 387        spin_lock_init(&he_dev->global_lock);
 388
 389        if (he_start(atm_dev)) {
 390                he_stop(he_dev);
 391                err = -ENODEV;
 392                goto init_one_failure;
 393        }
 394        he_dev->next = NULL;
 395        if (he_devs)
 396                he_dev->next = he_devs;
 397        he_devs = he_dev;
 398        return 0;
 399
 400init_one_failure:
 401        if (atm_dev)
 402                atm_dev_deregister(atm_dev);
 403        kfree(he_dev);
 404        pci_disable_device(pci_dev);
 405        return err;
 406}
 407
 408static void he_remove_one(struct pci_dev *pci_dev)
 409{
 410        struct atm_dev *atm_dev;
 411        struct he_dev *he_dev;
 412
 413        atm_dev = pci_get_drvdata(pci_dev);
 414        he_dev = HE_DEV(atm_dev);
 415
 416        /* need to remove from he_devs */
 417
 418        he_stop(he_dev);
 419        atm_dev_deregister(atm_dev);
 420        kfree(he_dev);
 421
 422        pci_disable_device(pci_dev);
 423}
 424
 425
 426static unsigned
 427rate_to_atmf(unsigned rate)             /* cps to atm forum format */
 428{
 429#define NONZERO (1 << 14)
 430
 431        unsigned exp = 0;
 432
 433        if (rate == 0)
 434                return 0;
 435
 436        rate <<= 9;
 437        while (rate > 0x3ff) {
 438                ++exp;
 439                rate >>= 1;
 440        }
 441
 442        return (NONZERO | (exp << 9) | (rate & 0x1ff));
 443}
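     /* worked example, assuming ATM_OC3_PCR == 353207 cells/sec:
      * 353207 << 9 == 180841984, which shifts down to 689 after 18 halvings,
      * so exp == 18 and the stored mantissa is 689 & 0x1ff == 0xb1;
      * rate_to_atmf(353207) == NONZERO | (18 << 9) | 0xb1 == 0x64b1 */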
 444
 445static void he_init_rx_lbfp0(struct he_dev *he_dev)
 446{
 447        unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
 448        unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
 449        unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
 450        unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;
 451        
 452        lbufd_index = 0;
 453        lbm_offset = he_readl(he_dev, RCMLBM_BA);
 454
 455        he_writel(he_dev, lbufd_index, RLBF0_H);
 456
 457        for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
 458                lbufd_index += 2;
 459                lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
 460
 461                he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
 462                he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
 463
 464                if (++lbuf_count == lbufs_per_row) {
 465                        lbuf_count = 0;
 466                        row_offset += he_dev->bytes_per_row;
 467                }
 468                lbm_offset += 4;
 469        }
 470                
 471        he_writel(he_dev, lbufd_index - 2, RLBF0_T);
 472        he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
 473}
 474
 475static void he_init_rx_lbfp1(struct he_dev *he_dev)
 476{
 477        unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
 478        unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
 479        unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
 480        unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;
 481        
 482        lbufd_index = 1;
 483        lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
 484
 485        he_writel(he_dev, lbufd_index, RLBF1_H);
 486
 487        for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
 488                lbufd_index += 2;
 489                lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
 490
 491                he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
 492                he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
 493
 494                if (++lbuf_count == lbufs_per_row) {
 495                        lbuf_count = 0;
 496                        row_offset += he_dev->bytes_per_row;
 497                }
 498                lbm_offset += 4;
 499        }
 500                
 501        he_writel(he_dev, lbufd_index - 2, RLBF1_T);
 502        he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
 503}
 504
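     /* the two rx free pools above interleave their buffer descriptors in
      * RCM local buffer memory: pool 0 takes the even lbufd indices and
      * pool 1 the odd ones (each descriptor is two words -- buffer address
      * and next index), which is why the loops step the index by 2 and the
      * offset by 4.  the tx pool below places its descriptors after both
      * rx pools, stepping its index by 1 and its offset by 2 */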
 505static void he_init_tx_lbfp(struct he_dev *he_dev)
 506{
 507        unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
 508        unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
 509        unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
 510        unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;
 511        
 512        lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
 513        lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
 514
 515        he_writel(he_dev, lbufd_index, TLBF_H);
 516
 517        for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
 518                lbufd_index += 1;
 519                lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
 520
 521                he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
 522                he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
 523
 524                if (++lbuf_count == lbufs_per_row) {
 525                        lbuf_count = 0;
 526                        row_offset += he_dev->bytes_per_row;
 527                }
 528                lbm_offset += 2;
 529        }
 530                
 531        he_writel(he_dev, lbufd_index - 1, TLBF_T);
 532}
 533
 534static int he_init_tpdrq(struct he_dev *he_dev)
 535{
 536        he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
 537                CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
 538        if (he_dev->tpdrq_base == NULL) {
 539                hprintk("failed to alloc tpdrq\n");
 540                return -ENOMEM;
 541        }
 542        memset(he_dev->tpdrq_base, 0,
 543                                CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));
 544
 545        he_dev->tpdrq_tail = he_dev->tpdrq_base;
 546        he_dev->tpdrq_head = he_dev->tpdrq_base;
 547
 548        he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
 549        he_writel(he_dev, 0, TPDRQ_T);  
 550        he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);
 551
 552        return 0;
 553}
 554
 555static void he_init_cs_block(struct he_dev *he_dev)
 556{
 557        unsigned clock, rate, delta;
 558        int reg;
 559
 560        /* 5.1.7 cs block initialization */
 561
 562        for (reg = 0; reg < 0x20; ++reg)
 563                he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);
 564
 565        /* rate grid timer reload values */
 566
 567        clock = he_is622(he_dev) ? 66667000 : 50000000;
 568        rate = he_dev->atm_dev->link_rate;
 569        delta = rate / 16 / 2;
 570
 571        for (reg = 0; reg < 0x10; ++reg) {
 572                /* 2.4 internal transmit function
 573                 *
 574                 * we initialize the first row in the rate grid.
 575                 * values are period (in clock cycles) of timer
 576                 */
 577                unsigned period = clock / rate;
 578
 579                he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
 580                rate -= delta;
 581        }
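             /* e.g. on a 155 Mbit card (50 MHz clock, link rate ATM_OC3_PCR
              * == 353207 cps) the sixteen reload values run from
              * 50000000/353207 == 141 up to 266 clock cycles, so the first
              * rate grid row spans the full link rate down to a bit over
              * half of it */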
 582
 583        if (he_is622(he_dev)) {
 584                /* table 5.2 (4 cells per lbuf) */
 585                he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
 586                he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
 587                he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
 588                he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
 589                he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);
 590
 591                /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
 592                he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
 593                he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
 594                he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
 595                he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
 596                he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
 597                he_writel_mbox(he_dev, 0x14585, CS_RTFWR);
 598
 599                he_writel_mbox(he_dev, 0x4680, CS_RTATR);
 600
 601                /* table 5.8 */
 602                he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
 603                he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
 604                he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
 605                he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
 606                he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
 607                he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);
 608
 609                /* table 5.9 */
 610                he_writel_mbox(he_dev, 0x5, CS_OTPPER);
 611                he_writel_mbox(he_dev, 0x14, CS_OTWPER);
 612        } else {
 613                /* table 5.1 (4 cells per lbuf) */
 614                he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
 615                he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
 616                he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
 617                he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
 618                he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);
 619
 620                /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
 621                he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
 622                he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
 623                he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
 624                he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
 625                he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
 626                he_writel_mbox(he_dev, 0xf424, CS_RTFWR);
 627
 628                he_writel_mbox(he_dev, 0x4680, CS_RTATR);
 629
 630                /* table 5.8 */
 631                he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
 632                he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
 633                he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
 634                he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
 635                he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
 636                he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);
 637
 638                /* table 5.9 */
 639                he_writel_mbox(he_dev, 0x6, CS_OTPPER);
 640                he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
 641        }
 642
 643        he_writel_mbox(he_dev, 0x8, CS_OTTLIM);
 644
 645        for (reg = 0; reg < 0x8; ++reg)
 646                he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
 647
 648}
 649
 650static int he_init_cs_block_rcm(struct he_dev *he_dev)
 651{
 652        unsigned (*rategrid)[16][16];
 653        unsigned rate, delta;
 654        int i, j, reg;
 655
 656        unsigned rate_atmf, exp, man;
 657        unsigned long long rate_cps;
 658        int mult, buf, buf_limit = 4;
 659
 660        rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
 661        if (!rategrid)
 662                return -ENOMEM;
 663
 664        /* initialize rate grid group table */
 665
 666        for (reg = 0x0; reg < 0xff; ++reg)
 667                he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
 668
 669        /* initialize rate controller groups */
 670
 671        for (reg = 0x100; reg < 0x1ff; ++reg)
 672                he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
 673        
 674        /* initialize tNrm lookup table */
 675
 676        /* the manual makes reference to a routine in a sample driver
 677           for proper configuration; fortunately, we only need this
  678           in order to support abr connections */
 679        
 680        /* initialize rate to group table */
 681
 682        rate = he_dev->atm_dev->link_rate;
 683        delta = rate / 32;
 684
 685        /*
 686         * 2.4 transmit internal functions
 687         * 
 688         * we construct a copy of the rate grid used by the scheduler
 689         * in order to construct the rate to group table below
 690         */
 691
 692        for (j = 0; j < 16; j++) {
 693                (*rategrid)[0][j] = rate;
 694                rate -= delta;
 695        }
 696
 697        for (i = 1; i < 16; i++)
 698                for (j = 0; j < 16; j++)
 699                        if (i > 14)
 700                                (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
 701                        else
 702                                (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
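             /* rows 1-14 each halve the row above and row 15 quarters row 14,
              * so the grid spans from the full link rate in row 0 down to
              * roughly 1/65536 of it in row 15 */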
 703
 704        /*
 705         * 2.4 transmit internal function
 706         *
 707         * this table maps the upper 5 bits of exponent and mantissa
 708         * of the atm forum representation of the rate into an index
 709         * on rate grid  
 710         */
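             /* e.g. rate_atmf 0x24b gives exp == 18 and man == 0x0b << 4 == 176,
              * so rate_cps == (1 << 18) * (176 + 512) >> 9 == 352256 cells/sec,
              * just under ATM_OC3_PCR */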
 711
 712        rate_atmf = 0;
 713        while (rate_atmf < 0x400) {
 714                man = (rate_atmf & 0x1f) << 4;
 715                exp = rate_atmf >> 5;
 716
 717                /* 
 718                        instead of '/ 512', use '>> 9' to prevent a call
 719                        to divdu3 on x86 platforms
 720                */
 721                rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
 722
 723                if (rate_cps < 10)
 724                        rate_cps = 10;  /* 2.2.1 minimum payload rate is 10 cps */
 725
 726                for (i = 255; i > 0; i--)
 727                        if ((*rategrid)[i/16][i%16] >= rate_cps)
 728                                break;   /* pick nearest rate instead? */
 729
 730                /*
 731                 * each table entry is 16 bits: (rate grid index (8 bits)
 732                 * and a buffer limit (8 bits)
 733                 * there are two table entries in each 32-bit register
 734                 */
 735
 736#ifdef notdef
 737                buf = rate_cps * he_dev->tx_numbuffs /
 738                                (he_dev->atm_dev->link_rate * 2);
 739#else
 740                /* this is pretty, but avoids _divdu3 and is mostly correct */
 741                mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
 742                if (rate_cps > (272 * mult))
 743                        buf = 4;
 744                else if (rate_cps > (204 * mult))
 745                        buf = 3;
 746                else if (rate_cps > (136 * mult))
 747                        buf = 2;
 748                else if (rate_cps > (68 * mult))
 749                        buf = 1;
 750                else
 751                        buf = 0;
 752#endif
 753                if (buf > buf_limit)
 754                        buf = buf_limit;
 755                reg = (reg << 16) | ((i << 8) | buf);
 756
 757#define RTGTBL_OFFSET 0x400
 758          
 759                if (rate_atmf & 0x1)
 760                        he_writel_rcm(he_dev, reg,
 761                                CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));
 762
 763                ++rate_atmf;
 764        }
 765
 766        kfree(rategrid);
 767        return 0;
 768}
 769
 770static int he_init_group(struct he_dev *he_dev, int group)
 771{
 772        struct he_buff *heb, *next;
 773        dma_addr_t mapping;
 774        int i;
 775
 776        he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
 777        he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
 778        he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
 779        he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
 780                  G0_RBPS_BS + (group * 32));
 781
 782        /* bitmap table */
 783        he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE)
 784                                     * sizeof(unsigned long), GFP_KERNEL);
 785        if (!he_dev->rbpl_table) {
 786                hprintk("unable to allocate rbpl bitmap table\n");
 787                return -ENOMEM;
 788        }
 789        bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);
 790
 791        /* rbpl_virt 64-bit pointers */
 792        he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE
 793                                    * sizeof(struct he_buff *), GFP_KERNEL);
 794        if (!he_dev->rbpl_virt) {
 795                hprintk("unable to allocate rbpl virt table\n");
 796                goto out_free_rbpl_table;
 797        }
 798
 799        /* large buffer pool */
 800        he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
 801                                            CONFIG_RBPL_BUFSIZE, 64, 0);
 802        if (he_dev->rbpl_pool == NULL) {
 803                hprintk("unable to create rbpl pool\n");
 804                goto out_free_rbpl_virt;
 805        }
 806
 807        he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
 808                CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
 809        if (he_dev->rbpl_base == NULL) {
 810                hprintk("failed to alloc rbpl_base\n");
 811                goto out_destroy_rbpl_pool;
 812        }
 813        memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
 814
 815        INIT_LIST_HEAD(&he_dev->rbpl_outstanding);
 816
 817        for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
 818
 819                heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping);
 820                if (!heb)
 821                        goto out_free_rbpl;
 822                heb->mapping = mapping;
 823                list_add(&heb->entry, &he_dev->rbpl_outstanding);
 824
 825                set_bit(i, he_dev->rbpl_table);
 826                he_dev->rbpl_virt[i] = heb;
 827                he_dev->rbpl_hint = i + 1;
 828                he_dev->rbpl_base[i].idx =  i << RBP_IDX_OFFSET;
 829                he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
 830        }
 831        he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
 832
 833        he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
 834        he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
 835                                                G0_RBPL_T + (group * 32));
 836        he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
 837                                                G0_RBPL_BS + (group * 32));
 838        he_writel(he_dev,
 839                        RBP_THRESH(CONFIG_RBPL_THRESH) |
 840                        RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
 841                        RBP_INT_ENB,
 842                                                G0_RBPL_QI + (group * 32));
 843
 844        /* rx buffer ready queue */
 845
 846        he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
 847                CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
 848        if (he_dev->rbrq_base == NULL) {
 849                hprintk("failed to allocate rbrq\n");
 850                goto out_free_rbpl;
 851        }
 852        memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));
 853
 854        he_dev->rbrq_head = he_dev->rbrq_base;
 855        he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
 856        he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
 857        he_writel(he_dev,
 858                RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
 859                                                G0_RBRQ_Q + (group * 16));
 860        if (irq_coalesce) {
 861                hprintk("coalescing interrupts\n");
 862                he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
 863                                                G0_RBRQ_I + (group * 16));
 864        } else
 865                he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
 866                                                G0_RBRQ_I + (group * 16));
 867
 868        /* tx buffer ready queue */
 869
 870        he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
 871                CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
 872        if (he_dev->tbrq_base == NULL) {
 873                hprintk("failed to allocate tbrq\n");
 874                goto out_free_rbpq_base;
 875        }
 876        memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));
 877
 878        he_dev->tbrq_head = he_dev->tbrq_base;
 879
 880        he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
 881        he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
 882        he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
 883        he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));
 884
 885        return 0;
 886
 887out_free_rbpq_base:
 888        pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
 889                        sizeof(struct he_rbrq), he_dev->rbrq_base,
 890                        he_dev->rbrq_phys);
 891out_free_rbpl:
 892        list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
 893                pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
 894
 895        pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
 896                        sizeof(struct he_rbp), he_dev->rbpl_base,
 897                        he_dev->rbpl_phys);
 898out_destroy_rbpl_pool:
 899        pci_pool_destroy(he_dev->rbpl_pool);
 900out_free_rbpl_virt:
 901        kfree(he_dev->rbpl_virt);
 902out_free_rbpl_table:
 903        kfree(he_dev->rbpl_table);
 904
 905        return -ENOMEM;
 906}
 907
 908static int he_init_irq(struct he_dev *he_dev)
 909{
 910        int i;
 911
 912        /* 2.9.3.5  tail offset for each interrupt queue is located after the
 913                    end of the interrupt queue */
 914
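             /* hence the CONFIG_IRQ_SIZE + 1 entries allocated below: the
              * extra slot past the end of the queue holds the tail offset */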
 915        he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
 916                        (CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
 917        if (he_dev->irq_base == NULL) {
 918                hprintk("failed to allocate irq\n");
 919                return -ENOMEM;
 920        }
 921        he_dev->irq_tailoffset = (unsigned *)
 922                                        &he_dev->irq_base[CONFIG_IRQ_SIZE];
 923        *he_dev->irq_tailoffset = 0;
 924        he_dev->irq_head = he_dev->irq_base;
 925        he_dev->irq_tail = he_dev->irq_base;
 926
 927        for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
 928                he_dev->irq_base[i].isw = ITYPE_INVALID;
 929
 930        he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
 931        he_writel(he_dev,
 932                IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
 933                                                                IRQ0_HEAD);
 934        he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
 935        he_writel(he_dev, 0x0, IRQ0_DATA);
 936
 937        he_writel(he_dev, 0x0, IRQ1_BASE);
 938        he_writel(he_dev, 0x0, IRQ1_HEAD);
 939        he_writel(he_dev, 0x0, IRQ1_CNTL);
 940        he_writel(he_dev, 0x0, IRQ1_DATA);
 941
 942        he_writel(he_dev, 0x0, IRQ2_BASE);
 943        he_writel(he_dev, 0x0, IRQ2_HEAD);
 944        he_writel(he_dev, 0x0, IRQ2_CNTL);
 945        he_writel(he_dev, 0x0, IRQ2_DATA);
 946
 947        he_writel(he_dev, 0x0, IRQ3_BASE);
 948        he_writel(he_dev, 0x0, IRQ3_HEAD);
 949        he_writel(he_dev, 0x0, IRQ3_CNTL);
 950        he_writel(he_dev, 0x0, IRQ3_DATA);
 951
 952        /* 2.9.3.2 interrupt queue mapping registers */
 953
 954        he_writel(he_dev, 0x0, GRP_10_MAP);
 955        he_writel(he_dev, 0x0, GRP_32_MAP);
 956        he_writel(he_dev, 0x0, GRP_54_MAP);
 957        he_writel(he_dev, 0x0, GRP_76_MAP);
 958
 959        if (request_irq(he_dev->pci_dev->irq,
 960                        he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
 961                hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
 962                return -EINVAL;
 963        }   
 964
 965        he_dev->irq = he_dev->pci_dev->irq;
 966
 967        return 0;
 968}
 969
 970static int he_start(struct atm_dev *dev)
 971{
 972        struct he_dev *he_dev;
 973        struct pci_dev *pci_dev;
 974        unsigned long membase;
 975
 976        u16 command;
 977        u32 gen_cntl_0, host_cntl, lb_swap;
 978        u8 cache_size, timer;
 979        
 980        unsigned err;
 981        unsigned int status, reg;
 982        int i, group;
 983
 984        he_dev = HE_DEV(dev);
 985        pci_dev = he_dev->pci_dev;
 986
 987        membase = pci_resource_start(pci_dev, 0);
 988        HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);
 989
 990        /*
 991         * pci bus controller initialization 
 992         */
 993
 994        /* 4.3 pci bus controller-specific initialization */
 995        if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
 996                hprintk("can't read GEN_CNTL_0\n");
 997                return -EINVAL;
 998        }
 999        gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
1000        if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
1001                hprintk("can't write GEN_CNTL_0.\n");
1002                return -EINVAL;
1003        }
1004
1005        if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
1006                hprintk("can't read PCI_COMMAND.\n");
1007                return -EINVAL;
1008        }
1009
1010        command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
1011        if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
1012                hprintk("can't enable memory.\n");
1013                return -EINVAL;
1014        }
1015
1016        if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
1017                hprintk("can't read cache line size?\n");
1018                return -EINVAL;
1019        }
1020
1021        if (cache_size < 16) {
1022                cache_size = 16;
1023                if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
1024                        hprintk("can't set cache line size to %d\n", cache_size);
1025        }
1026
1027        if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
1028                hprintk("can't read latency timer?\n");
1029                return -EINVAL;
1030        }
1031
1032        /* from table 3.9
1033         *
1034         * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
1035         * 
1036         * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
1037         * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
1038         *
1039         */ 
1040#define LAT_TIMER 209
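             /* 209 == 1 + 16 + 1536/8, the worst case from the formula above
              * (622 Mbit read burst of 1536 bytes over a 64-bit, i.e. 8-byte,
              * bus -- the 192 clock cycles noted above) */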
1041        if (timer < LAT_TIMER) {
1042                HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
1043                timer = LAT_TIMER;
1044                if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
1045                        hprintk("can't set latency timer to %d\n", timer);
1046        }
1047
1048        if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
1049                hprintk("can't set up page mapping\n");
1050                return -EINVAL;
1051        }
1052
1053        /* 4.4 card reset */
1054        he_writel(he_dev, 0x0, RESET_CNTL);
1055        he_writel(he_dev, 0xff, RESET_CNTL);
1056
1057        msleep(16);     /* 16 ms */
1058        status = he_readl(he_dev, RESET_CNTL);
1059        if ((status & BOARD_RST_STATUS) == 0) {
1060                hprintk("reset failed\n");
1061                return -EINVAL;
1062        }
1063
1064        /* 4.5 set bus width */
1065        host_cntl = he_readl(he_dev, HOST_CNTL);
1066        if (host_cntl & PCI_BUS_SIZE64)
1067                gen_cntl_0 |= ENBL_64;
1068        else
1069                gen_cntl_0 &= ~ENBL_64;
1070
1071        if (disable64 == 1) {
1072                hprintk("disabling 64-bit pci bus transfers\n");
1073                gen_cntl_0 &= ~ENBL_64;
1074        }
1075
1076        if (gen_cntl_0 & ENBL_64)
1077                hprintk("64-bit transfers enabled\n");
1078
1079        pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1080
1081        /* 4.7 read prom contents */
1082        for (i = 0; i < PROD_ID_LEN; ++i)
1083                he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
1084
1085        he_dev->media = read_prom_byte(he_dev, MEDIA);
1086
1087        for (i = 0; i < 6; ++i)
1088                dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
1089
1090        hprintk("%s%s, %pM\n", he_dev->prod_id,
1091                he_dev->media & 0x40 ? "SM" : "MM", dev->esi);
1092        he_dev->atm_dev->link_rate = he_is622(he_dev) ?
1093                                                ATM_OC12_PCR : ATM_OC3_PCR;
1094
 1095        /* 4.6 set host endianness */
1096        lb_swap = he_readl(he_dev, LB_SWAP);
1097        if (he_is622(he_dev))
1098                lb_swap &= ~XFER_SIZE;          /* 4 cells */
1099        else
1100                lb_swap |= XFER_SIZE;           /* 8 cells */
1101#ifdef __BIG_ENDIAN
1102        lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
1103#else
1104        lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
1105                        DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
1106#endif /* __BIG_ENDIAN */
1107        he_writel(he_dev, lb_swap, LB_SWAP);
1108
1109        /* 4.8 sdram controller initialization */
1110        he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
1111
1112        /* 4.9 initialize rnum value */
1113        lb_swap |= SWAP_RNUM_MAX(0xf);
1114        he_writel(he_dev, lb_swap, LB_SWAP);
1115
1116        /* 4.10 initialize the interrupt queues */
1117        if ((err = he_init_irq(he_dev)) != 0)
1118                return err;
1119
1120        /* 4.11 enable pci bus controller state machines */
1121        host_cntl |= (OUTFF_ENB | CMDFF_ENB |
1122                                QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
1123        he_writel(he_dev, host_cntl, HOST_CNTL);
1124
1125        gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
1126        pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1127
1128        /*
1129         * atm network controller initialization
1130         */
1131
1132        /* 5.1.1 generic configuration state */
1133
1134        /*
1135         *              local (cell) buffer memory map
1136         *                    
1137         *             HE155                          HE622
1138         *                                                      
1139         *        0 ____________1023 bytes  0 _______________________2047 bytes
1140         *         |            |            |                   |   |
1141         *         |  utility   |            |        rx0        |   |
1142         *        5|____________|         255|___________________| u |
1143         *        6|            |         256|                   | t |
1144         *         |            |            |                   | i |
1145         *         |    rx0     |     row    |        tx         | l |
1146         *         |            |            |                   | i |
1147         *         |            |         767|___________________| t |
1148         *      517|____________|         768|                   | y |
1149         * row  518|            |            |        rx1        |   |
1150         *         |            |        1023|___________________|___|
1151         *         |            |
1152         *         |    tx      |
1153         *         |            |
1154         *         |            |
1155         *     1535|____________|
1156         *     1536|            |
1157         *         |    rx1     |
1158         *     2047|____________|
1159         *
1160         */
1161
1162        /* total 4096 connections */
1163        he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
1164        he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
1165
1166        if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
1167                hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
1168                return -ENODEV;
1169        }
1170
1171        if (nvpibits != -1) {
1172                he_dev->vpibits = nvpibits;
1173                he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
1174        }
1175
1176        if (nvcibits != -1) {
1177                he_dev->vcibits = nvcibits;
1178                he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
1179        }
1180
1181
1182        if (he_is622(he_dev)) {
1183                he_dev->cells_per_row = 40;
1184                he_dev->bytes_per_row = 2048;
1185                he_dev->r0_numrows = 256;
1186                he_dev->tx_numrows = 512;
1187                he_dev->r1_numrows = 256;
1188                he_dev->r0_startrow = 0;
1189                he_dev->tx_startrow = 256;
1190                he_dev->r1_startrow = 768;
1191        } else {
1192                he_dev->cells_per_row = 20;
1193                he_dev->bytes_per_row = 1024;
1194                he_dev->r0_numrows = 512;
1195                he_dev->tx_numrows = 1018;
1196                he_dev->r1_numrows = 512;
1197                he_dev->r0_startrow = 6;
1198                he_dev->tx_startrow = 518;
1199                he_dev->r1_startrow = 1536;
1200        }
1201
1202        he_dev->cells_per_lbuf = 4;
1203        he_dev->buffer_limit = 4;
1204        he_dev->r0_numbuffs = he_dev->r0_numrows *
1205                                he_dev->cells_per_row / he_dev->cells_per_lbuf;
1206        if (he_dev->r0_numbuffs > 2560)
1207                he_dev->r0_numbuffs = 2560;
1208
1209        he_dev->r1_numbuffs = he_dev->r1_numrows *
1210                                he_dev->cells_per_row / he_dev->cells_per_lbuf;
1211        if (he_dev->r1_numbuffs > 2560)
1212                he_dev->r1_numbuffs = 2560;
1213
1214        he_dev->tx_numbuffs = he_dev->tx_numrows *
1215                                he_dev->cells_per_row / he_dev->cells_per_lbuf;
1216        if (he_dev->tx_numbuffs > 5120)
1217                he_dev->tx_numbuffs = 5120;
1218
1219        /* 5.1.2 configure hardware dependent registers */
1220
1221        he_writel(he_dev, 
1222                SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1223                RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1224                (he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1225                (he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1226                                                                LBARB);
1227
1228        he_writel(he_dev, BANK_ON |
1229                (he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
1230                                                                SDRAMCON);
1231
1232        he_writel(he_dev,
1233                (he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1234                                                RM_RW_WAIT(1), RCMCONFIG);
1235        he_writel(he_dev,
1236                (he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1237                                                TM_RW_WAIT(1), TCMCONFIG);
1238
1239        he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
1240
1241        he_writel(he_dev, 
1242                (he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1243                (he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1244                RX_VALVP(he_dev->vpibits) |
1245                RX_VALVC(he_dev->vcibits),                       RC_CONFIG);
1246
1247        he_writel(he_dev, DRF_THRESH(0x20) |
1248                (he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1249                TX_VCI_MASK(he_dev->vcibits) |
1250                LBFREE_CNT(he_dev->tx_numbuffs),                TX_CONFIG);
1251
1252        he_writel(he_dev, 0x0, TXAAL5_PROTO);
1253
1254        he_writel(he_dev, PHY_INT_ENB |
1255                (he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1256                                                                RH_CONFIG);
1257
1258        /* 5.1.3 initialize connection memory */
1259
1260        for (i = 0; i < TCM_MEM_SIZE; ++i)
1261                he_writel_tcm(he_dev, 0, i);
1262
1263        for (i = 0; i < RCM_MEM_SIZE; ++i)
1264                he_writel_rcm(he_dev, 0, i);
1265
1266        /*
1267         *      transmit connection memory map
1268         *
1269         *                  tx memory
1270         *          0x0 ___________________
1271         *             |                   |
1272         *             |                   |
1273         *             |       TSRa        |
1274         *             |                   |
1275         *             |                   |
1276         *       0x8000|___________________|
1277         *             |                   |
1278         *             |       TSRb        |
1279         *       0xc000|___________________|
1280         *             |                   |
1281         *             |       TSRc        |
1282         *       0xe000|___________________|
1283         *             |       TSRd        |
1284         *       0xf000|___________________|
1285         *             |       tmABR       |
1286         *      0x10000|___________________|
1287         *             |                   |
1288         *             |       tmTPD       |
1289         *             |___________________|
1290         *             |                   |
1291         *                      ....
1292         *      0x1ffff|___________________|
1293         *
1294         *
1295         */
1296
1297        he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
1298        he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
1299        he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
1300        he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
1301        he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
1302
1303
1304        /*
1305         *      receive connection memory map
1306         *
1307         *          0x0 ___________________
1308         *             |                   |
1309         *             |                   |
1310         *             |       RSRa        |
1311         *             |                   |
1312         *             |                   |
1313         *       0x8000|___________________|
1314         *             |                   |
1315         *             |             rx0/1 |
1316         *             |       LBM         |   link lists of local
1317         *             |             tx    |   buffer memory 
1318         *             |                   |
1319         *       0xd000|___________________|
1320         *             |                   |
1321         *             |      rmABR        |
1322         *       0xe000|___________________|
1323         *             |                   |
1324         *             |       RSRb        |
1325         *             |___________________|
1326         *             |                   |
1327         *                      ....
1328         *       0xffff|___________________|
1329         */
1330
1331        he_writel(he_dev, 0x08000, RCMLBM_BA);
1332        he_writel(he_dev, 0x0e000, RCMRSRB_BA);
1333        he_writel(he_dev, 0x0d800, RCMABR_BA);
1334
1335        /* 5.1.4 initialize local buffer free pools linked lists */
1336
1337        he_init_rx_lbfp0(he_dev);
1338        he_init_rx_lbfp1(he_dev);
1339
1340        he_writel(he_dev, 0x0, RLBC_H);
1341        he_writel(he_dev, 0x0, RLBC_T);
1342        he_writel(he_dev, 0x0, RLBC_H2);
1343
1344        he_writel(he_dev, 512, RXTHRSH);        /* 10% of r0+r1 buffers */
1345        he_writel(he_dev, 256, LITHRSH);        /* 5% of r0+r1 buffers */
1346
1347        he_init_tx_lbfp(he_dev);
1348
1349        he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
1350
1351        /* 5.1.5 initialize intermediate receive queues */
1352
1353        if (he_is622(he_dev)) {
1354                he_writel(he_dev, 0x000f, G0_INMQ_S);
1355                he_writel(he_dev, 0x200f, G0_INMQ_L);
1356
1357                he_writel(he_dev, 0x001f, G1_INMQ_S);
1358                he_writel(he_dev, 0x201f, G1_INMQ_L);
1359
1360                he_writel(he_dev, 0x002f, G2_INMQ_S);
1361                he_writel(he_dev, 0x202f, G2_INMQ_L);
1362
1363                he_writel(he_dev, 0x003f, G3_INMQ_S);
1364                he_writel(he_dev, 0x203f, G3_INMQ_L);
1365
1366                he_writel(he_dev, 0x004f, G4_INMQ_S);
1367                he_writel(he_dev, 0x204f, G4_INMQ_L);
1368
1369                he_writel(he_dev, 0x005f, G5_INMQ_S);
1370                he_writel(he_dev, 0x205f, G5_INMQ_L);
1371
1372                he_writel(he_dev, 0x006f, G6_INMQ_S);
1373                he_writel(he_dev, 0x206f, G6_INMQ_L);
1374
1375                he_writel(he_dev, 0x007f, G7_INMQ_S);
1376                he_writel(he_dev, 0x207f, G7_INMQ_L);
1377        } else {
1378                he_writel(he_dev, 0x0000, G0_INMQ_S);
1379                he_writel(he_dev, 0x0008, G0_INMQ_L);
1380
1381                he_writel(he_dev, 0x0001, G1_INMQ_S);
1382                he_writel(he_dev, 0x0009, G1_INMQ_L);
1383
1384                he_writel(he_dev, 0x0002, G2_INMQ_S);
1385                he_writel(he_dev, 0x000a, G2_INMQ_L);
1386
1387                he_writel(he_dev, 0x0003, G3_INMQ_S);
1388                he_writel(he_dev, 0x000b, G3_INMQ_L);
1389
1390                he_writel(he_dev, 0x0004, G4_INMQ_S);
1391                he_writel(he_dev, 0x000c, G4_INMQ_L);
1392
1393                he_writel(he_dev, 0x0005, G5_INMQ_S);
1394                he_writel(he_dev, 0x000d, G5_INMQ_L);
1395
1396                he_writel(he_dev, 0x0006, G6_INMQ_S);
1397                he_writel(he_dev, 0x000e, G6_INMQ_L);
1398
1399                he_writel(he_dev, 0x0007, G7_INMQ_S);
1400                he_writel(he_dev, 0x000f, G7_INMQ_L);
1401        }
1402
1403        /* 5.1.6 application tunable parameters */
1404
1405        he_writel(he_dev, 0x0, MCC);
1406        he_writel(he_dev, 0x0, OEC);
1407        he_writel(he_dev, 0x0, DCC);
1408        he_writel(he_dev, 0x0, CEC);
1409        
1410        /* 5.1.7 cs block initialization */
1411
1412        he_init_cs_block(he_dev);
1413
1414        /* 5.1.8 cs block connection memory initialization */
1415        
1416        if (he_init_cs_block_rcm(he_dev) < 0)
1417                return -ENOMEM;
1418
1419        /* 5.1.10 initialize host structures */
1420
1421        he_init_tpdrq(he_dev);
1422
1423        he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
1424                sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
1425        if (he_dev->tpd_pool == NULL) {
1426                hprintk("unable to create tpd pci_pool\n");
1427                return -ENOMEM;         
1428        }
1429
1430        INIT_LIST_HEAD(&he_dev->outstanding_tpds);
1431
1432        if (he_init_group(he_dev, 0) != 0)
1433                return -ENOMEM;
1434
1435        for (group = 1; group < HE_NUM_GROUPS; ++group) {
1436                he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
1437                he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
1438                he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
1439                he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1440                                                G0_RBPS_BS + (group * 32));
1441
1442                he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
1443                he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
1444                he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1445                                                G0_RBPL_QI + (group * 32));
1446                he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
1447
1448                he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
1449                he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
1450                he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1451                                                G0_RBRQ_Q + (group * 16));
1452                he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
1453
1454                he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
1455                he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
1456                he_writel(he_dev, TBRQ_THRESH(0x1),
1457                                                G0_TBRQ_THRESH + (group * 16));
1458                he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
1459        }
1460
1461        /* host status page */
1462
1463        he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
1464                                sizeof(struct he_hsp), &he_dev->hsp_phys);
1465        if (he_dev->hsp == NULL) {
1466                hprintk("failed to allocate host status page\n");
1467                return -ENOMEM;
1468        }
1469        memset(he_dev->hsp, 0, sizeof(struct he_hsp));
1470        he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
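            /*
             * The adapter keeps the per-group rbrq and tbrq tail offsets up
             * to date in the host status page, so he_service_rbrq() and
             * he_service_tbrq() read them from he_dev->hsp instead of
             * polling device registers.
             */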
1471
1472        /* initialize framer */
1473
1474#ifdef CONFIG_ATM_HE_USE_SUNI
1475        if (he_isMM(he_dev))
1476                suni_init(he_dev->atm_dev);
1477        if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
1478                he_dev->atm_dev->phy->start(he_dev->atm_dev);
1479#endif /* CONFIG_ATM_HE_USE_SUNI */
1480
1481        if (sdh) {
1482                /* this really should be in suni.c but for now... */
1483                int val;
1484
1485                val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
1486                val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
1487                he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
1488                he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
1489        }
1490
1491        /* 5.1.12 enable transmit and receive */
1492
1493        reg = he_readl_mbox(he_dev, CS_ERCTL0);
1494        reg |= TX_ENABLE|ER_ENABLE;
1495        he_writel_mbox(he_dev, reg, CS_ERCTL0);
1496
1497        reg = he_readl(he_dev, RC_CONFIG);
1498        reg |= RX_ENABLE;
1499        he_writel(he_dev, reg, RC_CONFIG);
1500
1501        for (i = 0; i < HE_NUM_CS_STPER; ++i) {
1502                he_dev->cs_stper[i].inuse = 0;
1503                he_dev->cs_stper[i].pcr = -1;
1504        }
1505        he_dev->total_bw = 0;
1506
1507
1508        /* atm linux initialization */
1509
1510        he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
1511        he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
1512
1513        he_dev->irq_peak = 0;
1514        he_dev->rbrq_peak = 0;
1515        he_dev->rbpl_peak = 0;
1516        he_dev->tbrq_peak = 0;
1517
1518        HPRINTK("hell bent for leather!\n");
1519
1520        return 0;
1521}
1522
1523static void
1524he_stop(struct he_dev *he_dev)
1525{
1526        struct he_buff *heb, *next;
1527        struct pci_dev *pci_dev;
1528        u32 gen_cntl_0, reg;
1529        u16 command;
1530
1531        pci_dev = he_dev->pci_dev;
1532
1533        /* disable interrupts */
1534
1535        if (he_dev->membase) {
1536                pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
1537                gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
1538                pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1539
1540                tasklet_disable(&he_dev->tasklet);
1541
1542                /* disable recv and transmit */
1543
1544                reg = he_readl_mbox(he_dev, CS_ERCTL0);
1545                reg &= ~(TX_ENABLE|ER_ENABLE);
1546                he_writel_mbox(he_dev, reg, CS_ERCTL0);
1547
1548                reg = he_readl(he_dev, RC_CONFIG);
1549                reg &= ~(RX_ENABLE);
1550                he_writel(he_dev, reg, RC_CONFIG);
1551        }
1552
1553#ifdef CONFIG_ATM_HE_USE_SUNI
1554        if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
1555                he_dev->atm_dev->phy->stop(he_dev->atm_dev);
1556#endif /* CONFIG_ATM_HE_USE_SUNI */
1557
1558        if (he_dev->irq)
1559                free_irq(he_dev->irq, he_dev);
1560
1561        if (he_dev->irq_base)
1562                pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
1563                        * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
1564
1565        if (he_dev->hsp)
1566                pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
1567                                                he_dev->hsp, he_dev->hsp_phys);
1568
1569        if (he_dev->rbpl_base) {
1570                list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
1571                        pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1572
1573                pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
1574                        * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
1575        }
1576
1577        kfree(he_dev->rbpl_virt);
1578        kfree(he_dev->rbpl_table);
1579
1580        if (he_dev->rbpl_pool)
1581                pci_pool_destroy(he_dev->rbpl_pool);
1582
1583        if (he_dev->rbrq_base)
1584                pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
1585                                                        he_dev->rbrq_base, he_dev->rbrq_phys);
1586
1587        if (he_dev->tbrq_base)
1588                pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1589                                                        he_dev->tbrq_base, he_dev->tbrq_phys);
1590
1591        if (he_dev->tpdrq_base)
1592                pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
1593                                                        he_dev->tpdrq_base, he_dev->tpdrq_phys);
1594
1595        if (he_dev->tpd_pool)
1596                pci_pool_destroy(he_dev->tpd_pool);
1597
1598        if (he_dev->pci_dev) {
1599                pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
1600                command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1601                pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
1602        }
1603        
1604        if (he_dev->membase)
1605                iounmap(he_dev->membase);
1606}
1607
1608static struct he_tpd *
1609__alloc_tpd(struct he_dev *he_dev)
1610{
1611        struct he_tpd *tpd;
1612        dma_addr_t mapping;
1613
1614        tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping);
1615        if (tpd == NULL)
1616                return NULL;
1617                        
1618        tpd->status = TPD_ADDR(mapping);
1619        tpd->reserved = 0; 
1620        tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1621        tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1622        tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1623
1624        return tpd;
1625}
1626
1627#define AAL5_LEN(buf,len)                                               \
1628                        ((((unsigned char *)(buf))[(len)-6] << 8) |     \
1629                                (((unsigned char *)(buf))[(len)-5]))
1630
1631/* 2.10.1.2 receive
1632 *
1633 * aal5 packets can optionally return the tcp checksum in the lower
1634 * 16 bits of the crc (RSR0_TCP_CKSUM)
1635 */
1636
1637#define TCP_CKSUM(buf,len)                                              \
1638                        ((((unsigned char *)(buf))[(len)-2] << 8) |     \
1639                                (((unsigned char *)(buf))[(len-1)]))
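
    /*
     * Both macros above decode the 8-byte AAL5 CPCS-PDU trailer at the end
     * of the reassembled buffer:
     *
     *   buf[len-8] UU, buf[len-7] CPI, buf[len-6..len-5] length (big endian),
     *   buf[len-4..len-1] CRC-32
     *
     * AAL5_LEN() returns the length field; TCP_CKSUM() returns the low 16
     * bits of the CRC word, which the adapter overwrites with the tcp
     * checksum when RSR0_TCP_CKSUM is set (see USE_CHECKSUM_HW in he_open()
     * and below).
     */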
1640
1641static int
1642he_service_rbrq(struct he_dev *he_dev, int group)
1643{
1644        struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1645                                ((unsigned long)he_dev->rbrq_base |
1646                                        he_dev->hsp->group[group].rbrq_tail);
1647        unsigned cid, lastcid = -1;
1648        struct sk_buff *skb;
1649        struct atm_vcc *vcc = NULL;
1650        struct he_vcc *he_vcc;
1651        struct he_buff *heb, *next;
1652        int i;
1653        int pdus_assembled = 0;
1654        int updated = 0;
1655
1656        read_lock(&vcc_sklist_lock);
1657        while (he_dev->rbrq_head != rbrq_tail) {
1658                ++updated;
1659
1660                HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1661                        he_dev->rbrq_head, group,
1662                        RBRQ_ADDR(he_dev->rbrq_head),
1663                        RBRQ_BUFLEN(he_dev->rbrq_head),
1664                        RBRQ_CID(he_dev->rbrq_head),
1665                        RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1666                        RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1667                        RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1668                        RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1669                        RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1670                        RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1671
1672                i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
1673                heb = he_dev->rbpl_virt[i];
1674
1675                cid = RBRQ_CID(he_dev->rbrq_head);
1676                if (cid != lastcid)
1677                        vcc = __find_vcc(he_dev, cid);
1678                lastcid = cid;
1679
1680                if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
1681                        hprintk("vcc/he_vcc == NULL  (cid 0x%x)\n", cid);
1682                        if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1683                                clear_bit(i, he_dev->rbpl_table);
1684                                list_del(&heb->entry);
1685                                pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1686                        }
1687                                        
1688                        goto next_rbrq_entry;
1689                }
1690
1691                if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1692                        hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
1693                        atomic_inc(&vcc->stats->rx_drop);
1694                        goto return_host_buffers;
1695                }
1696
1697                heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1698                clear_bit(i, he_dev->rbpl_table);
1699                list_move_tail(&heb->entry, &he_vcc->buffers);
1700                he_vcc->pdu_len += heb->len;
1701
1702                if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1703                        lastcid = -1;
1704                        HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
1705                        wake_up(&he_vcc->rx_waitq);
1706                        goto return_host_buffers;
1707                }
1708
1709                if (!RBRQ_END_PDU(he_dev->rbrq_head))
1710                        goto next_rbrq_entry;
1711
1712                if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1713                                || RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1714                        HPRINTK("%s%s (%d.%d)\n",
1715                                RBRQ_CRC_ERR(he_dev->rbrq_head)
1716                                                        ? "CRC_ERR " : "",
1717                                RBRQ_LEN_ERR(he_dev->rbrq_head)
1718                                                        ? "LEN_ERR" : "",
1719                                                        vcc->vpi, vcc->vci);
1720                        atomic_inc(&vcc->stats->rx_err);
1721                        goto return_host_buffers;
1722                }
1723
1724                skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1725                                                        GFP_ATOMIC);
1726                if (!skb) {
1727                        HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1728                        goto return_host_buffers;
1729                }
1730
1731                if (rx_skb_reserve > 0)
1732                        skb_reserve(skb, rx_skb_reserve);
1733
1734                __net_timestamp(skb);
1735
1736                list_for_each_entry(heb, &he_vcc->buffers, entry)
1737                        memcpy(skb_put(skb, heb->len), &heb->data, heb->len);
1738
1739                switch (vcc->qos.aal) {
1740                        case ATM_AAL0:
1741                                /* 2.10.1.5 raw cell receive */
1742                                skb->len = ATM_AAL0_SDU;
1743                                skb_set_tail_pointer(skb, skb->len);
1744                                break;
1745                        case ATM_AAL5:
1746                                /* 2.10.1.2 aal5 receive */
1747
1748                                skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1749                                skb_set_tail_pointer(skb, skb->len);
1750#ifdef USE_CHECKSUM_HW
1751                                if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1752                                        skb->ip_summed = CHECKSUM_COMPLETE;
1753                                        skb->csum = TCP_CKSUM(skb->data,
1754                                                        he_vcc->pdu_len);
1755                                }
1756#endif
1757                                break;
1758                }
1759
1760#ifdef should_never_happen
1761                if (skb->len > vcc->qos.rxtp.max_sdu)
1762                        hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1763#endif
1764
1765#ifdef notdef
1766                ATM_SKB(skb)->vcc = vcc;
1767#endif
1768                spin_unlock(&he_dev->global_lock);
1769                vcc->push(vcc, skb);
1770                spin_lock(&he_dev->global_lock);
1771
1772                atomic_inc(&vcc->stats->rx);
1773
1774return_host_buffers:
1775                ++pdus_assembled;
1776
1777                list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
1778                        pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1779                INIT_LIST_HEAD(&he_vcc->buffers);
1780                he_vcc->pdu_len = 0;
1781
1782next_rbrq_entry:
1783                he_dev->rbrq_head = (struct he_rbrq *)
1784                                ((unsigned long) he_dev->rbrq_base |
1785                                        RBRQ_MASK(he_dev->rbrq_head + 1));
1786
1787        }
1788        read_unlock(&vcc_sklist_lock);
1789
1790        if (updated) {
1791                if (updated > he_dev->rbrq_peak)
1792                        he_dev->rbrq_peak = updated;
1793
1794                he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1795                                                G0_RBRQ_H + (group * 16));
1796        }
1797
1798        return pdus_assembled;
1799}
1800
1801static void
1802he_service_tbrq(struct he_dev *he_dev, int group)
1803{
1804        struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1805                                ((unsigned long)he_dev->tbrq_base |
1806                                        he_dev->hsp->group[group].tbrq_tail);
1807        struct he_tpd *tpd;
1808        int slot, updated = 0;
1809        struct he_tpd *__tpd;
1810
1811        /* 2.1.6 transmit buffer return queue */
1812
1813        while (he_dev->tbrq_head != tbrq_tail) {
1814                ++updated;
1815
1816                HPRINTK("tbrq%d 0x%x%s%s\n",
1817                        group,
1818                        TBRQ_TPD(he_dev->tbrq_head), 
1819                        TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1820                        TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1821                tpd = NULL;
1822                list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1823                        if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1824                                tpd = __tpd;
1825                                list_del(&__tpd->entry);
1826                                break;
1827                        }
1828                }
1829
1830                if (tpd == NULL) {
1831                        hprintk("unable to locate tpd for dma buffer %x\n",
1832                                                TBRQ_TPD(he_dev->tbrq_head));
1833                        goto next_tbrq_entry;
1834                }
1835
1836                if (TBRQ_EOS(he_dev->tbrq_head)) {
1837                        if (tpd->vcc) {
1838                                HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
1839                                        he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1840                                wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1841                        }
1842                        goto next_tbrq_entry;
1843                }
1844
1845                for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1846                        if (tpd->iovec[slot].addr)
1847                                pci_unmap_single(he_dev->pci_dev,
1848                                        tpd->iovec[slot].addr,
1849                                        tpd->iovec[slot].len & TPD_LEN_MASK,
1850                                                        PCI_DMA_TODEVICE);
1851                        if (tpd->iovec[slot].len & TPD_LST)
1852                                break;
1853                                
1854                }
1855
1856                if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1857                        if (tpd->vcc && tpd->vcc->pop)
1858                                tpd->vcc->pop(tpd->vcc, tpd->skb);
1859                        else
1860                                dev_kfree_skb_any(tpd->skb);
1861                }
1862
1863next_tbrq_entry:
1864                if (tpd)
1865                        pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1866                he_dev->tbrq_head = (struct he_tbrq *)
1867                                ((unsigned long) he_dev->tbrq_base |
1868                                        TBRQ_MASK(he_dev->tbrq_head + 1));
1869        }
1870
1871        if (updated) {
1872                if (updated > he_dev->tbrq_peak)
1873                        he_dev->tbrq_peak = updated;
1874
1875                he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1876                                                G0_TBRQ_H + (group * 16));
1877        }
1878}
1879
1880static void
1881he_service_rbpl(struct he_dev *he_dev, int group)
1882{
1883        struct he_rbp *new_tail;
1884        struct he_rbp *rbpl_head;
1885        struct he_buff *heb;
1886        dma_addr_t mapping;
1887        int i;
1888        int moved = 0;
1889
1890        rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1891                                        RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
1892
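            /*
             * Replenish the group 0 large-buffer pool: for each free slot in
             * the RBPL ring, allocate a he_buff from the dma pool, remember
             * it in rbpl_virt[] under a free index from the rbpl_table
             * bitmap, and hand the adapter that index (shifted by
             * RBP_IDX_OFFSET) along with the dma address of the buffer's
             * data area, so he_service_rbrq() can map a completion back to
             * its he_buff.
             */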
1893        for (;;) {
1894                new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1895                                                RBPL_MASK(he_dev->rbpl_tail+1));
1896
1897                /* table 3.42 -- rbpl_tail should never be set to rbpl_head */
1898                if (new_tail == rbpl_head)
1899                        break;
1900
1901                i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
1902                if (i > (RBPL_TABLE_SIZE - 1)) {
1903                        i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
1904                        if (i > (RBPL_TABLE_SIZE - 1))
1905                                break;
1906                }
1907                he_dev->rbpl_hint = i + 1;
1908
1909                heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping);
1910                if (!heb)
1911                        break;
1912                heb->mapping = mapping;
1913                list_add(&heb->entry, &he_dev->rbpl_outstanding);
1914                he_dev->rbpl_virt[i] = heb;
1915                set_bit(i, he_dev->rbpl_table);
1916                new_tail->idx = i << RBP_IDX_OFFSET;
1917                new_tail->phys = mapping + offsetof(struct he_buff, data);
1918
1919                he_dev->rbpl_tail = new_tail;
1920                ++moved;
1921        } 
1922
1923        if (moved)
1924                he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
1925}
1926
1927static void
1928he_tasklet(unsigned long data)
1929{
1930        unsigned long flags;
1931        struct he_dev *he_dev = (struct he_dev *) data;
1932        int group, type;
1933        int updated = 0;
1934
1935        HPRINTK("tasklet (0x%lx)\n", data);
1936        spin_lock_irqsave(&he_dev->global_lock, flags);
1937
1938        while (he_dev->irq_head != he_dev->irq_tail) {
1939                ++updated;
1940
1941                type = ITYPE_TYPE(he_dev->irq_head->isw);
1942                group = ITYPE_GROUP(he_dev->irq_head->isw);
1943
1944                switch (type) {
1945                        case ITYPE_RBRQ_THRESH:
1946                                HPRINTK("rbrq%d threshold\n", group);
1947                                /* fall through */
1948                        case ITYPE_RBRQ_TIMER:
1949                                if (he_service_rbrq(he_dev, group))
1950                                        he_service_rbpl(he_dev, group);
1951                                break;
1952                        case ITYPE_TBRQ_THRESH:
1953                                HPRINTK("tbrq%d threshold\n", group);
1954                                /* fall through */
1955                        case ITYPE_TPD_COMPLETE:
1956                                he_service_tbrq(he_dev, group);
1957                                break;
1958                        case ITYPE_RBPL_THRESH:
1959                                he_service_rbpl(he_dev, group);
1960                                break;
1961                        case ITYPE_RBPS_THRESH:
1962                                /* shouldn't happen unless small buffers enabled */
1963                                break;
1964                        case ITYPE_PHY:
1965                                HPRINTK("phy interrupt\n");
1966#ifdef CONFIG_ATM_HE_USE_SUNI
1967                                spin_unlock_irqrestore(&he_dev->global_lock, flags);
1968                                if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
1969                                        he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
1970                                spin_lock_irqsave(&he_dev->global_lock, flags);
1971#endif
1972                                break;
1973                        case ITYPE_OTHER:
1974                                switch (type|group) {
1975                                        case ITYPE_PARITY:
1976                                                hprintk("parity error\n");
1977                                                break;
1978                                        case ITYPE_ABORT:
1979                                                hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
1980                                                break;
1981                                }
1982                                break;
1983                        case ITYPE_TYPE(ITYPE_INVALID):
1984                                /* see 8.1.1 -- check all queues */
1985
1986                                HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
1987
1988                                he_service_rbrq(he_dev, 0);
1989                                he_service_rbpl(he_dev, 0);
1990                                he_service_tbrq(he_dev, 0);
1991                                break;
1992                        default:
1993                                hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
1994                }
1995
1996                he_dev->irq_head->isw = ITYPE_INVALID;
1997
1998                he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
1999        }
2000
2001        if (updated) {
2002                if (updated > he_dev->irq_peak)
2003                        he_dev->irq_peak = updated;
2004
2005                he_writel(he_dev,
2006                        IRQ_SIZE(CONFIG_IRQ_SIZE) |
2007                        IRQ_THRESH(CONFIG_IRQ_THRESH) |
2008                        IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2009                (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2010        }
2011        spin_unlock_irqrestore(&he_dev->global_lock, flags);
2012}
2013
2014static irqreturn_t
2015he_irq_handler(int irq, void *dev_id)
2016{
2017        unsigned long flags;
2018        struct he_dev *he_dev = (struct he_dev *)dev_id;
2019        int handled = 0;
2020
2021        if (he_dev == NULL)
2022                return IRQ_NONE;
2023
2024        spin_lock_irqsave(&he_dev->global_lock, flags);
2025
2026        he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2027                                                (*he_dev->irq_tailoffset << 2));
2028
2029        if (he_dev->irq_tail == he_dev->irq_head) {
2030                HPRINTK("tailoffset not updated?\n");
2031                he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2032                        ((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2033                (void) he_readl(he_dev, INT_FIFO);      /* 8.1.2 controller errata */
2034        }
2035
2036#ifdef DEBUG
2037        if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2038                hprintk("spurious (or shared) interrupt?\n");
2039#endif
2040
2041        if (he_dev->irq_head != he_dev->irq_tail) {
2042                handled = 1;
2043                tasklet_schedule(&he_dev->tasklet);
2044                he_writel(he_dev, INT_CLEAR_A, INT_FIFO);       /* clear interrupt */
2045                (void) he_readl(he_dev, INT_FIFO);              /* flush posted writes */
2046        }
2047        spin_unlock_irqrestore(&he_dev->global_lock, flags);
2048        return IRQ_RETVAL(handled);
2049
2050}
2051
2052static __inline__ void
2053__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2054{
2055        struct he_tpdrq *new_tail;
2056
2057        HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2058                                        tpd, cid, he_dev->tpdrq_tail);
2059
2060        /* new_tail = he_dev->tpdrq_tail; */
2061        new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2062                                        TPDRQ_MASK(he_dev->tpdrq_tail+1));
2063
2064        /*
2065         * check to see if we are about to set the tail == head
2066         * if true, update the head pointer from the adapter
2067         * to see if this is really the case (reading the queue
2068         * head for every enqueue would be unnecessarily slow)
2069         */
2070
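            /*
             * Queue pointers are kept as "base | MASK(offset)", so
             * TPDRQ_MASK(he_dev->tpdrq_tail + 1) is the next entry in the
             * ring, wrapping at the end; reaching the cached head therefore
             * only means the queue *might* be full until the head is
             * re-read from TPDRQ_B_H below.
             */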
2071        if (new_tail == he_dev->tpdrq_head) {
2072                he_dev->tpdrq_head = (struct he_tpdrq *)
2073                        (((unsigned long)he_dev->tpdrq_base) |
2074                                TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2075
2076                if (new_tail == he_dev->tpdrq_head) {
2077                        int slot;
2078
2079                        hprintk("tpdrq full (cid 0x%x)\n", cid);
2080                        /*
2081                         * FIXME
2082                         * push the tpd onto a transmit backlog queue and
2083                         * service that backlog after service_tbrq;
2084                         * for now, we just drop the pdu
2085                         */
2086                        for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2087                                if (tpd->iovec[slot].addr)
2088                                        pci_unmap_single(he_dev->pci_dev,
2089                                                tpd->iovec[slot].addr,
2090                                                tpd->iovec[slot].len & TPD_LEN_MASK,
2091                                                                PCI_DMA_TODEVICE);
2092                        }
2093                        if (tpd->skb) {
2094                                if (tpd->vcc->pop)
2095                                        tpd->vcc->pop(tpd->vcc, tpd->skb);
2096                                else
2097                                        dev_kfree_skb_any(tpd->skb);
2098                                atomic_inc(&tpd->vcc->stats->tx_err);
2099                        }
2100                        pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2101                        return;
2102                }
2103        }
2104
2105        /* 2.1.5 transmit packet descriptor ready queue */
2106        list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2107        he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2108        he_dev->tpdrq_tail->cid = cid;
2109        wmb();
2110
2111        he_dev->tpdrq_tail = new_tail;
2112
2113        he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2114        (void) he_readl(he_dev, TPDRQ_T);               /* flush posted writes */
2115}
2116
2117static int
2118he_open(struct atm_vcc *vcc)
2119{
2120        unsigned long flags;
2121        struct he_dev *he_dev = HE_DEV(vcc->dev);
2122        struct he_vcc *he_vcc;
2123        int err = 0;
2124        unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2125        short vpi = vcc->vpi;
2126        int vci = vcc->vci;
2127
2128        if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2129                return 0;
2130
2131        HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2132
2133        set_bit(ATM_VF_ADDR, &vcc->flags);
2134
2135        cid = he_mkcid(he_dev, vpi, vci);
2136
2137        he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2138        if (he_vcc == NULL) {
2139                hprintk("unable to allocate he_vcc during open\n");
2140                return -ENOMEM;
2141        }
2142
2143        INIT_LIST_HEAD(&he_vcc->buffers);
2144        he_vcc->pdu_len = 0;
2145        he_vcc->rc_index = -1;
2146
2147        init_waitqueue_head(&he_vcc->rx_waitq);
2148        init_waitqueue_head(&he_vcc->tx_waitq);
2149
2150        vcc->dev_data = he_vcc;
2151
2152        if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2153                int pcr_goal;
2154
2155                pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2156                if (pcr_goal == 0)
2157                        pcr_goal = he_dev->atm_dev->link_rate;
2158                if (pcr_goal < 0)       /* means round down, technically */
2159                        pcr_goal = -pcr_goal;
2160
2161                HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2162
2163                switch (vcc->qos.aal) {
2164                        case ATM_AAL5:
2165                                tsr0_aal = TSR0_AAL5;
2166                                tsr4 = TSR4_AAL5;
2167                                break;
2168                        case ATM_AAL0:
2169                                tsr0_aal = TSR0_AAL0_SDU;
2170                                tsr4 = TSR4_AAL0_SDU;
2171                                break;
2172                        default:
2173                                err = -EINVAL;
2174                                goto open_failed;
2175                }
2176
2177                spin_lock_irqsave(&he_dev->global_lock, flags);
2178                tsr0 = he_readl_tsr0(he_dev, cid);
2179                spin_unlock_irqrestore(&he_dev->global_lock, flags);
2180
2181                if (TSR0_CONN_STATE(tsr0) != 0) {
2182                        hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2183                        err = -EBUSY;
2184                        goto open_failed;
2185                }
2186
2187                switch (vcc->qos.txtp.traffic_class) {
2188                        case ATM_UBR:
2189                                /* 2.3.3.1 open connection ubr */
2190
2191                                tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2192                                        TSR0_USE_WMIN | TSR0_UPDATE_GER;
2193                                break;
2194
2195                        case ATM_CBR:
2196                                /* 2.3.3.2 open connection cbr */
2197
2198                                /* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2199                                if ((he_dev->total_bw + pcr_goal)
2200                                        > (he_dev->atm_dev->link_rate * 9 / 10))
2201                                {
2202                                        err = -EBUSY;
2203                                        goto open_failed;
2204                                }
2205
2206                                spin_lock_irqsave(&he_dev->global_lock, flags);                 /* also protects he_dev->cs_stper[] */
2207
2208                                /* find an unused cs_stper register */
2209                                for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2210                                        if (he_dev->cs_stper[reg].inuse == 0 || 
2211                                            he_dev->cs_stper[reg].pcr == pcr_goal)
2212                                                        break;
2213
2214                                if (reg == HE_NUM_CS_STPER) {
2215                                        err = -EBUSY;
2216                                        spin_unlock_irqrestore(&he_dev->global_lock, flags);
2217                                        goto open_failed;
2218                                }
2219
2220                                he_dev->total_bw += pcr_goal;
2221
2222                                he_vcc->rc_index = reg;
2223                                ++he_dev->cs_stper[reg].inuse;
2224                                he_dev->cs_stper[reg].pcr = pcr_goal;
2225
2226                                clock = he_is622(he_dev) ? 66667000 : 50000000;
2227                                period = clock / pcr_goal;
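                                    /*
                                     * e.g. on the 155 Mb/s board (clock
                                     * 50000000), a pcr_goal of 50000 cells/s
                                     * gives period = 1000, so CS_STPER0 + reg
                                     * below is loaded with rate_to_atmf(500).
                                     */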
2228                                
2229                                HPRINTK("rc_index = %d period = %d\n",
2230                                                                reg, period);
2231
2232                                he_writel_mbox(he_dev, rate_to_atmf(period/2),
2233                                                        CS_STPER0 + reg);
2234                                spin_unlock_irqrestore(&he_dev->global_lock, flags);
2235
2236                                tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2237                                                        TSR0_RC_INDEX(reg);
2238
2239                                break;
2240                        default:
2241                                err = -EINVAL;
2242                                goto open_failed;
2243                }
2244
2245                spin_lock_irqsave(&he_dev->global_lock, flags);
2246
2247                he_writel_tsr0(he_dev, tsr0, cid);
2248                he_writel_tsr4(he_dev, tsr4 | 1, cid);
2249                he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2250                                        TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2251                he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2252                he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2253
2254                he_writel_tsr3(he_dev, 0x0, cid);
2255                he_writel_tsr5(he_dev, 0x0, cid);
2256                he_writel_tsr6(he_dev, 0x0, cid);
2257                he_writel_tsr7(he_dev, 0x0, cid);
2258                he_writel_tsr8(he_dev, 0x0, cid);
2259                he_writel_tsr10(he_dev, 0x0, cid);
2260                he_writel_tsr11(he_dev, 0x0, cid);
2261                he_writel_tsr12(he_dev, 0x0, cid);
2262                he_writel_tsr13(he_dev, 0x0, cid);
2263                he_writel_tsr14(he_dev, 0x0, cid);
2264                (void) he_readl_tsr0(he_dev, cid);              /* flush posted writes */
2265                spin_unlock_irqrestore(&he_dev->global_lock, flags);
2266        }
2267
2268        if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2269                unsigned aal;
2270
2271                HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2272                                                &HE_VCC(vcc)->rx_waitq);
2273
2274                switch (vcc->qos.aal) {
2275                        case ATM_AAL5:
2276                                aal = RSR0_AAL5;
2277                                break;
2278                        case ATM_AAL0:
2279                                aal = RSR0_RAWCELL;
2280                                break;
2281                        default:
2282                                err = -EINVAL;
2283                                goto open_failed;
2284                }
2285
2286                spin_lock_irqsave(&he_dev->global_lock, flags);
2287
2288                rsr0 = he_readl_rsr0(he_dev, cid);
2289                if (rsr0 & RSR0_OPEN_CONN) {
2290                        spin_unlock_irqrestore(&he_dev->global_lock, flags);
2291
2292                        hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2293                        err = -EBUSY;
2294                        goto open_failed;
2295                }
2296
2297                rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
2298                rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
2299                rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ? 
2300                                (RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2301
2302#ifdef USE_CHECKSUM_HW
2303                if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2304                        rsr0 |= RSR0_TCP_CKSUM;
2305#endif
2306
2307                he_writel_rsr4(he_dev, rsr4, cid);
2308                he_writel_rsr1(he_dev, rsr1, cid);
2309                /* 5.1.11 last parameter initialized should be
2310                          the open/closed indication in rsr0 */
2311                he_writel_rsr0(he_dev,
2312                        rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2313                (void) he_readl_rsr0(he_dev, cid);              /* flush posted writes */
2314
2315                spin_unlock_irqrestore(&he_dev->global_lock, flags);
2316        }
2317
2318open_failed:
2319
2320        if (err) {
2321                kfree(he_vcc);
2322                clear_bit(ATM_VF_ADDR, &vcc->flags);
2323        }
2324        else
2325                set_bit(ATM_VF_READY, &vcc->flags);
2326
2327        return err;
2328}
2329
2330static void
2331he_close(struct atm_vcc *vcc)
2332{
2333        unsigned long flags;
2334        DECLARE_WAITQUEUE(wait, current);
2335        struct he_dev *he_dev = HE_DEV(vcc->dev);
2336        struct he_tpd *tpd;
2337        unsigned cid;
2338        struct he_vcc *he_vcc = HE_VCC(vcc);
2339#define MAX_RETRY 30
2340        int retry = 0, sleep = 1, tx_inuse;
2341
2342        HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2343
2344        clear_bit(ATM_VF_READY, &vcc->flags);
2345        cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2346
2347        if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2348                int timeout;
2349
2350                HPRINTK("close rx cid 0x%x\n", cid);
2351
2352                /* 2.7.2.2 close receive operation */
2353
2354                /* wait for previous close (if any) to finish */
2355
2356                spin_lock_irqsave(&he_dev->global_lock, flags);
2357                while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2358                        HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2359                        udelay(250);
2360                }
2361
2362                set_current_state(TASK_UNINTERRUPTIBLE);
2363                add_wait_queue(&he_vcc->rx_waitq, &wait);
2364
2365                he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2366                (void) he_readl_rsr0(he_dev, cid);              /* flush posted writes */
2367                he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2368                spin_unlock_irqrestore(&he_dev->global_lock, flags);
2369
2370                timeout = schedule_timeout(30*HZ);
2371
2372                remove_wait_queue(&he_vcc->rx_waitq, &wait);
2373                set_current_state(TASK_RUNNING);
2374
2375                if (timeout == 0)
2376                        hprintk("close rx timeout cid 0x%x\n", cid);
2377
2378                HPRINTK("close rx cid 0x%x complete\n", cid);
2379
2380        }
2381
2382        if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2383                volatile unsigned tsr4, tsr0;
2384                int timeout;
2385
2386                HPRINTK("close tx cid 0x%x\n", cid);
2387                
2388                /* 2.1.2
2389                 *
2390                 * ... the host must first stop queueing packets to the TPDRQ
2391                 * on the connection to be closed, then wait for all outstanding
2392                 * packets to be transmitted and their buffers returned to the
2393                 * TBRQ. When the last packet on the connection arrives in the
2394                 * TBRQ, the host issues the close command to the adapter.
2395                 */
2396
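                    /*
                     * Wait for outstanding tx buffers on this vcc to be
                     * returned (sk_wmem_alloc drops back to its baseline
                     * count of one), backing off exponentially from 1ms up
                     * to 250ms per attempt, for at most MAX_RETRY attempts.
                     */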
2397                while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
2398                       (retry < MAX_RETRY)) {
2399                        msleep(sleep);
2400                        if (sleep < 250)
2401                                sleep = sleep * 2;
2402
2403                        ++retry;
2404                }
2405
2406                if (tx_inuse > 1)
2407                        hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2408
2409                /* 2.3.1.1 generic close operations with flush */
2410
2411                spin_lock_irqsave(&he_dev->global_lock, flags);
2412                he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2413                                        /* also clears TSR4_SESSION_ENDED */
2414
2415                switch (vcc->qos.txtp.traffic_class) {
2416                        case ATM_UBR:
2417                                he_writel_tsr1(he_dev, 
2418                                        TSR1_MCR(rate_to_atmf(200000))
2419                                        | TSR1_PCR(0), cid);
2420                                break;
2421                        case ATM_CBR:
2422                                he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2423                                break;
2424                }
2425                (void) he_readl_tsr4(he_dev, cid);              /* flush posted writes */
2426
2427                tpd = __alloc_tpd(he_dev);
2428                if (tpd == NULL) {
2429                        hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2430                        goto close_tx_incomplete;
2431                }
2432                tpd->status |= TPD_EOS | TPD_INT;
2433                tpd->skb = NULL;
2434                tpd->vcc = vcc;
2435                wmb();
2436
2437                set_current_state(TASK_UNINTERRUPTIBLE);
2438                add_wait_queue(&he_vcc->tx_waitq, &wait);
2439                __enqueue_tpd(he_dev, tpd, cid);
2440                spin_unlock_irqrestore(&he_dev->global_lock, flags);
2441
2442                timeout = schedule_timeout(30*HZ);
2443
2444                remove_wait_queue(&he_vcc->tx_waitq, &wait);
2445                set_current_state(TASK_RUNNING);
2446
2447                spin_lock_irqsave(&he_dev->global_lock, flags);
2448
2449                if (timeout == 0) {
2450                        hprintk("close tx timeout cid 0x%x\n", cid);
2451                        goto close_tx_incomplete;
2452                }
2453
2454                while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2455                        HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2456                        udelay(250);
2457                }
2458
2459                while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2460                        HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2461                        udelay(250);
2462                }
2463
2464close_tx_incomplete:
2465
2466                if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2467                        int reg = he_vcc->rc_index;
2468
2469                        HPRINTK("cs_stper reg = %d\n", reg);
2470
2471                        if (he_dev->cs_stper[reg].inuse == 0)
2472                                hprintk("cs_stper[%d].inuse = 0!\n", reg);
2473                        else
2474                                --he_dev->cs_stper[reg].inuse;
2475
2476                        he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2477                }
2478                spin_unlock_irqrestore(&he_dev->global_lock, flags);
2479
2480                HPRINTK("close tx cid 0x%x complete\n", cid);
2481        }
2482
2483        kfree(he_vcc);
2484
2485        clear_bit(ATM_VF_ADDR, &vcc->flags);
2486}
2487
2488static int
2489he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2490{
2491        unsigned long flags;
2492        struct he_dev *he_dev = HE_DEV(vcc->dev);
2493        unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2494        struct he_tpd *tpd;
2495#ifdef USE_SCATTERGATHER
2496        int i, slot = 0;
2497#endif
2498
2499#define HE_TPD_BUFSIZE 0xffff
2500
2501        HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2502
2503        if ((skb->len > HE_TPD_BUFSIZE) ||
2504            ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2505                hprintk("buffer too large (or small) -- %d bytes\n", skb->len);
2506                if (vcc->pop)
2507                        vcc->pop(vcc, skb);
2508                else
2509                        dev_kfree_skb_any(skb);
2510                atomic_inc(&vcc->stats->tx_err);
2511                return -EINVAL;
2512        }
2513
2514#ifndef USE_SCATTERGATHER
2515        if (skb_shinfo(skb)->nr_frags) {
2516                hprintk("no scatter/gather support\n");
2517                if (vcc->pop)
2518                        vcc->pop(vcc, skb);
2519                else
2520                        dev_kfree_skb_any(skb);
2521                atomic_inc(&vcc->stats->tx_err);
2522                return -EINVAL;
2523        }
2524#endif
2525        spin_lock_irqsave(&he_dev->global_lock, flags);
2526
2527        tpd = __alloc_tpd(he_dev);
2528        if (tpd == NULL) {
2529                if (vcc->pop)
2530                        vcc->pop(vcc, skb);
2531                else
2532                        dev_kfree_skb_any(skb);
2533                atomic_inc(&vcc->stats->tx_err);
2534                spin_unlock_irqrestore(&he_dev->global_lock, flags);
2535                return -ENOMEM;
2536        }
2537
2538        if (vcc->qos.aal == ATM_AAL5)
2539                tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2540        else {
2541                char *pti_clp = (void *) (skb->data + 3);
2542                int clp, pti;
2543
2544                pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT; 
2545                clp = (*pti_clp & ATM_HDR_CLP);
2546                tpd->status |= TPD_CELLTYPE(pti);
2547                if (clp)
2548                        tpd->status |= TPD_CLP;
2549
2550                skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2551        }
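
            /*
             * For raw cells the caller hands us a 52-byte AAL0 SDU (the
             * 4-byte cell header without HEC plus 48 bytes of payload); the
             * pti/clp bits from header byte 3 were folded into the tpd
             * status above, and the skb_pull() leaves just the 48-byte
             * payload to be mapped for dma.
             */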
2552
2553#ifdef USE_SCATTERGATHER
2554        tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2555                                skb_headlen(skb), PCI_DMA_TODEVICE);
2556        tpd->iovec[slot].len = skb_headlen(skb);
2557        ++slot;
2558
2559        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2560                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2561
2562                if (slot == TPD_MAXIOV) {       /* queue tpd; start new tpd */
2563                        tpd->vcc = vcc;
2564                        tpd->skb = NULL;        /* not the last fragment
2565                                                   so don't ->push() yet */
2566                        wmb();
2567
2568                        __enqueue_tpd(he_dev, tpd, cid);
2569                        tpd = __alloc_tpd(he_dev);
2570                        if (tpd == NULL) {
2571                                if (vcc->pop)
2572                                        vcc->pop(vcc, skb);
2573                                else
2574                                        dev_kfree_skb_any(skb);
2575                                atomic_inc(&vcc->stats->tx_err);
2576                                spin_unlock_irqrestore(&he_dev->global_lock, flags);
2577                                return -ENOMEM;
2578                        }
2579                        tpd->status |= TPD_USERCELL;
2580                        slot = 0;
2581                }
2582
2583                tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2584                        (void *) page_address(frag->page) + frag->page_offset,
2585                                frag->size, PCI_DMA_TODEVICE);
2586                tpd->iovec[slot].len = frag->size;
2587                ++slot;
2588
2589        }
2590
2591        tpd->iovec[slot - 1].len |= TPD_LST;
2592#else
2593        tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2594        tpd->length0 = skb->len | TPD_LST;
2595#endif
2596        tpd->status |= TPD_INT;
2597
2598        tpd->vcc = vcc;
2599        tpd->skb = skb;
2600        wmb();
2601        ATM_SKB(skb)->vcc = vcc;
2602
2603        __enqueue_tpd(he_dev, tpd, cid);
2604        spin_unlock_irqrestore(&he_dev->global_lock, flags);
2605
2606        atomic_inc(&vcc->stats->tx);
2607
2608        return 0;
2609}
2610
2611static int
2612he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2613{
2614        unsigned long flags;
2615        struct he_dev *he_dev = HE_DEV(atm_dev);
2616        struct he_ioctl_reg reg;
2617        int err = 0;
2618
2619        switch (cmd) {
2620                case HE_GET_REG:
2621                        if (!capable(CAP_NET_ADMIN))
2622                                return -EPERM;
2623
2624                        if (copy_from_user(&reg, arg,
2625                                           sizeof(struct he_ioctl_reg)))
2626                                return -EFAULT;
2627
2628                        spin_lock_irqsave(&he_dev->global_lock, flags);
2629                        switch (reg.type) {
2630                                case HE_REGTYPE_PCI:
2631                                        if (reg.addr >= HE_REGMAP_SIZE) {
2632                                                err = -EINVAL;
2633                                                break;
2634                                        }
2635
2636                                        reg.val = he_readl(he_dev, reg.addr);
2637                                        break;
2638                                case HE_REGTYPE_RCM:
2639                                        reg.val =
2640                                                he_readl_rcm(he_dev, reg.addr);
2641                                        break;
2642                                case HE_REGTYPE_TCM:
2643                                        reg.val =
2644                                                he_readl_tcm(he_dev, reg.addr);
2645                                        break;
2646                                case HE_REGTYPE_MBOX:
2647                                        reg.val =
2648                                                he_readl_mbox(he_dev, reg.addr);
2649                                        break;
2650                                default:
2651                                        err = -EINVAL;
2652                                        break;
2653                        }
2654                        spin_unlock_irqrestore(&he_dev->global_lock, flags);
2655                        if (err == 0)
2656                                if (copy_to_user(arg, &reg,
2657                                                        sizeof(struct he_ioctl_reg)))
2658                                        return -EFAULT;
2659                        break;
2660                default:
2661#ifdef CONFIG_ATM_HE_USE_SUNI
2662                        if (atm_dev->phy && atm_dev->phy->ioctl)
2663                                err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2664#else /* CONFIG_ATM_HE_USE_SUNI */
2665                        err = -EINVAL;
2666#endif /* CONFIG_ATM_HE_USE_SUNI */
2667                        break;
2668        }
2669
2670        return err;
2671}
2672
2673static void
2674he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2675{
2676        unsigned long flags;
2677        struct he_dev *he_dev = HE_DEV(atm_dev);
2678
2679        HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2680
2681        spin_lock_irqsave(&he_dev->global_lock, flags);
2682        he_writel(he_dev, val, FRAMER + (addr*4));
2683        (void) he_readl(he_dev, FRAMER + (addr*4));             /* flush posted writes */
2684        spin_unlock_irqrestore(&he_dev->global_lock, flags);
2685}
2686 
2687        
2688static unsigned char
2689he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2690{ 
2691        unsigned long flags;
2692        struct he_dev *he_dev = HE_DEV(atm_dev);
2693        unsigned reg;
2694
2695        spin_lock_irqsave(&he_dev->global_lock, flags);
2696        reg = he_readl(he_dev, FRAMER + (addr*4));
2697        spin_unlock_irqrestore(&he_dev->global_lock, flags);
2698
2699        HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2700        return reg;
2701}
2702
2703static int
2704he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2705{
2706        unsigned long flags;
2707        struct he_dev *he_dev = HE_DEV(dev);
2708        int left, i;
2709#ifdef notdef
2710        struct he_rbrq *rbrq_tail;
2711        struct he_tpdrq *tpdrq_head;
2712        int rbpl_head, rbpl_tail;
2713#endif
2714        static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2715
2716
2717        left = *pos;
2718        if (!left--)
2719                return sprintf(page, "ATM he driver\n");
2720
2721        if (!left--)
2722                return sprintf(page, "%s%s\n\n",
2723                        he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2724
2725        if (!left--)
2726                return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");
2727
2728        spin_lock_irqsave(&he_dev->global_lock, flags);
2729        mcc += he_readl(he_dev, MCC);
2730        oec += he_readl(he_dev, OEC);
2731        dcc += he_readl(he_dev, DCC);
2732        cec += he_readl(he_dev, CEC);
2733        spin_unlock_irqrestore(&he_dev->global_lock, flags);
2734
2735        if (!left--)
2736                return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n", 
2737                                                        mcc, oec, dcc, cec);
2738
2739        if (!left--)
2740                return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
2741                                CONFIG_IRQ_SIZE, he_dev->irq_peak);
2742
2743        if (!left--)
2744                return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
2745                                                CONFIG_TPDRQ_SIZE);
2746
2747        if (!left--)
2748                return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
2749                                CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2750
2751        if (!left--)
2752                return sprintf(page, "tbrq_size = %d  peak = %d\n",
2753                                        CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2754
2756#ifdef notdef
2757        rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2758        rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2759
2760        inuse = rbpl_head - rbpl_tail;
2761        if (inuse < 0)
2762                inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2763        inuse /= sizeof(struct he_rbp);
2764
2765        if (!left--)
2766                return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
2767                                                CONFIG_RBPL_SIZE, inuse);
2768#endif
2769
2770        if (!left--)
2771                return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");
2772
2773        for (i = 0; i < HE_NUM_CS_STPER; ++i)
2774                if (!left--)
2775                        return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
2776                                                he_dev->cs_stper[i].pcr,
2777                                                he_dev->cs_stper[i].inuse);
2778
2779        if (!left--)
2780                return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
2781                        he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9);
2782
2783        return 0;
2784}
2785
2786/* eeprom routines -- see section 4.7 of the ForeRunnerHE reference manual */
2787
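/*
 * read_prom_byte bit-bangs the adapter's serial EEPROM through the EEPROM
 * control bits of HOST_CNTL: it clocks out a READ opcode using the
 * readtab[] patterns, sends the 8-bit address MSB first, and then clocks
 * the data byte in one bit per clocktab[] cycle by sampling ID_DOUT,
 * waiting EEPROM_DELAY between transitions.
 */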
2788static u8 read_prom_byte(struct he_dev *he_dev, int addr)
2789{
2790        u32 val = 0, tmp_read = 0;
2791        int i, j = 0;
2792        u8 byte_read = 0;
2793
2794        val = readl(he_dev->membase + HOST_CNTL);
2795        val &= 0xFFFFE0FF;
2796
2797        /* Turn on write enable */
2798        val |= 0x800;
2799        he_writel(he_dev, val, HOST_CNTL);
2800
2801        /* Send READ instruction */
2802        for (i = 0; i < ARRAY_SIZE(readtab); i++) {
2803                he_writel(he_dev, val | readtab[i], HOST_CNTL);
2804                udelay(EEPROM_DELAY);
2805        }
2806
2807        /* Next, we need to send the byte address to read from */
2808        for (i = 7; i >= 0; i--) {
2809                he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2810                udelay(EEPROM_DELAY);
2811                he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2812                udelay(EEPROM_DELAY);
2813        }
2814
2815        j = 0;
2816
2817        val &= 0xFFFFF7FF;      /* Turn off write enable */
2818        he_writel(he_dev, val, HOST_CNTL);
2819
2820        /* Now, we can read data from the EEPROM by clocking it in */
2821        for (i = 7; i >= 0; i--) {
2822                he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2823                udelay(EEPROM_DELAY);
2824                tmp_read = he_readl(he_dev, HOST_CNTL);
2825                byte_read |= (unsigned char)
2826                           ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
2827                he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2828                udelay(EEPROM_DELAY);
2829        }
2830
2831        he_writel(he_dev, val | ID_CS, HOST_CNTL);
2832        udelay(EEPROM_DELAY);
2833
2834        return byte_read;
2835}
2836
2837MODULE_LICENSE("GPL");
2838MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
2839MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
2840module_param(disable64, bool, 0);
2841MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
2842module_param(nvpibits, short, 0);
2843MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
2844module_param(nvcibits, short, 0);
2845MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
2846module_param(rx_skb_reserve, short, 0);
2847MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
2848module_param(irq_coalesce, bool, 0);
2849MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
2850module_param(sdh, bool, 0);
2851MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
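
/*
 * Example (hypothetical values): loading the module with
 *     modprobe he sdh=1 irq_coalesce=0
 * would enable SDH framing and turn off interrupt coalescing; parameters
 * that are not given keep the defaults noted above.
 */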
2852
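/*
 * PCI glue: he_pci_tbl matches the FORE Systems HE adapter, and
 * module_pci_driver() generates the module init/exit functions that
 * register and unregister he_driver.
 */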
2853static struct pci_device_id he_pci_tbl[] = {
2854        { PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
2855        { 0, }
2856};
2857
2858MODULE_DEVICE_TABLE(pci, he_pci_tbl);
2859
2860static struct pci_driver he_driver = {
2861        .name =         "he",
2862        .probe =        he_init_one,
2863        .remove =       he_remove_one,
2864        .id_table =     he_pci_tbl,
2865};
2866
2867module_pci_driver(he_driver);
2868