linux/drivers/atm/nicstar.c
   1/*
   2 * nicstar.c
   3 *
   4 * Device driver supporting CBR for IDT 77201/77211 "NICStAR" based cards.
   5 *
   6 * IMPORTANT: The included file nicstarmac.c was NOT WRITTEN BY ME.
   7 *            It was taken from the frle-0.22 device driver.
   8 *            As the file doesn't have a copyright notice, in the file
   9 *            nicstarmac.copyright I put the copyright notice from the
  10 *            frle-0.22 device driver.
  11 *            Some code is based on the nicstar driver by M. Welsh.
  12 *
  13 * Author: Rui Prior (rprior@inescn.pt)
  14 * PowerPC support by Jay Talbott (jay_talbott@mcg.mot.com) April 1999
  15 *
  16 *
  17 * (C) INESC 1999
  18 */
  19
  20/*
  21 * IMPORTANT INFORMATION
  22 *
  23 * There are currently three types of spinlocks:
  24 *
  25 * 1 - Per card interrupt spinlock (to protect structures and such)
  26 * 2 - Per SCQ scq spinlock
  27 * 3 - Per card resource spinlock (to access registers, etc.)
  28 *
  29 * These must NEVER be grabbed in reverse order.
  30 *
  31 */
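/*
 * A rough illustration of the allowed nesting (a sketch, not lifted from any
 * single code path): a lower-numbered lock may be held while taking a
 * higher-numbered one, never the reverse.
 *
 *      spin_lock_irqsave(&card->int_lock, flags);      - lock type 1
 *      spin_lock(&scq->lock);                          - lock type 2
 *      ...
 *      spin_unlock(&scq->lock);
 *      spin_unlock_irqrestore(&card->int_lock, flags);
 */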
  32
  33/* Header files */
  34
  35#include <linux/module.h>
  36#include <linux/kernel.h>
  37#include <linux/skbuff.h>
  38#include <linux/atmdev.h>
  39#include <linux/atm.h>
  40#include <linux/pci.h>
  41#include <linux/dma-mapping.h>
  42#include <linux/types.h>
  43#include <linux/string.h>
  44#include <linux/delay.h>
  45#include <linux/init.h>
  46#include <linux/sched.h>
  47#include <linux/timer.h>
  48#include <linux/interrupt.h>
  49#include <linux/bitops.h>
  50#include <linux/slab.h>
  51#include <linux/idr.h>
  52#include <asm/io.h>
  53#include <asm/uaccess.h>
  54#include <linux/atomic.h>
  55#include "nicstar.h"
  56#ifdef CONFIG_ATM_NICSTAR_USE_SUNI
  57#include "suni.h"
  58#endif /* CONFIG_ATM_NICSTAR_USE_SUNI */
  59#ifdef CONFIG_ATM_NICSTAR_USE_IDT77105
  60#include "idt77105.h"
  61#endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */
  62
  63/* Additional code */
  64
  65#include "nicstarmac.c"
  66
  67/* Configurable parameters */
  68
  69#undef PHY_LOOPBACK
  70#undef TX_DEBUG
  71#undef RX_DEBUG
  72#undef GENERAL_DEBUG
  73#undef EXTRA_DEBUG
  74
  75#undef NS_USE_DESTRUCTORS       /* For now keep this undefined unless you know
  76                                   you're going to use only raw ATM */
  77
  78/* Do not touch these */
  79
  80#ifdef TX_DEBUG
  81#define TXPRINTK(args...) printk(args)
  82#else
  83#define TXPRINTK(args...)
  84#endif /* TX_DEBUG */
  85
  86#ifdef RX_DEBUG
  87#define RXPRINTK(args...) printk(args)
  88#else
  89#define RXPRINTK(args...)
  90#endif /* RX_DEBUG */
  91
  92#ifdef GENERAL_DEBUG
  93#define PRINTK(args...) printk(args)
  94#else
  95#define PRINTK(args...)
  96#endif /* GENERAL_DEBUG */
  97
  98#ifdef EXTRA_DEBUG
  99#define XPRINTK(args...) printk(args)
 100#else
 101#define XPRINTK(args...)
 102#endif /* EXTRA_DEBUG */
 103
 104/* Macros */
 105
 106#define CMD_BUSY(card) (readl((card)->membase + STAT) & NS_STAT_CMDBZ)
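/* The adapter accepts one command at a time: callers spin on CMD_BUSY()
   (the NS_STAT_CMDBZ bit in STAT) before writing the CMD register. */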
 107
 108#define NS_DELAY mdelay(1)
 109
 110#define PTR_DIFF(a, b)  ((u32)((unsigned long)(a) - (unsigned long)(b)))
 111
 112#ifndef ATM_SKB
 113#define ATM_SKB(s) (&(s)->atm)
 114#endif
 115
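/*
 * Translate a pointer into an SCQ's coherently-mapped area into the bus
 * (DMA) address handed to the card, e.g.:
 *
 *      u32d[0] = scq_virt_to_bus(card->scq0, card->scq0->base);
 */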
 116#define scq_virt_to_bus(scq, p) \
 117                (scq->dma + ((unsigned long)(p) - (unsigned long)(scq)->org))
 118
 119/* Function declarations */
 120
 121static u32 ns_read_sram(ns_dev * card, u32 sram_address);
 122static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value,
 123                          int count);
 124static int ns_init_card(int i, struct pci_dev *pcidev);
 125static void ns_init_card_error(ns_dev * card, int error);
 126static scq_info *get_scq(ns_dev *card, int size, u32 scd);
 127static void free_scq(ns_dev *card, scq_info * scq, struct atm_vcc *vcc);
 128static void push_rxbufs(ns_dev *, struct sk_buff *);
 129static irqreturn_t ns_irq_handler(int irq, void *dev_id);
 130static int ns_open(struct atm_vcc *vcc);
 131static void ns_close(struct atm_vcc *vcc);
 132static void fill_tst(ns_dev * card, int n, vc_map * vc);
 133static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb);
 134static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
 135                     struct sk_buff *skb);
 136static void process_tsq(ns_dev * card);
 137static void drain_scq(ns_dev * card, scq_info * scq, int pos);
 138static void process_rsq(ns_dev * card);
 139static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe);
 140#ifdef NS_USE_DESTRUCTORS
 141static void ns_sb_destructor(struct sk_buff *sb);
 142static void ns_lb_destructor(struct sk_buff *lb);
 143static void ns_hb_destructor(struct sk_buff *hb);
 144#endif /* NS_USE_DESTRUCTORS */
 145static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb);
 146static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count);
 147static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb);
 148static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb);
 149static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb);
 150static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page);
 151static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg);
 152#ifdef EXTRA_DEBUG
 153static void which_list(ns_dev * card, struct sk_buff *skb);
 154#endif
 155static void ns_poll(unsigned long arg);
 156static void ns_phy_put(struct atm_dev *dev, unsigned char value,
 157                       unsigned long addr);
 158static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);
 159
 160/* Global variables */
 161
 162static struct ns_dev *cards[NS_MAX_CARDS];
 163static unsigned num_cards;
 164static struct atmdev_ops atm_ops = {
 165        .open = ns_open,
 166        .close = ns_close,
 167        .ioctl = ns_ioctl,
 168        .send = ns_send,
 169        .phy_put = ns_phy_put,
 170        .phy_get = ns_phy_get,
 171        .proc_read = ns_proc_read,
 172        .owner = THIS_MODULE,
 173};
 174
 175static struct timer_list ns_timer;
 176static char *mac[NS_MAX_CARDS];
 177module_param_array(mac, charp, NULL, 0);
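/* Optional per-card ESI/MAC override, one comma-separated entry per card,
   e.g. something like "modprobe nicstar mac=00:20:d4:12:34:56" (address shown
   only as an illustration); entries are parsed with mac_pton() in
   ns_init_card(), and the EEPROM address is used when no entry is given. */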
 178MODULE_LICENSE("GPL");
 179
 180/* Functions */
 181
 182static int nicstar_init_one(struct pci_dev *pcidev,
 183                            const struct pci_device_id *ent)
 184{
 185        static int index = -1;
 186        unsigned int error;
 187
 188        index++;
 189        cards[index] = NULL;
 190
 191        error = ns_init_card(index, pcidev);
 192        if (error) {
 193                cards[index--] = NULL;  /* don't increment index */
 194                goto err_out;
 195        }
 196
 197        return 0;
 198err_out:
 199        return -ENODEV;
 200}
 201
 202static void nicstar_remove_one(struct pci_dev *pcidev)
 203{
 204        int i, j;
 205        ns_dev *card = pci_get_drvdata(pcidev);
 206        struct sk_buff *hb;
 207        struct sk_buff *iovb;
 208        struct sk_buff *lb;
 209        struct sk_buff *sb;
 210
 211        i = card->index;
 212
 213        if (cards[i] == NULL)
 214                return;
 215
 216        if (card->atmdev->phy && card->atmdev->phy->stop)
 217                card->atmdev->phy->stop(card->atmdev);
 218
 219        /* Stop everything */
 220        writel(0x00000000, card->membase + CFG);
 221
 222        /* De-register device */
 223        atm_dev_deregister(card->atmdev);
 224
 225        /* Disable PCI device */
 226        pci_disable_device(pcidev);
 227
 228        /* Free up resources */
 229        j = 0;
 230        PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count);
 231        while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) {
 232                dev_kfree_skb_any(hb);
 233                j++;
 234        }
 235        PRINTK("nicstar%d: %d huge buffers freed.\n", i, j);
 236        j = 0;
 237        PRINTK("nicstar%d: freeing %d iovec buffers.\n", i,
 238               card->iovpool.count);
 239        while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) {
 240                dev_kfree_skb_any(iovb);
 241                j++;
 242        }
 243        PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j);
 244        while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
 245                dev_kfree_skb_any(lb);
 246        while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
 247                dev_kfree_skb_any(sb);
 248        free_scq(card, card->scq0, NULL);
 249        for (j = 0; j < NS_FRSCD_NUM; j++) {
 250                if (card->scd2vc[j] != NULL)
 251                        free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);
 252        }
 253        idr_destroy(&card->idr);
 254        pci_free_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
 255                            card->rsq.org, card->rsq.dma);
 256        pci_free_consistent(card->pcidev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
 257                            card->tsq.org, card->tsq.dma);
 258        free_irq(card->pcidev->irq, card);
 259        iounmap(card->membase);
 260        kfree(card);
 261}
 262
 263static struct pci_device_id nicstar_pci_tbl[] = {
 264        { PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77201), 0 },
 265        {0,}                    /* terminate list */
 266};
 267
 268MODULE_DEVICE_TABLE(pci, nicstar_pci_tbl);
 269
 270static struct pci_driver nicstar_driver = {
 271        .name = "nicstar",
 272        .id_table = nicstar_pci_tbl,
 273        .probe = nicstar_init_one,
 274        .remove = nicstar_remove_one,
 275};
 276
 277static int __init nicstar_init(void)
 278{
 279        unsigned error = 0;     /* Initialized to remove compile warning */
 280
 281        XPRINTK("nicstar: nicstar_init() called.\n");
 282
 283        error = pci_register_driver(&nicstar_driver);
 284
 285        TXPRINTK("nicstar: TX debug enabled.\n");
 286        RXPRINTK("nicstar: RX debug enabled.\n");
 287        PRINTK("nicstar: General debug enabled.\n");
 288#ifdef PHY_LOOPBACK
 289        printk("nicstar: using PHY loopback.\n");
 290#endif /* PHY_LOOPBACK */
 291        XPRINTK("nicstar: nicstar_init() returned.\n");
 292
 293        if (!error) {
 294                init_timer(&ns_timer);
 295                ns_timer.expires = jiffies + NS_POLL_PERIOD;
 296                ns_timer.data = 0UL;
 297                ns_timer.function = ns_poll;
 298                add_timer(&ns_timer);
 299        }
 300
 301        return error;
 302}
 303
 304static void __exit nicstar_cleanup(void)
 305{
 306        XPRINTK("nicstar: nicstar_cleanup() called.\n");
 307
 308        del_timer(&ns_timer);
 309
 310        pci_unregister_driver(&nicstar_driver);
 311
 312        XPRINTK("nicstar: nicstar_cleanup() returned.\n");
 313}
 314
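/*
 * SRAM access helpers.  Both serialize on card->res_lock and busy-wait on
 * CMD_BUSY() before issuing the command.  Rough usage, mirroring callers
 * elsewhere in this file:
 *
 *      u32 word = ns_read_sram(card, 0x1C003);
 *      u32 rcte[4] = { 0, 0, 0, 0xFFFFFFFF };
 *      ns_write_sram(card, j * 4, rcte, 4);    (at most 4 words per call)
 */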
 315static u32 ns_read_sram(ns_dev * card, u32 sram_address)
 316{
 317        unsigned long flags;
 318        u32 data;
 319        sram_address <<= 2;
 320        sram_address &= 0x0007FFFC;     /* address must be dword aligned */
 321        sram_address |= 0x50000000;     /* SRAM read command */
 322        spin_lock_irqsave(&card->res_lock, flags);
 323        while (CMD_BUSY(card)) ;
 324        writel(sram_address, card->membase + CMD);
 325        while (CMD_BUSY(card)) ;
 326        data = readl(card->membase + DR0);
 327        spin_unlock_irqrestore(&card->res_lock, flags);
 328        return data;
 329}
 330
 331static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value,
 332                          int count)
 333{
 334        unsigned long flags;
 335        int i, c;
 336        count--;                /* count range now is 0..3 instead of 1..4 */
 337        c = count;
 338        c <<= 2;                /* to use increments of 4 */
 339        spin_lock_irqsave(&card->res_lock, flags);
 340        while (CMD_BUSY(card)) ;
 341        for (i = 0; i <= c; i += 4)
 342                writel(*(value++), card->membase + i);
 343        /* Note: DR# registers are the first 4 dwords in nicstar's memspace,
 344           so card->membase + DR0 == card->membase */
 345        sram_address <<= 2;
 346        sram_address &= 0x0007FFFC;
 347        sram_address |= (0x40000000 | count);
 348        writel(sram_address, card->membase + CMD);
 349        spin_unlock_irqrestore(&card->res_lock, flags);
 350}
 351
 352static int ns_init_card(int i, struct pci_dev *pcidev)
 353{
 354        int j;
 355        struct ns_dev *card = NULL;
 356        unsigned char pci_latency;
 357        unsigned error;
 358        u32 data;
 359        u32 u32d[4];
 360        u32 ns_cfg_rctsize;
 361        int bcount;
 362        unsigned long membase;
 363
 364        error = 0;
 365
 366        if (pci_enable_device(pcidev)) {
 367                printk("nicstar%d: can't enable PCI device\n", i);
 368                error = 2;
 369                ns_init_card_error(card, error);
 370                return error;
 371        }
 372        if ((pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0) ||
 373            (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0)) {
 374                printk(KERN_WARNING
 375                       "nicstar%d: No suitable DMA available.\n", i);
 376                error = 2;
 377                ns_init_card_error(card, error);
 378                return error;
 379        }
 380
 381        if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == NULL) {
 382                printk
 383                    ("nicstar%d: can't allocate memory for device structure.\n",
 384                     i);
 385                error = 2;
 386                ns_init_card_error(card, error);
 387                return error;
 388        }
 389        cards[i] = card;
 390        spin_lock_init(&card->int_lock);
 391        spin_lock_init(&card->res_lock);
 392
 393        pci_set_drvdata(pcidev, card);
 394
 395        card->index = i;
 396        card->atmdev = NULL;
 397        card->pcidev = pcidev;
 398        membase = pci_resource_start(pcidev, 1);
 399        card->membase = ioremap(membase, NS_IOREMAP_SIZE);
 400        if (!card->membase) {
 401                printk("nicstar%d: can't ioremap() membase.\n", i);
 402                error = 3;
 403                ns_init_card_error(card, error);
 404                return error;
 405        }
 406        PRINTK("nicstar%d: membase at 0x%p.\n", i, card->membase);
 407
 408        pci_set_master(pcidev);
 409
 410        if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0) {
 411                printk("nicstar%d: can't read PCI latency timer.\n", i);
 412                error = 6;
 413                ns_init_card_error(card, error);
 414                return error;
 415        }
 416#ifdef NS_PCI_LATENCY
 417        if (pci_latency < NS_PCI_LATENCY) {
 418                PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i,
 419                       NS_PCI_LATENCY);
 420                for (j = 1; j < 4; j++) {
 421                        if (pci_write_config_byte
 422                            (pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) != 0)
 423                                break;
 424                }
 425                if (j == 4) {
 426                        printk
 427                            ("nicstar%d: can't set PCI latency timer to %d.\n",
 428                             i, NS_PCI_LATENCY);
 429                        error = 7;
 430                        ns_init_card_error(card, error);
 431                        return error;
 432                }
 433        }
 434#endif /* NS_PCI_LATENCY */
 435
 436        /* Clear timer overflow */
 437        data = readl(card->membase + STAT);
 438        if (data & NS_STAT_TMROF)
 439                writel(NS_STAT_TMROF, card->membase + STAT);
 440
 441        /* Software reset */
 442        writel(NS_CFG_SWRST, card->membase + CFG);
 443        NS_DELAY;
 444        writel(0x00000000, card->membase + CFG);
 445
 446        /* PHY reset */
 447        writel(0x00000008, card->membase + GP);
 448        NS_DELAY;
 449        writel(0x00000001, card->membase + GP);
 450        NS_DELAY;
 451        while (CMD_BUSY(card)) ;
 452        writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD); /* Sync UTOPIA with SAR clock */
 453        NS_DELAY;
 454
 455        /* Detect PHY type */
 456        while (CMD_BUSY(card)) ;
 457        writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD);
 458        while (CMD_BUSY(card)) ;
 459        data = readl(card->membase + DR0);
 460        switch (data) {
 461        case 0x00000009:
 462                printk("nicstar%d: PHY seems to be 25 Mbps.\n", i);
 463                card->max_pcr = ATM_25_PCR;
 464                while (CMD_BUSY(card)) ;
 465                writel(0x00000008, card->membase + DR0);
 466                writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD);
                /* Clear any pending interrupt */
 468                writel(NS_STAT_SFBQF, card->membase + STAT);
 469#ifdef PHY_LOOPBACK
 470                while (CMD_BUSY(card)) ;
 471                writel(0x00000022, card->membase + DR0);
 472                writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD);
 473#endif /* PHY_LOOPBACK */
 474                break;
 475        case 0x00000030:
 476        case 0x00000031:
 477                printk("nicstar%d: PHY seems to be 155 Mbps.\n", i);
 478                card->max_pcr = ATM_OC3_PCR;
 479#ifdef PHY_LOOPBACK
 480                while (CMD_BUSY(card)) ;
 481                writel(0x00000002, card->membase + DR0);
 482                writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD);
 483#endif /* PHY_LOOPBACK */
 484                break;
 485        default:
 486                printk("nicstar%d: unknown PHY type (0x%08X).\n", i, data);
 487                error = 8;
 488                ns_init_card_error(card, error);
 489                return error;
 490        }
 491        writel(0x00000000, card->membase + GP);
 492
 493        /* Determine SRAM size */
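        /*
         * The two probe addresses below differ only in a high address bit.
         * On the smaller (32K word) SRAM that bit is effectively ignored, so
         * the second write lands on top of the first and the read-back check
         * fails, while a 128K part keeps both patterns.
         */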
 494        data = 0x76543210;
 495        ns_write_sram(card, 0x1C003, &data, 1);
 496        data = 0x89ABCDEF;
 497        ns_write_sram(card, 0x14003, &data, 1);
 498        if (ns_read_sram(card, 0x14003) == 0x89ABCDEF &&
 499            ns_read_sram(card, 0x1C003) == 0x76543210)
 500                card->sram_size = 128;
 501        else
 502                card->sram_size = 32;
 503        PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size);
 504
 505        card->rct_size = NS_MAX_RCTSIZE;
 506
 507#if (NS_MAX_RCTSIZE == 4096)
 508        if (card->sram_size == 128)
 509                printk
 510                    ("nicstar%d: limiting maximum VCI. See NS_MAX_RCTSIZE in nicstar.h\n",
 511                     i);
 512#elif (NS_MAX_RCTSIZE == 16384)
 513        if (card->sram_size == 32) {
 514                printk
 515                    ("nicstar%d: wasting memory. See NS_MAX_RCTSIZE in nicstar.h\n",
 516                     i);
 517                card->rct_size = 4096;
 518        }
 519#else
 520#error NS_MAX_RCTSIZE must be either 4096 or 16384 in nicstar.c
 521#endif
 522
 523        card->vpibits = NS_VPIBITS;
 524        if (card->rct_size == 4096)
 525                card->vcibits = 12 - NS_VPIBITS;
 526        else                    /* card->rct_size == 16384 */
 527                card->vcibits = 14 - NS_VPIBITS;
 528
 529        /* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */
 530        if (mac[i] == NULL)
 531                nicstar_init_eprom(card->membase);
 532
 533        /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
 534        writel(0x00000000, card->membase + VPM);
 535
 536        /* Initialize TSQ */
 537        card->tsq.org = pci_alloc_consistent(card->pcidev,
 538                                             NS_TSQSIZE + NS_TSQ_ALIGNMENT,
 539                                             &card->tsq.dma);
 540        if (card->tsq.org == NULL) {
 541                printk("nicstar%d: can't allocate TSQ.\n", i);
 542                error = 10;
 543                ns_init_card_error(card, error);
 544                return error;
 545        }
 546        card->tsq.base = PTR_ALIGN(card->tsq.org, NS_TSQ_ALIGNMENT);
 547        card->tsq.next = card->tsq.base;
 548        card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1);
 549        for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++)
 550                ns_tsi_init(card->tsq.base + j);
 551        writel(0x00000000, card->membase + TSQH);
 552        writel(ALIGN(card->tsq.dma, NS_TSQ_ALIGNMENT), card->membase + TSQB);
 553        PRINTK("nicstar%d: TSQ base at 0x%p.\n", i, card->tsq.base);
 554
 555        /* Initialize RSQ */
 556        card->rsq.org = pci_alloc_consistent(card->pcidev,
 557                                             NS_RSQSIZE + NS_RSQ_ALIGNMENT,
 558                                             &card->rsq.dma);
 559        if (card->rsq.org == NULL) {
 560                printk("nicstar%d: can't allocate RSQ.\n", i);
 561                error = 11;
 562                ns_init_card_error(card, error);
 563                return error;
 564        }
 565        card->rsq.base = PTR_ALIGN(card->rsq.org, NS_RSQ_ALIGNMENT);
 566        card->rsq.next = card->rsq.base;
 567        card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1);
 568        for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++)
 569                ns_rsqe_init(card->rsq.base + j);
 570        writel(0x00000000, card->membase + RSQH);
 571        writel(ALIGN(card->rsq.dma, NS_RSQ_ALIGNMENT), card->membase + RSQB);
 572        PRINTK("nicstar%d: RSQ base at 0x%p.\n", i, card->rsq.base);
 573
 574        /* Initialize SCQ0, the only VBR SCQ used */
 575        card->scq1 = NULL;
 576        card->scq2 = NULL;
 577        card->scq0 = get_scq(card, VBR_SCQSIZE, NS_VRSCD0);
 578        if (card->scq0 == NULL) {
 579                printk("nicstar%d: can't get SCQ0.\n", i);
 580                error = 12;
 581                ns_init_card_error(card, error);
 582                return error;
 583        }
 584        u32d[0] = scq_virt_to_bus(card->scq0, card->scq0->base);
 585        u32d[1] = (u32) 0x00000000;
 586        u32d[2] = (u32) 0xffffffff;
 587        u32d[3] = (u32) 0x00000000;
 588        ns_write_sram(card, NS_VRSCD0, u32d, 4);
 589        ns_write_sram(card, NS_VRSCD1, u32d, 4);        /* These last two won't be used */
 590        ns_write_sram(card, NS_VRSCD2, u32d, 4);        /* but are initialized, just in case... */
 591        card->scq0->scd = NS_VRSCD0;
 592        PRINTK("nicstar%d: VBR-SCQ0 base at 0x%p.\n", i, card->scq0->base);
 593
 594        /* Initialize TSTs */
 595        card->tst_addr = NS_TST0;
 596        card->tst_free_entries = NS_TST_NUM_ENTRIES;
 597        data = NS_TST_OPCODE_VARIABLE;
 598        for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
 599                ns_write_sram(card, NS_TST0 + j, &data, 1);
 600        data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0);
 601        ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1);
 602        for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
 603                ns_write_sram(card, NS_TST1 + j, &data, 1);
 604        data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1);
 605        ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1);
 606        for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
 607                card->tste2vc[j] = NULL;
 608        writel(NS_TST0 << 2, card->membase + TSTB);
 609
 610        /* Initialize RCT. AAL type is set on opening the VC. */
 611#ifdef RCQ_SUPPORT
 612        u32d[0] = NS_RCTE_RAWCELLINTEN;
 613#else
 614        u32d[0] = 0x00000000;
 615#endif /* RCQ_SUPPORT */
 616        u32d[1] = 0x00000000;
 617        u32d[2] = 0x00000000;
 618        u32d[3] = 0xFFFFFFFF;
 619        for (j = 0; j < card->rct_size; j++)
 620                ns_write_sram(card, j * 4, u32d, 4);
 621
 622        memset(card->vcmap, 0, NS_MAX_RCTSIZE * sizeof(vc_map));
 623
 624        for (j = 0; j < NS_FRSCD_NUM; j++)
 625                card->scd2vc[j] = NULL;
 626
 627        /* Initialize buffer levels */
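        /*
         * Free-buffer-queue watermarks: .init buffers of each kind are
         * allocated below, the interrupt handler refills an empty queue back
         * up toward .min, and push_rxbufs() starts dropping buffers once the
         * card already holds .max of them.
         */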
 628        card->sbnr.min = MIN_SB;
 629        card->sbnr.init = NUM_SB;
 630        card->sbnr.max = MAX_SB;
 631        card->lbnr.min = MIN_LB;
 632        card->lbnr.init = NUM_LB;
 633        card->lbnr.max = MAX_LB;
 634        card->iovnr.min = MIN_IOVB;
 635        card->iovnr.init = NUM_IOVB;
 636        card->iovnr.max = MAX_IOVB;
 637        card->hbnr.min = MIN_HB;
 638        card->hbnr.init = NUM_HB;
 639        card->hbnr.max = MAX_HB;
 640
 641        card->sm_handle = 0x00000000;
 642        card->sm_addr = 0x00000000;
 643        card->lg_handle = 0x00000000;
 644        card->lg_addr = 0x00000000;
 645
 646        card->efbie = 1;        /* To prevent push_rxbufs from enabling the interrupt */
 647
 648        idr_init(&card->idr);
 649
 650        /* Pre-allocate some huge buffers */
 651        skb_queue_head_init(&card->hbpool.queue);
 652        card->hbpool.count = 0;
 653        for (j = 0; j < NUM_HB; j++) {
 654                struct sk_buff *hb;
 655                hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
 656                if (hb == NULL) {
 657                        printk
 658                            ("nicstar%d: can't allocate %dth of %d huge buffers.\n",
 659                             i, j, NUM_HB);
 660                        error = 13;
 661                        ns_init_card_error(card, error);
 662                        return error;
 663                }
 664                NS_PRV_BUFTYPE(hb) = BUF_NONE;
 665                skb_queue_tail(&card->hbpool.queue, hb);
 666                card->hbpool.count++;
 667        }
 668
 669        /* Allocate large buffers */
 670        skb_queue_head_init(&card->lbpool.queue);
 671        card->lbpool.count = 0; /* Not used */
 672        for (j = 0; j < NUM_LB; j++) {
 673                struct sk_buff *lb;
 674                lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
 675                if (lb == NULL) {
 676                        printk
 677                            ("nicstar%d: can't allocate %dth of %d large buffers.\n",
 678                             i, j, NUM_LB);
 679                        error = 14;
 680                        ns_init_card_error(card, error);
 681                        return error;
 682                }
 683                NS_PRV_BUFTYPE(lb) = BUF_LG;
 684                skb_queue_tail(&card->lbpool.queue, lb);
 685                skb_reserve(lb, NS_SMBUFSIZE);
 686                push_rxbufs(card, lb);
 687                /* Due to the implementation of push_rxbufs() this is 1, not 0 */
 688                if (j == 1) {
 689                        card->rcbuf = lb;
 690                        card->rawcell = (struct ns_rcqe *) lb->data;
 691                        card->rawch = NS_PRV_DMA(lb);
 692                }
 693        }
 694        /* Test for strange behaviour which leads to crashes */
 695        if ((bcount =
 696             ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min) {
 697                printk
 698                    ("nicstar%d: Strange... Just allocated %d large buffers and lfbqc = %d.\n",
 699                     i, j, bcount);
 700                error = 14;
 701                ns_init_card_error(card, error);
 702                return error;
 703        }
 704
 705        /* Allocate small buffers */
 706        skb_queue_head_init(&card->sbpool.queue);
 707        card->sbpool.count = 0; /* Not used */
 708        for (j = 0; j < NUM_SB; j++) {
 709                struct sk_buff *sb;
 710                sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
 711                if (sb == NULL) {
 712                        printk
 713                            ("nicstar%d: can't allocate %dth of %d small buffers.\n",
 714                             i, j, NUM_SB);
 715                        error = 15;
 716                        ns_init_card_error(card, error);
 717                        return error;
 718                }
 719                NS_PRV_BUFTYPE(sb) = BUF_SM;
 720                skb_queue_tail(&card->sbpool.queue, sb);
 721                skb_reserve(sb, NS_AAL0_HEADER);
 722                push_rxbufs(card, sb);
 723        }
 724        /* Test for strange behaviour which leads to crashes */
 725        if ((bcount =
 726             ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min) {
 727                printk
 728                    ("nicstar%d: Strange... Just allocated %d small buffers and sfbqc = %d.\n",
 729                     i, j, bcount);
 730                error = 15;
 731                ns_init_card_error(card, error);
 732                return error;
 733        }
 734
 735        /* Allocate iovec buffers */
 736        skb_queue_head_init(&card->iovpool.queue);
 737        card->iovpool.count = 0;
 738        for (j = 0; j < NUM_IOVB; j++) {
 739                struct sk_buff *iovb;
 740                iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
 741                if (iovb == NULL) {
 742                        printk
 743                            ("nicstar%d: can't allocate %dth of %d iovec buffers.\n",
 744                             i, j, NUM_IOVB);
 745                        error = 16;
 746                        ns_init_card_error(card, error);
 747                        return error;
 748                }
 749                NS_PRV_BUFTYPE(iovb) = BUF_NONE;
 750                skb_queue_tail(&card->iovpool.queue, iovb);
 751                card->iovpool.count++;
 752        }
 753
 754        /* Configure NICStAR */
 755        if (card->rct_size == 4096)
 756                ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES;
 757        else                    /* (card->rct_size == 16384) */
 758                ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES;
 759
 760        card->efbie = 1;
 761
 762        card->intcnt = 0;
 763        if (request_irq
 764            (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) {
 765                printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
 766                error = 9;
 767                ns_init_card_error(card, error);
 768                return error;
 769        }
 770
 771        /* Register device */
 772        card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops,
 773                                        -1, NULL);
 774        if (card->atmdev == NULL) {
 775                printk("nicstar%d: can't register device.\n", i);
 776                error = 17;
 777                ns_init_card_error(card, error);
 778                return error;
 779        }
 780
 781        if (mac[i] == NULL || !mac_pton(mac[i], card->atmdev->esi)) {
 782                nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
 783                                   card->atmdev->esi, 6);
 784                if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) ==
 785                    0) {
 786                        nicstar_read_eprom(card->membase,
 787                                           NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT,
 788                                           card->atmdev->esi, 6);
 789                }
 790        }
 791
 792        printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi);
 793
 794        card->atmdev->dev_data = card;
 795        card->atmdev->ci_range.vpi_bits = card->vpibits;
 796        card->atmdev->ci_range.vci_bits = card->vcibits;
 797        card->atmdev->link_rate = card->max_pcr;
 798        card->atmdev->phy = NULL;
 799
 800#ifdef CONFIG_ATM_NICSTAR_USE_SUNI
 801        if (card->max_pcr == ATM_OC3_PCR)
 802                suni_init(card->atmdev);
 803#endif /* CONFIG_ATM_NICSTAR_USE_SUNI */
 804
 805#ifdef CONFIG_ATM_NICSTAR_USE_IDT77105
 806        if (card->max_pcr == ATM_25_PCR)
 807                idt77105_init(card->atmdev);
 808#endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */
 809
 810        if (card->atmdev->phy && card->atmdev->phy->start)
 811                card->atmdev->phy->start(card->atmdev);
 812
 813        writel(NS_CFG_RXPATH | NS_CFG_SMBUFSIZE | NS_CFG_LGBUFSIZE | NS_CFG_EFBIE | NS_CFG_RSQSIZE | NS_CFG_VPIBITS | ns_cfg_rctsize | NS_CFG_RXINT_NODELAY | NS_CFG_RAWIE |    /* Only enabled if RCQ_SUPPORT */
 814               NS_CFG_RSQAFIE | NS_CFG_TXEN | NS_CFG_TXIE | NS_CFG_TSQFIE_OPT | /* Only enabled if ENABLE_TSQFIE */
 815               NS_CFG_PHYIE, card->membase + CFG);
 816
 817        num_cards++;
 818
 819        return error;
 820}
 821
 822static void ns_init_card_error(ns_dev *card, int error)
 823{
 824        if (error >= 17) {
 825                writel(0x00000000, card->membase + CFG);
 826        }
 827        if (error >= 16) {
 828                struct sk_buff *iovb;
 829                while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL)
 830                        dev_kfree_skb_any(iovb);
 831        }
 832        if (error >= 15) {
 833                struct sk_buff *sb;
 834                while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
 835                        dev_kfree_skb_any(sb);
 836                free_scq(card, card->scq0, NULL);
 837        }
 838        if (error >= 14) {
 839                struct sk_buff *lb;
 840                while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
 841                        dev_kfree_skb_any(lb);
 842        }
 843        if (error >= 13) {
 844                struct sk_buff *hb;
 845                while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL)
 846                        dev_kfree_skb_any(hb);
 847        }
        if (error >= 12) {
                pci_free_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
                                    card->rsq.org, card->rsq.dma);
        }
        if (error >= 11) {
                pci_free_consistent(card->pcidev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
                                    card->tsq.org, card->tsq.dma);
        }
 854        if (error >= 10) {
 855                free_irq(card->pcidev->irq, card);
 856        }
 857        if (error >= 4) {
 858                iounmap(card->membase);
 859        }
 860        if (error >= 3) {
 861                pci_disable_device(card->pcidev);
 862                kfree(card);
 863        }
 864}
 865
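/*
 * Allocate a segmentation channel queue.  Twice the requested size is
 * allocated so the ring can be aligned to its own size with PTR_ALIGN();
 * scq->skb is a parallel array shadowing the hardware entries so that
 * completed skbs can be released later (see drain_scq()).
 */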
 866static scq_info *get_scq(ns_dev *card, int size, u32 scd)
 867{
 868        scq_info *scq;
 869        int i;
 870
 871        if (size != VBR_SCQSIZE && size != CBR_SCQSIZE)
 872                return NULL;
 873
 874        scq = kmalloc(sizeof(scq_info), GFP_KERNEL);
 875        if (!scq)
 876                return NULL;
 877        scq->org = pci_alloc_consistent(card->pcidev, 2 * size, &scq->dma);
 878        if (!scq->org) {
 879                kfree(scq);
 880                return NULL;
 881        }
 882        scq->skb = kmalloc(sizeof(struct sk_buff *) *
 883                           (size / NS_SCQE_SIZE), GFP_KERNEL);
        if (!scq->skb) {
                pci_free_consistent(card->pcidev, 2 * size,
                                    scq->org, scq->dma);
                kfree(scq);
                return NULL;
        }
 889        scq->num_entries = size / NS_SCQE_SIZE;
 890        scq->base = PTR_ALIGN(scq->org, size);
 891        scq->next = scq->base;
 892        scq->last = scq->base + (scq->num_entries - 1);
 893        scq->tail = scq->last;
 894        scq->scd = scd;
 896        scq->tbd_count = 0;
 897        init_waitqueue_head(&scq->scqfull_waitq);
 898        scq->full = 0;
 899        spin_lock_init(&scq->lock);
 900
 901        for (i = 0; i < scq->num_entries; i++)
 902                scq->skb[i] = NULL;
 903
 904        return scq;
 905}
 906
 907/* For variable rate SCQ vcc must be NULL */
 908static void free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc)
 909{
 910        int i;
 911
        if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) {
                for (i = 0; i < scq->num_entries; i++) {
                        if (scq->skb[i] != NULL) {
                                vcc = ATM_SKB(scq->skb[i])->vcc;
                                if (vcc->pop != NULL)
                                        vcc->pop(vcc, scq->skb[i]);
                                else
                                        dev_kfree_skb_any(scq->skb[i]);
                        }
                }
        } else {                /* vcc must be != NULL */
 922
 923                if (vcc == NULL) {
 924                        printk
 925                            ("nicstar: free_scq() called with vcc == NULL for fixed rate scq.");
 926                        for (i = 0; i < scq->num_entries; i++)
 927                                dev_kfree_skb_any(scq->skb[i]);
 928                } else
 929                        for (i = 0; i < scq->num_entries; i++) {
 930                                if (scq->skb[i] != NULL) {
 931                                        if (vcc->pop != NULL)
 932                                                vcc->pop(vcc, scq->skb[i]);
 933                                        else
 934                                                dev_kfree_skb_any(scq->skb[i]);
 935                                }
 936                        }
 937        }
 938        kfree(scq->skb);
 939        pci_free_consistent(card->pcidev,
 940                            2 * (scq->num_entries == VBR_SCQ_NUM_ENTRIES ?
 941                                 VBR_SCQSIZE : CBR_SCQSIZE),
 942                            scq->org, scq->dma);
 943        kfree(scq);
 944}
 945
/* Hand a small or large receive buffer to the card.  The skb is DMA-mapped
   here and registered in card->idr; the resulting id is the handle the
   adapter gets, so the sk_buff can be looked up again when the card hands
   the buffer back (see the idr_find() callers elsewhere in this file). */
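/* Buffers are handed to the free buffer queues in pairs: the first buffer of
   a pair is parked in card->sm_addr/sm_handle (or lg_addr/lg_handle) and both
   are written to the card only when a second buffer of the same type shows
   up. */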
 948static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
 949{
 950        struct sk_buff *handle1, *handle2;
 951        int id1, id2;
 952        u32 addr1, addr2;
 953        u32 stat;
 954        unsigned long flags;
 955
 956        /* *BARF* */
 957        handle2 = NULL;
 958        addr2 = 0;
 959        handle1 = skb;
 960        addr1 = pci_map_single(card->pcidev,
 961                               skb->data,
 962                               (NS_PRV_BUFTYPE(skb) == BUF_SM
 963                                ? NS_SMSKBSIZE : NS_LGSKBSIZE),
 964                               PCI_DMA_TODEVICE);
 965        NS_PRV_DMA(skb) = addr1; /* save so we can unmap later */
 966
 967#ifdef GENERAL_DEBUG
 968        if (!addr1)
 969                printk("nicstar%d: push_rxbufs called with addr1 = 0.\n",
 970                       card->index);
 971#endif /* GENERAL_DEBUG */
 972
 973        stat = readl(card->membase + STAT);
 974        card->sbfqc = ns_stat_sfbqc_get(stat);
 975        card->lbfqc = ns_stat_lfbqc_get(stat);
 976        if (NS_PRV_BUFTYPE(skb) == BUF_SM) {
 977                if (!addr2) {
 978                        if (card->sm_addr) {
 979                                addr2 = card->sm_addr;
 980                                handle2 = card->sm_handle;
 981                                card->sm_addr = 0x00000000;
 982                                card->sm_handle = 0x00000000;
 983                        } else {        /* (!sm_addr) */
 984
 985                                card->sm_addr = addr1;
 986                                card->sm_handle = handle1;
 987                        }
 988                }
 989        } else {                /* buf_type == BUF_LG */
 990
 991                if (!addr2) {
 992                        if (card->lg_addr) {
 993                                addr2 = card->lg_addr;
 994                                handle2 = card->lg_handle;
 995                                card->lg_addr = 0x00000000;
 996                                card->lg_handle = 0x00000000;
 997                        } else {        /* (!lg_addr) */
 998
 999                                card->lg_addr = addr1;
1000                                card->lg_handle = handle1;
1001                        }
1002                }
1003        }
1004
1005        if (addr2) {
1006                if (NS_PRV_BUFTYPE(skb) == BUF_SM) {
1007                        if (card->sbfqc >= card->sbnr.max) {
1008                                skb_unlink(handle1, &card->sbpool.queue);
1009                                dev_kfree_skb_any(handle1);
1010                                skb_unlink(handle2, &card->sbpool.queue);
1011                                dev_kfree_skb_any(handle2);
1012                                return;
1013                        } else
1014                                card->sbfqc += 2;
1015                } else {        /* (buf_type == BUF_LG) */
1016
1017                        if (card->lbfqc >= card->lbnr.max) {
1018                                skb_unlink(handle1, &card->lbpool.queue);
1019                                dev_kfree_skb_any(handle1);
1020                                skb_unlink(handle2, &card->lbpool.queue);
1021                                dev_kfree_skb_any(handle2);
1022                                return;
1023                        } else
1024                                card->lbfqc += 2;
1025                }
1026
1027                id1 = idr_alloc(&card->idr, handle1, 0, 0, GFP_ATOMIC);
1028                if (id1 < 0)
1029                        goto out;
1030
1031                id2 = idr_alloc(&card->idr, handle2, 0, 0, GFP_ATOMIC);
1032                if (id2 < 0)
1033                        goto out;
1034
1035                spin_lock_irqsave(&card->res_lock, flags);
1036                while (CMD_BUSY(card)) ;
1037                writel(addr2, card->membase + DR3);
1038                writel(id2, card->membase + DR2);
1039                writel(addr1, card->membase + DR1);
1040                writel(id1, card->membase + DR0);
1041                writel(NS_CMD_WRITE_FREEBUFQ | NS_PRV_BUFTYPE(skb),
1042                       card->membase + CMD);
1043                spin_unlock_irqrestore(&card->res_lock, flags);
1044
1045                XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n",
1046                        card->index,
1047                        (NS_PRV_BUFTYPE(skb) == BUF_SM ? "small" : "large"),
1048                        addr1, addr2);
1049        }
1050
1051        if (!card->efbie && card->sbfqc >= card->sbnr.min &&
1052            card->lbfqc >= card->lbnr.min) {
1053                card->efbie = 1;
1054                writel((readl(card->membase + CFG) | NS_CFG_EFBIE),
1055                       card->membase + CFG);
1056        }
1057
1058out:
1059        return;
1060}
1061
1062static irqreturn_t ns_irq_handler(int irq, void *dev_id)
1063{
1064        u32 stat_r;
1065        ns_dev *card;
1066        struct atm_dev *dev;
1067        unsigned long flags;
1068
1069        card = (ns_dev *) dev_id;
1070        dev = card->atmdev;
1071        card->intcnt++;
1072
1073        PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index);
1074
1075        spin_lock_irqsave(&card->int_lock, flags);
1076
1077        stat_r = readl(card->membase + STAT);
1078
1079        /* Transmit Status Indicator has been written to T. S. Queue */
1080        if (stat_r & NS_STAT_TSIF) {
1081                TXPRINTK("nicstar%d: TSI interrupt\n", card->index);
1082                process_tsq(card);
1083                writel(NS_STAT_TSIF, card->membase + STAT);
1084        }
1085
1086        /* Incomplete CS-PDU has been transmitted */
1087        if (stat_r & NS_STAT_TXICP) {
1088                writel(NS_STAT_TXICP, card->membase + STAT);
1089                TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n",
1090                         card->index);
1091        }
1092
1093        /* Transmit Status Queue 7/8 full */
1094        if (stat_r & NS_STAT_TSQF) {
1095                writel(NS_STAT_TSQF, card->membase + STAT);
1096                PRINTK("nicstar%d: TSQ full.\n", card->index);
1097                process_tsq(card);
1098        }
1099
1100        /* Timer overflow */
1101        if (stat_r & NS_STAT_TMROF) {
1102                writel(NS_STAT_TMROF, card->membase + STAT);
1103                PRINTK("nicstar%d: Timer overflow.\n", card->index);
1104        }
1105
1106        /* PHY device interrupt signal active */
1107        if (stat_r & NS_STAT_PHYI) {
1108                writel(NS_STAT_PHYI, card->membase + STAT);
1109                PRINTK("nicstar%d: PHY interrupt.\n", card->index);
1110                if (dev->phy && dev->phy->interrupt) {
1111                        dev->phy->interrupt(dev);
1112                }
1113        }
1114
1115        /* Small Buffer Queue is full */
1116        if (stat_r & NS_STAT_SFBQF) {
1117                writel(NS_STAT_SFBQF, card->membase + STAT);
1118                printk("nicstar%d: Small free buffer queue is full.\n",
1119                       card->index);
1120        }
1121
1122        /* Large Buffer Queue is full */
1123        if (stat_r & NS_STAT_LFBQF) {
1124                writel(NS_STAT_LFBQF, card->membase + STAT);
1125                printk("nicstar%d: Large free buffer queue is full.\n",
1126                       card->index);
1127        }
1128
1129        /* Receive Status Queue is full */
1130        if (stat_r & NS_STAT_RSQF) {
1131                writel(NS_STAT_RSQF, card->membase + STAT);
1132                printk("nicstar%d: RSQ full.\n", card->index);
1133                process_rsq(card);
1134        }
1135
1136        /* Complete CS-PDU received */
1137        if (stat_r & NS_STAT_EOPDU) {
1138                RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index);
1139                process_rsq(card);
1140                writel(NS_STAT_EOPDU, card->membase + STAT);
1141        }
1142
1143        /* Raw cell received */
1144        if (stat_r & NS_STAT_RAWCF) {
1145                writel(NS_STAT_RAWCF, card->membase + STAT);
1146#ifndef RCQ_SUPPORT
1147                printk("nicstar%d: Raw cell received and no support yet...\n",
1148                       card->index);
1149#endif /* RCQ_SUPPORT */
1150                /* NOTE: the following procedure may keep a raw cell pending until the
1151                   next interrupt. As this preliminary support is only meant to
1152                   avoid buffer leakage, this is not an issue. */
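                /* card->rawch is the bus address of the next raw cell to
                   examine; walk the queue until it catches up with the card's
                   RAWCT pointer, hopping to the next buffer whenever an
                   end-of-buffer entry is seen. */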
1153                while (readl(card->membase + RAWCT) != card->rawch) {
1154
1155                        if (ns_rcqe_islast(card->rawcell)) {
1156                                struct sk_buff *oldbuf;
1157
1158                                oldbuf = card->rcbuf;
1159                                card->rcbuf = idr_find(&card->idr,
1160                                                       ns_rcqe_nextbufhandle(card->rawcell));
1161                                card->rawch = NS_PRV_DMA(card->rcbuf);
1162                                card->rawcell = (struct ns_rcqe *)
1163                                                card->rcbuf->data;
1164                                recycle_rx_buf(card, oldbuf);
1165                        } else {
1166                                card->rawch += NS_RCQE_SIZE;
1167                                card->rawcell++;
1168                        }
1169                }
1170        }
1171
1172        /* Small buffer queue is empty */
1173        if (stat_r & NS_STAT_SFBQE) {
1174                int i;
1175                struct sk_buff *sb;
1176
1177                writel(NS_STAT_SFBQE, card->membase + STAT);
1178                printk("nicstar%d: Small free buffer queue empty.\n",
1179                       card->index);
1180                for (i = 0; i < card->sbnr.min; i++) {
1181                        sb = dev_alloc_skb(NS_SMSKBSIZE);
1182                        if (sb == NULL) {
1183                                writel(readl(card->membase + CFG) &
1184                                       ~NS_CFG_EFBIE, card->membase + CFG);
1185                                card->efbie = 0;
1186                                break;
1187                        }
1188                        NS_PRV_BUFTYPE(sb) = BUF_SM;
1189                        skb_queue_tail(&card->sbpool.queue, sb);
1190                        skb_reserve(sb, NS_AAL0_HEADER);
1191                        push_rxbufs(card, sb);
1192                }
1193                card->sbfqc = i;
1194                process_rsq(card);
1195        }
1196
1197        /* Large buffer queue empty */
1198        if (stat_r & NS_STAT_LFBQE) {
1199                int i;
1200                struct sk_buff *lb;
1201
1202                writel(NS_STAT_LFBQE, card->membase + STAT);
1203                printk("nicstar%d: Large free buffer queue empty.\n",
1204                       card->index);
1205                for (i = 0; i < card->lbnr.min; i++) {
1206                        lb = dev_alloc_skb(NS_LGSKBSIZE);
1207                        if (lb == NULL) {
1208                                writel(readl(card->membase + CFG) &
1209                                       ~NS_CFG_EFBIE, card->membase + CFG);
1210                                card->efbie = 0;
1211                                break;
1212                        }
1213                        NS_PRV_BUFTYPE(lb) = BUF_LG;
1214                        skb_queue_tail(&card->lbpool.queue, lb);
1215                        skb_reserve(lb, NS_SMBUFSIZE);
1216                        push_rxbufs(card, lb);
1217                }
1218                card->lbfqc = i;
1219                process_rsq(card);
1220        }
1221
1222        /* Receive Status Queue is 7/8 full */
1223        if (stat_r & NS_STAT_RSQAF) {
1224                writel(NS_STAT_RSQAF, card->membase + STAT);
1225                RXPRINTK("nicstar%d: RSQ almost full.\n", card->index);
1226                process_rsq(card);
1227        }
1228
1229        spin_unlock_irqrestore(&card->int_lock, flags);
1230        PRINTK("nicstar%d: end of interrupt service\n", card->index);
1231        return IRQ_HANDLED;
1232}
1233
1234static int ns_open(struct atm_vcc *vcc)
1235{
1236        ns_dev *card;
1237        vc_map *vc;
1238        unsigned long tmpl, modl;
1239        int tcr, tcra;          /* target cell rate, and absolute value */
1240        int n = 0;              /* Number of entries in the TST. Initialized to remove
1241                                   the compiler warning. */
1242        u32 u32d[4];
1243        int frscdi = 0;         /* Index of the SCD. Initialized to remove the compiler
1244                                   warning. How I wish compilers were clever enough to
1245                                   tell which variables can truly be used
1246                                   uninitialized... */
1247        int inuse;              /* tx or rx vc already in use by another vcc */
1248        short vpi = vcc->vpi;
1249        int vci = vcc->vci;
1250
1251        card = (ns_dev *) vcc->dev->dev_data;
1252        PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int)vpi,
1253               vci);
1254        if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
1255                PRINTK("nicstar%d: unsupported AAL.\n", card->index);
1256                return -EINVAL;
1257        }
1258
1259        vc = &(card->vcmap[vpi << card->vcibits | vci]);
1260        vcc->dev_data = vc;
1261
1262        inuse = 0;
1263        if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx)
1264                inuse = 1;
1265        if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx)
1266                inuse += 2;
1267        if (inuse) {
1268                printk("nicstar%d: %s vci already in use.\n", card->index,
1269                       inuse == 1 ? "tx" : inuse == 2 ? "rx" : "tx and rx");
1270                return -EINVAL;
1271        }
1272
1273        set_bit(ATM_VF_ADDR, &vcc->flags);
1274
1275        /* NOTE: You are not allowed to modify an open connection's QOS. To change
1276           that, remove the ATM_VF_PARTIAL flag checking. There may be other changes
1277           needed to do that. */
1278        if (!test_bit(ATM_VF_PARTIAL, &vcc->flags)) {
1279                scq_info *scq;
1280
1281                set_bit(ATM_VF_PARTIAL, &vcc->flags);
1282                if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1283                        /* Check requested cell rate and availability of SCD */
1284                        if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0
1285                            && vcc->qos.txtp.min_pcr == 0) {
1286                                PRINTK
1287                                    ("nicstar%d: trying to open a CBR vc with cell rate = 0 \n",
1288                                     card->index);
1289                                clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1290                                clear_bit(ATM_VF_ADDR, &vcc->flags);
1291                                return -EINVAL;
1292                        }
1293
1294                        tcr = atm_pcr_goal(&(vcc->qos.txtp));
1295                        tcra = tcr >= 0 ? tcr : -tcr;
1296
                        PRINTK("nicstar%d: target cell rate = %d.\n",
                               card->index, tcra);
1299
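                        /*
                         * Each TST entry provides max_pcr / NS_TST_NUM_ENTRIES
                         * cells per second, so the requested rate needs about
                         * n = ceil(tcra * NS_TST_NUM_ENTRIES / max_pcr)
                         * entries, computed below.
                         */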
1300                        tmpl =
1301                            (unsigned long)tcra *(unsigned long)
1302                            NS_TST_NUM_ENTRIES;
1303                        modl = tmpl % card->max_pcr;
1304
1305                        n = (int)(tmpl / card->max_pcr);
1306                        if (tcr > 0) {
1307                                if (modl > 0)
1308                                        n++;
1309                        } else if (tcr == 0) {
1310                                if ((n =
1311                                     (card->tst_free_entries -
1312                                      NS_TST_RESERVED)) <= 0) {
1313                                        PRINTK
1314                                            ("nicstar%d: no CBR bandwidth free.\n",
1315                                             card->index);
1316                                        clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1317                                        clear_bit(ATM_VF_ADDR, &vcc->flags);
1318                                        return -EINVAL;
1319                                }
1320                        }
1321
1322                        if (n == 0) {
1323                                printk
1324                                    ("nicstar%d: selected bandwidth < granularity.\n",
1325                                     card->index);
1326                                clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1327                                clear_bit(ATM_VF_ADDR, &vcc->flags);
1328                                return -EINVAL;
1329                        }
1330
1331                        if (n > (card->tst_free_entries - NS_TST_RESERVED)) {
1332                                PRINTK
1333                                    ("nicstar%d: not enough free CBR bandwidth.\n",
1334                                     card->index);
1335                                clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1336                                clear_bit(ATM_VF_ADDR, &vcc->flags);
1337                                return -EINVAL;
1338                        } else
1339                                card->tst_free_entries -= n;
1340
1341                        XPRINTK("nicstar%d: writing %d tst entries.\n",
1342                                card->index, n);
1343                        for (frscdi = 0; frscdi < NS_FRSCD_NUM; frscdi++) {
1344                                if (card->scd2vc[frscdi] == NULL) {
1345                                        card->scd2vc[frscdi] = vc;
1346                                        break;
1347                                }
1348                        }
1349                        if (frscdi == NS_FRSCD_NUM) {
1350                                PRINTK
1351                                    ("nicstar%d: no SCD available for CBR channel.\n",
1352                                     card->index);
1353                                card->tst_free_entries += n;
1354                                clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1355                                clear_bit(ATM_VF_ADDR, &vcc->flags);
1356                                return -EBUSY;
1357                        }
1358
1359                        vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE;
1360
1361                        scq = get_scq(card, CBR_SCQSIZE, vc->cbr_scd);
1362                        if (scq == NULL) {
1363                                PRINTK("nicstar%d: can't get fixed rate SCQ.\n",
1364                                       card->index);
1365                                card->scd2vc[frscdi] = NULL;
1366                                card->tst_free_entries += n;
1367                                clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1368                                clear_bit(ATM_VF_ADDR, &vcc->flags);
1369                                return -ENOMEM;
1370                        }
1371                        vc->scq = scq;
1372                        u32d[0] = scq_virt_to_bus(scq, scq->base);
1373                        u32d[1] = (u32) 0x00000000;
1374                        u32d[2] = (u32) 0xffffffff;
1375                        u32d[3] = (u32) 0x00000000;
1376                        ns_write_sram(card, vc->cbr_scd, u32d, 4);
1377
1378                        fill_tst(card, n, vc);
1379                } else if (vcc->qos.txtp.traffic_class == ATM_UBR) {
1380                        vc->cbr_scd = 0x00000000;
1381                        vc->scq = card->scq0;
1382                }
1383
1384                if (vcc->qos.txtp.traffic_class != ATM_NONE) {
1385                        vc->tx = 1;
1386                        vc->tx_vcc = vcc;
1387                        vc->tbd_count = 0;
1388                }
1389                if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
1390                        u32 status;
1391
1392                        vc->rx = 1;
1393                        vc->rx_vcc = vcc;
1394                        vc->rx_iov = NULL;
1395
1396                        /* Open the connection in hardware */
1397                        if (vcc->qos.aal == ATM_AAL5)
1398                                status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN;
1399                        else    /* vcc->qos.aal == ATM_AAL0 */
1400                                status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN;
1401#ifdef RCQ_SUPPORT
1402                        status |= NS_RCTE_RAWCELLINTEN;
1403#endif /* RCQ_SUPPORT */
1404                        ns_write_sram(card,
1405                                      NS_RCT +
1406                                      (vpi << card->vcibits | vci) *
1407                                      NS_RCT_ENTRY_SIZE, &status, 1);
1408                }
1409
1410        }
1411
1412        set_bit(ATM_VF_READY, &vcc->flags);
1413        return 0;
1414}
1415
1416static void ns_close(struct atm_vcc *vcc)
1417{
1418        vc_map *vc;
1419        ns_dev *card;
1420        u32 data;
1421        int i;
1422
1423        vc = vcc->dev_data;
1424        card = vcc->dev->dev_data;
1425        PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index,
1426               (int)vcc->vpi, vcc->vci);
1427
1428        clear_bit(ATM_VF_READY, &vcc->flags);
1429
1430        if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
1431                u32 addr;
1432                unsigned long flags;
1433
1434                addr =
1435                    NS_RCT +
1436                    (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE;
1437                spin_lock_irqsave(&card->res_lock, flags);
1438                while (CMD_BUSY(card)) ;
1439                writel(NS_CMD_CLOSE_CONNECTION | addr << 2,
1440                       card->membase + CMD);
1441                spin_unlock_irqrestore(&card->res_lock, flags);
1442
1443                vc->rx = 0;
1444                if (vc->rx_iov != NULL) {
1445                        struct sk_buff *iovb;
1446                        u32 stat;
1447
1448                        stat = readl(card->membase + STAT);
1449                        card->sbfqc = ns_stat_sfbqc_get(stat);
1450                        card->lbfqc = ns_stat_lfbqc_get(stat);
1451
1452                        PRINTK
1453                            ("nicstar%d: closing a VC with pending rx buffers.\n",
1454                             card->index);
1455                        iovb = vc->rx_iov;
1456                        recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
1457                                              NS_PRV_IOVCNT(iovb));
1458                        NS_PRV_IOVCNT(iovb) = 0;
1459                        spin_lock_irqsave(&card->int_lock, flags);
1460                        recycle_iov_buf(card, iovb);
1461                        spin_unlock_irqrestore(&card->int_lock, flags);
1462                        vc->rx_iov = NULL;
1463                }
1464        }
1465
1466        if (vcc->qos.txtp.traffic_class != ATM_NONE) {
1467                vc->tx = 0;
1468        }
1469
1470        if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1471                unsigned long flags;
1472                ns_scqe *scqep;
1473                scq_info *scq;
1474
1475                scq = vc->scq;
1476
1477                for (;;) {
1478                        spin_lock_irqsave(&scq->lock, flags);
1479                        scqep = scq->next;
1480                        if (scqep == scq->base)
1481                                scqep = scq->last;
1482                        else
1483                                scqep--;
1484                        if (scqep == scq->tail) {
1485                                spin_unlock_irqrestore(&scq->lock, flags);
1486                                break;
1487                        }
1488                        /* If the last entry is not a TSR, place one in the SCQ so that
1489                           the queue can be completely drained before closing. */
1490                        if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next) {
1491                                ns_scqe tsr;
1492                                u32 scdi, scqi;
1493                                u32 data;
1494                                int index;
1495
1496                                tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
1497                                scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
1498                                scqi = scq->next - scq->base;
1499                                tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
1500                                tsr.word_3 = 0x00000000;
1501                                tsr.word_4 = 0x00000000;
1502                                *scq->next = tsr;
1503                                index = (int)scqi;
1504                                scq->skb[index] = NULL;
1505                                if (scq->next == scq->last)
1506                                        scq->next = scq->base;
1507                                else
1508                                        scq->next++;
1509                                data = scq_virt_to_bus(scq, scq->next);
1510                                ns_write_sram(card, scq->scd, &data, 1);
1511                        }
1512                        spin_unlock_irqrestore(&scq->lock, flags);
1513                        schedule();
1514                }
1515
1516                /* Free all TST entries */
1517                data = NS_TST_OPCODE_VARIABLE;
1518                for (i = 0; i < NS_TST_NUM_ENTRIES; i++) {
1519                        if (card->tste2vc[i] == vc) {
1520                                ns_write_sram(card, card->tst_addr + i, &data,
1521                                              1);
1522                                card->tste2vc[i] = NULL;
1523                                card->tst_free_entries++;
1524                        }
1525                }
1526
1527                card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL;
1528                free_scq(card, vc->scq, vcc);
1529        }
1530
1531        /* remove all references to vcc before deleting it */
1532        if (vcc->qos.txtp.traffic_class != ATM_NONE) {
1533                unsigned long flags;
1534                scq_info *scq = card->scq0;
1535
1536                spin_lock_irqsave(&scq->lock, flags);
1537
1538                for (i = 0; i < scq->num_entries; i++) {
1539                        if (scq->skb[i] && ATM_SKB(scq->skb[i])->vcc == vcc) {
1540                                ATM_SKB(scq->skb[i])->vcc = NULL;
1541                                atm_return(vcc, scq->skb[i]->truesize);
1542                                PRINTK
1543                                    ("nicstar: deleted pending vcc mapping\n");
1544                        }
1545                }
1546
1547                spin_unlock_irqrestore(&scq->lock, flags);
1548        }
1549
1550        vcc->dev_data = NULL;
1551        clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1552        clear_bit(ATM_VF_ADDR, &vcc->flags);
1553
1554#ifdef RX_DEBUG
1555        {
1556                u32 stat, cfg;
1557                stat = readl(card->membase + STAT);
1558                cfg = readl(card->membase + CFG);
1559                printk("STAT = 0x%08X  CFG = 0x%08X  \n", stat, cfg);
1560                printk
1561                    ("TSQ: base = 0x%p  next = 0x%p  last = 0x%p  TSQT = 0x%08X \n",
1562                     card->tsq.base, card->tsq.next,
1563                     card->tsq.last, readl(card->membase + TSQT));
1564                printk
1565                    ("RSQ: base = 0x%p  next = 0x%p  last = 0x%p  RSQT = 0x%08X \n",
1566                     card->rsq.base, card->rsq.next,
1567                     card->rsq.last, readl(card->membase + RSQT));
1568                printk("Empty free buffer queue interrupt %s \n",
1569                       card->efbie ? "enabled" : "disabled");
1570                printk("SBCNT = %d  count = %d   LBCNT = %d count = %d \n",
1571                       ns_stat_sfbqc_get(stat), card->sbpool.count,
1572                       ns_stat_lfbqc_get(stat), card->lbpool.count);
1573                printk("hbpool.count = %d  iovpool.count = %d \n",
1574                       card->hbpool.count, card->iovpool.count);
1575        }
1576#endif /* RX_DEBUG */
1577}
1578
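/*
 * fill_tst() - program CBR slots for a VC in the Transmit Schedule Table.
 *
 * Writes n fixed-rate entries pointing at vc->cbr_scd, spreading them as
 * evenly as possible over the NS_TST_NUM_ENTRIES slots with a Bresenham-style
 * accumulator (cl): each slot visited adds n to cl, and an entry is written
 * (and NS_TST_NUM_ENTRIES subtracted) whenever cl reaches NS_TST_NUM_ENTRIES
 * and the slot is free.  Finally the end-of-table jump entry is rewritten and
 * card->tst_addr updated.
 */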
1579static void fill_tst(ns_dev * card, int n, vc_map * vc)
1580{
1581        u32 new_tst;
1582        unsigned long cl;
1583        int e, r;
1584        u32 data;
1585
1586        /* It would be very complicated to keep the two TSTs synchronized while
1587           ensuring that writes are only made to the inactive TST. So, for now, only
1588           one TST is used. If problems occur, this will be changed again. */
1589
1590        new_tst = card->tst_addr;
1591
1592        /* Fill procedure */
1593
1594        for (e = 0; e < NS_TST_NUM_ENTRIES; e++) {
1595                if (card->tste2vc[e] == NULL)
1596                        break;
1597        }
1598        if (e == NS_TST_NUM_ENTRIES) {
1599                printk("nicstar%d: No free TST entries found. \n", card->index);
1600                return;
1601        }
1602
1603        r = n;
1604        cl = NS_TST_NUM_ENTRIES;
1605        data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd);
1606
1607        while (r > 0) {
1608                if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL) {
1609                        card->tste2vc[e] = vc;
1610                        ns_write_sram(card, new_tst + e, &data, 1);
1611                        cl -= NS_TST_NUM_ENTRIES;
1612                        r--;
1613                }
1614
1615                if (++e == NS_TST_NUM_ENTRIES) {
1616                        e = 0;
1617                }
1618                cl += n;
1619        }
1620
1621        /* End of fill procedure */
1622
1623        data = ns_tste_make(NS_TST_OPCODE_END, new_tst);
1624        ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1);
1625        ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1);
1626        card->tst_addr = new_tst;
1627}
1628
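/*
 * ns_send() - atmdev_ops send hook.
 *
 * Validates the VC (tx enabled, AAL0/AAL5 only, no scatter-gather), DMA-maps
 * the skb and builds a Transmit Buffer Descriptor for it.  CBR traffic goes
 * to the per-VC fixed rate SCQ, everything else to the shared scq0; the TBD
 * is then queued by push_scqe().
 */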
1629static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
1630{
1631        ns_dev *card;
1632        vc_map *vc;
1633        scq_info *scq;
1634        unsigned long buflen;
1635        ns_scqe scqe;
1636        u32 flags;              /* TBD flags, not CPU flags */
1637
1638        card = vcc->dev->dev_data;
1639        TXPRINTK("nicstar%d: ns_send() called.\n", card->index);
1640        if ((vc = (vc_map *) vcc->dev_data) == NULL) {
1641                printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
1642                       card->index);
1643                atomic_inc(&vcc->stats->tx_err);
1644                dev_kfree_skb_any(skb);
1645                return -EINVAL;
1646        }
1647
1648        if (!vc->tx) {
1649                printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
1650                       card->index);
1651                atomic_inc(&vcc->stats->tx_err);
1652                dev_kfree_skb_any(skb);
1653                return -EINVAL;
1654        }
1655
1656        if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
1657                printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
1658                       card->index);
1659                atomic_inc(&vcc->stats->tx_err);
1660                dev_kfree_skb_any(skb);
1661                return -EINVAL;
1662        }
1663
1664        if (skb_shinfo(skb)->nr_frags != 0) {
1665                printk("nicstar%d: No scatter-gather yet.\n", card->index);
1666                atomic_inc(&vcc->stats->tx_err);
1667                dev_kfree_skb_any(skb);
1668                return -EINVAL;
1669        }
1670
1671        ATM_SKB(skb)->vcc = vcc;
1672
1673        NS_PRV_DMA(skb) = pci_map_single(card->pcidev, skb->data,
1674                                         skb->len, PCI_DMA_TODEVICE);
1675
1676        if (vcc->qos.aal == ATM_AAL5) {
1677                buflen = (skb->len + 47 + 8) / 48 * 48; /* Pad len + 8-byte AAL5 trailer to a multiple of 48 */
1678                flags = NS_TBD_AAL5;
1679                scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb));
1680                scqe.word_3 = cpu_to_le32(skb->len);
1681                scqe.word_4 =
1682                    ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0,
1683                                    ATM_SKB(skb)->
1684                                    atm_options & ATM_ATMOPT_CLP ? 1 : 0);
1685                flags |= NS_TBD_EOPDU;
1686        } else {                /* (vcc->qos.aal == ATM_AAL0) */
1687
1688                buflen = ATM_CELL_PAYLOAD;      /* i.e., 48 bytes */
1689                flags = NS_TBD_AAL0;
1690                scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb) + NS_AAL0_HEADER);
1691                scqe.word_3 = cpu_to_le32(0x00000000);
1692                if (*skb->data & 0x02)  /* Payload type 1 - end of pdu */
1693                        flags |= NS_TBD_EOPDU;
1694                scqe.word_4 =
1695                    cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK);
1696                /* Force the VPI/VCI to be the same as in VCC struct */
1697                scqe.word_4 |=
1698                    cpu_to_le32((((u32) vcc->
1699                                  vpi) << NS_TBD_VPI_SHIFT | ((u32) vcc->
1700                                                              vci) <<
1701                                 NS_TBD_VCI_SHIFT) & NS_TBD_VC_MASK);
1702        }
1703
1704        if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1705                scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen);
1706                scq = ((vc_map *) vcc->dev_data)->scq;
1707        } else {
1708                scqe.word_1 =
1709                    ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen);
1710                scq = card->scq0;
1711        }
1712
1713        if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
1714                atomic_inc(&vcc->stats->tx_err);
1715                dev_kfree_skb_any(skb);
1716                return -EIO;
1717        }
1718        atomic_inc(&vcc->stats->tx);
1719
1720        return 0;
1721}
1722
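/*
 * push_scqe() - append one TBD to a Segmentation Channel Queue.
 *
 * Waits (sleeping, unless called from interrupt context) for a free SCQ
 * entry, copies the TBD in and remembers the skb so drain_scq() can free it
 * later.  After MAX_TBD_PER_VC / MAX_TBD_PER_SCQ descriptors a Transmit
 * Status Request is inserted so the adapter posts a TSI and the queue can be
 * reclaimed.  The new write pointer is then written to the SCD in SRAM.
 */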
1723static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
1724                     struct sk_buff *skb)
1725{
1726        unsigned long flags;
1727        ns_scqe tsr;
1728        u32 scdi, scqi;
1729        int scq_is_vbr;
1730        u32 data;
1731        int index;
1732
1733        spin_lock_irqsave(&scq->lock, flags);
1734        while (scq->tail == scq->next) {
1735                if (in_interrupt()) {
1736                        spin_unlock_irqrestore(&scq->lock, flags);
1737                        printk("nicstar%d: Error pushing TBD.\n", card->index);
1738                        return 1;
1739                }
1740
1741                scq->full = 1;
1742                spin_unlock_irqrestore(&scq->lock, flags);
1743                interruptible_sleep_on_timeout(&scq->scqfull_waitq,
1744                                               SCQFULL_TIMEOUT);
1745                spin_lock_irqsave(&scq->lock, flags);
1746
1747                if (scq->full) {
1748                        spin_unlock_irqrestore(&scq->lock, flags);
1749                        printk("nicstar%d: Timeout pushing TBD.\n",
1750                               card->index);
1751                        return 1;
1752                }
1753        }
1754        *scq->next = *tbd;
1755        index = (int)(scq->next - scq->base);
1756        scq->skb[index] = skb;
1757        XPRINTK("nicstar%d: sending skb at 0x%p (pos %d).\n",
1758                card->index, skb, index);
1759        XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n",
1760                card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2),
1761                le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4),
1762                scq->next);
1763        if (scq->next == scq->last)
1764                scq->next = scq->base;
1765        else
1766                scq->next++;
1767
1768        vc->tbd_count++;
1769        if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) {
1770                scq->tbd_count++;
1771                scq_is_vbr = 1;
1772        } else
1773                scq_is_vbr = 0;
1774
1775        if (vc->tbd_count >= MAX_TBD_PER_VC
1776            || scq->tbd_count >= MAX_TBD_PER_SCQ) {
1777                int has_run = 0;
1778
1779                while (scq->tail == scq->next) {
1780                        if (in_interrupt()) {
1781                                data = scq_virt_to_bus(scq, scq->next);
1782                                ns_write_sram(card, scq->scd, &data, 1);
1783                                spin_unlock_irqrestore(&scq->lock, flags);
1784                                printk("nicstar%d: Error pushing TSR.\n",
1785                                       card->index);
1786                                return 0;
1787                        }
1788
1789                        scq->full = 1;
1790                        if (has_run++)
1791                                break;
1792                        spin_unlock_irqrestore(&scq->lock, flags);
1793                        interruptible_sleep_on_timeout(&scq->scqfull_waitq,
1794                                                       SCQFULL_TIMEOUT);
1795                        spin_lock_irqsave(&scq->lock, flags);
1796                }
1797
1798                if (!scq->full) {
1799                        tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
1800                        if (scq_is_vbr)
1801                                scdi = NS_TSR_SCDISVBR;
1802                        else
1803                                scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
1804                        scqi = scq->next - scq->base;
1805                        tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
1806                        tsr.word_3 = 0x00000000;
1807                        tsr.word_4 = 0x00000000;
1808
1809                        *scq->next = tsr;
1810                        index = (int)scqi;
1811                        scq->skb[index] = NULL;
1812                        XPRINTK
1813                            ("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n",
1814                             card->index, le32_to_cpu(tsr.word_1),
1815                             le32_to_cpu(tsr.word_2), le32_to_cpu(tsr.word_3),
1816                             le32_to_cpu(tsr.word_4), scq->next);
1817                        if (scq->next == scq->last)
1818                                scq->next = scq->base;
1819                        else
1820                                scq->next++;
1821                        vc->tbd_count = 0;
1822                        scq->tbd_count = 0;
1823                } else
1824                        PRINTK("nicstar%d: Timeout pushing TSR.\n",
1825                               card->index);
1826        }
1827        data = scq_virt_to_bus(scq, scq->next);
1828        ns_write_sram(card, scq->scd, &data, 1);
1829
1830        spin_unlock_irqrestore(&scq->lock, flags);
1831
1832        return 0;
1833}
1834
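/*
 * process_tsq() - service the Transmit Status Queue.
 *
 * Because up to two TSQ entries may read back as empty (77201 errata), the
 * scan also looks one and two entries ahead.  For every TSI that is not a
 * timer rollover, the SCQ is found from the SCD index (scq0 for VBR/UBR) and
 * drained up to the reported position; finally TSQH is updated to the last
 * entry serviced.
 */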
1835static void process_tsq(ns_dev * card)
1836{
1837        u32 scdi;
1838        scq_info *scq;
1839        ns_tsi *previous = NULL, *one_ahead, *two_ahead;
1840        int serviced_entries;   /* flag indicating at least one entry was serviced */
1841
1842        serviced_entries = 0;
1843
1844        if (card->tsq.next == card->tsq.last)
1845                one_ahead = card->tsq.base;
1846        else
1847                one_ahead = card->tsq.next + 1;
1848
1849        if (one_ahead == card->tsq.last)
1850                two_ahead = card->tsq.base;
1851        else
1852                two_ahead = one_ahead + 1;
1853
1854        while (!ns_tsi_isempty(card->tsq.next) || !ns_tsi_isempty(one_ahead) ||
1855               !ns_tsi_isempty(two_ahead))
1856                /* At most two entries may be empty, as stated in the 77201 errata */
1857        {
1858                serviced_entries = 1;
1859
1860                /* Skip the one or two possible empty entries */
1861                while (ns_tsi_isempty(card->tsq.next)) {
1862                        if (card->tsq.next == card->tsq.last)
1863                                card->tsq.next = card->tsq.base;
1864                        else
1865                                card->tsq.next++;
1866                }
1867
1868                if (!ns_tsi_tmrof(card->tsq.next)) {
1869                        scdi = ns_tsi_getscdindex(card->tsq.next);
1870                        if (scdi == NS_TSI_SCDISVBR)
1871                                scq = card->scq0;
1872                        else {
1873                                if (card->scd2vc[scdi] == NULL) {
1874                                        printk
1875                                            ("nicstar%d: could not find VC from SCD index.\n",
1876                                             card->index);
1877                                        ns_tsi_init(card->tsq.next);
1878                                        return;
1879                                }
1880                                scq = card->scd2vc[scdi]->scq;
1881                        }
1882                        drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next));
1883                        scq->full = 0;
1884                        wake_up_interruptible(&(scq->scqfull_waitq));
1885                }
1886
1887                ns_tsi_init(card->tsq.next);
1888                previous = card->tsq.next;
1889                if (card->tsq.next == card->tsq.last)
1890                        card->tsq.next = card->tsq.base;
1891                else
1892                        card->tsq.next++;
1893
1894                if (card->tsq.next == card->tsq.last)
1895                        one_ahead = card->tsq.base;
1896                else
1897                        one_ahead = card->tsq.next + 1;
1898
1899                if (one_ahead == card->tsq.last)
1900                        two_ahead = card->tsq.base;
1901                else
1902                        two_ahead = one_ahead + 1;
1903        }
1904
1905        if (serviced_entries)
1906                writel(PTR_DIFF(previous, card->tsq.base),
1907                       card->membase + TSQH);
1908}
1909
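/*
 * drain_scq() - reclaim transmitted buffers from an SCQ.
 *
 * Walks the entries between scq->tail and pos, unmapping each skb and handing
 * it back through vcc->pop() (or freeing it), then advances the tail.
 */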
1910static void drain_scq(ns_dev * card, scq_info * scq, int pos)
1911{
1912        struct atm_vcc *vcc;
1913        struct sk_buff *skb;
1914        int i;
1915        unsigned long flags;
1916
1917        XPRINTK("nicstar%d: drain_scq() called, scq at 0x%p, pos %d.\n",
1918                card->index, scq, pos);
1919        if (pos >= scq->num_entries) {
1920                printk("nicstar%d: Bad index on drain_scq().\n", card->index);
1921                return;
1922        }
1923
1924        spin_lock_irqsave(&scq->lock, flags);
1925        i = (int)(scq->tail - scq->base);
1926        if (++i == scq->num_entries)
1927                i = 0;
1928        while (i != pos) {
1929                skb = scq->skb[i];
1930                XPRINTK("nicstar%d: freeing skb at 0x%p (index %d).\n",
1931                        card->index, skb, i);
1932                if (skb != NULL) {
1933                        pci_unmap_single(card->pcidev,
1934                                         NS_PRV_DMA(skb),
1935                                         skb->len,
1936                                         PCI_DMA_TODEVICE);
1937                        vcc = ATM_SKB(skb)->vcc;
1938                        if (vcc && vcc->pop != NULL) {
1939                                vcc->pop(vcc, skb);
1940                        } else {
1941                                dev_kfree_skb_irq(skb);
1942                        }
1943                        scq->skb[i] = NULL;
1944                }
1945                if (++i == scq->num_entries)
1946                        i = 0;
1947        }
1948        scq->tail = scq->base + pos;
1949        spin_unlock_irqrestore(&scq->lock, flags);
1950}
1951
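/*
 * process_rsq() - drain all valid entries from the Receive Status Queue,
 * passing each one to dequeue_rx(), and report the new head to the adapter
 * through the RSQH register.
 */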
1952static void process_rsq(ns_dev * card)
1953{
1954        ns_rsqe *previous;
1955
1956        if (!ns_rsqe_valid(card->rsq.next))
1957                return;
1958        do {
1959                dequeue_rx(card, card->rsq.next);
1960                ns_rsqe_init(card->rsq.next);
1961                previous = card->rsq.next;
1962                if (card->rsq.next == card->rsq.last)
1963                        card->rsq.next = card->rsq.base;
1964                else
1965                        card->rsq.next++;
1966        } while (ns_rsqe_valid(card->rsq.next));
1967        writel(PTR_DIFF(previous, card->rsq.base), card->membase + RSQH);
1968}
1969
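/*
 * dequeue_rx() - handle one Receive Status Queue entry.
 *
 * The receive buffer is looked up by the id stored in the RSQE, unmapped and
 * matched to its VC.  AAL0 cells are copied one at a time into small skbs,
 * the 4-byte cell header is rebuilt and each cell is pushed upstream.  For
 * AAL5, buffers are collected in a per-VC iovec list until end-of-PDU; the
 * reassembled SDU is then delivered either from the small buffer alone, from
 * a small plus a large buffer, or by copying everything into a huge buffer.
 * Consumed receive buffers are returned to the adapter with push_rxbufs().
 */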
1970static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
1971{
1972        u32 vpi, vci;
1973        vc_map *vc;
1974        struct sk_buff *iovb;
1975        struct iovec *iov;
1976        struct atm_vcc *vcc;
1977        struct sk_buff *skb;
1978        unsigned short aal5_len;
1979        int len;
1980        u32 stat;
1981        u32 id;
1982
1983        stat = readl(card->membase + STAT);
1984        card->sbfqc = ns_stat_sfbqc_get(stat);
1985        card->lbfqc = ns_stat_lfbqc_get(stat);
1986
1987        id = le32_to_cpu(rsqe->buffer_handle);
1988        skb = idr_find(&card->idr, id);
1989        if (!skb) {
1990                RXPRINTK(KERN_ERR
1991                         "nicstar%d: idr_find() failed!\n", card->index);
1992                return;
1993        }
1994        idr_remove(&card->idr, id);
1995        pci_dma_sync_single_for_cpu(card->pcidev,
1996                                    NS_PRV_DMA(skb),
1997                                    (NS_PRV_BUFTYPE(skb) == BUF_SM
1998                                     ? NS_SMSKBSIZE : NS_LGSKBSIZE),
1999                                    PCI_DMA_FROMDEVICE);
2000        pci_unmap_single(card->pcidev,
2001                         NS_PRV_DMA(skb),
2002                         (NS_PRV_BUFTYPE(skb) == BUF_SM
2003                          ? NS_SMSKBSIZE : NS_LGSKBSIZE),
2004                         PCI_DMA_FROMDEVICE);
2005        vpi = ns_rsqe_vpi(rsqe);
2006        vci = ns_rsqe_vci(rsqe);
2007        if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits) {
2008                printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n",
2009                       card->index, vpi, vci);
2010                recycle_rx_buf(card, skb);
2011                return;
2012        }
2013
2014        vc = &(card->vcmap[vpi << card->vcibits | vci]);
2015        if (!vc->rx) {
2016                RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n",
2017                         card->index, vpi, vci);
2018                recycle_rx_buf(card, skb);
2019                return;
2020        }
2021
2022        vcc = vc->rx_vcc;
2023
2024        if (vcc->qos.aal == ATM_AAL0) {
2025                struct sk_buff *sb;
2026                unsigned char *cell;
2027                int i;
2028
2029                cell = skb->data;
2030                for (i = ns_rsqe_cellcount(rsqe); i; i--) {
2031                        if ((sb = dev_alloc_skb(NS_SMSKBSIZE)) == NULL) {
2032                                printk
2033                                    ("nicstar%d: Can't allocate buffers for aal0.\n",
2034                                     card->index);
2035                                atomic_add(i, &vcc->stats->rx_drop);
2036                                break;
2037                        }
2038                        if (!atm_charge(vcc, sb->truesize)) {
2039                                RXPRINTK
2040                                    ("nicstar%d: atm_charge() dropped aal0 packets.\n",
2041                                     card->index);
2042                                atomic_add(i - 1, &vcc->stats->rx_drop);        /* already increased by 1 */
2043                                dev_kfree_skb_any(sb);
2044                                break;
2045                        }
2046                        /* Rebuild the header */
2047                        *((u32 *) sb->data) = le32_to_cpu(rsqe->word_1) << 4 |
2048                            (ns_rsqe_clp(rsqe) ? 0x00000001 : 0x00000000);
2049                        if (i == 1 && ns_rsqe_eopdu(rsqe))
2050                                *((u32 *) sb->data) |= 0x00000002;
2051                        skb_put(sb, NS_AAL0_HEADER);
2052                        memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD);
2053                        skb_put(sb, ATM_CELL_PAYLOAD);
2054                        ATM_SKB(sb)->vcc = vcc;
2055                        __net_timestamp(sb);
2056                        vcc->push(vcc, sb);
2057                        atomic_inc(&vcc->stats->rx);
2058                        cell += ATM_CELL_PAYLOAD;
2059                }
2060
2061                recycle_rx_buf(card, skb);
2062                return;
2063        }
2064
2065        /* To reach this point, the AAL layer can only be AAL5 */
2066
2067        if ((iovb = vc->rx_iov) == NULL) {
2068                iovb = skb_dequeue(&(card->iovpool.queue));
2069                if (iovb == NULL) {     /* No buffers in the queue */
2070                        iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC);
2071                        if (iovb == NULL) {
2072                                printk("nicstar%d: Out of iovec buffers.\n",
2073                                       card->index);
2074                                atomic_inc(&vcc->stats->rx_drop);
2075                                recycle_rx_buf(card, skb);
2076                                return;
2077                        }
2078                        NS_PRV_BUFTYPE(iovb) = BUF_NONE;
2079                } else if (--card->iovpool.count < card->iovnr.min) {
2080                        struct sk_buff *new_iovb;
2081                        if ((new_iovb =
2082                             alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL) {
2083                                NS_PRV_BUFTYPE(new_iovb) = BUF_NONE;
2084                                skb_queue_tail(&card->iovpool.queue, new_iovb);
2085                                card->iovpool.count++;
2086                        }
2087                }
2088                vc->rx_iov = iovb;
2089                NS_PRV_IOVCNT(iovb) = 0;
2090                iovb->len = 0;
2091                iovb->data = iovb->head;
2092                skb_reset_tail_pointer(iovb);
2093                /* IMPORTANT: a pointer to the sk_buff containing the small or large
2094                   buffer is stored as iovec base, NOT a pointer to the
2095                   small or large buffer itself. */
2096        } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
2097                printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
2098                atomic_inc(&vcc->stats->rx_err);
2099                recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
2100                                      NS_MAX_IOVECS);
2101                NS_PRV_IOVCNT(iovb) = 0;
2102                iovb->len = 0;
2103                iovb->data = iovb->head;
2104                skb_reset_tail_pointer(iovb);
2105        }
2106        iov = &((struct iovec *)iovb->data)[NS_PRV_IOVCNT(iovb)++];
2107        iov->iov_base = (void *)skb;
2108        iov->iov_len = ns_rsqe_cellcount(rsqe) * 48;
2109        iovb->len += iov->iov_len;
2110
2111#ifdef EXTRA_DEBUG
2112        if (NS_PRV_IOVCNT(iovb) == 1) {
2113                if (NS_PRV_BUFTYPE(skb) != BUF_SM) {
2114                        printk
2115                            ("nicstar%d: Expected a small buffer, and this is not one.\n",
2116                             card->index);
2117                        which_list(card, skb);
2118                        atomic_inc(&vcc->stats->rx_err);
2119                        recycle_rx_buf(card, skb);
2120                        vc->rx_iov = NULL;
2121                        recycle_iov_buf(card, iovb);
2122                        return;
2123                }
2124        } else {                /* NS_PRV_IOVCNT(iovb) >= 2 */
2125
2126                if (NS_PRV_BUFTYPE(skb) != BUF_LG) {
2127                        printk
2128                            ("nicstar%d: Expected a large buffer, and this is not one.\n",
2129                             card->index);
2130                        which_list(card, skb);
2131                        atomic_inc(&vcc->stats->rx_err);
2132                        recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
2133                                              NS_PRV_IOVCNT(iovb));
2134                        vc->rx_iov = NULL;
2135                        recycle_iov_buf(card, iovb);
2136                        return;
2137                }
2138        }
2139#endif /* EXTRA_DEBUG */
2140
2141        if (ns_rsqe_eopdu(rsqe)) {
2142                /* This works correctly regardless of the endianness of the host */
2143                unsigned char *L1L2 = (unsigned char *)
2144                                                (skb->data + iov->iov_len - 6);
2145                aal5_len = L1L2[0] << 8 | L1L2[1];
2146                len = (aal5_len == 0x0000) ? 0x10000 : aal5_len;
2147                if (ns_rsqe_crcerr(rsqe) ||
2148                    len + 8 > iovb->len || len + (47 + 8) < iovb->len) {
2149                        printk("nicstar%d: AAL5 CRC error", card->index);
2150                        if (len + 8 > iovb->len || len + (47 + 8) < iovb->len)
2151                                printk(" - PDU size mismatch.\n");
2152                        else
2153                                printk(".\n");
2154                        atomic_inc(&vcc->stats->rx_err);
2155                        recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
2156                                              NS_PRV_IOVCNT(iovb));
2157                        vc->rx_iov = NULL;
2158                        recycle_iov_buf(card, iovb);
2159                        return;
2160                }
2161
2162                /* By this point we (hopefully) have a complete SDU without errors. */
2163
2164                if (NS_PRV_IOVCNT(iovb) == 1) { /* Just a small buffer */
2165                        /* skb points to a small buffer */
2166                        if (!atm_charge(vcc, skb->truesize)) {
2167                                push_rxbufs(card, skb);
2168                                atomic_inc(&vcc->stats->rx_drop);
2169                        } else {
2170                                skb_put(skb, len);
2171                                dequeue_sm_buf(card, skb);
2172#ifdef NS_USE_DESTRUCTORS
2173                                skb->destructor = ns_sb_destructor;
2174#endif /* NS_USE_DESTRUCTORS */
2175                                ATM_SKB(skb)->vcc = vcc;
2176                                __net_timestamp(skb);
2177                                vcc->push(vcc, skb);
2178                                atomic_inc(&vcc->stats->rx);
2179                        }
2180                } else if (NS_PRV_IOVCNT(iovb) == 2) {  /* One small plus one large buffer */
2181                        struct sk_buff *sb;
2182
2183                        sb = (struct sk_buff *)(iov - 1)->iov_base;
2184                        /* skb points to a large buffer */
2185
2186                        if (len <= NS_SMBUFSIZE) {
2187                                if (!atm_charge(vcc, sb->truesize)) {
2188                                        push_rxbufs(card, sb);
2189                                        atomic_inc(&vcc->stats->rx_drop);
2190                                } else {
2191                                        skb_put(sb, len);
2192                                        dequeue_sm_buf(card, sb);
2193#ifdef NS_USE_DESTRUCTORS
2194                                        sb->destructor = ns_sb_destructor;
2195#endif /* NS_USE_DESTRUCTORS */
2196                                        ATM_SKB(sb)->vcc = vcc;
2197                                        __net_timestamp(sb);
2198                                        vcc->push(vcc, sb);
2199                                        atomic_inc(&vcc->stats->rx);
2200                                }
2201
2202                                push_rxbufs(card, skb);
2203
2204                        } else {        /* len > NS_SMBUFSIZE, the usual case */
2205
2206                                if (!atm_charge(vcc, skb->truesize)) {
2207                                        push_rxbufs(card, skb);
2208                                        atomic_inc(&vcc->stats->rx_drop);
2209                                } else {
2210                                        dequeue_lg_buf(card, skb);
2211#ifdef NS_USE_DESTRUCTORS
2212                                        skb->destructor = ns_lb_destructor;
2213#endif /* NS_USE_DESTRUCTORS */
2214                                        skb_push(skb, NS_SMBUFSIZE);
2215                                        skb_copy_from_linear_data(sb, skb->data,
2216                                                                  NS_SMBUFSIZE);
2217                                        skb_put(skb, len - NS_SMBUFSIZE);
2218                                        ATM_SKB(skb)->vcc = vcc;
2219                                        __net_timestamp(skb);
2220                                        vcc->push(vcc, skb);
2221                                        atomic_inc(&vcc->stats->rx);
2222                                }
2223
2224                                push_rxbufs(card, sb);
2225
2226                        }
2227
2228                } else {        /* Must push a huge buffer */
2229
2230                        struct sk_buff *hb, *sb, *lb;
2231                        int remaining, tocopy;
2232                        int j;
2233
2234                        hb = skb_dequeue(&(card->hbpool.queue));
2235                        if (hb == NULL) {       /* No buffers in the queue */
2236
2237                                hb = dev_alloc_skb(NS_HBUFSIZE);
2238                                if (hb == NULL) {
2239                                        printk
2240                                            ("nicstar%d: Out of huge buffers.\n",
2241                                             card->index);
2242                                        atomic_inc(&vcc->stats->rx_drop);
2243                                        recycle_iovec_rx_bufs(card,
2244                                                              (struct iovec *)
2245                                                              iovb->data,
2246                                                              NS_PRV_IOVCNT(iovb));
2247                                        vc->rx_iov = NULL;
2248                                        recycle_iov_buf(card, iovb);
2249                                        return;
2250                                } else if (card->hbpool.count < card->hbnr.min) {
2251                                        struct sk_buff *new_hb;
2252                                        if ((new_hb =
2253                                             dev_alloc_skb(NS_HBUFSIZE)) !=
2254                                            NULL) {
2255                                                skb_queue_tail(&card->hbpool.
2256                                                               queue, new_hb);
2257                                                card->hbpool.count++;
2258                                        }
2259                                }
2260                                NS_PRV_BUFTYPE(hb) = BUF_NONE;
2261                        } else if (--card->hbpool.count < card->hbnr.min) {
2262                                struct sk_buff *new_hb;
2263                                if ((new_hb =
2264                                     dev_alloc_skb(NS_HBUFSIZE)) != NULL) {
2265                                        NS_PRV_BUFTYPE(new_hb) = BUF_NONE;
2266                                        skb_queue_tail(&card->hbpool.queue,
2267                                                       new_hb);
2268                                        card->hbpool.count++;
2269                                }
2270                                if (card->hbpool.count < card->hbnr.min) {
2271                                        if ((new_hb =
2272                                             dev_alloc_skb(NS_HBUFSIZE)) !=
2273                                            NULL) {
2274                                                NS_PRV_BUFTYPE(new_hb) =
2275                                                    BUF_NONE;
2276                                                skb_queue_tail(&card->hbpool.
2277                                                               queue, new_hb);
2278                                                card->hbpool.count++;
2279                                        }
2280                                }
2281                        }
2282
2283                        iov = (struct iovec *)iovb->data;
2284
2285                        if (!atm_charge(vcc, hb->truesize)) {
2286                                recycle_iovec_rx_bufs(card, iov,
2287                                                      NS_PRV_IOVCNT(iovb));
2288                                if (card->hbpool.count < card->hbnr.max) {
2289                                        skb_queue_tail(&card->hbpool.queue, hb);
2290                                        card->hbpool.count++;
2291                                } else
2292                                        dev_kfree_skb_any(hb);
2293                                atomic_inc(&vcc->stats->rx_drop);
2294                        } else {
2295                                /* Copy the small buffer to the huge buffer */
2296                                sb = (struct sk_buff *)iov->iov_base;
2297                                skb_copy_from_linear_data(sb, hb->data,
2298                                                          iov->iov_len);
2299                                skb_put(hb, iov->iov_len);
2300                                remaining = len - iov->iov_len;
2301                                iov++;
2302                                /* Free the small buffer */
2303                                push_rxbufs(card, sb);
2304
2305                                /* Copy all large buffers to the huge buffer and free them */
2306                                for (j = 1; j < NS_PRV_IOVCNT(iovb); j++) {
2307                                        lb = (struct sk_buff *)iov->iov_base;
2308                                        tocopy =
2309                                            min_t(int, remaining, iov->iov_len);
2310                                        skb_copy_from_linear_data(lb,
2311                                                                  skb_tail_pointer
2312                                                                  (hb), tocopy);
2313                                        skb_put(hb, tocopy);
2314                                        iov++;
2315                                        remaining -= tocopy;
2316                                        push_rxbufs(card, lb);
2317                                }
2318#ifdef EXTRA_DEBUG
2319                                if (remaining != 0 || hb->len != len)
2320                                        printk
2321                                            ("nicstar%d: Huge buffer len mismatch.\n",
2322                                             card->index);
2323#endif /* EXTRA_DEBUG */
2324                                ATM_SKB(hb)->vcc = vcc;
2325#ifdef NS_USE_DESTRUCTORS
2326                                hb->destructor = ns_hb_destructor;
2327#endif /* NS_USE_DESTRUCTORS */
2328                                __net_timestamp(hb);
2329                                vcc->push(vcc, hb);
2330                                atomic_inc(&vcc->stats->rx);
2331                        }
2332                }
2333
2334                vc->rx_iov = NULL;
2335                recycle_iov_buf(card, iovb);
2336        }
2337
2338}
2339
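/*
 * With NS_USE_DESTRUCTORS defined, these skb destructors replenish the
 * corresponding free buffer pools when buffers handed to the ATM layer are
 * finally released.
 */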
2340#ifdef NS_USE_DESTRUCTORS
2341
2342static void ns_sb_destructor(struct sk_buff *sb)
2343{
2344        ns_dev *card;
2345        u32 stat;
2346
2347        card = (ns_dev *) ATM_SKB(sb)->vcc->dev->dev_data;
2348        stat = readl(card->membase + STAT);
2349        card->sbfqc = ns_stat_sfbqc_get(stat);
2350        card->lbfqc = ns_stat_lfbqc_get(stat);
2351
2352        do {
2353                sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
2354                if (sb == NULL)
2355                        break;
2356                NS_PRV_BUFTYPE(sb) = BUF_SM;
2357                skb_queue_tail(&card->sbpool.queue, sb);
2358                skb_reserve(sb, NS_AAL0_HEADER);
2359                push_rxbufs(card, sb);
2360        } while (card->sbfqc < card->sbnr.min);
2361}
2362
2363static void ns_lb_destructor(struct sk_buff *lb)
2364{
2365        ns_dev *card;
2366        u32 stat;
2367
2368        card = (ns_dev *) ATM_SKB(lb)->vcc->dev->dev_data;
2369        stat = readl(card->membase + STAT);
2370        card->sbfqc = ns_stat_sfbqc_get(stat);
2371        card->lbfqc = ns_stat_lfbqc_get(stat);
2372
2373        do {
2374                lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
2375                if (lb == NULL)
2376                        break;
2377                NS_PRV_BUFTYPE(lb) = BUF_LG;
2378                skb_queue_tail(&card->lbpool.queue, lb);
2379                skb_reserve(lb, NS_SMBUFSIZE);
2380                push_rxbufs(card, lb);
2381        } while (card->lbfqc < card->lbnr.min);
2382}
2383
2384static void ns_hb_destructor(struct sk_buff *hb)
2385{
2386        ns_dev *card;
2387
2388        card = (ns_dev *) ATM_SKB(hb)->vcc->dev->dev_data;
2389
2390        while (card->hbpool.count < card->hbnr.init) {
2391                hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
2392                if (hb == NULL)
2393                        break;
2394                NS_PRV_BUFTYPE(hb) = BUF_NONE;
2395                skb_queue_tail(&card->hbpool.queue, hb);
2396                card->hbpool.count++;
2397        }
2398}
2399
2400#endif /* NS_USE_DESTRUCTORS */
2401
2402static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb)
2403{
2404        if (unlikely(NS_PRV_BUFTYPE(skb) == BUF_NONE)) {
2405                printk("nicstar%d: What kind of rx buffer is this?\n",
2406                       card->index);
2407                dev_kfree_skb_any(skb);
2408        } else
2409                push_rxbufs(card, skb);
2410}
2411
2412static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count)
2413{
2414        while (count-- > 0)
2415                recycle_rx_buf(card, (struct sk_buff *)(iov++)->iov_base);
2416}
2417
2418static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb)
2419{
2420        if (card->iovpool.count < card->iovnr.max) {
2421                skb_queue_tail(&card->iovpool.queue, iovb);
2422                card->iovpool.count++;
2423        } else
2424                dev_kfree_skb_any(iovb);
2425}
2426
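/*
 * dequeue_sm_buf() - take a small buffer out of the driver's pool and, if the
 * card's free small buffer queue has dropped below its watermark, allocate
 * replacements and feed them to the adapter with push_rxbufs().
 * dequeue_lg_buf() below is the large buffer counterpart.
 */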
2427static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb)
2428{
2429        skb_unlink(sb, &card->sbpool.queue);
2430#ifdef NS_USE_DESTRUCTORS
2431        if (card->sbfqc < card->sbnr.min)
2432#else
2433        if (card->sbfqc < card->sbnr.init) {
2434                struct sk_buff *new_sb;
2435                if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) {
2436                        NS_PRV_BUFTYPE(new_sb) = BUF_SM;
2437                        skb_queue_tail(&card->sbpool.queue, new_sb);
2438                        skb_reserve(new_sb, NS_AAL0_HEADER);
2439                        push_rxbufs(card, new_sb);
2440                }
2441        }
2442        if (card->sbfqc < card->sbnr.init)
2443#endif /* NS_USE_DESTRUCTORS */
2444        {
2445                struct sk_buff *new_sb;
2446                if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) {
2447                        NS_PRV_BUFTYPE(new_sb) = BUF_SM;
2448                        skb_queue_tail(&card->sbpool.queue, new_sb);
2449                        skb_reserve(new_sb, NS_AAL0_HEADER);
2450                        push_rxbufs(card, new_sb);
2451                }
2452        }
2453}
2454
2455static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb)
2456{
2457        skb_unlink(lb, &card->lbpool.queue);
2458#ifdef NS_USE_DESTRUCTORS
2459        if (card->lbfqc < card->lbnr.min)
2460#else
2461        if (card->lbfqc < card->lbnr.init) {
2462                struct sk_buff *new_lb;
2463                if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) {
2464                        NS_PRV_BUFTYPE(new_lb) = BUF_LG;
2465                        skb_queue_tail(&card->lbpool.queue, new_lb);
2466                        skb_reserve(new_lb, NS_SMBUFSIZE);
2467                        push_rxbufs(card, new_lb);
2468                }
2469        }
2470        if (card->lbfqc < card->lbnr.init)
2471#endif /* NS_USE_DESTRUCTORS */
2472        {
2473                struct sk_buff *new_lb;
2474                if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) {
2475                        NS_PRV_BUFTYPE(new_lb) = BUF_LG;
2476                        skb_queue_tail(&card->lbpool.queue, new_lb);
2477                        skb_reserve(new_lb, NS_SMBUFSIZE);
2478                        push_rxbufs(card, new_lb);
2479                }
2480        }
2481}
2482
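/*
 * ns_proc_read() - the ATM layer's /proc read callback for this device.
 * Each call returns one line, selected by *pos: the buffer pool levels
 * (small, large, huge, iovec) and the interrupt counter, which is reset
 * after being reported.  The #if 0 blocks below can additionally dump the
 * 25.6 Mbps PHY registers or the TST.
 */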
2483static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page)
2484{
2485        u32 stat;
2486        ns_dev *card;
2487        int left;
2488
2489        left = (int)*pos;
2490        card = (ns_dev *) dev->dev_data;
2491        stat = readl(card->membase + STAT);
2492        if (!left--)
2493                return sprintf(page, "Pool   count    min   init    max \n");
2494        if (!left--)
2495                return sprintf(page, "Small  %5d  %5d  %5d  %5d \n",
2496                               ns_stat_sfbqc_get(stat), card->sbnr.min,
2497                               card->sbnr.init, card->sbnr.max);
2498        if (!left--)
2499                return sprintf(page, "Large  %5d  %5d  %5d  %5d \n",
2500                               ns_stat_lfbqc_get(stat), card->lbnr.min,
2501                               card->lbnr.init, card->lbnr.max);
2502        if (!left--)
2503                return sprintf(page, "Huge   %5d  %5d  %5d  %5d \n",
2504                               card->hbpool.count, card->hbnr.min,
2505                               card->hbnr.init, card->hbnr.max);
2506        if (!left--)
2507                return sprintf(page, "Iovec  %5d  %5d  %5d  %5d \n",
2508                               card->iovpool.count, card->iovnr.min,
2509                               card->iovnr.init, card->iovnr.max);
2510        if (!left--) {
2511                int retval;
2512                retval =
2513                    sprintf(page, "Interrupt counter: %u \n", card->intcnt);
2514                card->intcnt = 0;
2515                return retval;
2516        }
2517#if 0
2518        /* Dump 25.6 Mbps PHY registers */
2519        /* Now that there's a 25.6 Mbps PHY driver, this code isn't needed. It is
2520           left here just in case it's needed for debugging. */
2521        if (card->max_pcr == ATM_25_PCR && !left--) {
2522                u32 phy_regs[4];
2523                u32 i;
2524
2525                for (i = 0; i < 4; i++) {
2526                        while (CMD_BUSY(card)) ;
2527                        writel(NS_CMD_READ_UTILITY | 0x00000200 | i,
2528                               card->membase + CMD);
2529                        while (CMD_BUSY(card)) ;
2530                        phy_regs[i] = readl(card->membase + DR0) & 0x000000FF;
2531                }
2532
2533                return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n",
2534                               phy_regs[0], phy_regs[1], phy_regs[2],
2535                               phy_regs[3]);
2536        }
2537#endif /* 0 - Dump 25.6 Mbps PHY registers */
2538#if 0
2539        /* Dump TST */
2540        if (left-- < NS_TST_NUM_ENTRIES) {
2541                if (card->tste2vc[left + 1] == NULL)
2542                        return sprintf(page, "%5d - VBR/UBR \n", left + 1);
2543                else
2544                        return sprintf(page, "%5d - %d %d \n", left + 1,
2545                                       card->tste2vc[left + 1]->tx_vcc->vpi,
2546                                       card->tste2vc[left + 1]->tx_vcc->vci);
2547        }
2548#endif /* 0 */
2549        return 0;
2550}
2551
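/*
 * ns_ioctl() - device ioctl handler (typically reached through the ATM
 * layer's device-ioctl path).  NS_GETPSTAT reports the current level of one
 * buffer pool, NS_SETBUFLEV (CAP_NET_ADMIN) sets a pool's min/init/max
 * watermarks, and NS_ADJBUFLEV (CAP_NET_ADMIN) refills or trims a pool
 * towards its init level.
 */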
2552static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg)
2553{
2554        ns_dev *card;
2555        pool_levels pl;
2556        long btype;
2557        unsigned long flags;
2558
2559        card = dev->dev_data;
2560        switch (cmd) {
2561        case NS_GETPSTAT:
2562                if (get_user
2563                    (pl.buftype, &((pool_levels __user *) arg)->buftype))
2564                        return -EFAULT;
2565                switch (pl.buftype) {
2566                case NS_BUFTYPE_SMALL:
2567                        pl.count =
2568                            ns_stat_sfbqc_get(readl(card->membase + STAT));
2569                        pl.level.min = card->sbnr.min;
2570                        pl.level.init = card->sbnr.init;
2571                        pl.level.max = card->sbnr.max;
2572                        break;
2573
2574                case NS_BUFTYPE_LARGE:
2575                        pl.count =
2576                            ns_stat_lfbqc_get(readl(card->membase + STAT));
2577                        pl.level.min = card->lbnr.min;
2578                        pl.level.init = card->lbnr.init;
2579                        pl.level.max = card->lbnr.max;
2580                        break;
2581
2582                case NS_BUFTYPE_HUGE:
2583                        pl.count = card->hbpool.count;
2584                        pl.level.min = card->hbnr.min;
2585                        pl.level.init = card->hbnr.init;
2586                        pl.level.max = card->hbnr.max;
2587                        break;
2588
2589                case NS_BUFTYPE_IOVEC:
2590                        pl.count = card->iovpool.count;
2591                        pl.level.min = card->iovnr.min;
2592                        pl.level.init = card->iovnr.init;
2593                        pl.level.max = card->iovnr.max;
2594                        break;
2595
2596                default:
2597                        return -ENOIOCTLCMD;
2598
2599                }
2600                if (!copy_to_user((pool_levels __user *) arg, &pl, sizeof(pl)))
2601                        return (sizeof(pl));
2602                else
2603                        return -EFAULT;
2604
2605        case NS_SETBUFLEV:
2606                if (!capable(CAP_NET_ADMIN))
2607                        return -EPERM;
2608                if (copy_from_user(&pl, (pool_levels __user *) arg, sizeof(pl)))
2609                        return -EFAULT;
2610                if (pl.level.min >= pl.level.init
2611                    || pl.level.init >= pl.level.max)
2612                        return -EINVAL;
2613                if (pl.level.min == 0)
2614                        return -EINVAL;
2615                switch (pl.buftype) {
2616                case NS_BUFTYPE_SMALL:
2617                        if (pl.level.max > TOP_SB)
2618                                return -EINVAL;
2619                        card->sbnr.min = pl.level.min;
2620                        card->sbnr.init = pl.level.init;
2621                        card->sbnr.max = pl.level.max;
2622                        break;
2623
2624                case NS_BUFTYPE_LARGE:
2625                        if (pl.level.max > TOP_LB)
2626                                return -EINVAL;
2627                        card->lbnr.min = pl.level.min;
2628                        card->lbnr.init = pl.level.init;
2629                        card->lbnr.max = pl.level.max;
2630                        break;
2631
2632                case NS_BUFTYPE_HUGE:
2633                        if (pl.level.max > TOP_HB)
2634                                return -EINVAL;
2635                        card->hbnr.min = pl.level.min;
2636                        card->hbnr.init = pl.level.init;
2637                        card->hbnr.max = pl.level.max;
2638                        break;
2639
2640                case NS_BUFTYPE_IOVEC:
2641                        if (pl.level.max > TOP_IOVB)
2642                                return -EINVAL;
2643                        card->iovnr.min = pl.level.min;
2644                        card->iovnr.init = pl.level.init;
2645                        card->iovnr.max = pl.level.max;
2646                        break;
2647
2648                default:
2649                        return -EINVAL;
2650
2651                }
2652                return 0;
2653
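            /*
             * NS_ADJBUFLEV: privileged request to bring one pool back to its
             * init level.  Small and large buffers are allocated and pushed
             * straight to the card's free buffer queues; the huge and iovec
             * pools are grown or trimmed in software (see below).
             */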
2654        case NS_ADJBUFLEV:
2655                if (!capable(CAP_NET_ADMIN))
2656                        return -EPERM;
2657                btype = (long)arg;      /* buffer type is encoded in the pointer; a long can hold a pointer */
2658                switch (btype) {
2659                case NS_BUFTYPE_SMALL:
2660                        while (card->sbfqc < card->sbnr.init) {
2661                                struct sk_buff *sb;
2662
2663                                sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
2664                                if (sb == NULL)
2665                                        return -ENOMEM;
2666                                NS_PRV_BUFTYPE(sb) = BUF_SM;
2667                                skb_queue_tail(&card->sbpool.queue, sb);
2668                                skb_reserve(sb, NS_AAL0_HEADER);
2669                                push_rxbufs(card, sb);
2670                        }
2671                        break;
2672
2673                case NS_BUFTYPE_LARGE:
2674                        while (card->lbfqc < card->lbnr.init) {
2675                                struct sk_buff *lb;
2676
2677                                lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
2678                                if (lb == NULL)
2679                                        return -ENOMEM;
2680                                NS_PRV_BUFTYPE(lb) = BUF_LG;
2681                                skb_queue_tail(&card->lbpool.queue, lb);
2682                                skb_reserve(lb, NS_SMBUFSIZE);
2683                                push_rxbufs(card, lb);
2684                        }
2685                        break;
2686
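                    /*
                     * Huge and iovec buffers are never handed to the
                     * hardware free buffer queues, so both growing and
                     * trimming happen here, always under int_lock because
                     * the interrupt path uses the same queues and counters.
                     */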
2687                case NS_BUFTYPE_HUGE:
2688                        while (card->hbpool.count > card->hbnr.init) {
2689                                struct sk_buff *hb;
2690
2691                                spin_lock_irqsave(&card->int_lock, flags);
2692                                hb = skb_dequeue(&card->hbpool.queue);
2693                                card->hbpool.count--;
2694                                spin_unlock_irqrestore(&card->int_lock, flags);
2695                                if (hb == NULL)
2696                                        printk
2697                                            ("nicstar%d: huge buffer count inconsistent.\n",
2698                                             card->index);
2699                                else
2700                                        dev_kfree_skb_any(hb);
2701
2702                        }
2703                        while (card->hbpool.count < card->hbnr.init) {
2704                                struct sk_buff *hb;
2705
2706                                hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
2707                                if (hb == NULL)
2708                                        return -ENOMEM;
2709                                NS_PRV_BUFTYPE(hb) = BUF_NONE;
2710                                spin_lock_irqsave(&card->int_lock, flags);
2711                                skb_queue_tail(&card->hbpool.queue, hb);
2712                                card->hbpool.count++;
2713                                spin_unlock_irqrestore(&card->int_lock, flags);
2714                        }
2715                        break;
2716
2717                case NS_BUFTYPE_IOVEC:
2718                        while (card->iovpool.count > card->iovnr.init) {
2719                                struct sk_buff *iovb;
2720
2721                                spin_lock_irqsave(&card->int_lock, flags);
2722                                iovb = skb_dequeue(&card->iovpool.queue);
2723                                card->iovpool.count--;
2724                                spin_unlock_irqrestore(&card->int_lock, flags);
2725                                if (iovb == NULL)
2726                                        printk
2727                                            ("nicstar%d: iovec buffer count inconsistent.\n",
2728                                             card->index);
2729                                else
2730                                        dev_kfree_skb_any(iovb);
2731
2732                        }
2733                        while (card->iovpool.count < card->iovnr.init) {
2734                                struct sk_buff *iovb;
2735
2736                                iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
2737                                if (iovb == NULL)
2738                                        return -ENOMEM;
2739                                NS_PRV_BUFTYPE(iovb) = BUF_NONE;
2740                                spin_lock_irqsave(&card->int_lock, flags);
2741                                skb_queue_tail(&card->iovpool.queue, iovb);
2742                                card->iovpool.count++;
2743                                spin_unlock_irqrestore(&card->int_lock, flags);
2744                        }
2745                        break;
2746
2747                default:
2748                        return -EINVAL;
2749
2750                }
2751                return 0;
2752
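            /* Anything else is passed on to the PHY driver, if present. */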
2753        default:
2754                if (dev->phy && dev->phy->ioctl) {
2755                        return dev->phy->ioctl(dev, cmd, arg);
2756                } else {
2757                        printk("nicstar%d: %s == NULL \n", card->index,
2758                               dev->phy ? "dev->phy->ioctl" : "dev->phy");
2759                        return -ENOIOCTLCMD;
2760                }
2761        }
2762}
2763
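    /* Debug helper: dump the buffer type recorded in an skb's private data. */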
2764#ifdef EXTRA_DEBUG
2765static void which_list(ns_dev * card, struct sk_buff *skb)
2766{
2767        printk("skb buf_type: 0x%08x\n", NS_PRV_BUFTYPE(skb));
2768}
2769#endif /* EXTRA_DEBUG */
2770
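    /*
     * Periodic safety net, run from ns_timer every NS_POLL_PERIOD jiffies.
     * It does the same TSQ/RSQ processing as the interrupt handler, so the
     * driver keeps making progress even if an interrupt is missed, then
     * acknowledges the serviced conditions in STAT and re-arms the timer.
     */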
2771static void ns_poll(unsigned long arg)
2772{
2773        int i;
2774        ns_dev *card;
2775        unsigned long flags;
2776        u32 stat_r, stat_w;
2777
2778        PRINTK("nicstar: Entering ns_poll().\n");
2779        for (i = 0; i < num_cards; i++) {
2780                card = cards[i];
2781                /* Don't block behind the interrupt handler: if the lock
2782                 * is busy, skip this card and let the next poll period
2783                 * catch up. */
2784                if (!spin_trylock_irqsave(&card->int_lock, flags))
2785                        continue;
2786
2787                stat_w = 0;
2788                stat_r = readl(card->membase + STAT);
2789                if (stat_r & NS_STAT_TSIF)
2790                        stat_w |= NS_STAT_TSIF;
2791                if (stat_r & NS_STAT_EOPDU)
2792                        stat_w |= NS_STAT_EOPDU;
2793
2794                process_tsq(card);
2795                process_rsq(card);
2796
2797                writel(stat_w, card->membase + STAT);
2798                spin_unlock_irqrestore(&card->int_lock, flags);
2799        }
2800        mod_timer(&ns_timer, jiffies + NS_POLL_PERIOD);
2801        PRINTK("nicstar: Leaving ns_poll().\n");
2802}
2803
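    /*
     * PHY register access.  The PHY (SUNI or IDT77105) registers sit behind
     * the NICStAR utility bus: a write loads DR0 and issues a WRITE_UTILITY
     * command, a read issues READ_UTILITY and then picks the byte up from
     * DR0.  Both accessors busy-wait on CMD_BUSY and are serialized by the
     * per-card resource lock.
     */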
2804static void ns_phy_put(struct atm_dev *dev, unsigned char value,
2805                       unsigned long addr)
2806{
2807        ns_dev *card;
2808        unsigned long flags;
2809
2810        card = dev->dev_data;
2811        spin_lock_irqsave(&card->res_lock, flags);
2812        while (CMD_BUSY(card)) ;
2813        writel((u32) value, card->membase + DR0);
2814        writel(NS_CMD_WRITE_UTILITY | 0x00000200 | (addr & 0x000000FF),
2815               card->membase + CMD);
2816        spin_unlock_irqrestore(&card->res_lock, flags);
2817}
2818
2819static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr)
2820{
2821        ns_dev *card;
2822        unsigned long flags;
2823        u32 data;
2824
2825        card = dev->dev_data;
2826        spin_lock_irqsave(&card->res_lock, flags);
2827        while (CMD_BUSY(card)) ;
2828        writel(NS_CMD_READ_UTILITY | 0x00000200 | (addr & 0x000000FF),
2829               card->membase + CMD);
2830        while (CMD_BUSY(card)) ;
2831        data = readl(card->membase + DR0) & 0x000000FF;
2832        spin_unlock_irqrestore(&card->res_lock, flags);
2833        return (unsigned char)data;
2834}
2835
2836module_init(nicstar_init);
2837module_exit(nicstar_cleanup);
2838