linux/drivers/net/wan/hd64572.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hitachi (now Renesas) SCA-II HD64572 driver for Linux
 *
 * Copyright (C) 1998-2008 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * Source of information: HD64572 SCA-II User's Manual
 *
 * We use the following SCA memory map:
 *
 * Packet buffer descriptor rings - starting from card->rambase:
 * rx_ring_buffers * sizeof(pkt_desc) = logical channel #0 RX ring
 * tx_ring_buffers * sizeof(pkt_desc) = logical channel #0 TX ring
 * rx_ring_buffers * sizeof(pkt_desc) = logical channel #1 RX ring (if used)
 * tx_ring_buffers * sizeof(pkt_desc) = logical channel #1 TX ring (if used)
 *
 * Packet data buffers - starting from card->rambase + buff_offset:
 * rx_ring_buffers * HDLC_MAX_MRU     = logical channel #0 RX buffers
 * tx_ring_buffers * HDLC_MAX_MRU     = logical channel #0 TX buffers
 * rx_ring_buffers * HDLC_MAX_MRU     = logical channel #1 RX buffers (if used)
 * tx_ring_buffers * HDLC_MAX_MRU     = logical channel #1 TX buffers (if used)
 */
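
/*
 * Illustrative layout, assuming rx_ring_buffers == tx_ring_buffers == 16
 * and both channels in use:
 *
 * card->rambase + 0 * 16 * sizeof(pkt_desc): channel #0 RX descriptors
 * card->rambase + 1 * 16 * sizeof(pkt_desc): channel #0 TX descriptors
 * card->rambase + 2 * 16 * sizeof(pkt_desc): channel #1 RX descriptors
 * card->rambase + 3 * 16 * sizeof(pkt_desc): channel #1 TX descriptors
 *
 * card->rambase + buff_offset + 0 * 16 * HDLC_MAX_MRU: channel #0 RX buffers
 * card->rambase + buff_offset + 1 * 16 * HDLC_MAX_MRU: channel #0 TX buffers
 * card->rambase + buff_offset + 2 * 16 * HDLC_MAX_MRU: channel #1 RX buffers
 * card->rambase + buff_offset + 3 * 16 * HDLC_MAX_MRU: channel #1 TX buffers
 *
 * The ring sizes and buff_offset are set up by the card-specific driver;
 * see desc_offset() and buffer_offset() below for the arithmetic.
 */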

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/hdlc.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include "hd64572.h"

#define NAPI_WEIGHT             16

#define get_msci(port)    (port->chan ?   MSCI1_OFFSET :   MSCI0_OFFSET)
#define get_dmac_rx(port) (port->chan ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)
#define get_dmac_tx(port) (port->chan ? DMAC1TX_OFFSET : DMAC0TX_OFFSET)

#define sca_in(reg, card)            readb(card->scabase + (reg))
#define sca_out(value, reg, card)    writeb(value, card->scabase + (reg))
#define sca_inw(reg, card)           readw(card->scabase + (reg))
#define sca_outw(value, reg, card)   writew(value, card->scabase + (reg))
#define sca_inl(reg, card)           readl(card->scabase + (reg))
#define sca_outl(value, reg, card)   writel(value, card->scabase + (reg))

static int sca_poll(struct napi_struct *napi, int budget);

static inline port_t* dev_to_port(struct net_device *dev)
{
        return dev_to_hdlc(dev)->priv;
}
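
/*
 * The IER0 enable bits used here are interleaved byte-wise between the
 * two channels: channel #0 uses bytes 0 and 2 (0x00080022), channel #1
 * uses bytes 1 and 3 (the same pattern shifted left by 8, 0x08002200).
 * disable_intr() relies on this when it clears a whole channel with the
 * 0xFF00FF00 / 0x00FF00FF masks without disturbing the other one.
 */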
static inline void enable_intr(port_t *port)
{
        /* enable DMIB and MSCI RXINTA interrupts */
        sca_outl(sca_inl(IER0, port->card) |
                 (port->chan ? 0x08002200 : 0x00080022), IER0, port->card);
}

static inline void disable_intr(port_t *port)
{
        sca_outl(sca_inl(IER0, port->card) &
                 (port->chan ? 0x00FF00FF : 0xFF00FF00), IER0, port->card);
}

static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
{
        u16 rx_buffs = port->card->rx_ring_buffers;
        u16 tx_buffs = port->card->tx_ring_buffers;

        desc %= (transmit ? tx_buffs : rx_buffs); // called with "X + 1" etc.
        return port->chan * (rx_buffs + tx_buffs) + transmit * rx_buffs + desc;
}


static inline u16 desc_offset(port_t *port, u16 desc, int transmit)
{
        /* Descriptor offset always fits in 16 bits */
        return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc);
}


static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc,
                                             int transmit)
{
        return (pkt_desc __iomem *)(port->card->rambase +
                                    desc_offset(port, desc, transmit));
}


static inline u32 buffer_offset(port_t *port, u16 desc, int transmit)
{
        return port->card->buff_offset +
                desc_abs_number(port, desc, transmit) * (u32)HDLC_MAX_MRU;
}
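
/*
 * Example of the addressing above, assuming rx_ring_buffers ==
 * tx_ring_buffers == 16: TX descriptor #3 of channel #1 has absolute
 * number 1 * (16 + 16) + 1 * 16 + 3 = 51, its descriptor lives at
 * card->rambase + 51 * sizeof(pkt_desc) and its packet buffer at
 * card->rambase + buff_offset + 51 * HDLC_MAX_MRU.
 */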


static inline void sca_set_carrier(port_t *port)
{
        if (!(sca_in(get_msci(port) + ST3, port->card) & ST3_DCD)) {
#ifdef DEBUG_LINK
                printk(KERN_DEBUG "%s: sca_set_carrier on\n",
                       port->netdev->name);
#endif
                netif_carrier_on(port->netdev);
        } else {
#ifdef DEBUG_LINK
                printk(KERN_DEBUG "%s: sca_set_carrier off\n",
                       port->netdev->name);
#endif
                netif_carrier_off(port->netdev);
        }
}
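
/*
 * sca_init_port() builds both descriptor rings of a channel.  Each
 * descriptor's chain pointer is set to the offset of the next one;
 * desc_offset(port, i + 1, ...) wraps back to descriptor 0 at the end
 * of the ring (the modulo in desc_abs_number()), so the chain is
 * circular.
 */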
static void sca_init_port(port_t *port)
{
        card_t *card = port->card;
        u16 dmac_rx = get_dmac_rx(port), dmac_tx = get_dmac_tx(port);
        int transmit, i;

        port->rxin = 0;
        port->txin = 0;
        port->txlast = 0;

        for (transmit = 0; transmit < 2; transmit++) {
                u16 buffs = transmit ? card->tx_ring_buffers
                        : card->rx_ring_buffers;

                for (i = 0; i < buffs; i++) {
                        pkt_desc __iomem *desc = desc_address(port, i, transmit);
                        u16 chain_off = desc_offset(port, i + 1, transmit);
                        u32 buff_off = buffer_offset(port, i, transmit);

                        writel(chain_off, &desc->cp);
                        writel(buff_off, &desc->bp);
                        writew(0, &desc->len);
                        writeb(0, &desc->stat);
                }
        }

        /* DMA disable - to halt state */
        sca_out(0, DSR_RX(port->chan), card);
        sca_out(0, DSR_TX(port->chan), card);

        /* software ABORT - to initial state */
        sca_out(DCR_ABORT, DCR_RX(port->chan), card);
        sca_out(DCR_ABORT, DCR_TX(port->chan), card);

        /* current desc addr */
        sca_outl(desc_offset(port, 0, 0), dmac_rx + CDAL, card);
        sca_outl(desc_offset(port, card->tx_ring_buffers - 1, 0),
                 dmac_rx + EDAL, card);
        sca_outl(desc_offset(port, 0, 1), dmac_tx + CDAL, card);
        sca_outl(desc_offset(port, 0, 1), dmac_tx + EDAL, card);

        /* clear frame end interrupt counter */
        sca_out(DCR_CLEAR_EOF, DCR_RX(port->chan), card);
        sca_out(DCR_CLEAR_EOF, DCR_TX(port->chan), card);

        /* Receive */
        sca_outw(HDLC_MAX_MRU, dmac_rx + BFLL, card); /* set buffer length */
        sca_out(0x14, DMR_RX(port->chan), card); /* Chain mode, Multi-frame */
        sca_out(DIR_EOME, DIR_RX(port->chan), card); /* enable interrupts */
        sca_out(DSR_DE, DSR_RX(port->chan), card); /* DMA enable */

        /* Transmit */
        sca_out(0x14, DMR_TX(port->chan), card); /* Chain mode, Multi-frame */
        sca_out(DIR_EOME, DIR_TX(port->chan), card); /* enable interrupts */

        sca_set_carrier(port);
        netif_napi_add(port->netdev, &port->napi, sca_poll, NAPI_WEIGHT);
}


/* MSCI interrupt service */
static inline void sca_msci_intr(port_t *port)
{
        u16 msci = get_msci(port);
        card_t* card = port->card;

        if (sca_in(msci + ST1, card) & ST1_CDCD) {
                /* Reset MSCI CDCD status bit */
                sca_out(ST1_CDCD, msci + ST1, card);
                sca_set_carrier(port);
        }
}


static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc,
                          u16 rxin)
{
        struct net_device *dev = port->netdev;
        struct sk_buff *skb;
        u16 len;
        u32 buff;

        len = readw(&desc->len);
        skb = dev_alloc_skb(len);
        if (!skb) {
                dev->stats.rx_dropped++;
                return;
        }

        buff = buffer_offset(port, rxin, 0);
        memcpy_fromio(skb->data, card->rambase + buff, len);

        skb_put(skb, len);
#ifdef DEBUG_PKT
        printk(KERN_DEBUG "%s RX(%i):", dev->name, skb->len);
        debug_frame(skb);
#endif
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += skb->len;
        skb->protocol = hdlc_type_trans(skb, dev);
        netif_receive_skb(skb);
}


/* Receive DMA service */
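/*
 * Frame completion is detected by comparing the DMAC's current
 * descriptor address (CDA) with the offset of the descriptor at rxin:
 * as long as CDA still points into that descriptor, nothing new has
 * been received.  After each serviced descriptor the error descriptor
 * address (EDA) is advanced to it, so the DMAC does not advance into
 * descriptors that software has not processed yet.
 */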
static inline int sca_rx_done(port_t *port, int budget)
{
        struct net_device *dev = port->netdev;
        u16 dmac = get_dmac_rx(port);
        card_t *card = port->card;
        u8 stat = sca_in(DSR_RX(port->chan), card); /* read DMA Status */
        int received = 0;

        /* Reset DSR status bits */
        sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
                DSR_RX(port->chan), card);

        if (stat & DSR_BOF)
                /* Dropped one or more frames */
                dev->stats.rx_over_errors++;

        while (received < budget) {
                u32 desc_off = desc_offset(port, port->rxin, 0);
                pkt_desc __iomem *desc;
                u32 cda = sca_inl(dmac + CDAL, card);

                if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
                        break;  /* No frame received */

                desc = desc_address(port, port->rxin, 0);
                stat = readb(&desc->stat);
                if (!(stat & ST_RX_EOM))
                        port->rxpart = 1; /* partial frame received */
                else if ((stat & ST_ERROR_MASK) || port->rxpart) {
                        dev->stats.rx_errors++;
                        if (stat & ST_RX_OVERRUN)
                                dev->stats.rx_fifo_errors++;
                        else if ((stat & (ST_RX_SHORT | ST_RX_ABORT |
                                          ST_RX_RESBIT)) || port->rxpart)
                                dev->stats.rx_frame_errors++;
                        else if (stat & ST_RX_CRC)
                                dev->stats.rx_crc_errors++;
                        if (stat & ST_RX_EOM)
                                port->rxpart = 0; /* received last fragment */
                } else {
                        sca_rx(card, port, desc, port->rxin);
                        received++;
                }

                /* Set new error descriptor address */
                sca_outl(desc_off, dmac + EDAL, card);
                port->rxin = (port->rxin + 1) % card->rx_ring_buffers;
        }

        /* make sure RX DMA is enabled */
        sca_out(DSR_DE, DSR_RX(port->chan), card);
        return received;
}


/* Transmit DMA service */
static inline void sca_tx_done(port_t *port)
{
        struct net_device *dev = port->netdev;
        card_t* card = port->card;
        u8 stat;
        unsigned count = 0;

        spin_lock(&port->lock);

        stat = sca_in(DSR_TX(port->chan), card); /* read DMA Status */

        /* Reset DSR status bits */
        sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
                DSR_TX(port->chan), card);

        while (1) {
                pkt_desc __iomem *desc = desc_address(port, port->txlast, 1);
                u8 stat = readb(&desc->stat);

                if (!(stat & ST_TX_OWNRSHP))
                        break; /* not yet transmitted */
                if (stat & ST_TX_UNDRRUN) {
                        dev->stats.tx_errors++;
                        dev->stats.tx_fifo_errors++;
                } else {
                        dev->stats.tx_packets++;
                        dev->stats.tx_bytes += readw(&desc->len);
                }
                writeb(0, &desc->stat); /* Free descriptor */
                count++;
                port->txlast = (port->txlast + 1) % card->tx_ring_buffers;
        }

        if (count)
                netif_wake_queue(dev);
        spin_unlock(&port->lock);
}
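
/*
 * NAPI flow: sca_intr() below masks the channel's enables in IER0 and
 * schedules this poll function, which then services the MSCI, TX DMA
 * and RX DMA events flagged in ISR0 and re-enables the interrupts once
 * it completes under budget.
 */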
static int sca_poll(struct napi_struct *napi, int budget)
{
        port_t *port = container_of(napi, port_t, napi);
        u32 isr0 = sca_inl(ISR0, port->card);
        int received = 0;

        if (isr0 & (port->chan ? 0x08000000 : 0x00080000))
                sca_msci_intr(port);

        if (isr0 & (port->chan ? 0x00002000 : 0x00000020))
                sca_tx_done(port);

        if (isr0 & (port->chan ? 0x00000200 : 0x00000002))
                received = sca_rx_done(port, budget);

        if (received < budget) {
                napi_complete_done(napi, received);
                enable_intr(port);
        }

        return received;
}

static irqreturn_t sca_intr(int irq, void *dev_id)
{
        card_t *card = dev_id;
        u32 isr0 = sca_inl(ISR0, card);
        int i, handled = 0;

        for (i = 0; i < 2; i++) {
                port_t *port = get_port(card, i);

                if (port && (isr0 & (i ? 0x08002200 : 0x00080022))) {
                        handled = 1;
                        disable_intr(port);
                        napi_schedule(&port->napi);
                }
        }

        return IRQ_RETVAL(handled);
}
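
/*
 * The BRG search below implements Baud Rate = CLOCK_BASE / TMC / 2^BR.
 * Worked example (with a hypothetical CLOCK_BASE of 9830400 Hz; the
 * real value is card-specific): a requested clock_rate of 9600 leaves
 * the loop at br = 2, tmc = 256 (written to TMCR/TMCT as 0, which the
 * SCA treats as 256), giving exactly 9830400 / 4 / 256 = 9600 bps.
 */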
static void sca_set_port(port_t *port)
{
        card_t* card = port->card;
        u16 msci = get_msci(port);
        u8 md2 = sca_in(msci + MD2, card);
        unsigned int tmc, br = 10, brv = 1024;

        if (port->settings.clock_rate > 0) {
                /* Try lower br for better accuracy */
                do {
                        br--;
                        brv >>= 1; /* brv = 2^9 = 512 max in specs */

                        /* Baud Rate = CLOCK_BASE / TMC / 2^BR */
                        tmc = CLOCK_BASE / brv / port->settings.clock_rate;
                } while (br > 1 && tmc <= 128);

                if (tmc < 1) {
                        tmc = 1;
                        br = 0; /* For baud=CLOCK_BASE we use tmc=1 br=0 */
                        brv = 1;
                } else if (tmc > 255)
                        tmc = 256; /* tmc=0 means 256 - low baud rates */

                port->settings.clock_rate = CLOCK_BASE / brv / tmc;
        } else {
                br = 9; /* Minimum clock rate */
                tmc = 256;      /* 8bit = 0 */
                port->settings.clock_rate = CLOCK_BASE / (256 * 512);
        }

        port->rxs = (port->rxs & ~CLK_BRG_MASK) | br;
        port->txs = (port->txs & ~CLK_BRG_MASK) | br;
        port->tmc = tmc;

        /* baud divisor - time constant */
        sca_out(port->tmc, msci + TMCR, card);
        sca_out(port->tmc, msci + TMCT, card);

        /* Set BRG bits */
        sca_out(port->rxs, msci + RXS, card);
        sca_out(port->txs, msci + TXS, card);

        if (port->settings.loopback)
                md2 |= MD2_LOOPBACK;
        else
                md2 &= ~MD2_LOOPBACK;

        sca_out(md2, msci + MD2, card);
}
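
/*
 * Bring the MSCI up for HDLC operation: program the mode registers from
 * the configured encoding and parity/CRC settings, set the idle pattern
 * and the DMA activation thresholds, enable the CDCD and RXINTA
 * interrupt sources used by this driver, and finally enable the
 * transmitter and receiver.
 */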
static void sca_open(struct net_device *dev)
{
        port_t *port = dev_to_port(dev);
        card_t* card = port->card;
        u16 msci = get_msci(port);
        u8 md0, md2;

        switch (port->encoding) {
        case ENCODING_NRZ:      md2 = MD2_NRZ;          break;
        case ENCODING_NRZI:     md2 = MD2_NRZI;         break;
        case ENCODING_FM_MARK:  md2 = MD2_FM_MARK;      break;
        case ENCODING_FM_SPACE: md2 = MD2_FM_SPACE;     break;
        default:                md2 = MD2_MANCHESTER;
        }

        if (port->settings.loopback)
                md2 |= MD2_LOOPBACK;

        switch (port->parity) {
        case PARITY_CRC16_PR0:       md0 = MD0_HDLC | MD0_CRC_16_0;  break;
        case PARITY_CRC16_PR1:       md0 = MD0_HDLC | MD0_CRC_16;    break;
        case PARITY_CRC32_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU32; break;
        case PARITY_CRC16_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU;   break;
        default:                     md0 = MD0_HDLC | MD0_CRC_NONE;
        }

        sca_out(CMD_RESET, msci + CMD, card);
        sca_out(md0, msci + MD0, card);
        sca_out(0x00, msci + MD1, card); /* no address field check */
        sca_out(md2, msci + MD2, card);
        sca_out(0x7E, msci + IDL, card); /* flag character 0x7E */
        /* Skip the rest of underrun frame */
        sca_out(CTL_IDLE | CTL_URCT | CTL_URSKP, msci + CTL, card);
        sca_out(0x0F, msci + RNR, card); /* +1=RX DMA activation condition */
        sca_out(0x3C, msci + TFS, card); /* +1 = TX start */
        sca_out(0x38, msci + TCR, card); /* =Critical TX DMA activ condition */
        sca_out(0x38, msci + TNR0, card); /* =TX DMA activation condition */
        sca_out(0x3F, msci + TNR1, card); /* +1=TX DMA deactivation condition */

        /* We're using the following interrupts:
         * - RXINTA (DCD changes only)
         * - DMIB (EOM - single frame transfer complete)
         */
        sca_outl(IE0_RXINTA | IE0_CDCD, msci + IE0, card);

        sca_out(port->tmc, msci + TMCR, card);
        sca_out(port->tmc, msci + TMCT, card);
        sca_out(port->rxs, msci + RXS, card);
        sca_out(port->txs, msci + TXS, card);
        sca_out(CMD_TX_ENABLE, msci + CMD, card);
        sca_out(CMD_RX_ENABLE, msci + CMD, card);

        sca_set_carrier(port);
        enable_intr(port);
        napi_enable(&port->napi);
        netif_start_queue(dev);
}


static void sca_close(struct net_device *dev)
{
        port_t *port = dev_to_port(dev);

        /* reset channel */
        sca_out(CMD_RESET, get_msci(port) + CMD, port->card);
        disable_intr(port);
        napi_disable(&port->napi);
        netif_stop_queue(dev);
}


static int sca_attach(struct net_device *dev, unsigned short encoding,
                      unsigned short parity)
{
        if (encoding != ENCODING_NRZ &&
            encoding != ENCODING_NRZI &&
            encoding != ENCODING_FM_MARK &&
            encoding != ENCODING_FM_SPACE &&
            encoding != ENCODING_MANCHESTER)
                return -EINVAL;

        if (parity != PARITY_NONE &&
            parity != PARITY_CRC16_PR0 &&
            parity != PARITY_CRC16_PR1 &&
            parity != PARITY_CRC32_PR1_CCITT &&
            parity != PARITY_CRC16_PR1_CCITT)
                return -EINVAL;

        dev_to_port(dev)->encoding = encoding;
        dev_to_port(dev)->parity = parity;
        return 0;
}


#ifdef DEBUG_RINGS
static void sca_dump_rings(struct net_device *dev)
{
        port_t *port = dev_to_port(dev);
        card_t *card = port->card;
        u16 cnt;

        printk(KERN_DEBUG "RX ring: CDA=%u EDA=%u DSR=%02X in=%u %sactive",
               sca_inl(get_dmac_rx(port) + CDAL, card),
               sca_inl(get_dmac_rx(port) + EDAL, card),
               sca_in(DSR_RX(port->chan), card), port->rxin,
               sca_in(DSR_RX(port->chan), card) & DSR_DE ? "" : "in");
        for (cnt = 0; cnt < port->card->rx_ring_buffers; cnt++)
                pr_cont(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));
        pr_cont("\n");

        printk(KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u last=%u %sactive",
               sca_inl(get_dmac_tx(port) + CDAL, card),
               sca_inl(get_dmac_tx(port) + EDAL, card),
               sca_in(DSR_TX(port->chan), card), port->txin, port->txlast,
               sca_in(DSR_TX(port->chan), card) & DSR_DE ? "" : "in");

        for (cnt = 0; cnt < port->card->tx_ring_buffers; cnt++)
                pr_cont(" %02X", readb(&(desc_address(port, cnt, 1)->stat)));
        pr_cont("\n");

        printk(KERN_DEBUG "MSCI: MD: %02x %02x %02x,"
               " ST: %02x %02x %02x %02x %02x, FST: %02x CST: %02x %02x\n",
               sca_in(get_msci(port) + MD0, card),
               sca_in(get_msci(port) + MD1, card),
               sca_in(get_msci(port) + MD2, card),
               sca_in(get_msci(port) + ST0, card),
               sca_in(get_msci(port) + ST1, card),
               sca_in(get_msci(port) + ST2, card),
               sca_in(get_msci(port) + ST3, card),
               sca_in(get_msci(port) + ST4, card),
               sca_in(get_msci(port) + FST, card),
               sca_in(get_msci(port) + CST0, card),
               sca_in(get_msci(port) + CST1, card));

        printk(KERN_DEBUG "ILAR: %02x ISR: %08x %08x\n", sca_in(ILAR, card),
               sca_inl(ISR0, card), sca_inl(ISR1, card));
}
#endif /* DEBUG_RINGS */
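
/*
 * The transmit ring keeps a one-descriptor gap: after queueing a frame,
 * sca_xmit() stops the queue if the descriptor following the new txin
 * has not been reclaimed by sca_tx_done() yet, which is what lets the
 * BUG_ON() below assume a free slot on entry.
 */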
static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
{
        port_t *port = dev_to_port(dev);
        card_t *card = port->card;
        pkt_desc __iomem *desc;
        u32 buff, len;

        spin_lock_irq(&port->lock);

        desc = desc_address(port, port->txin + 1, 1);
        BUG_ON(readb(&desc->stat)); /* previous xmit should stop queue */

#ifdef DEBUG_PKT
        printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
        debug_frame(skb);
#endif

        desc = desc_address(port, port->txin, 1);
        buff = buffer_offset(port, port->txin, 1);
        len = skb->len;
        memcpy_toio(card->rambase + buff, skb->data, len);

        writew(len, &desc->len);
        writeb(ST_TX_EOM, &desc->stat);

        port->txin = (port->txin + 1) % card->tx_ring_buffers;
        sca_outl(desc_offset(port, port->txin, 1),
                 get_dmac_tx(port) + EDAL, card);

        sca_out(DSR_DE, DSR_TX(port->chan), card); /* Enable TX DMA */

        desc = desc_address(port, port->txin + 1, 1);
        if (readb(&desc->stat)) /* allow 1 packet gap */
                netif_stop_queue(dev);

        spin_unlock_irq(&port->lock);

        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}
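
/*
 * Probe how much on-card RAM is usable: every 32-bit word is written
 * (from the top down) with its own offset XORed with 0x12345678, then
 * read back from the bottom up.  The first mismatch, for example due to
 * address wrap-around on a smaller memory chip, marks the end of usable
 * RAM, and that offset is returned as the detected size.
 */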
static u32 sca_detect_ram(card_t *card, u8 __iomem *rambase, u32 ramsize)
{
        /* Round RAM size to 32 bits, fill from end to start */
        u32 i = ramsize &= ~3;

        do {
                i -= 4;
                writel(i ^ 0x12345678, rambase + i);
        } while (i > 0);

        for (i = 0; i < ramsize; i += 4) {
                if (readl(rambase + i) != (i ^ 0x12345678))
                        break;
        }

        return i;
}


static void sca_init(card_t *card, int wait_states)
{
        sca_out(wait_states, WCRL, card); /* Wait Control */
        sca_out(wait_states, WCRM, card);
        sca_out(wait_states, WCRH, card);

        sca_out(0, DMER, card); /* DMA Master disable */
        sca_out(0x03, PCR, card); /* DMA priority */
        sca_out(0, DSR_RX(0), card); /* DMA disable - to halt state */
        sca_out(0, DSR_TX(0), card);
        sca_out(0, DSR_RX(1), card);
        sca_out(0, DSR_TX(1), card);
        sca_out(DMER_DME, DMER, card); /* DMA Master enable */
}