/* linux/drivers/staging/cxt1e1/musycc.c */
/* Debug high-water marks: interrupt-queue depth and bottom-half pending
 * counts.  They are reported and then reset by musycc_dump_ring(). */
unsigned int max_intcnt = 0;
unsigned int max_bh = 0;
   3
   4/*-----------------------------------------------------------------------------
   5 * musycc.c -
   6 *
   7 * Copyright (C) 2007  One Stop Systems, Inc.
   8 * Copyright (C) 2003-2006  SBE, Inc.
   9 *
  10 *   This program is free software; you can redistribute it and/or modify
  11 *   it under the terms of the GNU General Public License as published by
  12 *   the Free Software Foundation; either version 2 of the License, or
  13 *   (at your option) any later version.
  14 *
  15 *   This program is distributed in the hope that it will be useful,
  16 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
  17 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  18 *   GNU General Public License for more details.
  19 *
  20 * For further information, contact via email: support@onestopsystems.com
  21 * One Stop Systems, Inc.  Escondido, California  U.S.A.
  22 *-----------------------------------------------------------------------------
  23 */
  24
  25#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  26
  27#include <linux/types.h>
  28#include "pmcc4_sysdep.h"
  29#include <linux/kernel.h>
  30#include <linux/errno.h>
  31#include <linux/init.h>
  32#include "sbecom_inline_linux.h"
  33#include "libsbew.h"
  34#include "pmcc4_private.h"
  35#include "pmcc4.h"
  36#include "musycc.h"
  37
/* Generic channel lookup mapped onto the board-specific helper; the 'ci'
 * argument is accepted for interface symmetry but ignored --
 * c4_find_chan() locates the channel by its number alone. */
#define sd_find_chan(ci,ch)   c4_find_chan(ch)
  39
  40
  41/*******************************************************************/
  42/* global driver variables */
  43extern ci_t *c4_list;
  44extern int  drvr_state;
  45extern int  cxt1e1_log_level;
  46
  47extern int  cxt1e1_max_mru;
  48extern int  cxt1e1_max_mtu;
  49extern int  max_rxdesc_used;
  50extern int  max_txdesc_used;
  51extern ci_t *CI;                /* dummy pointr to board ZEROE's data - DEBUG
  52                                 * USAGE */
  53
  54
  55/*******************************************************************/
  56/* forward references */
  57void        c4_fifo_free(mpi_t *, int);
  58void        c4_wk_chan_restart(mch_t *);
  59void        musycc_bh_tx_eom(mpi_t *, int);
  60int         musycc_chan_up(ci_t *, int);
  61status_t __init musycc_init(ci_t *);
  62void        musycc_intr_bh_tasklet(ci_t *);
  63void        musycc_serv_req(mpi_t *, u_int32_t);
  64void        musycc_update_timeslots(mpi_t *);
  65
  66/*******************************************************************/
  67
  68#if 1
  69static int
  70musycc_dump_rxbuffer_ring(mch_t *ch, int lockit)
  71{
  72    struct mdesc *m;
  73    unsigned long flags = 0;
  74
  75    u_int32_t status;
  76    int         n;
  77
  78    if (lockit)
  79        spin_lock_irqsave(&ch->ch_rxlock, flags);
  80    if (ch->rxd_num == 0)
  81        pr_info("  ZERO receive buffers allocated for this channel.");
  82    else {
  83        FLUSH_MEM_READ();
  84        m = &ch->mdr[ch->rxix_irq_srv];
  85        for (n = ch->rxd_num; n; n--) {
  86            status = le32_to_cpu(m->status);
  87            {
  88                pr_info("%c  %08lx[%2d]: sts %08x (%c%c%c%c:%d.) Data [%08x] Next [%08x]\n",
  89                        (m == &ch->mdr[ch->rxix_irq_srv]) ? 'F' : ' ',
  90                        (unsigned long) m, n,
  91                        status,
  92                        m->data ? (status & HOST_RX_OWNED ? 'H' : 'M') : '-',
  93                        status & POLL_DISABLED ? 'P' : '-',
  94                        status & EOBIRQ_ENABLE ? 'b' : '-',
  95                        status & EOMIRQ_ENABLE ? 'm' : '-',
  96                        status & LENGTH_MASK,
  97                        le32_to_cpu(m->data), le32_to_cpu(m->next));
  98#ifdef RLD_DUMP_BUFDATA
  99                {
 100                    u_int32_t  *dp;
 101                    int         len = status & LENGTH_MASK;
 102
 103#if 1
 104                    if (m->data && (status & HOST_RX_OWNED))
 105#else
 106                    if (m->data)    /* always dump regardless of valid RX
 107                                     * data */
 108#endif
 109                    {
 110                        dp = (u_int32_t *) OS_phystov((void *) (le32_to_cpu(m->data)));
 111                        if (len >= 0x10)
 112                            pr_info("    %x[%x]: %08X %08X %08X %08x\n", (u_int32_t) dp, len,
 113                                    *dp, *(dp + 1), *(dp + 2), *(dp + 3));
 114                        else if (len >= 0x08)
 115                            pr_info("    %x[%x]: %08X %08X\n", (u_int32_t) dp, len,
 116                                    *dp, *(dp + 1));
 117                        else
 118                            pr_info("    %x[%x]: %08X\n", (u_int32_t) dp, len, *dp);
 119                    }
 120                }
 121#endif
 122            }
 123            m = m->snext;
 124        }
 125    }                               /* -for- */
 126    pr_info("\n");
 127
 128    if (lockit)
 129        spin_unlock_irqrestore(&ch->ch_rxlock, flags);
 130    return 0;
 131}
 132#endif
 133
 134#if 1
 135static int
 136musycc_dump_txbuffer_ring(mch_t *ch, int lockit)
 137{
 138    struct mdesc *m;
 139    unsigned long flags = 0;
 140    u_int32_t   status;
 141    int         n;
 142
 143    if (lockit)
 144        spin_lock_irqsave(&ch->ch_txlock, flags);
 145    if (ch->txd_num == 0)
 146        pr_info("  ZERO transmit buffers allocated for this channel.");
 147    else {
 148        FLUSH_MEM_READ();
 149        m = ch->txd_irq_srv;
 150        for (n = ch->txd_num; n; n--) {
 151            status = le32_to_cpu(m->status);
 152            {
 153                pr_info("%c%c %08lx[%2d]: sts %08x (%c%c%c%c:%d.) Data [%08x] Next [%08x]\n",
 154                        (m == ch->txd_usr_add) ? 'F' : ' ',
 155                        (m == ch->txd_irq_srv) ? 'L' : ' ',
 156                        (unsigned long) m, n,
 157                        status,
 158                     m->data ? (status & MUSYCC_TX_OWNED ? 'M' : 'H') : '-',
 159                        status & POLL_DISABLED ? 'P' : '-',
 160                        status & EOBIRQ_ENABLE ? 'b' : '-',
 161                        status & EOMIRQ_ENABLE ? 'm' : '-',
 162                        status & LENGTH_MASK,
 163                        le32_to_cpu(m->data), le32_to_cpu(m->next));
 164#ifdef RLD_DUMP_BUFDATA
 165                {
 166                    u_int32_t  *dp;
 167                    int         len = status & LENGTH_MASK;
 168
 169                    if (m->data) {
 170                        dp = (u_int32_t *) OS_phystov((void *) (le32_to_cpu(m->data)));
 171                        if (len >= 0x10)
 172                            pr_info("    %x[%x]: %08X %08X %08X %08x\n", (u_int32_t) dp, len,
 173                                    *dp, *(dp + 1), *(dp + 2), *(dp + 3));
 174                        else if (len >= 0x08)
 175                            pr_info("    %x[%x]: %08X %08X\n", (u_int32_t) dp, len,
 176                                    *dp, *(dp + 1));
 177                        else
 178                            pr_info("    %x[%x]: %08X\n", (u_int32_t) dp, len, *dp);
 179                    }
 180                }
 181#endif
 182            }
 183            m = m->snext;
 184        }
 185    }                               /* -for- */
 186    pr_info("\n");
 187
 188    if (lockit)
 189        spin_unlock_irqrestore(&ch->ch_txlock, flags);
 190    return 0;
 191}
 192#endif
 193
 194
 195/*
 196 * The following supports a backdoor debug facility which can be used to
 197 * display the state of a board's channel.
 198 */
 199
 200status_t
 201musycc_dump_ring(ci_t *ci, unsigned int chan)
 202{
 203    mch_t      *ch;
 204
 205    if (chan >= MAX_CHANS_USED)
 206        return SBE_DRVR_FAIL;       /* E2BIG */
 207    {
 208        int         bh;
 209
 210        bh = atomic_read(&ci->bh_pending);
 211        pr_info(">> bh_pend %d [%d] ihead %d itail %d [%d] th_cnt %d bh_cnt %d wdcnt %d note %d\n",
 212                bh, max_bh, ci->iqp_headx, ci->iqp_tailx, max_intcnt,
 213                ci->intlog.drvr_intr_thcount,
 214                ci->intlog.drvr_intr_bhcount,
 215                ci->wdcount, ci->wd_notify);
 216        max_bh = 0;                 /* reset counter */
 217        max_intcnt = 0;             /* reset counter */
 218    }
 219
 220    if (!(ch = sd_find_chan(dummy, chan))) {
 221        pr_info(">> musycc_dump_ring: channel %d not up.\n", chan);
 222        return ENOENT;
 223    }
 224    pr_info(">> CI %p CHANNEL %3d @ %p: state %x status/p %x/%x\n", ci, chan, ch, ch->state,
 225            ch->status, ch->p.status);
 226    pr_info("--------------------------------\nTX Buffer Ring - Channel %d, txd_num %d. (bd/ch pend %d %d), TXD required %d, txpkt %lu\n",
 227            chan, ch->txd_num,
 228            (u_int32_t) atomic_read(&ci->tx_pending), (u_int32_t) atomic_read(&ch->tx_pending), ch->txd_required, ch->s.tx_packets);
 229    pr_info("++ User 0x%p IRQ_SRV 0x%p USR_ADD 0x%p QStopped %x, start_tx %x tx_full %d txd_free %d mode %x\n",
 230            ch->user, ch->txd_irq_srv, ch->txd_usr_add,
 231            sd_queue_stopped(ch->user),
 232            ch->ch_start_tx, ch->tx_full, ch->txd_free, ch->p.chan_mode);
 233    musycc_dump_txbuffer_ring(ch, 1);
 234    pr_info("RX Buffer Ring - Channel %d, rxd_num %d. IRQ_SRV[%d] 0x%p, start_rx %x rxpkt %lu\n",
 235            chan, ch->rxd_num, ch->rxix_irq_srv,
 236            &ch->mdr[ch->rxix_irq_srv], ch->ch_start_rx, ch->s.rx_packets);
 237    musycc_dump_rxbuffer_ring(ch, 1);
 238
 239    return SBE_DRVR_SUCCESS;
 240}
 241
 242
 243status_t
 244musycc_dump_rings(ci_t *ci, unsigned int start_chan)
 245{
 246    unsigned int chan;
 247
 248    for (chan = start_chan; chan < (start_chan + 5); chan++)
 249        musycc_dump_ring(ci, chan);
 250    return SBE_DRVR_SUCCESS;
 251}
 252
 253
 254/*
 255 * NOTE on musycc_init_mdt():  These MUSYCC writes are only operational after
 256 * a MUSYCC GROUP_INIT command has been issued.
 257 */
 258
 259void
 260musycc_init_mdt(mpi_t *pi)
 261{
 262    u_int32_t  *addr, cfg;
 263    int         i;
 264
 265    /*
 266     * This Idle Code insertion takes effect prior to channel's first
 267     * transmitted  message.  After that, each message contains its own Idle
 268     * Code information which is to be issued after the message is
 269     * transmitted (Ref.MUSYCC 5.2.2.3: MCENBL bit in Group Configuration
 270     * Descriptor).
 271     */
 272
 273    addr = (u_int32_t *) ((u_long) pi->reg + MUSYCC_MDT_BASE03_ADDR);
 274    cfg = CFG_CH_FLAG_7E << IDLE_CODE;
 275
 276    for (i = 0; i < 32; addr++, i++)
 277        pci_write_32(addr, cfg);
 278}
 279
 280
 281/* Set TX thp to the next unprocessed md */
 282
/*
 * Advance the hardware Transmit Head Pointer (thp) for a channel to the
 * next unprocessed (MUSYCC-owned) message descriptor, reclaiming any
 * already-transmitted host-owned descriptors along the way.
 *
 * Called with the channel's TX lock NOT held; takes ch_txlock itself.
 * Returns early (ring untouched) when no descriptor has a buffer.
 */
void
musycc_update_tx_thp(mch_t *ch)
{
    struct mdesc *md;
    unsigned long flags;

    spin_lock_irqsave(&ch->ch_txlock, flags);
    while (1) {
        md = ch->txd_irq_srv;
        FLUSH_MEM_READ();
        if (!md->data) {
            /* No MDs with buffers to process */
            spin_unlock_irqrestore(&ch->ch_txlock, flags);
            return;
        }
        if ((le32_to_cpu(md->status)) & MUSYCC_TX_OWNED) {
            /* this is the MD to restart TX with */
            break;
        }
        /*
         * Otherwise, we have a valid, host-owned message descriptor which
         * has been successfully transmitted and whose buffer can be freed,
         * so... process this MD, it's owned by the host.  (This might give
         * as a new, updated txd_irq_srv.)
         */
        musycc_bh_tx_eom(ch->up, ch->gchan);
    }
    /* txd_irq_srv may have moved during EOM processing; re-read it */
    md = ch->txd_irq_srv;
    ch->up->regram->thp[ch->gchan] = cpu_to_le32(OS_vtophys(md));
    FLUSH_MEM_WRITE();

    if (ch->tx_full) {
        /* reclaimed descriptors above freed ring space; resume the stack */
        ch->tx_full = 0;
        ch->txd_required = 0;
        sd_enable_xmit(ch->user);  /* re-enable to catch flow controlled
                                     * channel */
    }
    spin_unlock_irqrestore(&ch->ch_txlock, flags);

#ifdef RLD_TRANS_DEBUG
    /* NOTE(review): md is read here after the lock is dropped -- debug only */
    pr_info("++ musycc_update_tx_thp[%d]: setting thp = %p, sts %x\n", ch->channum, md, md->status);
#endif
}
 326
 327
 328/*
 329 * This is the workq task executed by the OS when our queue_work() is
 330 * scheduled and run.  It can fire off either RX or TX ACTIVATION depending
 331 * upon the channel's ch_start_tx and ch_start_rx variables.  This routine
 332 * is implemented as a work queue so that the call to the service request is
 333 * able to sleep, awaiting an interrupt acknowledgment response (SACK) from
 334 * the hardware.
 335 */
 336
/*
 * Workqueue task (see musycc_chan_restart()/c4_wk_chan_restart()) which
 * fires off RX and/or TX CHANNEL ACTIVATE service requests depending on
 * the channel's ch_start_rx / ch_start_tx flags.  Implemented as a work
 * queue item so musycc_serv_req() may sleep awaiting the hardware's
 * service-request acknowledgment (SACK) interrupt.
 *
 * @arg: pointer to the channel's embedded ch_work member.
 */
void
musycc_wq_chan_restart(void *arg)      /* channel private structure */
{
    mch_t      *ch;
    mpi_t      *pi;
    struct mdesc *md;
#if 0
    unsigned long flags;
#endif

    /* recover the owning channel from its embedded work struct */
    ch = container_of(arg, struct c4_chan_info, ch_work);
    pi = ch->up;

#ifdef RLD_TRANS_DEBUG
    pr_info("wq_chan_restart[%d]: start_RT[%d/%d] status %x\n",
            ch->channum, ch->ch_start_rx, ch->ch_start_tx, ch->status);

#endif

    /**********************************/
    /** check for RX restart request **/
    /**********************************/

    if ((ch->ch_start_rx) && (ch->status & RX_ENABLED)) {

        /* clear the request before issuing the (sleeping) service request */
        ch->ch_start_rx = 0;
#if defined(RLD_TRANS_DEBUG) || defined(RLD_RXACT_DEBUG)
        {
            /* rate-limit this debug output to the first few occurrences */
            static int  hereb4 = 7;

            if (hereb4) {            /* RLD DEBUG */
                hereb4--;
#ifdef RLD_TRANS_DEBUG
                md = &ch->mdr[ch->rxix_irq_srv];
                pr_info("++ musycc_wq_chan_restart[%d] CHAN RX ACTIVATE: rxix_irq_srv %d, md %p sts %x, rxpkt %lu\n",
                ch->channum, ch->rxix_irq_srv, md, le32_to_cpu(md->status),
                        ch->s.rx_packets);
#elif defined(RLD_RXACT_DEBUG)
                md = &ch->mdr[ch->rxix_irq_srv];
                pr_info("++ musycc_wq_chan_restart[%d] CHAN RX ACTIVATE: rxix_irq_srv %d, md %p sts %x, rxpkt %lu\n",
                ch->channum, ch->rxix_irq_srv, md, le32_to_cpu(md->status),
                        ch->s.rx_packets);
                musycc_dump_rxbuffer_ring(ch, 1);      /* RLD DEBUG */
#endif
            }
        }
#endif
        musycc_serv_req(pi, SR_CHANNEL_ACTIVATE | SR_RX_DIRECTION | ch->gchan);
    }
    /**********************************/
    /** check for TX restart request **/
    /**********************************/

    if ((ch->ch_start_tx) && (ch->status & TX_ENABLED)) {
        /* find next unprocessed message, then set TX thp to it */
        musycc_update_tx_thp(ch);

#if 0
        spin_lock_irqsave(&ch->ch_txlock, flags);
#endif
        md = ch->txd_irq_srv;
        if (!md) {
#ifdef RLD_TRANS_DEBUG
            pr_info("-- musycc_wq_chan_restart[%d]: WARNING, starting NULL md\n", ch->channum);
#endif
#if 0
            spin_unlock_irqrestore(&ch->ch_txlock, flags);
#endif
        } else if (md->data && ((le32_to_cpu(md->status)) & MUSYCC_TX_OWNED)) {
            /* descriptor has data and is MUSYCC-owned: safe to activate TX */
            ch->ch_start_tx = 0;
#if 0
            spin_unlock_irqrestore(&ch->ch_txlock, flags);   /* allow interrupts for service request */
#endif
#ifdef RLD_TRANS_DEBUG
            pr_info("++ musycc_wq_chan_restart() CHAN TX ACTIVATE: chan %d txd_irq_srv %p = sts %x, txpkt %lu\n",
                    ch->channum, ch->txd_irq_srv, ch->txd_irq_srv->status, ch->s.tx_packets);
#endif
            musycc_serv_req(pi, SR_CHANNEL_ACTIVATE | SR_TX_DIRECTION | ch->gchan);
        }
#ifdef RLD_RESTART_DEBUG
        else {
            /* retain request to start until retried and we have data to xmit */
            pr_info("-- musycc_wq_chan_restart[%d]: DELAYED due to md %p sts %x data %x, start_tx %x\n",
                    ch->channum, md,
                    le32_to_cpu(md->status),
                    le32_to_cpu(md->data), ch->ch_start_tx);
            musycc_dump_txbuffer_ring(ch, 0);
#if 0
            spin_unlock_irqrestore(&ch->ch_txlock, flags);   /* allow interrupts for service request */
#endif
        }
#endif
    }
}
 431
 432
 433 /*
 434  * Channel restart either fires of a workqueue request (2.6) or lodges a
 435  * watchdog activation sequence (2.4).
 436  */
 437
 438void
 439musycc_chan_restart(mch_t *ch)
 440{
 441#ifdef RLD_RESTART_DEBUG
 442    pr_info("++ musycc_chan_restart[%d]: txd_irq_srv @ %p = sts %x\n",
 443            ch->channum, ch->txd_irq_srv, ch->txd_irq_srv->status);
 444#endif
 445
 446    /* 2.6 - find next unprocessed message, then set TX thp to it */
 447#ifdef RLD_RESTART_DEBUG
 448    pr_info(">> musycc_chan_restart: scheduling Chan %x workQ @ %p\n", ch->channum, &ch->ch_work);
 449#endif
 450    c4_wk_chan_restart(ch);        /* work queue mechanism fires off: Ref:
 451                                     * musycc_wq_chan_restart () */
 452
 453}
 454
 455
 456void
 457rld_put_led(mpi_t *pi, u_int32_t ledval)
 458{
 459    static u_int32_t led = 0;
 460
 461    if (ledval == 0)
 462        led = 0;
 463    else
 464        led |= ledval;
 465
 466    pci_write_32((u_int32_t *) &pi->up->cpldbase->leds, led);  /* RLD DEBUG TRANHANG */
 467}
 468
 469
/* maximum rewrites of an unacknowledged service request before giving up */
#define MUSYCC_SR_RETRY_CNT  9

/*
 * Issue a service request to the MUSYCC and wait for its acknowledgment.
 *
 * @pi:  group (port) the request targets
 * @req: service request word (command | direction | channel bits)
 *
 * Serialized per group via sr_sem_busy; may sleep on sr_sem_wait until
 * the SACK interrupt arrives (except for SR_CHIP_RESET, which is never
 * ACK'd and is instead followed by a fixed delay).  Unacknowledged
 * writes are retried up to MUSYCC_SR_RETRY_CNT times.
 */
void
musycc_serv_req(mpi_t *pi, u_int32_t req)
{
    volatile u_int32_t r;
    int         rcnt;

    /*
     * PORT NOTE: Semaphore protect service loop guarantees only a single
     * operation at a time.  Per MUSYCC Manual - "Issuing service requests to
     * the same channel group without first receiving ACK from each request
     * may cause the host to lose track of which service request has been
     * acknowledged."
     */

    SD_SEM_TAKE(&pi->sr_sem_busy, "serv");     /* only 1 thru here, per
                                                 * group */

    if (pi->sr_last == req) {
#ifdef RLD_TRANS_DEBUG
        pr_info(">> same SR, Port %d Req %x\n", pi->portnum, req);
#endif

        /*
         * The most likely repeated request is the channel activation command
         * which follows the occurrence of a Transparent mode TX ONR or a
         * BUFF error.  If the previous command was a CHANNEL ACTIVATE,
         * precede it with a NOOP command in order maintain coherent control
         * of this current (re)ACTIVATE.
         */

        r = (pi->sr_last & ~SR_GCHANNEL_MASK);
        if ((r == (SR_CHANNEL_ACTIVATE | SR_TX_DIRECTION)) ||
            (r == (SR_CHANNEL_ACTIVATE | SR_RX_DIRECTION))) {
#ifdef RLD_TRANS_DEBUG
            pr_info(">> same CHAN ACT SR, Port %d Req %x => issue SR_NOOP CMD\n", pi->portnum, req);
#endif
            /* recurse once with SR_NOOP; drop the semaphore first since
             * musycc_serv_req() will re-take it */
            SD_SEM_GIVE(&pi->sr_sem_busy);     /* allow this next request */
            musycc_serv_req(pi, SR_NOOP);
            SD_SEM_TAKE(&pi->sr_sem_busy, "serv");     /* relock & continue w/
                                                         * original req */
        } else if (req == SR_NOOP) {
            /* no need to issue back-to-back SR_NOOP commands at this time */
#ifdef RLD_TRANS_DEBUG
            pr_info(">> same Port SR_NOOP skipped, Port %d\n", pi->portnum);
#endif
            SD_SEM_GIVE(&pi->sr_sem_busy);     /* allow this next request */
            return;
        }
    }
    rcnt = 0;
    pi->sr_last = req;
rewrite:
    pci_write_32((u_int32_t *) &pi->reg->srd, req);
    FLUSH_MEM_WRITE();

    /*
     * Per MUSYCC Manual, Section 6.1,2 - "When writing an SCR service
     * request, the host must ensure at least one PCI bus clock cycle has
     * elapsed before writing another service request.  To meet this minimum
     * elapsed service request write timing interval, it is recommended that
     * the host follow any SCR write with another operation which reads from
     * the same address."
     */
    r = pci_read_32((u_int32_t *) &pi->reg->srd);      /* adhere to write
                                                         * timing imposition */


    /* verify the hardware latched our request; retry a bounded number of
     * times (CHIP_RESET reads back differently and is never retried) */
    if ((r != req) && (req != SR_CHIP_RESET) && (++rcnt <= MUSYCC_SR_RETRY_CNT)) {
        if (cxt1e1_log_level >= LOG_MONITOR)
            pr_info("%s: %d - reissue srv req/last %x/%x (hdw reads %x), Chan %d.\n",
                    pi->up->devname, rcnt, req, pi->sr_last, r,
                    (pi->portnum * MUSYCC_NCHANS) + (req & 0x1f));
        OS_uwait_dummy();          /* this delay helps reduce reissue counts
                                     * (reason not yet researched) */
        goto rewrite;
    }
    if (rcnt > MUSYCC_SR_RETRY_CNT) {
        pr_warning("%s: failed service request (#%d)= %x, group %d.\n",
                   pi->up->devname, MUSYCC_SR_RETRY_CNT, req, pi->portnum);
        SD_SEM_GIVE(&pi->sr_sem_busy); /* allow any next request */
        return;
    }
    if (req == SR_CHIP_RESET) {
        /*
         * PORT NOTE: the CHIP_RESET command is NOT ack'd by the MUSYCC, thus
         * the upcoming delay is used.  Though the MUSYCC documentation
         * suggests a read-after-write would supply the required delay, it's
         * unclear what CPU/BUS clock speeds might have been assumed when
         * suggesting this 'lack of ACK' workaround.  Thus the use of uwait.
         */
        OS_uwait(100000, "icard"); /* 100ms */
    } else {
        FLUSH_MEM_READ();
        SD_SEM_TAKE(&pi->sr_sem_wait, "sakack");       /* sleep until SACK
                                                         * interrupt occurs */
    }
    SD_SEM_GIVE(&pi->sr_sem_busy); /* allow any next request */
}
 570
 571
#ifdef  SBE_PMCC4_ENABLE
/*
 * Rebuild the group's RX/TX timeslot maps (and, for shared timeslots,
 * the subchannel maps) from the per-channel bitmask configuration, then
 * push all four maps to the hardware via service requests.
 *
 * Timeslots illegal for the current framing (slot 0 and -- for CAS --
 * slot 16 in E1; slots above 23 in T1) are marked unavailable in
 * pi->tsm[].  The ts encoding places a mode code in the upper 3 bits and
 * a channel/index in the lower 5; the specific mode values (4..7 << 5)
 * presumably follow the MUSYCC TSM register layout -- verify against the
 * MUSYCC manual before changing them.
 */
void
musycc_update_timeslots(mpi_t *pi)
{
    int         i, ch;
    char        e1mode = IS_FRAME_ANY_E1(pi->p.port_mode);

    for (i = 0; i < 32; i++) {
        /* usedby: how many UP channels claim any bit of this timeslot;
         * bits[j]: which channel owns bit j of the slot (-1 = unowned) */
        int         usedby = 0, last = 0, ts, j, bits[8];

        u_int8_t lastval = 0;

        if (((i == 0) && e1mode) || /* disable if  E1 mode */
            ((i == 16) && ((pi->p.port_mode == CFG_FRAME_E1CRC_CAS) || (pi->p.port_mode == CFG_FRAME_E1CRC_CAS_AMI)))
            || ((i > 23) && (!e1mode))) /* disable if T1 mode */
            pi->tsm[i] = 0xff;      /* make tslot unavailable for this mode */
        else
            pi->tsm[i] = 0x00;      /* make tslot available for assignment */
        for (j = 0; j < 8; j++)
            bits[j] = -1;
        for (ch = 0; ch < MUSYCC_NCHANS; ch++) {
            if ((pi->chan[ch]->state == UP) && (pi->chan[ch]->p.bitmask[i])) {
                usedby++;
                last = ch;
                lastval = pi->chan[ch]->p.bitmask[i];
                for (j = 0; j < 8; j++)
                    if (lastval & (1 << j))
                        bits[j] = ch;
                pi->tsm[i] |= lastval;
            }
        }
        if (!usedby)
            ts = 0;                 /* slot unused: leave unmapped */
        else if ((usedby == 1) && (lastval == 0xff))
            ts = (4 << 5) | last;   /* single channel owns all 8 bits */
        else if ((usedby == 1) && (lastval == 0x7f))
            ts = (5 << 5) | last;   /* single channel owns low 7 bits */
        else {
            /* shared/partial slot: route through the subchannel maps */
            int         idx;

            if (bits[0] < 0)
                ts = (6 << 5) | (idx = last);
            else
                ts = (7 << 5) | (idx = bits[0]);
            for (j = 1; j < 8; j++) {
                pi->regram->rscm[idx * 8 + j] = (bits[j] < 0) ? 0 : (0x80 | bits[j]);
                pi->regram->tscm[idx * 8 + j] = (bits[j] < 0) ? 0 : (0x80 | bits[j]);
            }
        }
        pi->regram->rtsm[i] = ts;
        pi->regram->ttsm[i] = ts;
    }
    FLUSH_MEM_WRITE();

    /* load all four maps into the hardware */
    musycc_serv_req(pi, SR_TIMESLOT_MAP | SR_RX_DIRECTION);
    musycc_serv_req(pi, SR_TIMESLOT_MAP | SR_TX_DIRECTION);
    musycc_serv_req(pi, SR_SUBCHANNEL_MAP | SR_RX_DIRECTION);
    musycc_serv_req(pi, SR_SUBCHANNEL_MAP | SR_TX_DIRECTION);
}
#endif
 632
 633
#ifdef SBE_WAN256T3_ENABLE
/*
 * WAN256T3 variant: rebuild the group's 128-entry RX/TX timeslot maps
 * from the hypersize mask and each channel's 56k/64k mode, then push
 * both maps to the hardware.
 *
 * NOTE(review): both #ifdef arms below can be compiled in together, in
 * which case the second assignment (using 'hyperdummy', not defined in
 * this file) overwrites the first -- confirm intended configuration.
 */
void
musycc_update_timeslots(mpi_t *pi)
{
    mch_t      *ch;

    u_int8_t    ts, hmask, tsen;
    int         gchan;
    int         i;

#ifdef SBE_PMCC4_ENABLE
    hmask = (0x1f << pi->up->p.hypersize) & 0x1f;
#endif
#ifdef SBE_WAN256T3_ENABLE
    hmask = (0x1f << hyperdummy) & 0x1f;
#endif
    for (i = 0; i < 128; i++) {
        /* map timeslot i to a group channel, wrapping at MUSYCC_NCHANS */
        gchan = ((pi->portnum * MUSYCC_NCHANS) + (i & hmask)) % MUSYCC_NCHANS;
        ch = pi->chan[gchan];
        if (ch->p.mode_56k)
            tsen = MODE_56KBPS;
        else
            tsen = MODE_64KBPS;     /* also the default */
        /* only the 32-slot window belonging to this port is enabled */
        ts = ((pi->portnum % 4) == (i / 32)) ? (tsen << 5) | (i & hmask) : 0;
        pi->regram->rtsm[i] = ts;
        pi->regram->ttsm[i] = ts;
    }
    FLUSH_MEM_WRITE();
    musycc_serv_req(pi, SR_TIMESLOT_MAP | SR_RX_DIRECTION);
    musycc_serv_req(pi, SR_TIMESLOT_MAP | SR_TX_DIRECTION);
}
#endif
 666
 667
 668 /*
 669  * This routine converts a generic library channel configuration parameter
 670  * into a hardware specific register value (IE. MUSYCC CCD Register).
 671  */
 672u_int32_t
 673musycc_chan_proto(int proto)
 674{
 675    int         reg;
 676
 677    switch (proto) {
 678    case CFG_CH_PROTO_TRANS:        /* 0 */
 679        reg = MUSYCC_CCD_TRANS;
 680        break;
 681    case CFG_CH_PROTO_SS7:          /* 1 */
 682        reg = MUSYCC_CCD_SS7;
 683        break;
 684    default:
 685    case CFG_CH_PROTO_ISLP_MODE:   /* 4 */
 686    case CFG_CH_PROTO_HDLC_FCS16:  /* 2 */
 687        reg = MUSYCC_CCD_HDLC_FCS16;
 688        break;
 689    case CFG_CH_PROTO_HDLC_FCS32:  /* 3 */
 690        reg = MUSYCC_CCD_HDLC_FCS32;
 691        break;
 692    }
 693
 694    return reg;
 695}
 696
#ifdef SBE_WAN256T3_ENABLE
/*
 * One-time hardware bring-up for a WAN256T3 group: point the chip at the
 * group register image, configure group/port descriptors and the message
 * length, issue GROUP_INIT for both directions, then program idle codes
 * and the timeslot maps.
 */
static void __init
musycc_init_port(mpi_t *pi)
{
    /* tell the chip where this group's shared register image lives */
    pci_write_32((u_int32_t *) &pi->reg->gbp, OS_vtophys(pi->regram));

    pi->regram->grcd =
        __constant_cpu_to_le32(MUSYCC_GRCD_RX_ENABLE |
                                MUSYCC_GRCD_TX_ENABLE |
                                MUSYCC_GRCD_SF_ALIGN |
                                MUSYCC_GRCD_SUBCHAN_DISABLE |
                                MUSYCC_GRCD_OOFMP_DISABLE |
                                MUSYCC_GRCD_COFAIRQ_DISABLE |
                                MUSYCC_GRCD_MC_ENABLE |
                       (MUSYCC_GRCD_POLLTH_32 << MUSYCC_GRCD_POLLTH_SHIFT));

    pi->regram->pcd =
        __constant_cpu_to_le32(MUSYCC_PCD_E1X4_MODE |
                                MUSYCC_PCD_TXDATA_RISING |
                                MUSYCC_PCD_TX_DRIVEN);

    /* Message length descriptor */
       pi->regram->mld = __constant_cpu_to_le32(cxt1e1_max_mru | (cxt1e1_max_mru << 16));
    FLUSH_MEM_WRITE();

    /* GROUP_INIT makes the MDT/timeslot writes below operational */
    musycc_serv_req(pi, SR_GROUP_INIT | SR_RX_DIRECTION);
    musycc_serv_req(pi, SR_GROUP_INIT | SR_TX_DIRECTION);

    musycc_init_mdt(pi);

    musycc_update_timeslots(pi);
}
#endif
 730
 731
 732status_t    __init
 733musycc_init(ci_t *ci)
 734{
 735    char       *regaddr;        /* temp for address boundary calculations */
 736    int         i, gchan;
 737
 738    OS_sem_init(&ci->sem_wdbusy, SEM_AVAILABLE);       /* watchdog exclusion */
 739
 740    /*
 741     * Per MUSYCC manual, Section 6.3.4 - "The host must allocate a dword
 742     * aligned memory segment for interrupt queue pointers."
 743     */
 744
 745#define INT_QUEUE_BOUNDARY  4
 746
 747    regaddr = OS_kmalloc((INT_QUEUE_SIZE + 1) * sizeof(u_int32_t));
 748    if (regaddr == 0)
 749        return ENOMEM;
 750    ci->iqd_p_saved = regaddr;      /* save orig value for free's usage */
 751    ci->iqd_p = (u_int32_t *) ((unsigned long) (regaddr + INT_QUEUE_BOUNDARY - 1) &
 752                               (~(INT_QUEUE_BOUNDARY - 1)));    /* this calculates
 753                                                                 * closest boundary */
 754
 755    for (i = 0; i < INT_QUEUE_SIZE; i++)
 756        ci->iqd_p[i] = __constant_cpu_to_le32(INT_EMPTY_ENTRY);
 757
 758    for (i = 0; i < ci->max_port; i++) {
 759        mpi_t      *pi = &ci->port[i];
 760
 761        /*
 762         * Per MUSYCC manual, Section 6.3.2 - "The host must allocate a 2KB
 763         * bound memory segment for Channel Group 0."
 764         */
 765
 766#define GROUP_BOUNDARY   0x800
 767
 768        regaddr = OS_kmalloc(sizeof(struct musycc_groupr) + GROUP_BOUNDARY);
 769        if (regaddr == 0) {
 770            for (gchan = 0; gchan < i; gchan++) {
 771                pi = &ci->port[gchan];
 772                OS_kfree(pi->reg);
 773                pi->reg = 0;
 774            }
 775            return ENOMEM;
 776        }
 777        pi->regram_saved = regaddr; /* save orig value for free's usage */
 778        pi->regram = (struct musycc_groupr *) ((unsigned long) (regaddr + GROUP_BOUNDARY - 1) &
 779                                               (~(GROUP_BOUNDARY - 1)));        /* this calculates
 780                                                                                 * closest boundary */
 781    }
 782
 783    /* any board centric MUSYCC commands will use group ZERO as its "home" */
 784    ci->regram = ci->port[0].regram;
 785    musycc_serv_req(&ci->port[0], SR_CHIP_RESET);
 786
 787    pci_write_32((u_int32_t *) &ci->reg->gbp, OS_vtophys(ci->regram));
 788    pci_flush_write(ci);
 789#ifdef CONFIG_SBE_PMCC4_NCOMM
 790    ci->regram->__glcd = __constant_cpu_to_le32(GCD_MAGIC);
 791#else
 792    /* standard driver POLLS for INTB via CPLD register */
 793    ci->regram->__glcd = __constant_cpu_to_le32(GCD_MAGIC | MUSYCC_GCD_INTB_DISABLE);
 794#endif
 795
 796    ci->regram->__iqp = cpu_to_le32(OS_vtophys(&ci->iqd_p[0]));
 797    ci->regram->__iql = __constant_cpu_to_le32(INT_QUEUE_SIZE - 1);
 798    pci_write_32((u_int32_t *) &ci->reg->dacbp, 0);
 799    FLUSH_MEM_WRITE();
 800
 801    ci->state = C_RUNNING;          /* mark as full interrupt processing
 802                                     * available */
 803
 804    musycc_serv_req(&ci->port[0], SR_GLOBAL_INIT);     /* FIRST INTERRUPT ! */
 805
 806    /* sanity check settable parameters */
 807
 808       if (cxt1e1_max_mru > 0xffe) {
 809        pr_warning("Maximum allowed MRU exceeded, resetting %d to %d.\n",
 810                                  cxt1e1_max_mru, 0xffe);
 811               cxt1e1_max_mru = 0xffe;
 812    }
 813       if (cxt1e1_max_mtu > 0xffe) {
 814        pr_warning("Maximum allowed MTU exceeded, resetting %d to %d.\n",
 815                                  cxt1e1_max_mtu, 0xffe);
 816               cxt1e1_max_mtu = 0xffe;
 817    }
 818#ifdef SBE_WAN256T3_ENABLE
 819    for (i = 0; i < MUSYCC_NPORTS; i++)
 820        musycc_init_port(&ci->port[i]);
 821#endif
 822
 823    return SBE_DRVR_SUCCESS;        /* no error */
 824}
 825
 826
/*
 * musycc_bh_tx_eom - bottom-half service of a Transmit End-Of-Message event.
 * @pi:    port (group) on which the event was reported
 * @gchan: group-relative channel number
 *
 * Walks the channel's transmit descriptor ring from txd_irq_srv, reclaiming
 * every descriptor the MUSYCC has returned to host ownership: releases the
 * attached memory token, reduces the channel's and card's pending-TX byte
 * counts, and increments txd_free.  Once enough descriptors are free, a
 * flow-controlled transmit queue is re-enabled via sd_enable_xmit().
 *
 * NOTE(review): runs in interrupt bottom-half context; the #if 0'd
 * ch_txlock acquire/release below suggests locking was judged unnecessary
 * here - confirm against the callers before re-enabling it.
 */
void
musycc_bh_tx_eom(mpi_t *pi, int gchan)
{
    mch_t      *ch;
    struct mdesc *md;

#if 0
#ifndef SBE_ISR_INLINE
    unsigned long flags;

#endif
#endif
    volatile u_int32_t status;

    ch = pi->chan[gchan];
    if (ch == 0 || ch->state != UP) {
        if (cxt1e1_log_level >= LOG_ERROR)
            pr_info("%s: intr: xmit EOM on uninitialized channel %d\n",
                    pi->up->devname, gchan);
    }
    if (ch == 0 || ch->mdt == 0)
        return;                     /* note: mdt==0 implies a malloc()
                                     * failure w/in chan_up() routine */

#if 0
#ifdef SBE_ISR_INLINE
    spin_lock_irq(&ch->ch_txlock);
#else
    spin_lock_irqsave(&ch->ch_txlock, flags);
#endif
#endif
    /* reclaim descriptors until one still owned by the MUSYCC is found */
    do {
        FLUSH_MEM_READ();
        md = ch->txd_irq_srv;
        status = le32_to_cpu(md->status);

        /*
         * Note: Per MUSYCC Ref 6.4.9, the host does not poll a host-owned
         * Transmit Buffer Descriptor during Transparent Mode.
         */
        if (status & MUSYCC_TX_OWNED) {
            int         readCount, loopCount;

            /***********************************************************/
            /* HW Bug Fix                                              */
            /* ----------                                              */
            /* Under certain PCI Bus loading conditions, the data      */
            /* associated with an update of Shared Memory is delayed   */
            /* relative to its PCI Interrupt.  This is caught when     */
            /* the host determines it does not yet OWN the descriptor. */
            /***********************************************************/

            readCount = 0;
            /* spin briefly (bounded) waiting for ownership to transfer */
            while (status & MUSYCC_TX_OWNED) {
                for (loopCount = 0; loopCount < 0x30; loopCount++)
                    OS_uwait_dummy();  /* use call to avoid optimization
                                         * removal of dummy delay */
                FLUSH_MEM_READ();
                status = le32_to_cpu(md->status);
                if (readCount++ > 40)
                    break;          /* don't wait any longer */
            }
            if (status & MUSYCC_TX_OWNED) {
                /* ownership never transferred - diagnose and stop walking */
                if (cxt1e1_log_level >= LOG_MONITOR) {
                    pr_info("%s: Port %d Chan %2d - unexpected TX msg ownership intr (md %p sts %x)\n",
                            pi->up->devname, pi->portnum, ch->channum,
                            md, status);
                    pr_info("++ User 0x%p IRQ_SRV 0x%p USR_ADD 0x%p QStopped %x, start_tx %x tx_full %d txd_free %d mode %x\n",
                            ch->user, ch->txd_irq_srv, ch->txd_usr_add,
                            sd_queue_stopped(ch->user),
                            ch->ch_start_tx, ch->tx_full, ch->txd_free, ch->p.chan_mode);
                    musycc_dump_txbuffer_ring(ch, 0);
                }
                break;              /* Not our mdesc, done */
            } else {
                if (cxt1e1_log_level >= LOG_MONITOR)
                    pr_info("%s: Port %d Chan %2d - recovered TX msg ownership [%d] (md %p sts %x)\n",
                            pi->up->devname, pi->portnum, ch->channum, readCount, md, status);
            }
        }
        /* advance the interrupt-service pointer to the next ring entry */
        ch->txd_irq_srv = md->snext;

        md->data = 0;
        if (md->mem_token != 0) {
            /* decrement channel's pending-TX byte count */
            atomic_sub(OS_mem_token_tlen(md->mem_token), &ch->tx_pending);
            /* decrement card-wide pending-TX byte count */
            atomic_sub(OS_mem_token_tlen(md->mem_token), &pi->up->tx_pending);
#ifdef SBE_WAN256T3_ENABLE
            if (!atomic_read(&pi->up->tx_pending))
                wan256t3_led(pi->up, LED_TX, 0);
#endif

#ifdef CONFIG_SBE_WAN256T3_NCOMM
            /* callback that our packet was sent */
            {
                int         hdlcnum = (pi->portnum * 32 + gchan);

                if (hdlcnum >= 228) {
                    if (nciProcess_TX_complete)
                        (*nciProcess_TX_complete) (hdlcnum,
                                                   getuserbychan(gchan));
                }
            }
#endif                              /*** CONFIG_SBE_WAN256T3_NCOMM ***/

            /* release the transmit buffer back to the OS */
            OS_mem_token_free_irq(md->mem_token);
            md->mem_token = 0;
        }
        md->status = 0;
#ifdef RLD_TXFULL_DEBUG
        if (cxt1e1_log_level >= LOG_MONITOR2)
            pr_info("~~ tx_eom: tx_full %x  txd_free %d -> %d\n",
                    ch->tx_full, ch->txd_free, ch->txd_free + 1);
#endif
        ++ch->txd_free;
        FLUSH_MEM_WRITE();

        if ((ch->p.chan_mode != CFG_CH_PROTO_TRANS) && (status & EOBIRQ_ENABLE)) {
            if (cxt1e1_log_level >= LOG_MONITOR)
                pr_info("%s: Mode (%x) incorrect EOB status (%x)\n",
                        pi->up->devname, ch->p.chan_mode, status);
            if ((status & EOMIRQ_ENABLE) == 0)
                break;
        }
    } while ((ch->p.chan_mode != CFG_CH_PROTO_TRANS) && ((status & EOMIRQ_ENABLE) == 0));
    /*
     * NOTE: (The above 'while' is coupled w/ previous 'do', way above.) Each
     * Transparent data buffer has the EOB bit, and NOT the EOM bit, set and
     * will furthermore have a separate IQD associated with each messages
     * buffer.
     */

    FLUSH_MEM_READ();
    /*
     * Smooth flow control hysterisis by maintaining task stoppage until half
     * the available write buffers are available.
     */
    if (ch->tx_full && (ch->txd_free >= (ch->txd_num / 2))) {
        /*
         * Then, only releave task stoppage if we actually have enough
         * buffers to service the last requested packet.  It may require MORE
         * than half the available!
         */
        if (ch->txd_free >= ch->txd_required) {

#ifdef RLD_TXFULL_DEBUG
            if (cxt1e1_log_level >= LOG_MONITOR2)
                pr_info("tx_eom[%d]: enable xmit tx_full no more, txd_free %d txd_num/2 %d\n",
                        ch->channum,
                        ch->txd_free, ch->txd_num / 2);
#endif
            ch->tx_full = 0;
            ch->txd_required = 0;
            sd_enable_xmit(ch->user);  /* re-enable to catch flow controlled
                                         * channel */
        }
    }
#ifdef RLD_TXFULL_DEBUG
    else if (ch->tx_full) {
        if (cxt1e1_log_level >= LOG_MONITOR2)
            pr_info("tx_eom[%d]: bypass TX enable though room available? (txd_free %d txd_num/2 %d)\n",
                    ch->channum,
                    ch->txd_free, ch->txd_num / 2);
    }
#endif

    FLUSH_MEM_WRITE();
#if 0
#ifdef SBE_ISR_INLINE
    spin_unlock_irq(&ch->ch_txlock);
#else
    spin_unlock_irqrestore(&ch->ch_txlock, flags);
#endif
#endif
}
1003
1004
1005static void
1006musycc_bh_rx_eom(mpi_t *pi, int gchan)
1007{
1008    mch_t      *ch;
1009    void       *m, *m2;
1010    struct mdesc *md;
1011    volatile u_int32_t status;
1012    u_int32_t   error;
1013
1014    ch = pi->chan[gchan];
1015    if (ch == 0 || ch->state != UP) {
1016        if (cxt1e1_log_level > LOG_ERROR)
1017            pr_info("%s: intr: receive EOM on uninitialized channel %d\n",
1018                    pi->up->devname, gchan);
1019        return;
1020    }
1021    if (ch->mdr == 0)
1022        return;                     /* can this happen ? */
1023
1024    for (;;) {
1025        FLUSH_MEM_READ();
1026        md = &ch->mdr[ch->rxix_irq_srv];
1027        status = le32_to_cpu(md->status);
1028        if (!(status & HOST_RX_OWNED))
1029            break;                  /* Not our mdesc, done */
1030        m = md->mem_token;
1031        error = (status >> 16) & 0xf;
1032        if (error == 0) {
1033#ifdef CONFIG_SBE_WAN256T3_NCOMM
1034            int         hdlcnum = (pi->portnum * 32 + gchan);
1035
1036            /*
1037             * if the packet number belongs to NCOMM, then send it to the TMS
1038             * driver
1039             */
1040            if (hdlcnum >= 228) {
1041                if (nciProcess_RX_packet)
1042                    (*nciProcess_RX_packet) (hdlcnum, status & 0x3fff, m, ch->user);
1043            } else
1044#endif                              /*** CONFIG_SBE_WAN256T3_NCOMM ***/
1045
1046            {
1047                if ((m2 = OS_mem_token_alloc(cxt1e1_max_mru))) {
1048                    /* substitute the mbuf+cluster */
1049                    md->mem_token = m2;
1050                    md->data = cpu_to_le32(OS_vtophys(OS_mem_token_data(m2)));
1051
1052                    /* pass the received mbuf upward */
1053                    sd_recv_consume(m, status & LENGTH_MASK, ch->user);
1054                    ch->s.rx_packets++;
1055                    ch->s.rx_bytes += status & LENGTH_MASK;
1056                } else
1057                    ch->s.rx_dropped++;
1058            }
1059        } else if (error == ERR_FCS)
1060            ch->s.rx_crc_errors++;
1061        else if (error == ERR_ALIGN)
1062            ch->s.rx_missed_errors++;
1063        else if (error == ERR_ABT)
1064            ch->s.rx_missed_errors++;
1065        else if (error == ERR_LNG)
1066            ch->s.rx_length_errors++;
1067        else if (error == ERR_SHT)
1068            ch->s.rx_length_errors++;
1069        FLUSH_MEM_WRITE();
1070               status = cxt1e1_max_mru;
1071        if (ch->p.chan_mode == CFG_CH_PROTO_TRANS)
1072            status |= EOBIRQ_ENABLE;
1073        md->status = cpu_to_le32(status);
1074
1075        /* Check next mdesc in the ring */
1076        if (++ch->rxix_irq_srv >= ch->rxd_num)
1077            ch->rxix_irq_srv = 0;
1078        FLUSH_MEM_WRITE();
1079    }
1080}
1081
1082
/*
 * musycc_intr_th_handler - top-half (hard) interrupt handler.
 * @devp: board context (ci_t *), passed as an opaque pointer
 *
 * Reads the Interrupt Status Descriptor (ISD), applies two documented
 * hardware workarounds (stale 'nextInt' on heavy PCI load, and the Cn847x
 * full-queue write-back index fix), records the new interrupt-queue tail,
 * acknowledges the ISD, and defers descriptor processing to the bottom
 * half (tasklet / immediate queue / inline, per build configuration).
 *
 * Returns IRQ_NONE when the interrupt is not ours (shared line, or
 * hardware not yet initialized), IRQ_HANDLED otherwise.
 */
irqreturn_t
musycc_intr_th_handler(void *devp)
{
    ci_t       *ci = (ci_t *) devp;
    volatile u_int32_t status, currInt = 0;
    u_int32_t   nextInt, intCnt;

    /*
     * Hardware not available, potential interrupt hang.  But since interrupt
     * might be shared, just return.
     */
    if (ci->state == C_INIT)
        return IRQ_NONE;
    /*
     * Marked as hardware available. Don't service interrupts, just clear the
     * event.
     */

    if (ci->state == C_IDLE) {
        status = pci_read_32((u_int32_t *) &ci->reg->isd);

        /* clear the interrupt but process nothing else */
        pci_write_32((u_int32_t *) &ci->reg->isd, status);
        return IRQ_HANDLED;
    }
    FLUSH_PCI_READ();
    FLUSH_MEM_READ();

    status = pci_read_32((u_int32_t *) &ci->reg->isd);
    nextInt = INTRPTS_NEXTINT(status);
    intCnt = INTRPTS_INTCNT(status);
    ci->intlog.drvr_intr_thcount++;

    /*********************************************************/
    /* HW Bug Fix                                            */
    /* ----------                                            */
    /* Under certain PCI Bus loading conditions, the         */
    /* MUSYCC looses the data associated with an update      */
    /* of its ISD and erroneously returns the immediately    */
    /* preceding 'nextInt' value.  However, the 'intCnt'     */
    /* value appears to be correct.  By not starting service */
    /* where the 'missing' 'nextInt' SHOULD point causes     */
    /* the IQD not to be serviced - the 'not serviced'       */
    /* entries then remain and continue to increase as more  */
    /* incorrect ISD's are encountered.                      */
    /*********************************************************/

    if (nextInt != INTRPTS_NEXTINT(ci->intlog.this_status_new)) {
        if (cxt1e1_log_level >= LOG_MONITOR) {
            pr_info("%s: note - updated ISD from %08x to %08x\n",
                    ci->devname, status,
              (status & (~INTRPTS_NEXTINT_M)) | ci->intlog.this_status_new);
        }
        /*
         * Replace bogus status with software corrected value.
         *
         * It's not known whether, during this problem occurrence, if the
         * INTFULL bit is correctly reported or not.
         */
        status = (status & (~INTRPTS_NEXTINT_M)) | (ci->intlog.this_status_new);
        nextInt = INTRPTS_NEXTINT(status);
    }
    /**********************************************/
    /* Cn847x Bug Fix                             */
    /* --------------                             */
    /* Fix for inability to write back same index */
    /* as read for a full interrupt queue.        */
    /**********************************************/

    if (intCnt == INT_QUEUE_SIZE)
        currInt = ((intCnt - 1) + nextInt) & (INT_QUEUE_SIZE - 1);
    else
        /************************************************/
        /* Interrupt Write Location Issues              */
        /* -------------------------------              */
        /* When the interrupt status descriptor is      */
        /* written, the interrupt line is de-asserted   */
        /* by the Cn847x.  In the case of MIPS          */
        /* microprocessors, this must occur at the      */
        /* beginning of the interrupt handler so that   */
        /* the interrupt handle is not re-entered due   */
        /* to interrupt dis-assertion latency.          */
        /* In the case of all other processors, this    */
        /* action should occur at the end of the        */
        /* interrupt handler to avoid overwriting the   */
        /* interrupt queue.                             */
        /************************************************/

    /* NOTE: despite the comment block in between, the 'else' above binds
     * to the 'if' below - the partial-queue and empty-queue cases are only
     * evaluated when intCnt != INT_QUEUE_SIZE. */
    if (intCnt)
        currInt = (intCnt + nextInt) & (INT_QUEUE_SIZE - 1);
    else {
        /*
         * NOTE: Servicing an interrupt whose ISD contains a count of ZERO
         * can be indicative of a Shared Interrupt chain.  Our driver can be
         * called from the system's interrupt handler as a matter of the OS
         * walking the chain.  As the chain is walked, the interrupt will
         * eventually be serviced by the correct driver/handler.
         */
#if 0
        /* chained interrupt = not ours */
        pr_info(">> %s: intCnt NULL, sts %x, possibly a chained interrupt!\n",
                ci->devname, status);
#endif
        return IRQ_NONE;
    }

    /* publish the new queue tail for the bottom half to consume */
    ci->iqp_tailx = currInt;

    currInt <<= INTRPTS_NEXTINT_S;
    ci->intlog.last_status_new = ci->intlog.this_status_new;
    ci->intlog.this_status_new = currInt;

    if ((cxt1e1_log_level >= LOG_WARN) && (status & INTRPTS_INTFULL_M))
        pr_info("%s: Interrupt queue full condition occurred\n", ci->devname);
    if (cxt1e1_log_level >= LOG_DEBUG)
        pr_info("%s: interrupts pending, isd @ 0x%p: %x curr %d cnt %d NEXT %d\n",
                ci->devname, &ci->reg->isd,
        status, nextInt, intCnt, (intCnt + nextInt) & (INT_QUEUE_SIZE - 1));

    FLUSH_MEM_WRITE();
#if defined(SBE_ISR_TASKLET)
    pci_write_32((u_int32_t *) &ci->reg->isd, currInt);
    atomic_inc(&ci->bh_pending);
    tasklet_schedule(&ci->ci_musycc_isr_tasklet);
#elif defined(SBE_ISR_IMMEDIATE)
    pci_write_32((u_int32_t *) &ci->reg->isd, currInt);
    atomic_inc(&ci->bh_pending);
    queue_task(&ci->ci_musycc_isr_tq, &tq_immediate);
    mark_bh(IMMEDIATE_BH);
#elif defined(SBE_ISR_INLINE)
    (void) musycc_intr_bh_tasklet(ci);
    pci_write_32((u_int32_t *) &ci->reg->isd, currInt);
#endif
    return IRQ_HANDLED;
}
1218
1219
/*
 * musycc_intr_bh_tasklet - bottom-half interrupt processing.
 * @ci: board context
 *
 * Walks the interrupt queue between iqp_headx (head, advanced here) and
 * iqp_tailx (tail, advanced by the top half), decoding each Interrupt
 * Queue Descriptor into group/channel/event/error fields and dispatching
 * to the EOM service routines.  Also applies the documented workaround
 * for IQDs that are read before the bridge has delivered their data, and
 * performs TX/RX channel reactivation for the service-affecting ONR and
 * BUFF errors per MUSYCC manual sections 6.4.8.3/6.4.8.4.
 *
 * Return type depends on the build: unsigned long (always 0) for the
 * SBE_ISR_IMMEDIATE configuration, void otherwise.
 */
#if defined(SBE_ISR_IMMEDIATE)
unsigned long
#else
void
#endif
musycc_intr_bh_tasklet(ci_t *ci)
{
    mpi_t      *pi;
    mch_t      *ch;
    unsigned int intCnt;
    volatile u_int32_t currInt = 0;
    volatile unsigned int headx, tailx;
    int         readCount, loopCount;
    int         group, gchan, event, err, tx;
    u_int32_t   badInt = INT_EMPTY_ENTRY;
    u_int32_t   badInt2 = INT_EMPTY_ENTRY2;

    /*
     * Hardware not available, potential interrupt hang.  But since interrupt
     * might be shared, just return.
     */
    if ((drvr_state != SBE_DRVR_AVAILABLE) || (ci->state == C_INIT)) {
#if defined(SBE_ISR_IMMEDIATE)
        return 0L;
#else
        return;
#endif
    }
    /* NOTE(review): drvr_state was already checked above; this second,
     * build-dependent check appears redundant - confirm before removing. */
#if defined(SBE_ISR_TASKLET) || defined(SBE_ISR_IMMEDIATE)
    if (drvr_state != SBE_DRVR_AVAILABLE) {
#if defined(SBE_ISR_TASKLET)
        return;
#elif defined(SBE_ISR_IMMEDIATE)
        return 0L;
#endif
    }
#elif defined(SBE_ISR_INLINE)
    /* no semaphore taken, no double checks */
#endif

    ci->intlog.drvr_intr_bhcount++;
    FLUSH_MEM_READ();
    {
        /* track the high-water mark of queued bottom halves (debug aid) */
        unsigned int bh = atomic_read(&ci->bh_pending);

        max_bh = max(bh, max_bh);
    }
    atomic_set(&ci->bh_pending, 0);/* if here, no longer pending */
    while ((headx = ci->iqp_headx) != (tailx = ci->iqp_tailx)) {
        /* number of outstanding IQDs, accounting for ring wrap */
        intCnt = (tailx >= headx) ? (tailx - headx) : (tailx - headx + INT_QUEUE_SIZE);
        currInt = le32_to_cpu(ci->iqd_p[headx]);

        max_intcnt = max(intCnt, max_intcnt);  /* RLD DEBUG */

        /**************************************************/
        /* HW Bug Fix                                     */
        /* ----------                                     */
        /* The following code checks for the condition    */
        /* of interrupt assertion before interrupt        */
        /* queue update.  This is a problem on several    */
        /* PCI-Local bridge chips found on some products. */
        /**************************************************/

        readCount = 0;
        if ((currInt == badInt) || (currInt == badInt2))
            ci->intlog.drvr_int_failure++;

        /* bounded spin waiting for the IQD contents to arrive */
        while ((currInt == badInt) || (currInt == badInt2)) {
            for (loopCount = 0; loopCount < 0x30; loopCount++)
                OS_uwait_dummy();  /* use call to avoid optimization removal
                                     * of dummy delay */
            FLUSH_MEM_READ();
            currInt = le32_to_cpu(ci->iqd_p[headx]);
            if (readCount++ > 20)
                break;
        }

        if ((currInt == badInt) || (currInt == badInt2)) {      /* catch failure of Bug
                                                                 * Fix checking */
            if (cxt1e1_log_level >= LOG_WARN)
                pr_info("%s: Illegal Interrupt Detected @ 0x%p, mod %d.)\n",
                        ci->devname, &ci->iqd_p[headx], headx);

            /*
             * If the descriptor has not recovered, then leaving the EMPTY
             * entry set will not signal to the MUSYCC that this descriptor
             * has been serviced. The Interrupt Queue can then start losing
             * available descriptors and MUSYCC eventually encounters and
             * reports the INTFULL condition.  Per manual, changing any bit
             * marks descriptor as available, thus the use of different
             * EMPTY_ENTRY values.
             */

            if (currInt == badInt)
                ci->iqd_p[headx] = __constant_cpu_to_le32(INT_EMPTY_ENTRY2);
            else
                ci->iqd_p[headx] = __constant_cpu_to_le32(INT_EMPTY_ENTRY);
            ci->iqp_headx = (headx + 1) & (INT_QUEUE_SIZE - 1); /* insure wrapness */
            FLUSH_MEM_WRITE();
            FLUSH_MEM_READ();
            continue;
        }
        /* decode the IQD into its component fields */
        group = INTRPT_GRP(currInt);
        gchan = INTRPT_CH(currInt);
        event = INTRPT_EVENT(currInt);
        err = INTRPT_ERROR(currInt);
        tx = currInt & INTRPT_DIR_M;

        /* mark the descriptor serviced so the MUSYCC can reuse it */
        ci->iqd_p[headx] = __constant_cpu_to_le32(INT_EMPTY_ENTRY);
        FLUSH_MEM_WRITE();

        if (cxt1e1_log_level >= LOG_DEBUG) {
            if (err != 0)
                pr_info(" %08x -> err: %2d,", currInt, err);

            pr_info("+ interrupt event: %d, grp: %d, chan: %2d, side: %cX\n",
                    event, group, gchan, tx ? 'T' : 'R');
        }
        pi = &ci->port[group];      /* notice that here we assume 1-1 group -
                                     * port mapping */
        ch = pi->chan[gchan];
        switch (event) {
        case EVE_SACK:              /* Service Request Acknowledge */
            if (cxt1e1_log_level >= LOG_DEBUG) {
                volatile u_int32_t r;

                r = pci_read_32((u_int32_t *) &pi->reg->srd);
                pr_info("- SACK cmd: %08x (hdw= %08x)\n", pi->sr_last, r);
            }
            SD_SEM_GIVE(&pi->sr_sem_wait);     /* wake up waiting process */
            break;
        case EVE_CHABT:     /* Change To Abort Code (0x7e -> 0xff) */
        case EVE_CHIC:              /* Change To Idle Code (0xff -> 0x7e) */
            break;
        case EVE_EOM:               /* End Of Message */
        case EVE_EOB:               /* End Of Buffer (Transparent mode) */
            if (tx)
                musycc_bh_tx_eom(pi, gchan);
            else
                musycc_bh_rx_eom(pi, gchan);
#if 0
            break;
#else
            /*
             * MUSYCC Interrupt Descriptor section states that EOB and EOM
             * can be combined with the NONE error (as well as others).  So
             * drop thru to catch this...
             */
#endif
        case EVE_NONE:
            if (err == ERR_SHT)
                ch->s.rx_length_errors++;
            break;
        default:
            if (cxt1e1_log_level >= LOG_WARN)
                pr_info("%s: unexpected interrupt event: %d, iqd[%d]: %08x, port: %d\n", ci->devname,
                        event, headx, currInt, group);
            break;
        }                           /* switch on event */


        /*
         * Per MUSYCC Manual, Section 6.4.8.3 [Transmit Errors], TX errors
         * are service-affecting and require action to resume normal
         * bit-level processing.
         */

        switch (err) {
        case ERR_ONR:
            /*
             * Per MUSYCC manual, Section  6.4.8.3 [Transmit Errors], this
             * error requires Transmit channel reactivation.
             *
             * Per MUSYCC manual, Section  6.4.8.4 [Receive Errors], this error
             * requires Receive channel reactivation.
             */
            if (tx) {

                /*
                 * TX ONR Error only occurs when channel is configured for
                 * Transparent Mode.  However, this code will catch and
                 * re-activate on ANY TX ONR error.
                 */

                /*
                 * Set flag to re-enable on any next transmit attempt.
                 */
                ch->ch_start_tx = CH_START_TX_ONR;

                {
#ifdef RLD_TRANS_DEBUG
                    if (1 || cxt1e1_log_level >= LOG_MONITOR)
#else
                    if (cxt1e1_log_level >= LOG_MONITOR)
#endif
                    {
                        pr_info("%s: TX buffer underflow [ONR] on channel %d, mode %x QStopped %x free %d\n",
                                ci->devname, ch->channum, ch->p.chan_mode, sd_queue_stopped(ch->user), ch->txd_free);
#ifdef RLD_DEBUG
                        if (ch->p.chan_mode == 2) {     /* problem = ONR on HDLC
                                                         * mode */
                            pr_info("++ Failed Last %x Next %x QStopped %x, start_tx %x tx_full %d txd_free %d mode %x\n",
                                    (u_int32_t) ch->txd_irq_srv, (u_int32_t) ch->txd_usr_add,
                                    sd_queue_stopped(ch->user),
                                    ch->ch_start_tx, ch->tx_full, ch->txd_free, ch->p.chan_mode);
                            musycc_dump_txbuffer_ring(ch, 0);
                        }
#endif
                    }
                }
            } else {                 /* RX buffer overrun */
                /*
                 * Per MUSYCC manual, Section 6.4.8.4 [Receive Errors],
                 * channel recovery for this RX ONR error IS required.  It is
                 * also suggested to increase the number of receive buffers
                 * for this channel.  Receive channel reactivation IS
                 * required, and data has been lost.
                 */
                ch->s.rx_over_errors++;
                ch->ch_start_rx = CH_START_RX_ONR;

                if (cxt1e1_log_level >= LOG_WARN) {
                    pr_info("%s: RX buffer overflow [ONR] on channel %d, mode %x\n",
                            ci->devname, ch->channum, ch->p.chan_mode);
                    //musycc_dump_rxbuffer_ring (ch, 0);        /* RLD DEBUG */
                }
            }
            musycc_chan_restart(ch);
            break;
        case ERR_BUF:
            if (tx) {
                ch->s.tx_fifo_errors++;
                ch->ch_start_tx = CH_START_TX_BUF;
                /*
                 * Per MUSYCC manual, Section  6.4.8.3 [Transmit Errors],
                 * this BUFF error requires Transmit channel reactivation.
                 */
                if (cxt1e1_log_level >= LOG_MONITOR)
                    pr_info("%s: TX buffer underrun [BUFF] on channel %d, mode %x\n",
                            ci->devname, ch->channum, ch->p.chan_mode);
            } else {                 /* RX buffer overrun */
                ch->s.rx_over_errors++;
                /*
                 * Per MUSYCC manual, Section 6.4.8.4 [Receive Errors], HDLC
                 * mode requires NO recovery for this RX BUFF error is
                 * required.  It is suggested to increase the FIFO buffer
                 * space for this channel.  Receive channel reactivation is
                 * not required, but data has been lost.
                 */
                if (cxt1e1_log_level >= LOG_WARN)
                    pr_info("%s: RX buffer overrun [BUFF] on channel %d, mode %x\n",
                            ci->devname, ch->channum, ch->p.chan_mode);
                /*
                 * Per MUSYCC manual, Section 6.4.9.4 [Receive Errors],
                 * Transparent mode DOES require recovery for the RX BUFF
                 * error.  It is suggested to increase the FIFO buffer space
                 * for this channel.  Receive channel reactivation IS
                 * required and data has been lost.
                 */
                if (ch->p.chan_mode == CFG_CH_PROTO_TRANS)
                    ch->ch_start_rx = CH_START_RX_BUF;
            }

            if (tx || (ch->p.chan_mode == CFG_CH_PROTO_TRANS))
                musycc_chan_restart(ch);
            break;
        default:
            break;
        }                           /* switch on err */

        /* Check for interrupt lost condition */
        if ((currInt & INTRPT_ILOST_M) && (cxt1e1_log_level >= LOG_ERROR))
            pr_info("%s: Interrupt queue overflow - ILOST asserted\n",
                    ci->devname);
        ci->iqp_headx = (headx + 1) & (INT_QUEUE_SIZE - 1);     /* insure wrapness */
        FLUSH_MEM_WRITE();
        FLUSH_MEM_READ();
    }                               /* while */
    if ((cxt1e1_log_level >= LOG_MONITOR2) && (ci->iqp_headx != ci->iqp_tailx)) {
        int         bh;

        bh = atomic_read(&CI->bh_pending);
        pr_info("_bh_: late arrivals, head %d != tail %d, pending %d\n",
                ci->iqp_headx, ci->iqp_tailx, bh);
    }
#if defined(SBE_ISR_IMMEDIATE)
    return 0L;
#endif
    /* else, nothing returned */
}
1510
#if 0
/*
 * musycc_new_chan - assign a user context to an unassigned channel.
 *
 * NOTE: this routine is compiled out (#if 0) and therefore dead code in
 * this build; retained here apparently for reference only.
 *
 * Would move the channel from UNASSIGNED to DOWN, record @user, and apply
 * default protocol parameters (HDLC/FCS16 framing, 0x7e idle code).
 */
int         __init
musycc_new_chan(ci_t *ci, int channum, void *user)
{
    mch_t      *ch;

    ch = ci->port[channum / MUSYCC_NCHANS].chan[channum % MUSYCC_NCHANS];

    if (ch->state != UNASSIGNED)
        return EEXIST;
    /* NOTE: mch_t already cleared during OS_kmalloc() */
    ch->state = DOWN;
    ch->user = user;
#if 0
    ch->status = 0;
    ch->p.status = 0;
    ch->p.intr_mask = 0;
#endif
    ch->p.chan_mode = CFG_CH_PROTO_HDLC_FCS16;
    ch->p.idlecode = CFG_CH_FLAG_7E;
    ch->p.pad_fill_count = 2;
    spin_lock_init(&ch->ch_rxlock);
    spin_lock_init(&ch->ch_txlock);

    return 0;
}
#endif
1538
1539
#ifdef SBE_PMCC4_ENABLE
/*
 * musycc_chan_down - deactivate a channel and release its resources.
 * @dummy:   board context (unused here; sd_find_chan() ignores it)
 * @channum: board-relative channel number
 *
 * Issues RX and TX deactivate service requests, then (unless the channel
 * was already DOWN) clears the channel's descriptor head/tail pointers in
 * group shared memory, frees all attached TX/RX memory tokens and the
 * descriptor rings themselves, recomputes the port's timeslot map, and
 * returns the channel's FIFO allocation.
 *
 * Returns 0 on success, EINVAL (positive, per this driver's convention)
 * if the channel number cannot be resolved.
 */
status_t
musycc_chan_down(ci_t *dummy, int channum)
{
    mpi_t      *pi;
    mch_t      *ch;
    int         i, gchan;

    if (!(ch = sd_find_chan(dummy, channum)))
        return EINVAL;
    pi = ch->up;
    gchan = ch->gchan;

    /* Deactivate the channel */
    musycc_serv_req(pi, SR_CHANNEL_DEACTIVATE | SR_RX_DIRECTION | gchan);
    ch->ch_start_rx = 0;
    musycc_serv_req(pi, SR_CHANNEL_DEACTIVATE | SR_TX_DIRECTION | gchan);
    ch->ch_start_tx = 0;

    /* already torn down: the deactivates above are harmless re-issues */
    if (ch->state == DOWN)
        return 0;
    ch->state = DOWN;

    /* clear the channel's descriptor pointers in group shared memory */
    pi->regram->thp[gchan] = 0;
    pi->regram->tmp[gchan] = 0;
    pi->regram->rhp[gchan] = 0;
    pi->regram->rmp[gchan] = 0;
    FLUSH_MEM_WRITE();
    /* release any buffers still attached to the TX descriptor ring */
    for (i = 0; i < ch->txd_num; i++)
        if (ch->mdt[i].mem_token != 0)
            OS_mem_token_free(ch->mdt[i].mem_token);

    /* release any buffers still attached to the RX descriptor ring */
    for (i = 0; i < ch->rxd_num; i++)
        if (ch->mdr[i].mem_token != 0)
            OS_mem_token_free(ch->mdr[i].mem_token);

    OS_kfree(ch->mdr);
    ch->mdr = 0;
    ch->rxd_num = 0;
    OS_kfree(ch->mdt);
    ch->mdt = 0;
    ch->txd_num = 0;

    musycc_update_timeslots(pi);
    c4_fifo_free(pi, ch->gchan);

    pi->openchans--;
    return 0;
}
#endif
1590
1591
1592int
1593musycc_del_chan(ci_t *ci, int channum)
1594{
1595    mch_t      *ch;
1596
1597    if ((channum < 0) || (channum >= (MUSYCC_NPORTS * MUSYCC_NCHANS)))  /* sanity chk param */
1598        return ECHRNG;
1599    if (!(ch = sd_find_chan(ci, channum)))
1600        return ENOENT;
1601    if (ch->state == UP)
1602        musycc_chan_down(ci, channum);
1603    ch->state = UNASSIGNED;
1604    return 0;
1605}
1606
1607
1608int
1609musycc_del_chan_stats(ci_t *ci, int channum)
1610{
1611    mch_t      *ch;
1612
1613    if (channum < 0 || channum >= (MUSYCC_NPORTS * MUSYCC_NCHANS))      /* sanity chk param */
1614        return ECHRNG;
1615    if (!(ch = sd_find_chan(ci, channum)))
1616        return ENOENT;
1617
1618    memset(&ch->s, 0, sizeof(struct sbecom_chan_stats));
1619    return 0;
1620}
1621
1622
1623int
1624musycc_start_xmit(ci_t *ci, int channum, void *mem_token)
1625{
1626    mch_t      *ch;
1627    struct mdesc *md;
1628    void       *m2;
1629#if 0
1630    unsigned long flags;
1631#endif
1632    int         txd_need_cnt;
1633    u_int32_t   len;
1634
1635    if (!(ch = sd_find_chan(ci, channum)))
1636        return -ENOENT;
1637
1638    if (ci->state != C_RUNNING)     /* full interrupt processing available */
1639        return -EINVAL;
1640    if (ch->state != UP)
1641        return -EINVAL;
1642
1643    if (!(ch->status & TX_ENABLED))
1644        return -EROFS;               /* how else to flag unwritable state ? */
1645
1646#ifdef RLD_TRANS_DEBUGx
1647    if (1 || cxt1e1_log_level >= LOG_MONITOR2)
1648#else
1649    if (cxt1e1_log_level >= LOG_MONITOR2)
1650#endif
1651    {
1652        pr_info("++ start_xmt[%d]: state %x start %x full %d free %d required %d stopped %x\n",
1653                channum, ch->state, ch->ch_start_tx, ch->tx_full,
1654                ch->txd_free, ch->txd_required, sd_queue_stopped(ch->user));
1655    }
1656    /***********************************************/
1657    /** Determine total amount of data to be sent **/
1658    /***********************************************/
1659    m2 = mem_token;
1660    txd_need_cnt = 0;
1661    for (len = OS_mem_token_tlen(m2); len > 0;
1662         m2 = (void *) OS_mem_token_next(m2)) {
1663        if (!OS_mem_token_len(m2))
1664            continue;
1665        txd_need_cnt++;
1666        len -= OS_mem_token_len(m2);
1667    }
1668
1669    if (txd_need_cnt == 0) {
1670        if (cxt1e1_log_level >= LOG_MONITOR2)
1671            pr_info("%s channel %d: no TX data in User buffer\n", ci->devname, channum);
1672        OS_mem_token_free(mem_token);
1673        return 0;                   /* no data to send */
1674    }
1675    /*************************************************/
1676    /** Are there sufficient descriptors available? **/
1677    /*************************************************/
1678    if (txd_need_cnt > ch->txd_num) { /* never enough descriptors for this
1679                                       * large a buffer */
1680        if (cxt1e1_log_level >= LOG_DEBUG)
1681            pr_info("start_xmit: discarding buffer, insufficient descriptor cnt %d, need %d.\n",
1682                    ch->txd_num, txd_need_cnt + 1);
1683        ch->s.tx_dropped++;
1684        OS_mem_token_free(mem_token);
1685        return 0;
1686    }
1687#if 0
1688    spin_lock_irqsave(&ch->ch_txlock, flags);
1689#endif
1690    /************************************************************/
1691    /** flow control the line if not enough descriptors remain **/
1692    /************************************************************/
1693    if (txd_need_cnt > ch->txd_free) {
1694        if (cxt1e1_log_level >= LOG_MONITOR2)
1695            pr_info("start_xmit[%d]: EBUSY - need more descriptors, have %d of %d need %d\n",
1696                    channum, ch->txd_free, ch->txd_num, txd_need_cnt);
1697        ch->tx_full = 1;
1698        ch->txd_required = txd_need_cnt;
1699        sd_disable_xmit(ch->user);
1700#if 0
1701        spin_unlock_irqrestore(&ch->ch_txlock, flags);
1702#endif
1703        return -EBUSY;               /* tell user to try again later */
1704    }
1705    /**************************************************/
1706    /** Put the user data into MUSYCC data buffer(s) **/
1707    /**************************************************/
1708    m2 = mem_token;
1709    md = ch->txd_usr_add;           /* get current available descriptor */
1710
1711    for (len = OS_mem_token_tlen(m2); len > 0; m2 = OS_mem_token_next(m2)) {
1712        int         u = OS_mem_token_len(m2);
1713
1714        if (!u)
1715            continue;
1716        len -= u;
1717
1718        /*
1719         * Enable following chunks, yet wait to enable the FIRST chunk until
1720         * after ALL subsequent chunks are setup.
1721         */
1722        if (md != ch->txd_usr_add)  /* not first chunk */
1723            u |= MUSYCC_TX_OWNED;   /* transfer ownership from HOST to MUSYCC */
1724
1725        if (len)                    /* not last chunk */
1726            u |= EOBIRQ_ENABLE;
1727        else if (ch->p.chan_mode == CFG_CH_PROTO_TRANS) {
1728            /*
1729             * Per MUSYCC Ref 6.4.9 for Transparent Mode, the host must
1730             * always clear EOMIRQ_ENABLE in every Transmit Buffer Descriptor
1731             * (IE. don't set herein).
1732             */
1733            u |= EOBIRQ_ENABLE;
1734        } else
1735            u |= EOMIRQ_ENABLE;     /* EOM, last HDLC chunk */
1736
1737
1738        /* last chunk in hdlc mode */
1739        u |= (ch->p.idlecode << IDLE_CODE);
1740        if (ch->p.pad_fill_count) {
1741#if 0
1742            /* NOOP NOTE: u_int8_t cannot be > 0xFF */
1743            /* sanitize pad_fill_count for maximums allowed by hardware */
1744            if (ch->p.pad_fill_count > EXTRA_FLAGS_MASK)
1745                ch->p.pad_fill_count = EXTRA_FLAGS_MASK;
1746#endif
1747            u |= (PADFILL_ENABLE | (ch->p.pad_fill_count << EXTRA_FLAGS));
1748        }
1749        md->mem_token = len ? 0 : mem_token;    /* Fill in mds on last
1750                                                 * segment, others set ZERO
1751                                                 * so that entire token is
1752                                                 * removed ONLY when ALL
1753                                                 * segments have been
1754                                                 * transmitted. */
1755
1756        md->data = cpu_to_le32(OS_vtophys(OS_mem_token_data(m2)));
1757        FLUSH_MEM_WRITE();
1758        md->status = cpu_to_le32(u);
1759        --ch->txd_free;
1760        md = md->snext;
1761    }
1762    FLUSH_MEM_WRITE();
1763
1764
1765    /*
1766     * Now transfer ownership of first chunk from HOST to MUSYCC in order to
1767     * fire-off this XMIT.
1768     */
1769    ch->txd_usr_add->status |= __constant_cpu_to_le32(MUSYCC_TX_OWNED);
1770    FLUSH_MEM_WRITE();
1771    ch->txd_usr_add = md;
1772
1773    len = OS_mem_token_tlen(mem_token);
1774    atomic_add(len, &ch->tx_pending);
1775    atomic_add(len, &ci->tx_pending);
1776    ch->s.tx_packets++;
1777    ch->s.tx_bytes += len;
1778    /*
1779     * If an ONR was seen, then channel requires poking to restart
1780     * transmission.
1781     */
1782    if (ch->ch_start_tx)
1783        musycc_chan_restart(ch);
1784#ifdef SBE_WAN256T3_ENABLE
1785    wan256t3_led(ci, LED_TX, LEDV_G);
1786#endif
1787    return 0;
1788}
1789
1790
1791/*** End-of-File ***/
1792