uboot/drivers/usb/musb-new/musb_host.c
/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#define __UBOOT__
#ifndef __UBOOT__
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#else
#include <common.h>
#include <usb.h>
#include "linux-compat.h"
#include "usb-compat.h"
#endif

#include "musb_core.h"
#include "musb_host.h"


/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ...has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or bulk.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */


/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic:  the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */


static void musb_ep_program(struct musb *musb, u8 epnum,
                        struct urb *urb, int is_out,
                        u8 *buf, u32 offset, u32 len);

/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
        struct musb     *musb = ep->musb;
        void __iomem    *epio = ep->regs;
        u16             csr;
        u16             lastcsr = 0;
        int             retries = 1000;

        csr = musb_readw(epio, MUSB_TXCSR);
        while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
                if (csr != lastcsr)
                        dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
                lastcsr = csr;
                csr |= MUSB_TXCSR_FLUSHFIFO;
                musb_writew(epio, MUSB_TXCSR, csr);
                csr = musb_readw(epio, MUSB_TXCSR);
                if (WARN(retries-- < 1,
                                "Could not flush host TX%d fifo: csr: %04x\n",
                                ep->epnum, csr))
                        return;
                mdelay(1);
        }
}

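/*
 * ep0 re-uses the TX register offsets (MUSB_CSR0 aliases MUSB_TXCSR),
 * which is why CSR0 is accessed through MUSB_TXCSR below.
 */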
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
        void __iomem    *epio = ep->regs;
        u16             csr;
        int             retries = 5;

        /* scrub any data left in the fifo */
        do {
                csr = musb_readw(epio, MUSB_TXCSR);
                if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
                        break;
                musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
                csr = musb_readw(epio, MUSB_TXCSR);
                udelay(10);
        } while (--retries);

        WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
                        ep->epnum, csr);

        /* and reset for the next transfer */
        musb_writew(epio, MUSB_TXCSR, 0);
}

/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
        u16     txcsr;

        /* NOTE: no locks here; caller should lock and select EP */
        if (ep->epnum) {
                txcsr = musb_readw(ep->regs, MUSB_TXCSR);
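                /* H_WZC_BITS are write-zero-to-clear status bits; writing
                 * them back as ones leaves any pending status untouched.
                 */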
                txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
                musb_writew(ep->regs, MUSB_TXCSR, txcsr);
        } else {
                txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
                musb_writew(ep->regs, MUSB_CSR0, txcsr);
        }
}

static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
        u16     txcsr;

        /* NOTE: no locks here; caller should lock and select EP */
        txcsr = musb_readw(ep->regs, MUSB_TXCSR);
        txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
        if (is_cppi_enabled())
                txcsr |= MUSB_TXCSR_DMAMODE;
        musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}

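/*
 * Track the current qh for each direction of a hardware endpoint.
 * Shared-FIFO endpoints carry only one transfer at a time, so both
 * the in_qh and out_qh pointers are updated together there.
 */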
static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
        if (is_in != 0 || ep->is_shared_fifo)
                ep->in_qh  = qh;
        if (is_in == 0 || ep->is_shared_fifo)
                ep->out_qh = qh;
}

static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
        return is_in ? ep->in_qh : ep->out_qh;
}

/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
        u16                     frame;
        u32                     len;
        void __iomem            *mbase = musb->mregs;
        struct urb              *urb = next_urb(qh);
        void                    *buf = urb->transfer_buffer;
        u32                     offset = 0;
        struct musb_hw_ep       *hw_ep = qh->hw_ep;
        unsigned                pipe = urb->pipe;
        u8                      address = usb_pipedevice(pipe);
        int                     epnum = hw_ep->epnum;

        /* initialize software qh state */
        qh->offset = 0;
        qh->segsize = 0;

        /* gather right source of data */
        switch (qh->type) {
        case USB_ENDPOINT_XFER_CONTROL:
                /* control transfers always start with SETUP */
                is_in = 0;
                musb->ep0_stage = MUSB_EP0_START;
                buf = urb->setup_packet;
                len = 8;
                break;
#ifndef __UBOOT__
        case USB_ENDPOINT_XFER_ISOC:
                qh->iso_idx = 0;
                qh->frame = 0;
                offset = urb->iso_frame_desc[0].offset;
                len = urb->iso_frame_desc[0].length;
                break;
#endif
        default:                /* bulk, interrupt */
                /* actual_length may be nonzero on retry paths */
                buf = urb->transfer_buffer + urb->actual_length;
                len = urb->transfer_buffer_length - urb->actual_length;
        }

        dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
                        qh, urb, address, qh->epnum,
                        is_in ? "in" : "out",
                        ({char *s; switch (qh->type) {
                        case USB_ENDPOINT_XFER_CONTROL: s = ""; break;
                        case USB_ENDPOINT_XFER_BULK:    s = "-bulk"; break;
#ifndef __UBOOT__
                        case USB_ENDPOINT_XFER_ISOC:    s = "-iso"; break;
#endif
                        default:                        s = "-intr"; break;
                        }; s; }),
                        epnum, buf + offset, len);

        /* Configure endpoint */
        musb_ep_set_qh(hw_ep, is_in, qh);
        musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

        /* transmit may have more work: start it when it is time */
        if (is_in)
                return;

        /* determine if the time is right for a periodic transfer */
        switch (qh->type) {
#ifndef __UBOOT__
        case USB_ENDPOINT_XFER_ISOC:
#endif
        case USB_ENDPOINT_XFER_INT:
                dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n");
                frame = musb_readw(mbase, MUSB_FRAME);
                /* FIXME this doesn't implement that scheduling policy ...
                 * or handle framecounter wrapping
                 */
#ifndef __UBOOT__
                if ((urb->transfer_flags & URB_ISO_ASAP)
                                || (frame >= urb->start_frame)) {
                        /* REVISIT the SOF irq handler shouldn't duplicate
                         * this code; and we don't init urb->start_frame...
                         */
                        qh->frame = 0;
                        goto start;
                } else {
#endif
                        qh->frame = urb->start_frame;
                        /* enable SOF interrupt so we can count down */
                        dev_dbg(musb->controller, "SOF for %d\n", epnum);
#if 1 /* ifndef CONFIG_ARCH_DAVINCI */
                        musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
#ifndef __UBOOT__
                }
#endif
                break;
        default:
start:
                dev_dbg(musb->controller, "Start TX%d %s\n", epnum,
                        hw_ep->tx_channel ? "dma" : "pio");

                if (!hw_ep->tx_channel)
                        musb_h_tx_start(hw_ep);
                else if (is_cppi_enabled() || tusb_dma_omap())
                        musb_h_tx_dma_start(hw_ep);
        }
}

/* Context: caller owns controller lock, IRQs are blocked */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
        dev_dbg(musb->controller,
                        "complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
                        urb, urb->complete, status,
                        usb_pipedevice(urb->pipe),
                        usb_pipeendpoint(urb->pipe),
                        usb_pipein(urb->pipe) ? "in" : "out",
                        urb->actual_length, urb->transfer_buffer_length
                        );

        usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
        spin_unlock(&musb->lock);
        usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
        spin_lock(&musb->lock);
}

/* For bulk/interrupt endpoints only */
static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
                                    struct urb *urb)
{
        void __iomem            *epio = qh->hw_ep->regs;
        u16                     csr;

        /*
         * FIXME: the current Mentor DMA code seems to have
         * problems getting toggle correct.
         */

        if (is_in)
                csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
        else
                csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;

        usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}

/*
 * Advance this hardware endpoint's queue, completing the specified URB and
 * advancing to either the next URB queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, IRQs are blocked
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
                                  struct musb_hw_ep *hw_ep, int is_in)
{
        struct musb_qh          *qh = musb_ep_get_qh(hw_ep, is_in);
        struct musb_hw_ep       *ep = qh->hw_ep;
        int                     ready = qh->is_ready;
        int                     status;

        status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

        /* save toggle eagerly, for paranoia */
        switch (qh->type) {
        case USB_ENDPOINT_XFER_BULK:
        case USB_ENDPOINT_XFER_INT:
                musb_save_toggle(qh, is_in, urb);
                break;
#ifndef __UBOOT__
        case USB_ENDPOINT_XFER_ISOC:
                if (status == 0 && urb->error_count)
                        status = -EXDEV;
                break;
#endif
        }

        qh->is_ready = 0;
        musb_giveback(musb, urb, status);
        qh->is_ready = ready;

        /* reclaim resources (and bandwidth) ASAP; deschedule it, and
         * invalidate qh as soon as list_empty(&hep->urb_list)
         */
        if (list_empty(&qh->hep->urb_list)) {
                struct list_head        *head;
                struct dma_controller   *dma = musb->dma_controller;

                if (is_in) {
                        ep->rx_reinit = 1;
                        if (ep->rx_channel) {
                                dma->channel_release(ep->rx_channel);
                                ep->rx_channel = NULL;
                        }
                } else {
                        ep->tx_reinit = 1;
                        if (ep->tx_channel) {
                                dma->channel_release(ep->tx_channel);
                                ep->tx_channel = NULL;
                        }
                }

                /* Clobber old pointers to this qh */
                musb_ep_set_qh(ep, is_in, NULL);
                qh->hep->hcpriv = NULL;

                switch (qh->type) {

                case USB_ENDPOINT_XFER_CONTROL:
                case USB_ENDPOINT_XFER_BULK:
                        /* fifo policy for these lists, except that NAKing
                         * should rotate a qh to the end (for fairness).
                         */
                        if (qh->mux == 1) {
                                head = qh->ring.prev;
                                list_del(&qh->ring);
                                kfree(qh);
                                qh = first_qh(head);
                                break;
                        }

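                        /* else: fall through */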
                case USB_ENDPOINT_XFER_ISOC:
                case USB_ENDPOINT_XFER_INT:
                        /* this is where periodic bandwidth should be
                         * de-allocated if it's tracked and allocated;
                         * and where we'd update the schedule tree...
                         */
                        kfree(qh);
                        qh = NULL;
                        break;
                }
        }

        if (qh != NULL && qh->is_ready) {
                dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
                    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
                musb_start_urb(musb, is_in, qh);
        }
}

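/*
 * Flush the RX FIFO without disturbing the data toggle; returns the
 * final RXCSR value so the register write is known to have posted.
 */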
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
        /* we don't want fifo to fill itself again;
         * ignore dma (various models),
         * leave toggle alone (may not have been saved yet)
         */
        csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
        csr &= ~(MUSB_RXCSR_H_REQPKT
                | MUSB_RXCSR_H_AUTOREQ
                | MUSB_RXCSR_AUTOCLEAR);

        /* write 2x to allow double buffering */
        musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
        musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

        /* flush writebuffer */
        return musb_readw(hw_ep->regs, MUSB_RXCSR);
}

/*
 * PIO RX for a packet (or part of it).
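 * Returns true once this URB is done: buffer filled, short packet
 * received, or an error already pending on the URB.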
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
        u16                     rx_count;
        u8                      *buf;
        u16                     csr;
        bool                    done = false;
        u32                     length;
        int                     do_flush = 0;
        struct musb_hw_ep       *hw_ep = musb->endpoints + epnum;
        void __iomem            *epio = hw_ep->regs;
        struct musb_qh          *qh = hw_ep->in_qh;
        int                     pipe = urb->pipe;
        void                    *buffer = urb->transfer_buffer;

        /* musb_ep_select(mbase, epnum); */
        rx_count = musb_readw(epio, MUSB_RXCOUNT);
        dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
                        urb->transfer_buffer, qh->offset,
                        urb->transfer_buffer_length);

        /* unload FIFO */
#ifndef __UBOOT__
        if (usb_pipeisoc(pipe)) {
                int                                     status = 0;
                struct usb_iso_packet_descriptor        *d;

                if (iso_err) {
                        status = -EILSEQ;
                        urb->error_count++;
                }

                d = urb->iso_frame_desc + qh->iso_idx;
                buf = buffer + d->offset;
                length = d->length;
                if (rx_count > length) {
                        if (status == 0) {
                                status = -EOVERFLOW;
                                urb->error_count++;
                        }
                        dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
                        do_flush = 1;
                } else
                        length = rx_count;
                urb->actual_length += length;
                d->actual_length = length;

                d->status = status;

                /* see if we are done */
                done = (++qh->iso_idx >= urb->number_of_packets);
        } else {
#endif
                /* non-isoch */
                buf = buffer + qh->offset;
                length = urb->transfer_buffer_length - qh->offset;
                if (rx_count > length) {
                        if (urb->status == -EINPROGRESS)
                                urb->status = -EOVERFLOW;
                        dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
                        do_flush = 1;
                } else
                        length = rx_count;
                urb->actual_length += length;
                qh->offset += length;

                /* see if we are done */
                done = (urb->actual_length == urb->transfer_buffer_length)
                        || (rx_count < qh->maxpacket)
                        || (urb->status != -EINPROGRESS);
                if (done
                                && (urb->status == -EINPROGRESS)
                                && (urb->transfer_flags & URB_SHORT_NOT_OK)
                                && (urb->actual_length
                                        < urb->transfer_buffer_length))
                        urb->status = -EREMOTEIO;
#ifndef __UBOOT__
        }
#endif

        musb_read_fifo(hw_ep, length, buf);

        csr = musb_readw(epio, MUSB_RXCSR);
        csr |= MUSB_RXCSR_H_WZC_BITS;
        if (unlikely(do_flush))
                musb_h_flush_rxfifo(hw_ep, csr);
        else {
                /* REVISIT this assumes AUTOCLEAR is never set */
                csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
                if (!done)
                        csr |= MUSB_RXCSR_H_REQPKT;
                musb_writew(epio, MUSB_RXCSR, csr);
        }

        return done;
}

/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
        u16     csr;

        /* NOTE:  we know the "rx" fifo reinit never triggers for ep0.
         * That always uses tx_reinit since ep0 repurposes TX register
         * offsets; the initial SETUP packet is also a kind of OUT.
         */

        /* if programmed for Tx, put it in RX mode */
        if (ep->is_shared_fifo) {
                csr = musb_readw(ep->regs, MUSB_TXCSR);
                if (csr & MUSB_TXCSR_MODE) {
                        musb_h_tx_flush_fifo(ep);
                        csr = musb_readw(ep->regs, MUSB_TXCSR);
                        musb_writew(ep->regs, MUSB_TXCSR,
                                    csr | MUSB_TXCSR_FRCDATATOG);
                }

                /*
                 * Clear the MODE bit (and everything else) to enable Rx.
                 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
                 */
                if (csr & MUSB_TXCSR_DMAMODE)
                        musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
                musb_writew(ep->regs, MUSB_TXCSR, 0);

        /* scrub all previous state, clearing toggle */
        } else {
                csr = musb_readw(ep->regs, MUSB_RXCSR);
                if (csr & MUSB_RXCSR_RXPKTRDY)
                        WARNING("rx%d, packet/%d ready?\n", ep->epnum,
                                musb_readw(ep->regs, MUSB_RXCOUNT));

                musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
        }

        /* target addr and (for multipoint) hub addr/port */
        if (musb->is_multipoint) {
                musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
                musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
                musb_write_rxhubport(ep->target_regs, qh->h_port_reg);

        } else
                musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

        /* protocol/endpoint, interval/NAKlimit, i/o size */
        musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
        musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
        /* NOTE: bulk combining rewrites high bits of maxpacket */
        /* Set RXMAXP with the FIFO size of the endpoint
         * to disable double buffer mode.
         */
        if (musb->double_buffer_not_ok)
                musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
        else
                musb_writew(ep->regs, MUSB_RXMAXP,
                                qh->maxpacket | ((qh->hb_mult - 1) << 11));

        ep->rx_reinit = 0;
}

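/*
 * Arm the TX DMA channel for (part of) an URB.  Returns false when DMA
 * cannot be used, in which case the caller falls back to loading the
 * FIFO by PIO (see musb_ep_program()).
 */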
static bool musb_tx_dma_program(struct dma_controller *dma,
                struct musb_hw_ep *hw_ep, struct musb_qh *qh,
                struct urb *urb, u32 offset, u32 length)
{
        struct dma_channel      *channel = hw_ep->tx_channel;
        void __iomem            *epio = hw_ep->regs;
        u16                     pkt_size = qh->maxpacket;
        u16                     csr;
        u8                      mode;

#ifdef  CONFIG_USB_INVENTRA_DMA
        if (length > channel->max_len)
                length = channel->max_len;

        csr = musb_readw(epio, MUSB_TXCSR);
        if (length > pkt_size) {
                mode = 1;
                csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
                /* autoset shouldn't be set in high bandwidth */
                if (qh->hb_mult == 1)
                        csr |= MUSB_TXCSR_AUTOSET;
        } else {
                mode = 0;
                csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
                csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
        }
        channel->desired_mode = mode;
        musb_writew(epio, MUSB_TXCSR, csr);
#else
        if (!is_cppi_enabled() && !tusb_dma_omap())
                return false;

        channel->actual_len = 0;

        /*
         * TX uses "RNDIS" mode automatically but needs help
         * to identify the zero-length-final-packet case.
         */
        mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
#endif

        qh->segsize = length;

        /*
         * Ensure the data reaches main memory before starting
         * the DMA transfer
         */
        wmb();

        if (!dma->channel_program(channel, pkt_size, mode,
                        urb->transfer_dma + offset, length)) {
                dma->channel_release(channel);
                hw_ep->tx_channel = NULL;

                csr = musb_readw(epio, MUSB_TXCSR);
                csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
                musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
                return false;
        }
        return true;
}

/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
                        struct urb *urb, int is_out,
                        u8 *buf, u32 offset, u32 len)
{
        struct dma_controller   *dma_controller;
        struct dma_channel      *dma_channel;
        u8                      dma_ok;
        void __iomem            *mbase = musb->mregs;
        struct musb_hw_ep       *hw_ep = musb->endpoints + epnum;
        void __iomem            *epio = hw_ep->regs;
        struct musb_qh          *qh = musb_ep_get_qh(hw_ep, !is_out);
        u16                     packet_sz = qh->maxpacket;

        dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s "
                                "h_addr%02x h_port%02x bytes %d\n",
                        is_out ? "-->" : "<--",
                        epnum, urb, urb->dev->speed,
                        qh->addr_reg, qh->epnum, is_out ? "out" : "in",
                        qh->h_addr_reg, qh->h_port_reg,
                        len);

        musb_ep_select(mbase, epnum);

        /* candidate for DMA? */
        dma_controller = musb->dma_controller;
        if (is_dma_capable() && epnum && dma_controller) {
                dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
                if (!dma_channel) {
                        dma_channel = dma_controller->channel_alloc(
                                        dma_controller, hw_ep, is_out);
                        if (is_out)
                                hw_ep->tx_channel = dma_channel;
                        else
                                hw_ep->rx_channel = dma_channel;
                }
        } else
                dma_channel = NULL;

        /* make sure we clear DMAEnab, autoSet bits from previous run */

        /* OUT/transmit/EP0 or IN/receive? */
        if (is_out) {
                u16     csr;
                u16     int_txe;
                u16     load_count;

                csr = musb_readw(epio, MUSB_TXCSR);

                /* disable interrupt in case we flush */
                int_txe = musb_readw(mbase, MUSB_INTRTXE);
                musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

                /* general endpoint setup */
                if (epnum) {
                        /* flush all old state, set default */
                        musb_h_tx_flush_fifo(hw_ep);

                        /*
                         * We must not clear the DMAMODE bit before or in
                         * the same cycle with the DMAENAB bit, so we clear
                         * the latter first...
                         */
                        csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
                                        | MUSB_TXCSR_AUTOSET
                                        | MUSB_TXCSR_DMAENAB
                                        | MUSB_TXCSR_FRCDATATOG
                                        | MUSB_TXCSR_H_RXSTALL
                                        | MUSB_TXCSR_H_ERROR
                                        | MUSB_TXCSR_TXPKTRDY
                                        );
                        csr |= MUSB_TXCSR_MODE;

                        if (usb_gettoggle(urb->dev, qh->epnum, 1))
                                csr |= MUSB_TXCSR_H_WR_DATATOGGLE
                                        | MUSB_TXCSR_H_DATATOGGLE;
                        else
                                csr |= MUSB_TXCSR_CLRDATATOG;

                        musb_writew(epio, MUSB_TXCSR, csr);
                        /* REVISIT may need to clear FLUSHFIFO ... */
                        csr &= ~MUSB_TXCSR_DMAMODE;
                        musb_writew(epio, MUSB_TXCSR, csr);
                        csr = musb_readw(epio, MUSB_TXCSR);
                } else {
                        /* endpoint 0: just flush */
                        musb_h_ep0_flush_fifo(hw_ep);
                }

                /* target addr and (for multipoint) hub addr/port */
                if (musb->is_multipoint) {
                        musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
                        musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
                        musb_write_txhubport(mbase, epnum, qh->h_port_reg);
/* FIXME if !epnum, do the same for RX ... */
                } else
                        musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

                /* protocol/endpoint/interval/NAKlimit */
                if (epnum) {
                        musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
                        if (musb->double_buffer_not_ok)
                                musb_writew(epio, MUSB_TXMAXP,
                                                hw_ep->max_packet_sz_tx);
                        else if (can_bulk_split(musb, qh->type))
                                musb_writew(epio, MUSB_TXMAXP, packet_sz
                                        | ((hw_ep->max_packet_sz_tx /
                                                packet_sz) - 1) << 11);
                        else
                                musb_writew(epio, MUSB_TXMAXP,
                                                qh->maxpacket |
                                                ((qh->hb_mult - 1) << 11));
                        musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
                } else {
                        musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
                        if (musb->is_multipoint)
                                musb_writeb(epio, MUSB_TYPE0,
                                                qh->type_reg);
                }

                if (can_bulk_split(musb, qh->type))
                        load_count = min((u32) hw_ep->max_packet_sz_tx,
                                                len);
                else
                        load_count = min((u32) packet_sz, len);

                if (dma_channel && musb_tx_dma_program(dma_controller,
                                        hw_ep, qh, urb, offset, len))
                        load_count = 0;

                if (load_count) {
                        /* PIO to load FIFO */
                        qh->segsize = load_count;
                        musb_write_fifo(hw_ep, load_count, buf);
                }

                /* re-enable interrupt */
                musb_writew(mbase, MUSB_INTRTXE, int_txe);

        /* IN/receive */
        } else {
                u16     csr;

                if (hw_ep->rx_reinit) {
                        musb_rx_reinit(musb, qh, hw_ep);

                        /* init new state: toggle and NYET, maybe DMA later */
                        if (usb_gettoggle(urb->dev, qh->epnum, 0))
                                csr = MUSB_RXCSR_H_WR_DATATOGGLE
                                        | MUSB_RXCSR_H_DATATOGGLE;
                        else
                                csr = 0;
                        if (qh->type == USB_ENDPOINT_XFER_INT)
                                csr |= MUSB_RXCSR_DISNYET;

                } else {
                        csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

                        if (csr & (MUSB_RXCSR_RXPKTRDY
                                        | MUSB_RXCSR_DMAENAB
                                        | MUSB_RXCSR_H_REQPKT))
                                ERR("broken !rx_reinit, ep%d csr %04x\n",
                                                hw_ep->epnum, csr);

                        /* scrub any stale state, leaving toggle alone */
                        csr &= MUSB_RXCSR_DISNYET;
                }

                /* kick things off */

                if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
                        /* Candidate for DMA */
                        dma_channel->actual_len = 0L;
                        qh->segsize = len;

                        /* AUTOREQ is in a DMA register */
                        musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
                        csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

                        /*
                         * Unless caller treats short RX transfers as
                         * errors, we dare not queue multiple transfers.
                         */
                        dma_ok = dma_controller->channel_program(dma_channel,
                                        packet_sz, !(urb->transfer_flags &
                                                     URB_SHORT_NOT_OK),
                                        urb->transfer_dma + offset,
                                        qh->segsize);
                        if (!dma_ok) {
                                dma_controller->channel_release(dma_channel);
                                hw_ep->rx_channel = dma_channel = NULL;
                        } else
                                csr |= MUSB_RXCSR_DMAENAB;
                }

                csr |= MUSB_RXCSR_H_REQPKT;
                dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr);
                musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
                csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
        }
}


/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
        bool                     more = false;
        u8                      *fifo_dest = NULL;
        u16                     fifo_count = 0;
        struct musb_hw_ep       *hw_ep = musb->control_ep;
        struct musb_qh          *qh = hw_ep->in_qh;
        struct usb_ctrlrequest  *request;

        switch (musb->ep0_stage) {
        case MUSB_EP0_IN:
                fifo_dest = urb->transfer_buffer + urb->actual_length;
                fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
                                   urb->actual_length);
                if (fifo_count < len)
                        urb->status = -EOVERFLOW;

                musb_read_fifo(hw_ep, fifo_count, fifo_dest);

                urb->actual_length += fifo_count;
                if (len < qh->maxpacket) {
                        /* always terminate on short read; it's
                         * rarely reported as an error.
                         */
                } else if (urb->actual_length <
                                urb->transfer_buffer_length)
                        more = true;
                break;
        case MUSB_EP0_START:
                request = (struct usb_ctrlrequest *) urb->setup_packet;

                if (!request->wLength) {
                        dev_dbg(musb->controller, "start no-DATA\n");
                        break;
                } else if (request->bRequestType & USB_DIR_IN) {
                        dev_dbg(musb->controller, "start IN-DATA\n");
                        musb->ep0_stage = MUSB_EP0_IN;
                        more = true;
                        break;
                } else {
                        dev_dbg(musb->controller, "start OUT-DATA\n");
                        musb->ep0_stage = MUSB_EP0_OUT;
                        more = true;
                }
                /* FALLTHROUGH */
        case MUSB_EP0_OUT:
                fifo_count = min_t(size_t, qh->maxpacket,
                                   urb->transfer_buffer_length -
                                   urb->actual_length);
                if (fifo_count) {
                        fifo_dest = (u8 *) (urb->transfer_buffer
                                        + urb->actual_length);
                        dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n",
                                        fifo_count,
                                        (fifo_count == 1) ? "" : "s",
                                        fifo_dest);
                        musb_write_fifo(hw_ep, fifo_count, fifo_dest);

                        urb->actual_length += fifo_count;
                        more = true;
                }
                break;
        default:
                ERR("bogus ep0 stage %d\n", musb->ep0_stage);
                break;
        }

        return more;
}

/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
        struct urb              *urb;
        u16                     csr, len;
        int                     status = 0;
        void __iomem            *mbase = musb->mregs;
        struct musb_hw_ep       *hw_ep = musb->control_ep;
        void __iomem            *epio = hw_ep->regs;
        struct musb_qh          *qh = hw_ep->in_qh;
        bool                    complete = false;
        irqreturn_t             retval = IRQ_NONE;

        /* ep0 only has one queue, "in" */
        urb = next_urb(qh);

        musb_ep_select(mbase, 0);
        csr = musb_readw(epio, MUSB_CSR0);
        len = (csr & MUSB_CSR0_RXPKTRDY)
                        ? musb_readb(epio, MUSB_COUNT0)
                        : 0;

        dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
                csr, qh, len, urb, musb->ep0_stage);

        /* if we just did status stage, we are done */
        if (MUSB_EP0_STATUS == musb->ep0_stage) {
                retval = IRQ_HANDLED;
                complete = true;
        }

        /* prepare status */
        if (csr & MUSB_CSR0_H_RXSTALL) {
                dev_dbg(musb->controller, "STALLING ENDPOINT\n");
                status = -EPIPE;

        } else if (csr & MUSB_CSR0_H_ERROR) {
                dev_dbg(musb->controller, "no response, csr0 %04x\n", csr);
                status = -EPROTO;

        } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
                dev_dbg(musb->controller, "control NAK timeout\n");

                /* NOTE:  this code path would be a good place to PAUSE a
                 * control transfer, if another one is queued, so that
                 * ep0 is more likely to stay busy.  That's already done
                 * for bulk RX transfers.
                 *
                 * if (qh->ring.next != &musb->control), then
                 * we have a candidate... NAKing is *NOT* an error
                 */
                musb_writew(epio, MUSB_CSR0, 0);
                retval = IRQ_HANDLED;
        }

        if (status) {
                dev_dbg(musb->controller, "aborting\n");
                retval = IRQ_HANDLED;
                if (urb)
                        urb->status = status;
                complete = true;

                /* use the proper sequence to abort the transfer */
                if (csr & MUSB_CSR0_H_REQPKT) {
                        csr &= ~MUSB_CSR0_H_REQPKT;
                        musb_writew(epio, MUSB_CSR0, csr);
                        csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
                        musb_writew(epio, MUSB_CSR0, csr);
                } else {
                        musb_h_ep0_flush_fifo(hw_ep);
                }

                musb_writeb(epio, MUSB_NAKLIMIT0, 0);

                /* clear it */
                musb_writew(epio, MUSB_CSR0, 0);
        }

        if (unlikely(!urb)) {
                /* stop endpoint since we have no place for its data, this
                 * SHOULD NEVER HAPPEN! */
                ERR("no URB for end 0\n");

                musb_h_ep0_flush_fifo(hw_ep);
                goto done;
        }

        if (!complete) {
                /* call common logic and prepare response */
                if (musb_h_ep0_continue(musb, len, urb)) {
                        /* more packets required */
                        csr = (MUSB_EP0_IN == musb->ep0_stage)
                                ?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
                } else {
                        /* data transfer complete; perform status phase */
                        if (usb_pipeout(urb->pipe)
                                        || !urb->transfer_buffer_length)
                                csr = MUSB_CSR0_H_STATUSPKT
                                        | MUSB_CSR0_H_REQPKT;
                        else
                                csr = MUSB_CSR0_H_STATUSPKT
                                        | MUSB_CSR0_TXPKTRDY;

                        /* flag status stage */
                        musb->ep0_stage = MUSB_EP0_STATUS;

                        dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr);

                }
                musb_writew(epio, MUSB_CSR0, csr);
                retval = IRQ_HANDLED;
        } else
                musb->ep0_stage = MUSB_EP0_IDLE;

        /* call completion handler if done */
        if (complete)
                musb_advance_schedule(musb, urb, hw_ep, 1);
done:
        return retval;
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
        submit_urb ->
                - if queue was empty, Program Endpoint
                - ... which starts DMA to fifo in mode 1 or 0

        DMA Isr (transfer complete) -> TxAvail()
                - Stop DMA (~DmaEnab)   (<--- Alert ... currently happens
                                        only in musb_cleanup_urb)
                - TxPktRdy has to be set in mode 0 or for
                        short packets in mode 1.
*/
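
/* musb_tx_dma_program() above selects mode 1 when the request spans more
 * than one maxpacket, and mode 0 otherwise.
 */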
1124
1125#endif
1126
1127/* Service a Tx-Available or dma completion irq for the endpoint */
1128void musb_host_tx(struct musb *musb, u8 epnum)
1129{
1130        int                     pipe;
1131        bool                    done = false;
1132        u16                     tx_csr;
1133        size_t                  length = 0;
1134        size_t                  offset = 0;
1135        struct musb_hw_ep       *hw_ep = musb->endpoints + epnum;
1136        void __iomem            *epio = hw_ep->regs;
1137        struct musb_qh          *qh = hw_ep->out_qh;
1138        struct urb              *urb = next_urb(qh);
1139        u32                     status = 0;
1140        void __iomem            *mbase = musb->mregs;
1141        struct dma_channel      *dma;
1142        bool                    transfer_pending = false;
1143
1144        musb_ep_select(mbase, epnum);
1145        tx_csr = musb_readw(epio, MUSB_TXCSR);
1146
1147        /* with CPPI, DMA sometimes triggers "extra" irqs */
1148        if (!urb) {
1149                dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
1150                return;
1151        }
1152
1153        pipe = urb->pipe;
1154        dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
1155        dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
1156                        dma ? ", dma" : "");
1157
1158        /* check for errors */
1159        if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
1160                /* dma was disabled, fifo flushed */
1161                dev_dbg(musb->controller, "TX end %d stall\n", epnum);
1162
1163                /* stall; record URB status */
1164                status = -EPIPE;
1165
1166        } else if (tx_csr & MUSB_TXCSR_H_ERROR) {
1167                /* (NON-ISO) dma was disabled, fifo flushed */
1168                dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);
1169
1170                status = -ETIMEDOUT;
1171
1172        } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
1173                dev_dbg(musb->controller, "TX end=%d device not responding\n", epnum);
1174
1175                /* NOTE:  this code path would be a good place to PAUSE a
1176                 * transfer, if there's some other (nonperiodic) tx urb
1177                 * that could use this fifo.  (dma complicates it...)
1178                 * That's already done for bulk RX transfers.
1179                 *
1180                 * if (bulk && qh->ring.next != &musb->out_bulk), then
1181                 * we have a candidate... NAKing is *NOT* an error
1182                 */
1183                musb_ep_select(mbase, epnum);
1184                musb_writew(epio, MUSB_TXCSR,
1185                                MUSB_TXCSR_H_WZC_BITS
1186                                | MUSB_TXCSR_TXPKTRDY);
1187                return;
1188        }
1189
1190        if (status) {
1191                if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1192                        dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1193                        (void) musb->dma_controller->channel_abort(dma);
1194                }
1195
1196                /* do the proper sequence to abort the transfer in the
1197                 * usb core; the dma engine should already be stopped.
1198                 */
1199                musb_h_tx_flush_fifo(hw_ep);
1200                tx_csr &= ~(MUSB_TXCSR_AUTOSET
1201                                | MUSB_TXCSR_DMAENAB
1202                                | MUSB_TXCSR_H_ERROR
1203                                | MUSB_TXCSR_H_RXSTALL
1204                                | MUSB_TXCSR_H_NAKTIMEOUT
1205                                );
1206
1207                musb_ep_select(mbase, epnum);
1208                musb_writew(epio, MUSB_TXCSR, tx_csr);
1209                /* REVISIT may need to clear FLUSHFIFO ... */
1210                musb_writew(epio, MUSB_TXCSR, tx_csr);
1211                musb_writeb(epio, MUSB_TXINTERVAL, 0);
1212
1213                done = true;
1214        }
1215
1216        /* second cppi case */
1217        if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1218                dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
1219                return;
1220        }
1221
1222        if (is_dma_capable() && dma && !status) {
1223                /*
1224                 * DMA has completed.  But if we're using DMA mode 1 (multi
1225                 * packet DMA), we need a terminal TXPKTRDY interrupt before
1226                 * we can consider this transfer completed, lest we trash
1227                 * its last packet when writing the next URB's data.  So we
1228                 * switch back to mode 0 to get that interrupt; we'll come
1229                 * back here once it happens.
1230                 */
1231                if (tx_csr & MUSB_TXCSR_DMAMODE) {
1232                        /*
1233                         * We shouldn't clear DMAMODE with DMAENAB set; so
1234                         * clear them in a safe order.  That should be OK
1235                         * once TXPKTRDY has been set (and I've never seen
1236                         * it being 0 at this moment -- DMA interrupt latency
1237                         * is significant) but if it hasn't been then we have
1238                         * no choice but to stop being polite and ignore the
1239                         * programmer's guide... :-)
1240                         *
1241                         * Note that we must write TXCSR with TXPKTRDY cleared
1242                         * in order not to re-trigger the packet send (this bit
1243                         * can't be cleared by CPU), and there's another caveat:
1244                         * TXPKTRDY may be set shortly and then cleared in the
1245                         * double-buffered FIFO mode, so we do an extra TXCSR
1246                         * read for debouncing...
1247                         */
1248                        tx_csr &= musb_readw(epio, MUSB_TXCSR);
1249                        if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
1250                                tx_csr &= ~(MUSB_TXCSR_DMAENAB |
1251                                            MUSB_TXCSR_TXPKTRDY);
1252                                musb_writew(epio, MUSB_TXCSR,
1253                                            tx_csr | MUSB_TXCSR_H_WZC_BITS);
1254                        }
1255                        tx_csr &= ~(MUSB_TXCSR_DMAMODE |
1256                                    MUSB_TXCSR_TXPKTRDY);
1257                        musb_writew(epio, MUSB_TXCSR,
1258                                    tx_csr | MUSB_TXCSR_H_WZC_BITS);
1259
1260                        /*
1261                         * There is no guarantee that we'll get an interrupt
1262                         * after clearing DMAMODE as we might have done this
1263                         * too late (after TXPKTRDY was cleared by controller).
1264                         * Re-read TXCSR as we have spoiled its previous value.
1265                         */
1266                        tx_csr = musb_readw(epio, MUSB_TXCSR);
1267                }
1268
1269                /*
1270                 * We may get here from a DMA completion or TXPKTRDY interrupt.
1271                 * In any case, we must check the FIFO status here and bail out
1272                 * only if the FIFO still has data -- that should prevent the
1273                 * "missed" TXPKTRDY interrupts and deal with double-buffered
1274                 * FIFO mode too...
1275                 */
1276                if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
1277                        dev_dbg(musb->controller, "DMA complete but packet still in FIFO, "
1278                            "CSR %04x\n", tx_csr);
1279                        return;
1280                }
1281        }
1282
1283        if (!status || dma || usb_pipeisoc(pipe)) {
1284                if (dma)
1285                        length = dma->actual_len;
1286                else
1287                        length = qh->segsize;
1288                qh->offset += length;
1289
1290                if (usb_pipeisoc(pipe)) {
1291#ifndef __UBOOT__
1292                        struct usb_iso_packet_descriptor        *d;
1293
1294                        d = urb->iso_frame_desc + qh->iso_idx;
1295                        d->actual_length = length;
1296                        d->status = status;
1297                        if (++qh->iso_idx >= urb->number_of_packets) {
1298                                done = true;
1299                        } else {
1300                                d++;
1301                                offset = d->offset;
1302                                length = d->length;
1303                        }
1304#endif
1305                } else if (dma && urb->transfer_buffer_length == qh->offset) {
1306                        done = true;
1307                } else {
1308                        /* see if we need to send more data, or ZLP */
1309                        if (qh->segsize < qh->maxpacket)
1310                                done = true;
1311                        else if (qh->offset == urb->transfer_buffer_length
1312                                        && !(urb->transfer_flags
1313                                                & URB_ZERO_PACKET))
1314                                done = true;
1315                        if (!done) {
1316                                offset = qh->offset;
1317                                length = urb->transfer_buffer_length - offset;
1318                                transfer_pending = true;
1319                        }
1320                }
1321        }
1322
1323        /* urb->status != -EINPROGRESS means request has been faulted,
1324         * so we must abort this transfer after cleanup
1325         */
1326        if (urb->status != -EINPROGRESS) {
1327                done = true;
1328                if (status == 0)
1329                        status = urb->status;
1330        }
1331
1332        if (done) {
1333                /* set status */
1334                urb->status = status;
1335                urb->actual_length = qh->offset;
1336                musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
1337                return;
1338        } else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
1339                if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
1340                                offset, length)) {
1341                        if (is_cppi_enabled() || tusb_dma_omap())
1342                                musb_h_tx_dma_start(hw_ep);
1343                        return;
1344                }
        } else if (tx_csr & MUSB_TXCSR_DMAENAB) {
1346                dev_dbg(musb->controller, "not complete, but DMA enabled?\n");
1347                return;
1348        }
1349
1350        /*
1351         * PIO: start next packet in this URB.
1352         *
1353         * REVISIT: some docs say that when hw_ep->tx_double_buffered,
1354         * (and presumably, FIFO is not half-full) we should write *two*
1355         * packets before updating TXCSR; other docs disagree...
1356         */
1357        if (length > qh->maxpacket)
1358                length = qh->maxpacket;
1359        /* Unmap the buffer so that CPU can use it */
1360        usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
1361        musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
1362        qh->segsize = length;
1363
1364        musb_ep_select(mbase, epnum);
1365        musb_writew(epio, MUSB_TXCSR,
1366                        MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
1367}
1368
1369
1370#ifdef CONFIG_USB_INVENTRA_DMA
1371
1372/* Host side RX (IN) using Mentor DMA works as follows:
1373        submit_urb ->
1374                - if queue was empty, ProgramEndpoint
1375                - first IN token is sent out (by setting ReqPkt)
1376        LinuxIsr -> RxReady()
1377        /\      => first packet is received
1378        |       - Set in mode 0 (DmaEnab, ~ReqPkt)
1379        |               -> DMA Isr (transfer complete) -> RxReady()
1380        |                   - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
1381        |                   - if urb not complete, send next IN token (ReqPkt)
1382        |                          |            else complete urb.
1383        |                          |
1384        ---------------------------
1385 *
 * Nuances of mode 1:
 *      For short packets, no ack (+RxPktRdy) is sent automatically
 *      (even if AutoClear is ON)
 *      For full packets, the ack (~RxPktRdy) and the next IN token (+ReqPkt)
 *      are sent automatically => major problem, as collecting the next packet
 *      becomes difficult. Hence mode 1 is not used.
1392 *
1393 * REVISIT
1394 *      All we care about at this driver level is that
1395 *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
1396 *       (b) termination conditions are: short RX, or buffer full;
1397 *       (c) fault modes include
1398 *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
1399 *             (and that endpoint's dma queue stops immediately)
1400 *           - overflow (full, PLUS more bytes in the terminal packet)
1401 *
1402 *      So for example, usb-storage sets URB_SHORT_NOT_OK, and would
1403 *      thus be a great candidate for using mode 1 ... for all but the
1404 *      last packet of one URB's transfer.
1405 */
1406
1407#endif
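
/*
 * For illustration only: a minimal, never-compiled sketch of the mode-0 step
 * of the loop above, using this file's register helpers.  It assumes a packet
 * has just been flagged by RxReady(); all error handling and the
 * AUTOREQ/AUTOCLEAR refinements of musb_host_rx() are omitted.
 */
#if 0
static void mode0_rx_cycle_sketch(struct musb_hw_ep *hw_ep,
                struct dma_channel *dma, dma_addr_t buf)
{
        void __iomem    *epio = hw_ep->regs;
        u16             count = musb_readw(epio, MUSB_RXCOUNT);
        u16             csr = musb_readw(epio, MUSB_RXCSR);

        /* packet is in the FIFO: stop requesting IN tokens, hand it to DMA */
        csr &= ~MUSB_RXCSR_H_REQPKT;
        csr |= MUSB_RXCSR_DMAENAB;
        musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | csr);

        /* mode 0: one packet per channel_program()/DMA-complete interrupt */
        hw_ep->musb->dma_controller->channel_program(dma,
                        hw_ep->max_packet_sz_rx, 0, buf, count);
}
#endif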
1408
1409/* Schedule next QH from musb->in_bulk and move the current qh to
1410 * the end; avoids starvation for other endpoints.
1411 */
1412static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
1413{
1414        struct dma_channel      *dma;
1415        struct urb              *urb;
1416        void __iomem            *mbase = musb->mregs;
1417        void __iomem            *epio = ep->regs;
1418        struct musb_qh          *cur_qh, *next_qh;
1419        u16                     rx_csr;
1420
1421        musb_ep_select(mbase, ep->epnum);
1422        dma = is_dma_capable() ? ep->rx_channel : NULL;
1423
        /* clear the NAK timeout bit */
1425        rx_csr = musb_readw(epio, MUSB_RXCSR);
1426        rx_csr |= MUSB_RXCSR_H_WZC_BITS;
1427        rx_csr &= ~MUSB_RXCSR_DATAERROR;
1428        musb_writew(epio, MUSB_RXCSR, rx_csr);
1429
1430        cur_qh = first_qh(&musb->in_bulk);
1431        if (cur_qh) {
1432                urb = next_urb(cur_qh);
1433                if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1434                        dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1435                        musb->dma_controller->channel_abort(dma);
1436                        urb->actual_length += dma->actual_len;
1437                        dma->actual_len = 0L;
1438                }
1439                musb_save_toggle(cur_qh, 1, urb);
1440
1441                /* move cur_qh to end of queue */
1442                list_move_tail(&cur_qh->ring, &musb->in_bulk);
1443
1444                /* get the next qh from musb->in_bulk */
1445                next_qh = first_qh(&musb->in_bulk);
1446
1447                /* set rx_reinit and schedule the next qh */
1448                ep->rx_reinit = 1;
1449                musb_start_urb(musb, 1, next_qh);
1450        }
1451}
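
/*
 * Example of the rotation above: with qh A and qh B both multiplexed on the
 * shared bulk endpoint and A's device NAKing every IN token, the NAK timeout
 * (see the intv_reg values chosen in musb_schedule(); 16 ms at high speed)
 * raises DATAERROR, A is moved to the tail of musb->in_bulk, and B's URB is
 * started on the re-initialized endpoint.  A gets another turn once B
 * completes or times out the same way.
 */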
1452
1453/*
1454 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
1455 * and high-bandwidth IN transfer cases.
1456 */
1457void musb_host_rx(struct musb *musb, u8 epnum)
1458{
1459        struct urb              *urb;
1460        struct musb_hw_ep       *hw_ep = musb->endpoints + epnum;
1461        void __iomem            *epio = hw_ep->regs;
1462        struct musb_qh          *qh = hw_ep->in_qh;
1463        size_t                  xfer_len;
1464        void __iomem            *mbase = musb->mregs;
1465        int                     pipe;
1466        u16                     rx_csr, val;
1467        bool                    iso_err = false;
1468        bool                    done = false;
1469        u32                     status;
1470        struct dma_channel      *dma;
1471
1472        musb_ep_select(mbase, epnum);
1473
1474        urb = next_urb(qh);
1475        dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
1476        status = 0;
1477        xfer_len = 0;
1478
1479        rx_csr = musb_readw(epio, MUSB_RXCSR);
1480        val = rx_csr;
1481
1482        if (unlikely(!urb)) {
1483                /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
1484                 * usbtest #11 (unlinks) triggers it regularly, sometimes
1485                 * with fifo full.  (Only with DMA??)
1486                 */
1487                dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
1488                        musb_readw(epio, MUSB_RXCOUNT));
1489                musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1490                return;
1491        }
1492
1493        pipe = urb->pipe;
1494
1495        dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
1496                epnum, rx_csr, urb->actual_length,
1497                dma ? dma->actual_len : 0);
1498
1499        /* check for errors, concurrent stall & unlink is not really
1500         * handled yet! */
1501        if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
1502                dev_dbg(musb->controller, "RX end %d STALL\n", epnum);
1503
1504                /* stall; record URB status */
1505                status = -EPIPE;
1506
1507        } else if (rx_csr & MUSB_RXCSR_H_ERROR) {
1508                dev_dbg(musb->controller, "end %d RX proto error\n", epnum);
1509
1510                status = -EPROTO;
1511                musb_writeb(epio, MUSB_RXINTERVAL, 0);
1512
1513        } else if (rx_csr & MUSB_RXCSR_DATAERROR) {
1514
1515                if (USB_ENDPOINT_XFER_ISOC != qh->type) {
1516                        dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum);
1517
1518                        /* NOTE: NAKing is *NOT* an error, so we want to
1519                         * continue.  Except ... if there's a request for
1520                         * another QH, use that instead of starving it.
1521                         *
1522                         * Devices like Ethernet and serial adapters keep
1523                         * reads posted at all times, which will starve
1524                         * other devices without this logic.
1525                         */
1526                        if (usb_pipebulk(urb->pipe)
1527                                        && qh->mux == 1
1528                                        && !list_is_singular(&musb->in_bulk)) {
1529                                musb_bulk_rx_nak_timeout(musb, hw_ep);
1530                                return;
1531                        }
1532                        musb_ep_select(mbase, epnum);
1533                        rx_csr |= MUSB_RXCSR_H_WZC_BITS;
1534                        rx_csr &= ~MUSB_RXCSR_DATAERROR;
1535                        musb_writew(epio, MUSB_RXCSR, rx_csr);
1536
1537                        goto finish;
1538                } else {
1539                        dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum);
1540                        /* packet error reported later */
1541                        iso_err = true;
1542                }
1543        } else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
1544                dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n",
1545                                epnum);
1546                status = -EPROTO;
1547        }
1548
1549        /* faults abort the transfer */
1550        if (status) {
1551                /* clean up dma and collect transfer count */
1552                if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1553                        dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1554                        (void) musb->dma_controller->channel_abort(dma);
1555                        xfer_len = dma->actual_len;
1556                }
1557                musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1558                musb_writeb(epio, MUSB_RXINTERVAL, 0);
1559                done = true;
1560                goto finish;
1561        }
1562
1563        if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
1564                /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
1565                ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
1566                goto finish;
1567        }
1568
1569        /* thorough shutdown for now ... given more precise fault handling
1570         * and better queueing support, we might keep a DMA pipeline going
1571         * while processing this irq for earlier completions.
1572         */
1573
1574        /* FIXME this is _way_ too much in-line logic for Mentor DMA */
1575
1576#ifndef CONFIG_USB_INVENTRA_DMA
1577        if (rx_csr & MUSB_RXCSR_H_REQPKT)  {
1578                /* REVISIT this happened for a while on some short reads...
1579                 * the cleanup still needs investigation... looks bad...
1580                 * and also duplicates dma cleanup code above ... plus,
1581                 * shouldn't this be the "half full" double buffer case?
1582                 */
1583                if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1584                        dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1585                        (void) musb->dma_controller->channel_abort(dma);
1586                        xfer_len = dma->actual_len;
1587                        done = true;
1588                }
1589
1590                dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
1591                                xfer_len, dma ? ", dma" : "");
1592                rx_csr &= ~MUSB_RXCSR_H_REQPKT;
1593
1594                musb_ep_select(mbase, epnum);
1595                musb_writew(epio, MUSB_RXCSR,
1596                                MUSB_RXCSR_H_WZC_BITS | rx_csr);
1597        }
1598#endif
1599        if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
1600                xfer_len = dma->actual_len;
1601
1602                val &= ~(MUSB_RXCSR_DMAENAB
1603                        | MUSB_RXCSR_H_AUTOREQ
1604                        | MUSB_RXCSR_AUTOCLEAR
1605                        | MUSB_RXCSR_RXPKTRDY);
1606                musb_writew(hw_ep->regs, MUSB_RXCSR, val);
1607
1608#ifdef CONFIG_USB_INVENTRA_DMA
1609                if (usb_pipeisoc(pipe)) {
1610                        struct usb_iso_packet_descriptor *d;
1611
1612                        d = urb->iso_frame_desc + qh->iso_idx;
1613                        d->actual_length = xfer_len;
1614
1615                        /* even if there was an error, we did the dma
1616                         * for iso_frame_desc->length
1617                         */
1618                        if (d->status != -EILSEQ && d->status != -EOVERFLOW)
1619                                d->status = 0;
1620
1621                        if (++qh->iso_idx >= urb->number_of_packets)
1622                                done = true;
1623                        else
1624                                done = false;
1625
                } else {
                        /* done if the urb buffer is full or a short packet
                         * was received
                         */
                        done = (urb->actual_length + xfer_len >=
                                        urb->transfer_buffer_length
                                || dma->actual_len < qh->maxpacket);
                }
1632
1633                /* send IN token for next packet, without AUTOREQ */
1634                if (!done) {
1635                        val |= MUSB_RXCSR_H_REQPKT;
1636                        musb_writew(epio, MUSB_RXCSR,
1637                                MUSB_RXCSR_H_WZC_BITS | val);
1638                }
1639
1640                dev_dbg(musb->controller, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
1641                        done ? "off" : "reset",
1642                        musb_readw(epio, MUSB_RXCSR),
1643                        musb_readw(epio, MUSB_RXCOUNT));
1644#else
1645                done = true;
1646#endif
1647        } else if (urb->status == -EINPROGRESS) {
1648                /* if no errors, be sure a packet is ready for unloading */
1649                if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
1650                        status = -EPROTO;
1651                        ERR("Rx interrupt with no errors or packet!\n");
1652
1653                        /* FIXME this is another "SHOULD NEVER HAPPEN" */
1654
1655/* SCRUB (RX) */
1656                        /* do the proper sequence to abort the transfer */
1657                        musb_ep_select(mbase, epnum);
1658                        val &= ~MUSB_RXCSR_H_REQPKT;
1659                        musb_writew(epio, MUSB_RXCSR, val);
1660                        goto finish;
1661                }
1662
1663                /* we are expecting IN packets */
1664#ifdef CONFIG_USB_INVENTRA_DMA
1665                if (dma) {
1666                        struct dma_controller   *c;
1667                        u16                     rx_count;
1668                        int                     ret, length;
1669                        dma_addr_t              buf;
1670
1671                        rx_count = musb_readw(epio, MUSB_RXCOUNT);
1672
1673                        dev_dbg(musb->controller, "RX%d count %d, buffer 0x%x len %d/%d\n",
1674                                        epnum, rx_count,
1675                                        urb->transfer_dma
1676                                                + urb->actual_length,
1677                                        qh->offset,
1678                                        urb->transfer_buffer_length);
1679
1680                        c = musb->dma_controller;
1681
1682                        if (usb_pipeisoc(pipe)) {
1683                                int d_status = 0;
1684                                struct usb_iso_packet_descriptor *d;
1685
1686                                d = urb->iso_frame_desc + qh->iso_idx;
1687
1688                                if (iso_err) {
1689                                        d_status = -EILSEQ;
1690                                        urb->error_count++;
1691                                }
1692                                if (rx_count > d->length) {
1693                                        if (d_status == 0) {
1694                                                d_status = -EOVERFLOW;
1695                                                urb->error_count++;
1696                                        }
                                        dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",
                                                rx_count, d->length);
1699
1700                                        length = d->length;
1701                                } else
1702                                        length = rx_count;
1703                                d->status = d_status;
1704                                buf = urb->transfer_dma + d->offset;
1705                        } else {
1706                                length = rx_count;
1707                                buf = urb->transfer_dma +
1708                                                urb->actual_length;
1709                        }
1710
1711                        dma->desired_mode = 0;
1712#ifdef USE_MODE1
1713                        /* because of the issue below, mode 1 will
1714                         * only rarely behave with correct semantics.
1715                         */
1716                        if ((urb->transfer_flags &
1717                                                URB_SHORT_NOT_OK)
1718                                && (urb->transfer_buffer_length -
1719                                                urb->actual_length)
1720                                        > qh->maxpacket)
1721                                dma->desired_mode = 1;
1722                        if (rx_count < hw_ep->max_packet_sz_rx) {
1723                                length = rx_count;
1724                                dma->desired_mode = 0;
1725                        } else {
1726                                length = urb->transfer_buffer_length;
1727                        }
1728#endif
1729
/* Disadvantage of using mode 1:
 *      It's basically usable only for mass storage class; essentially all
 *      other protocols also terminate transfers on short packets.
 *
 * Details:
 *      An extra IN token is sent at the end of the transfer (due to AUTOREQ).
 *      If you try to use mode 1 for (transfer_buffer_length - 512), and try
 *      to use the extra IN token to grab the last packet using mode 0, the
 *      problem is that you cannot be sure when the device will send the last
 *      packet and set RxPktRdy.  Sometimes the packet arrives too soon and
 *      gets lost when RxCSR is rewritten at the end of the mode 1 transfer;
 *      sometimes it arrives just a little too late, so that if you configure
 *      for mode 0 right after the mode 1 transfer completes, you will read
 *      back rxcount 0.  And no, waiting for an interrupt when the packet
 *      finally arrives doesn't work -- you won't get one!
 */
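
/* To make the race above concrete, here is the tempting -- and broken --
 * split, sketched (illustrative only) with this driver's channel_program()
 * signature:
 *
 *      channel_program(dma, maxpacket, 1, buf,
 *                      urb->transfer_buffer_length - maxpacket);
 *      ... DMA completion interrupt, switch the channel to mode 0 ...
 *      channel_program(dma, maxpacket, 0, buf + done, maxpacket);
 *
 * Between the two steps the AUTOREQ-issued IN token may already have
 * completed: if the last packet arrived early it is lost when RxCSR is
 * rewritten, and if it arrives late you read back rxcount 0 with no
 * interrupt ever announcing it.
 */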
1746
1747                        val = musb_readw(epio, MUSB_RXCSR);
1748                        val &= ~MUSB_RXCSR_H_REQPKT;
1749
1750                        if (dma->desired_mode == 0)
1751                                val &= ~MUSB_RXCSR_H_AUTOREQ;
1752                        else
1753                                val |= MUSB_RXCSR_H_AUTOREQ;
1754                        val |= MUSB_RXCSR_DMAENAB;
1755
1756                        /* autoclear shouldn't be set in high bandwidth */
1757                        if (qh->hb_mult == 1)
1758                                val |= MUSB_RXCSR_AUTOCLEAR;
1759
1760                        musb_writew(epio, MUSB_RXCSR,
1761                                MUSB_RXCSR_H_WZC_BITS | val);
1762
                        /* REVISIT: when actual_length != 0,
                         * transfer_buffer_length needs to be
                         * adjusted first...
                         */
1767                        ret = c->channel_program(
1768                                dma, qh->maxpacket,
1769                                dma->desired_mode, buf, length);
1770
1771                        if (!ret) {
1772                                c->channel_release(dma);
1773                                hw_ep->rx_channel = NULL;
1774                                dma = NULL;
1775                                val = musb_readw(epio, MUSB_RXCSR);
1776                                val &= ~(MUSB_RXCSR_DMAENAB
1777                                        | MUSB_RXCSR_H_AUTOREQ
1778                                        | MUSB_RXCSR_AUTOCLEAR);
1779                                musb_writew(epio, MUSB_RXCSR, val);
1780                        }
1781                }
1782#endif  /* Mentor DMA */
1783
1784                if (!dma) {
1785                        /* Unmap the buffer so that CPU can use it */
1786                        usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
1787                        done = musb_host_packet_rx(musb, urb,
1788                                        epnum, iso_err);
1789                        dev_dbg(musb->controller, "read %spacket\n", done ? "last " : "");
1790                }
1791        }
1792
1793finish:
1794        urb->actual_length += xfer_len;
1795        qh->offset += xfer_len;
1796        if (done) {
1797                if (urb->status == -EINPROGRESS)
1798                        urb->status = status;
1799                musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
1800        }
1801}
1802
1803/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
1804 * the software schedule associates multiple such nodes with a given
1805 * host side hardware endpoint + direction; scheduling may activate
1806 * that hardware endpoint.
1807 */
1808static int musb_schedule(
1809        struct musb             *musb,
1810        struct musb_qh          *qh,
1811        int                     is_in)
1812{
1813        int                     idle;
1814        int                     best_diff;
1815        int                     best_end, epnum;
1816        struct musb_hw_ep       *hw_ep = NULL;
1817        struct list_head        *head = NULL;
1818        u8                      toggle;
1819        u8                      txtype;
1820        struct urb              *urb = next_urb(qh);
1821
1822        /* use fixed hardware for control and bulk */
1823        if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
1824                head = &musb->control;
1825                hw_ep = musb->control_ep;
1826                goto success;
1827        }
1828
1829        /* else, periodic transfers get muxed to other endpoints */
1830
1831        /*
1832         * We know this qh hasn't been scheduled, so all we need to do
1833         * is choose which hardware endpoint to put it on ...
1834         *
1835         * REVISIT what we really want here is a regular schedule tree
1836         * like e.g. OHCI uses.
1837         */
1838        best_diff = 4096;
1839        best_end = -1;
1840
1841        for (epnum = 1, hw_ep = musb->endpoints + 1;
1842                        epnum < musb->nr_endpoints;
1843                        epnum++, hw_ep++) {
1844                int     diff;
1845
1846                if (musb_ep_get_qh(hw_ep, is_in) != NULL)
1847                        continue;
1848
1849                if (hw_ep == musb->bulk_ep)
1850                        continue;
1851
1852                if (is_in)
1853                        diff = hw_ep->max_packet_sz_rx;
1854                else
1855                        diff = hw_ep->max_packet_sz_tx;
1856                diff -= (qh->maxpacket * qh->hb_mult);
1857
1858                if (diff >= 0 && best_diff > diff) {
1859
                        /*
                         * The Mentor controller has a bug: if we schedule a
                         * bulk TX transfer on an endpoint that earlier handled
                         * ISOC, the bulk transfer must start on a zero data
                         * toggle.  If it starts on a 1 toggle, the transfer
                         * fails, since the controller begins the bulk transfer
                         * on a 0 toggle irrespective of the toggle bits
                         * programmed in the TXCSR register.  Check for this
                         * condition while allocating the EP for a TX bulk
                         * transfer, and if it holds, skip this EP.
                         */
1872                        hw_ep = musb->endpoints + epnum;
1873                        toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
1874                        txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
1875                                        >> 4) & 0x3;
1876                        if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
1877                                toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
1878                                continue;
1879
1880                        best_diff = diff;
1881                        best_end = epnum;
1882                }
1883        }
1884        /* use bulk reserved ep1 if no other ep is free */
1885        if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
1886                hw_ep = musb->bulk_ep;
1887                if (is_in)
1888                        head = &musb->in_bulk;
1889                else
1890                        head = &musb->out_bulk;
1891
                /* Enable the bulk RX NAK timeout scheme when bulk requests
                 * are multiplexed.  This scheme doesn't work in a high-speed
                 * to full-speed scenario, since NAK interrupts never arrive
                 * for a full-speed device connected behind a high-speed hub.
                 * The NAK timeout interval is 8 (128 uframes, i.e. 16 ms) for
                 * HS and 4 (8 frames, i.e. 8 ms) for an FS device.
                 */
1899                if (is_in && qh->dev)
1900                        qh->intv_reg =
1901                                (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
1902                goto success;
1903        } else if (best_end < 0) {
1904                return -ENOSPC;
1905        }
1906
1907        idle = 1;
1908        qh->mux = 0;
1909        hw_ep = musb->endpoints + best_end;
1910        dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end);
1911success:
1912        if (head) {
1913                idle = list_empty(head);
1914                list_add_tail(&qh->ring, head);
1915                qh->mux = 1;
1916        }
1917        qh->hw_ep = hw_ep;
1918        qh->hep->hcpriv = qh;
1919        if (idle)
1920                musb_start_urb(musb, is_in, qh);
1921        return 0;
1922}
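
/*
 * Worked example of the best-fit search above: with free hardware endpoints
 * offering 512- and 1024-byte FIFOs, a bulk qh with maxpacket 512 and
 * hb_mult 1 gives diffs of 0 and 512, so the 512-byte endpoint wins and the
 * larger FIFO stays free for a qh that actually needs it.  A bulk qh that
 * fits nowhere falls back to the shared bulk endpoint; anything else fails
 * with -ENOSPC.
 */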
1923
1924#ifdef __UBOOT__
1925/* check if transaction translator is needed for device */
1926static int tt_needed(struct musb *musb, struct usb_device *dev)
1927{
1928        if ((musb_readb(musb->mregs, MUSB_POWER) & MUSB_POWER_HSMODE) &&
1929                        (dev->speed < USB_SPEED_HIGH))
1930                return 1;
1931        return 0;
1932}
1933#endif
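
/*
 * find_tt() comes from the compat headers included above; the unpacking in
 * musb_urb_enqueue() below assumes it returns the TT hub's device number in
 * the high byte and the hub port in the low byte.  For example, a full-speed
 * device on port 1 of a high-speed hub at address 2 yields 0x0201, so
 * h_addr_reg = 2 and h_port_reg = 1.
 */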
1934
1935#ifndef __UBOOT__
1936static int musb_urb_enqueue(
1937#else
1938int musb_urb_enqueue(
1939#endif
1940        struct usb_hcd                  *hcd,
1941        struct urb                      *urb,
1942        gfp_t                           mem_flags)
1943{
1944        unsigned long                   flags;
1945        struct musb                     *musb = hcd_to_musb(hcd);
1946        struct usb_host_endpoint        *hep = urb->ep;
1947        struct musb_qh                  *qh;
1948        struct usb_endpoint_descriptor  *epd = &hep->desc;
1949        int                             ret;
1950        unsigned                        type_reg;
1951        unsigned                        interval;
1952
1953        /* host role must be active */
1954        if (!is_host_active(musb) || !musb->is_active)
1955                return -ENODEV;
1956
1957        spin_lock_irqsave(&musb->lock, flags);
1958        ret = usb_hcd_link_urb_to_ep(hcd, urb);
1959        qh = ret ? NULL : hep->hcpriv;
1960        if (qh)
1961                urb->hcpriv = qh;
1962        spin_unlock_irqrestore(&musb->lock, flags);
1963
1964        /* DMA mapping was already done, if needed, and this urb is on
1965         * hep->urb_list now ... so we're done, unless hep wasn't yet
1966         * scheduled onto a live qh.
1967         *
1968         * REVISIT best to keep hep->hcpriv valid until the endpoint gets
1969         * disabled, testing for empty qh->ring and avoiding qh setup costs
1970         * except for the first urb queued after a config change.
1971         */
1972        if (qh || ret)
1973                return ret;
1974
1975        /* Allocate and initialize qh, minimizing the work done each time
1976         * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
1977         *
1978         * REVISIT consider a dedicated qh kmem_cache, so it's harder
1979         * for bugs in other kernel code to break this driver...
1980         */
1981        qh = kzalloc(sizeof *qh, mem_flags);
1982        if (!qh) {
1983                spin_lock_irqsave(&musb->lock, flags);
1984                usb_hcd_unlink_urb_from_ep(hcd, urb);
1985                spin_unlock_irqrestore(&musb->lock, flags);
1986                return -ENOMEM;
1987        }
1988
1989        qh->hep = hep;
1990        qh->dev = urb->dev;
1991        INIT_LIST_HEAD(&qh->ring);
1992        qh->is_ready = 1;
1993
1994        qh->maxpacket = usb_endpoint_maxp(epd);
1995        qh->type = usb_endpoint_type(epd);
1996
1997        /* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
1998         * Some musb cores don't support high bandwidth ISO transfers; and
1999         * we don't (yet!) support high bandwidth interrupt transfers.
2000         */
2001        qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
2002        if (qh->hb_mult > 1) {
2003                int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
2004
2005                if (ok)
2006                        ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
2007                                || (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
2008                if (!ok) {
2009                        ret = -EMSGSIZE;
2010                        goto done;
2011                }
2012                qh->maxpacket &= 0x7ff;
2013        }
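
        /*
         * Worked example: a high-bandwidth ISO endpoint advertising
         * wMaxPacketSize = 0x1400 gives hb_mult = 1 + ((0x1400 >> 11) & 3)
         * = 3 transactions per microframe, each of qh->maxpacket = 0x400
         * (1024) bytes.
         */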
2014
2015        qh->epnum = usb_endpoint_num(epd);
2016
2017        /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
2018        qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
2019
2020        /* precompute rxtype/txtype/type0 register */
2021        type_reg = (qh->type << 4) | qh->epnum;
2022        switch (urb->dev->speed) {
2023        case USB_SPEED_LOW:
2024                type_reg |= 0xc0;
2025                break;
2026        case USB_SPEED_FULL:
2027                type_reg |= 0x80;
2028                break;
2029        default:
2030                type_reg |= 0x40;
2031        }
2032        qh->type_reg = type_reg;
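
        /*
         * Example: a full-speed interrupt endpoint 1 yields
         * type_reg = (USB_ENDPOINT_XFER_INT << 4) | 1 | 0x80 = 0xb1 --
         * speed code in bits 7:6, transfer type in bits 5:4, and the
         * target endpoint number in bits 3:0.
         */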
2033
2034        /* Precompute RXINTERVAL/TXINTERVAL register */
2035        switch (qh->type) {
2036        case USB_ENDPOINT_XFER_INT:
2037                /*
                 * Full/low speeds use the linear encoding,
2039                 * high speed uses the logarithmic encoding.
2040                 */
2041                if (urb->dev->speed <= USB_SPEED_FULL) {
2042                        interval = max_t(u8, epd->bInterval, 1);
2043                        break;
2044                }
2045                /* FALLTHROUGH */
2046        case USB_ENDPOINT_XFER_ISOC:
2047                /* ISO always uses logarithmic encoding */
2048                interval = min_t(u8, epd->bInterval, 16);
2049                break;
2050        default:
2051                /* REVISIT we actually want to use NAK limits, hinting to the
2052                 * transfer scheduling logic to try some other qh, e.g. try
2053                 * for 2 msec first:
2054                 *
2055                 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
2056                 *
2057                 * The downside of disabling this is that transfer scheduling
2058                 * gets VERY unfair for nonperiodic transfers; a misbehaving
2059                 * peripheral could make that hurt.  That's perfectly normal
2060                 * for reads from network or serial adapters ... so we have
2061                 * partial NAKlimit support for bulk RX.
2062                 *
2063                 * The upside of disabling it is simpler transfer scheduling.
2064                 */
2065                interval = 0;
2066        }
2067        qh->intv_reg = interval;
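
        /*
         * Example encodings: a full-speed interrupt endpoint with
         * bInterval = 10 is polled every 10 frames (10 ms, linear), while
         * a high-speed one with bInterval = 4 uses the logarithmic form:
         * 2^(4-1) = 8 microframes = 1 ms.
         */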
2068
2069        /* precompute addressing for external hub/tt ports */
2070        if (musb->is_multipoint) {
2071                struct usb_device       *parent = urb->dev->parent;
2072
2073#ifndef __UBOOT__
2074                if (parent != hcd->self.root_hub) {
2075#else
2076                if (parent) {
2077#endif
2078                        qh->h_addr_reg = (u8) parent->devnum;
2079
2080#ifndef __UBOOT__
2081                        /* set up tt info if needed */
2082                        if (urb->dev->tt) {
2083                                qh->h_port_reg = (u8) urb->dev->ttport;
2084                                if (urb->dev->tt->hub)
2085                                        qh->h_addr_reg =
2086                                                (u8) urb->dev->tt->hub->devnum;
2087                                if (urb->dev->tt->multi)
2088                                        qh->h_addr_reg |= 0x80;
2089                        }
2090#else
2091                        if (tt_needed(musb, urb->dev)) {
2092                                u16 hub_port = find_tt(urb->dev);
2093                                qh->h_addr_reg = (u8) (hub_port >> 8);
2094                                qh->h_port_reg = (u8) (hub_port & 0xff);
2095                        }
2096#endif
2097                }
2098        }
2099
2100        /* invariant: hep->hcpriv is null OR the qh that's already scheduled.
2101         * until we get real dma queues (with an entry for each urb/buffer),
2102         * we only have work to do in the former case.
2103         */
2104        spin_lock_irqsave(&musb->lock, flags);
2105        if (hep->hcpriv) {
2106                /* some concurrent activity submitted another urb to hep...
2107                 * odd, rare, error prone, but legal.
2108                 */
2109                kfree(qh);
2110                qh = NULL;
2111                ret = 0;
2112        } else
2113                ret = musb_schedule(musb, qh,
2114                                epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
2115
2116        if (ret == 0) {
2117                urb->hcpriv = qh;
2118                /* FIXME set urb->start_frame for iso/intr, it's tested in
2119                 * musb_start_urb(), but otherwise only konicawc cares ...
2120                 */
2121        }
2122        spin_unlock_irqrestore(&musb->lock, flags);
2123
2124done:
2125        if (ret != 0) {
2126                spin_lock_irqsave(&musb->lock, flags);
2127                usb_hcd_unlink_urb_from_ep(hcd, urb);
2128                spin_unlock_irqrestore(&musb->lock, flags);
2129                kfree(qh);
2130        }
2131        return ret;
2132}
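
/*
 * Minimal sketch (never compiled) of how U-Boot glue code might drive the
 * non-static musb_urb_enqueue() above; construct_urb() is a hypothetical
 * helper standing in for whatever builds the URB in the real glue.
 */
#if 0
static int submit_bulk_sketch(struct usb_hcd *hcd, struct usb_device *dev,
                unsigned long pipe, void *buffer, int len)
{
        struct urb *urb = construct_urb(dev, pipe, buffer, len); /* hypothetical */

        if (!urb)
                return -ENOMEM;
        /* enqueue, then poll the controller until urb->status changes */
        return musb_urb_enqueue(hcd, urb, 0);
}
#endif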
2133
2134
2135#ifndef __UBOOT__
/*
 * Abort a transfer that's at the head of a hardware queue.
 * Called with the controller locked, IRQs blocked.
 * The hardware queue advances to the next transfer, unless prevented.
 */
2141static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
2142{
2143        struct musb_hw_ep       *ep = qh->hw_ep;
2144        struct musb             *musb = ep->musb;
2145        void __iomem            *epio = ep->regs;
2146        unsigned                hw_end = ep->epnum;
2147        void __iomem            *regs = ep->musb->mregs;
2148        int                     is_in = usb_pipein(urb->pipe);
2149        int                     status = 0;
2150        u16                     csr;
2151
2152        musb_ep_select(regs, hw_end);
2153
2154        if (is_dma_capable()) {
2155                struct dma_channel      *dma;
2156
2157                dma = is_in ? ep->rx_channel : ep->tx_channel;
2158                if (dma) {
2159                        status = ep->musb->dma_controller->channel_abort(dma);
2160                        dev_dbg(musb->controller,
2161                                "abort %cX%d DMA for urb %p --> %d\n",
2162                                is_in ? 'R' : 'T', ep->epnum,
2163                                urb, status);
2164                        urb->actual_length += dma->actual_len;
2165                }
2166        }
2167
2168        /* turn off DMA requests, discard state, stop polling ... */
2169        if (ep->epnum && is_in) {
2170                /* giveback saves bulk toggle */
2171                csr = musb_h_flush_rxfifo(ep, 0);
2172
2173                /* REVISIT we still get an irq; should likely clear the
2174                 * endpoint's irq status here to avoid bogus irqs.
2175                 * clearing that status is platform-specific...
2176                 */
2177        } else if (ep->epnum) {
2178                musb_h_tx_flush_fifo(ep);
2179                csr = musb_readw(epio, MUSB_TXCSR);
2180                csr &= ~(MUSB_TXCSR_AUTOSET
2181                        | MUSB_TXCSR_DMAENAB
2182                        | MUSB_TXCSR_H_RXSTALL
2183                        | MUSB_TXCSR_H_NAKTIMEOUT
2184                        | MUSB_TXCSR_H_ERROR
2185                        | MUSB_TXCSR_TXPKTRDY);
2186                musb_writew(epio, MUSB_TXCSR, csr);
2187                /* REVISIT may need to clear FLUSHFIFO ... */
2188                musb_writew(epio, MUSB_TXCSR, csr);
2189                /* flush cpu writebuffer */
2190                csr = musb_readw(epio, MUSB_TXCSR);
2191        } else  {
2192                musb_h_ep0_flush_fifo(ep);
2193        }
2194        if (status == 0)
2195                musb_advance_schedule(ep->musb, urb, ep, is_in);
2196        return status;
2197}
2198
2199static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
2200{
2201        struct musb             *musb = hcd_to_musb(hcd);
2202        struct musb_qh          *qh;
2203        unsigned long           flags;
2204        int                     is_in  = usb_pipein(urb->pipe);
2205        int                     ret;
2206
2207        dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb,
2208                        usb_pipedevice(urb->pipe),
2209                        usb_pipeendpoint(urb->pipe),
2210                        is_in ? "in" : "out");
2211
2212        spin_lock_irqsave(&musb->lock, flags);
2213        ret = usb_hcd_check_unlink_urb(hcd, urb, status);
2214        if (ret)
2215                goto done;
2216
2217        qh = urb->hcpriv;
2218        if (!qh)
2219                goto done;
2220
2221        /*
2222         * Any URB not actively programmed into endpoint hardware can be
2223         * immediately given back; that's any URB not at the head of an
2224         * endpoint queue, unless someday we get real DMA queues.  And even
2225         * if it's at the head, it might not be known to the hardware...
2226         *
2227         * Otherwise abort current transfer, pending DMA, etc.; urb->status
2228         * has already been updated.  This is a synchronous abort; it'd be
2229         * OK to hold off until after some IRQ, though.
2230         *
2231         * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
2232         */
2233        if (!qh->is_ready
2234                        || urb->urb_list.prev != &qh->hep->urb_list
2235                        || musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
2236                int     ready = qh->is_ready;
2237
2238                qh->is_ready = 0;
2239                musb_giveback(musb, urb, 0);
2240                qh->is_ready = ready;
2241
2242                /* If nothing else (usually musb_giveback) is using it
2243                 * and its URB list has emptied, recycle this qh.
2244                 */
2245                if (ready && list_empty(&qh->hep->urb_list)) {
2246                        qh->hep->hcpriv = NULL;
2247                        list_del(&qh->ring);
2248                        kfree(qh);
2249                }
2250        } else
2251                ret = musb_cleanup_urb(urb, qh);
2252done:
2253        spin_unlock_irqrestore(&musb->lock, flags);
2254        return ret;
2255}
2256
2257/* disable an endpoint */
2258static void
2259musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
2260{
2261        u8                      is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
2262        unsigned long           flags;
2263        struct musb             *musb = hcd_to_musb(hcd);
2264        struct musb_qh          *qh;
2265        struct urb              *urb;
2266
2267        spin_lock_irqsave(&musb->lock, flags);
2268
2269        qh = hep->hcpriv;
2270        if (qh == NULL)
2271                goto exit;
2272
2273        /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
2274
2275        /* Kick the first URB off the hardware, if needed */
2276        qh->is_ready = 0;
2277        if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
2278                urb = next_urb(qh);
2279
2280                /* make software (then hardware) stop ASAP */
2281                if (!urb->unlinked)
2282                        urb->status = -ESHUTDOWN;
2283
2284                /* cleanup */
2285                musb_cleanup_urb(urb, qh);
2286
2287                /* Then nuke all the others ... and advance the
2288                 * queue on hw_ep (e.g. bulk ring) when we're done.
2289                 */
2290                while (!list_empty(&hep->urb_list)) {
2291                        urb = next_urb(qh);
2292                        urb->status = -ESHUTDOWN;
2293                        musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
2294                }
2295        } else {
2296                /* Just empty the queue; the hardware is busy with
2297                 * other transfers, and since !qh->is_ready nothing
2298                 * will activate any of these as it advances.
2299                 */
2300                while (!list_empty(&hep->urb_list))
2301                        musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
2302
2303                hep->hcpriv = NULL;
2304                list_del(&qh->ring);
2305                kfree(qh);
2306        }
2307exit:
2308        spin_unlock_irqrestore(&musb->lock, flags);
2309}
2310
2311static int musb_h_get_frame_number(struct usb_hcd *hcd)
2312{
2313        struct musb     *musb = hcd_to_musb(hcd);
2314
2315        return musb_readw(musb->mregs, MUSB_FRAME);
2316}
2317
2318static int musb_h_start(struct usb_hcd *hcd)
2319{
2320        struct musb     *musb = hcd_to_musb(hcd);
2321
2322        /* NOTE: musb_start() is called when the hub driver turns
2323         * on port power, or when (OTG) peripheral starts.
2324         */
2325        hcd->state = HC_STATE_RUNNING;
2326        musb->port1_status = 0;
2327        return 0;
2328}
2329
2330static void musb_h_stop(struct usb_hcd *hcd)
2331{
2332        musb_stop(hcd_to_musb(hcd));
2333        hcd->state = HC_STATE_HALT;
2334}
2335
2336static int musb_bus_suspend(struct usb_hcd *hcd)
2337{
2338        struct musb     *musb = hcd_to_musb(hcd);
2339        u8              devctl;
2340
2341        if (!is_host_active(musb))
2342                return 0;
2343
2344        switch (musb->xceiv->state) {
2345        case OTG_STATE_A_SUSPEND:
2346                return 0;
2347        case OTG_STATE_A_WAIT_VRISE:
2348                /* ID could be grounded even if there's no device
2349                 * on the other end of the cable.  NOTE that the
2350                 * A_WAIT_VRISE timers are messy with MUSB...
2351                 */
2352                devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2353                if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
2354                        musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
2355                break;
2356        default:
2357                break;
2358        }
2359
2360        if (musb->is_active) {
2361                WARNING("trying to suspend as %s while active\n",
2362                                otg_state_string(musb->xceiv->state));
2363                return -EBUSY;
2364        } else
2365                return 0;
2366}
2367
2368static int musb_bus_resume(struct usb_hcd *hcd)
2369{
2370        /* resuming child port does the work */
2371        return 0;
2372}
2373
2374const struct hc_driver musb_hc_driver = {
2375        .description            = "musb-hcd",
2376        .product_desc           = "MUSB HDRC host driver",
2377        .hcd_priv_size          = sizeof(struct musb),
2378        .flags                  = HCD_USB2 | HCD_MEMORY,
2379
2380        /* not using irq handler or reset hooks from usbcore, since
2381         * those must be shared with peripheral code for OTG configs
2382         */
2383
2384        .start                  = musb_h_start,
2385        .stop                   = musb_h_stop,
2386
2387        .get_frame_number       = musb_h_get_frame_number,
2388
2389        .urb_enqueue            = musb_urb_enqueue,
2390        .urb_dequeue            = musb_urb_dequeue,
2391        .endpoint_disable       = musb_h_disable,
2392
2393        .hub_status_data        = musb_hub_status_data,
2394        .hub_control            = musb_hub_control,
2395        .bus_suspend            = musb_bus_suspend,
2396        .bus_resume             = musb_bus_resume,
2397        /* .start_port_reset    = NULL, */
2398        /* .hub_irq_enable      = NULL, */
2399};
2400#endif
2401