linux/drivers/usb/musb/musb_host.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * MUSB OTG driver host support
   4 *
   5 * Copyright 2005 Mentor Graphics Corporation
   6 * Copyright (C) 2005-2006 by Texas Instruments
   7 * Copyright (C) 2006-2007 Nokia Corporation
   8 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
   9 */
  10
  11#include <linux/module.h>
  12#include <linux/kernel.h>
  13#include <linux/delay.h>
  14#include <linux/sched.h>
  15#include <linux/slab.h>
  16#include <linux/errno.h>
  17#include <linux/list.h>
  18#include <linux/dma-mapping.h>
  19
  20#include "musb_core.h"
  21#include "musb_host.h"
  22#include "musb_trace.h"
  23
  24/* MUSB HOST status 22-mar-2006
  25 *
  26 * - There's still lots of partial code duplication for fault paths, so
  27 *   they aren't handled as consistently as they need to be.
  28 *
  29 * - PIO mostly behaved when last tested.
  30 *     + including ep0, with all usbtest cases 9, 10
  31 *     + usbtest 14 (ep0out) doesn't seem to run at all
  32 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
  33 *       configurations, but otherwise double buffering passes basic tests.
  34 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
  35 *
  36 * - DMA (CPPI) ... partially behaves, not currently recommended
  37 *     + about 1/15 the speed of typical EHCI implementations (PCI)
  38 *     + RX, all too often reqpkt seems to misbehave after tx
  39 *     + TX, no known issues (other than evident silicon issue)
  40 *
  41 * - DMA (Mentor/OMAP) ...has at least toggle update problems
  42 *
  43 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
  44 *   starvation ... nothing yet for TX, interrupt, or bulk.
  45 *
  46 * - Not tested with HNP, but some SRP paths seem to behave.
  47 *
  48 * NOTE 24-August-2006:
  49 *
  50 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
  51 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
  52 *   mostly works, except that with "usbnet" it's easy to trigger cases
  53 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
  54 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
  55 *   although ARP RX wins.  (That test was done with a full speed link.)
  56 */
  57
  58
  59/*
  60 * NOTE on endpoint usage:
  61 *
  62 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
  63 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
  64 * (Yes, bulk _could_ use more of the endpoints than that, and would even
  65 * benefit from it.)
  66 *
   67 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
  68 * So far that scheduling is both dumb and optimistic:  the endpoint will be
  69 * "claimed" until its software queue is no longer refilled.  No multiplexing
  70 * of transfers between endpoints, or anything clever.
  71 */
  72
  73struct musb *hcd_to_musb(struct usb_hcd *hcd)
  74{
  75        return *(struct musb **) hcd->hcd_priv;
  76}
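/*
 * Illustrative note (a sketch, not part of the original file): hcd_priv is
 * assumed to be sized to hold one pointer and to be filled in by the code
 * that creates the HCD, roughly as
 *
 *      *(struct musb **) hcd->hcd_priv = musb;
 *
 * so that hcd_to_musb() above simply reads that pointer back on the IRQ and
 * URB handling paths.
 */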
  77
  78
  79static void musb_ep_program(struct musb *musb, u8 epnum,
  80                        struct urb *urb, int is_out,
  81                        u8 *buf, u32 offset, u32 len);
  82
  83/*
  84 * Clear TX fifo. Needed to avoid BABBLE errors.
  85 */
  86static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
  87{
  88        struct musb     *musb = ep->musb;
  89        void __iomem    *epio = ep->regs;
  90        u16             csr;
  91        int             retries = 1000;
  92
  93        csr = musb_readw(epio, MUSB_TXCSR);
  94        while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
  95                csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
  96                musb_writew(epio, MUSB_TXCSR, csr);
  97                csr = musb_readw(epio, MUSB_TXCSR);
  98
  99                /*
  100                 * FIXME: sometimes the tx fifo flush fails; this has been
  101                 * observed during device disconnect on AM335x.
  102                 *
  103                 * To reproduce the issue, ensure tx urb(s) are queued when
  104                 * unplugging the usb device connected to the AM335x usb
  105                 * host port.
  106                 *
  107                 * Using a usb-ethernet device and running iperf (client on
  108                 * AM335x) has a very high chance of triggering it.
 109                 *
 110                 * Better to turn on musb_dbg() in musb_cleanup_urb() with
 111                 * CPPI enabled to see the issue when aborting the tx channel.
 112                 */
 113                if (dev_WARN_ONCE(musb->controller, retries-- < 1,
 114                                "Could not flush host TX%d fifo: csr: %04x\n",
 115                                ep->epnum, csr))
 116                        return;
 117                mdelay(1);
 118        }
 119}
 120
 121static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
 122{
 123        void __iomem    *epio = ep->regs;
 124        u16             csr;
 125        int             retries = 5;
 126
 127        /* scrub any data left in the fifo */
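        /*
         * Note: ep0 repurposes the TX register offsets (see the note in
         * musb_rx_reinit() below), so CSR0 is accessed via MUSB_TXCSR here.
         */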
 128        do {
 129                csr = musb_readw(epio, MUSB_TXCSR);
 130                if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
 131                        break;
 132                musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
 133                csr = musb_readw(epio, MUSB_TXCSR);
 134                udelay(10);
 135        } while (--retries);
 136
 137        WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
 138                        ep->epnum, csr);
 139
 140        /* and reset for the next transfer */
 141        musb_writew(epio, MUSB_TXCSR, 0);
 142}
 143
 144/*
 145 * Start transmit. Caller is responsible for locking shared resources.
 146 * musb must be locked.
 147 */
 148static inline void musb_h_tx_start(struct musb_hw_ep *ep)
 149{
 150        u16     txcsr;
 151
 152        /* NOTE: no locks here; caller should lock and select EP */
 153        if (ep->epnum) {
 154                txcsr = musb_readw(ep->regs, MUSB_TXCSR);
 155                txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
 156                musb_writew(ep->regs, MUSB_TXCSR, txcsr);
 157        } else {
 158                txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
 159                musb_writew(ep->regs, MUSB_CSR0, txcsr);
 160        }
 161
 162}
 163
 164static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
 165{
 166        u16     txcsr;
 167
 168        /* NOTE: no locks here; caller should lock and select EP */
 169        txcsr = musb_readw(ep->regs, MUSB_TXCSR);
 170        txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
 171        if (is_cppi_enabled(ep->musb))
 172                txcsr |= MUSB_TXCSR_DMAMODE;
 173        musb_writew(ep->regs, MUSB_TXCSR, txcsr);
 174}
 175
 176static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
 177{
 178        if (is_in != 0 || ep->is_shared_fifo)
 179                ep->in_qh  = qh;
 180        if (is_in == 0 || ep->is_shared_fifo)
 181                ep->out_qh = qh;
 182}
 183
 184static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
 185{
 186        return is_in ? ep->in_qh : ep->out_qh;
 187}
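/*
 * Hypothetical helper (illustration only, not used by this driver): the
 * "claimed" state described in the endpoint-usage note above amounts to a
 * non-NULL qh pointer for the given direction.
 */
static inline bool musb_ep_dir_is_claimed(struct musb_hw_ep *ep, int is_in)
{
        /* a non-NULL qh means this direction of the endpoint is claimed */
        return musb_ep_get_qh(ep, is_in) != NULL;
}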
 188
 189/*
 190 * Start the URB at the front of an endpoint's queue
 191 * end must be claimed from the caller.
 192 *
 193 * Context: controller locked, irqs blocked
 194 */
 195static void
 196musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
 197{
 198        u32                     len;
 199        void __iomem            *mbase =  musb->mregs;
 200        struct urb              *urb = next_urb(qh);
 201        void                    *buf = urb->transfer_buffer;
 202        u32                     offset = 0;
 203        struct musb_hw_ep       *hw_ep = qh->hw_ep;
 204        int                     epnum = hw_ep->epnum;
 205
 206        /* initialize software qh state */
 207        qh->offset = 0;
 208        qh->segsize = 0;
 209
 210        /* gather right source of data */
 211        switch (qh->type) {
 212        case USB_ENDPOINT_XFER_CONTROL:
 213                /* control transfers always start with SETUP */
 214                is_in = 0;
 215                musb->ep0_stage = MUSB_EP0_START;
 216                buf = urb->setup_packet;
 217                len = 8;
 218                break;
 219        case USB_ENDPOINT_XFER_ISOC:
 220                qh->iso_idx = 0;
 221                qh->frame = 0;
 222                offset = urb->iso_frame_desc[0].offset;
 223                len = urb->iso_frame_desc[0].length;
 224                break;
 225        default:                /* bulk, interrupt */
 226                /* actual_length may be nonzero on retry paths */
 227                buf = urb->transfer_buffer + urb->actual_length;
 228                len = urb->transfer_buffer_length - urb->actual_length;
 229        }
 230
 231        trace_musb_urb_start(musb, urb);
 232
 233        /* Configure endpoint */
 234        musb_ep_set_qh(hw_ep, is_in, qh);
 235        musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);
 236
 237        /* transmit may have more work: start it when it is time */
 238        if (is_in)
 239                return;
 240
 241        /* determine if the time is right for a periodic transfer */
 242        switch (qh->type) {
 243        case USB_ENDPOINT_XFER_ISOC:
 244        case USB_ENDPOINT_XFER_INT:
 245                musb_dbg(musb, "check whether there's still time for periodic Tx");
 246                /* FIXME this doesn't implement that scheduling policy ...
 247                 * or handle framecounter wrapping
 248                 */
 249                if (1) {        /* Always assume URB_ISO_ASAP */
 250                        /* REVISIT the SOF irq handler shouldn't duplicate
 251                         * this code; and we don't init urb->start_frame...
 252                         */
 253                        qh->frame = 0;
 254                        goto start;
 255                } else {
 256                        qh->frame = urb->start_frame;
 257                        /* enable SOF interrupt so we can count down */
 258                        musb_dbg(musb, "SOF for %d", epnum);
 259#if 1 /* ifndef CONFIG_ARCH_DAVINCI */
 260                        musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
 261#endif
 262                }
 263                break;
 264        default:
 265start:
 266                musb_dbg(musb, "Start TX%d %s", epnum,
 267                        hw_ep->tx_channel ? "dma" : "pio");
 268
 269                if (!hw_ep->tx_channel)
 270                        musb_h_tx_start(hw_ep);
 271                else if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
 272                        musb_h_tx_dma_start(hw_ep);
 273        }
 274}
 275
 276/* Context: caller owns controller lock, IRQs are blocked */
 277static void musb_giveback(struct musb *musb, struct urb *urb, int status)
 278__releases(musb->lock)
 279__acquires(musb->lock)
 280{
 281        trace_musb_urb_gb(musb, urb);
 282
 283        usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
 284        spin_unlock(&musb->lock);
 285        usb_hcd_giveback_urb(musb->hcd, urb, status);
 286        spin_lock(&musb->lock);
 287}
 288
 289/* For bulk/interrupt endpoints only */
 290static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
 291                                    struct urb *urb)
 292{
 293        void __iomem            *epio = qh->hw_ep->regs;
 294        u16                     csr;
 295
 296        /*
 297         * FIXME: the current Mentor DMA code seems to have
 298         * problems getting toggle correct.
 299         */
 300
 301        if (is_in)
 302                csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
 303        else
 304                csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
 305
 306        usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
 307}
 308
 309/*
 310 * Advance this hardware endpoint's queue, completing the specified URB and
 311 * advancing to either the next URB queued to that qh, or else invalidating
 312 * that qh and advancing to the next qh scheduled after the current one.
 313 *
 314 * Context: caller owns controller lock, IRQs are blocked
 315 */
 316static void musb_advance_schedule(struct musb *musb, struct urb *urb,
 317                                  struct musb_hw_ep *hw_ep, int is_in)
 318{
 319        struct musb_qh          *qh = musb_ep_get_qh(hw_ep, is_in);
 320        struct musb_hw_ep       *ep = qh->hw_ep;
 321        int                     ready = qh->is_ready;
 322        int                     status;
 323
 324        status = (urb->status == -EINPROGRESS) ? 0 : urb->status;
 325
 326        /* save toggle eagerly, for paranoia */
 327        switch (qh->type) {
 328        case USB_ENDPOINT_XFER_BULK:
 329        case USB_ENDPOINT_XFER_INT:
 330                musb_save_toggle(qh, is_in, urb);
 331                break;
 332        case USB_ENDPOINT_XFER_ISOC:
 333                if (status == 0 && urb->error_count)
 334                        status = -EXDEV;
 335                break;
 336        }
 337
 338        qh->is_ready = 0;
 339        musb_giveback(musb, urb, status);
 340        qh->is_ready = ready;
 341
 342        /* reclaim resources (and bandwidth) ASAP; deschedule it, and
 343         * invalidate qh as soon as list_empty(&hep->urb_list)
 344         */
 345        if (list_empty(&qh->hep->urb_list)) {
 346                struct list_head        *head;
 347                struct dma_controller   *dma = musb->dma_controller;
 348
 349                if (is_in) {
 350                        ep->rx_reinit = 1;
 351                        if (ep->rx_channel) {
 352                                dma->channel_release(ep->rx_channel);
 353                                ep->rx_channel = NULL;
 354                        }
 355                } else {
 356                        ep->tx_reinit = 1;
 357                        if (ep->tx_channel) {
 358                                dma->channel_release(ep->tx_channel);
 359                                ep->tx_channel = NULL;
 360                        }
 361                }
 362
 363                /* Clobber old pointers to this qh */
 364                musb_ep_set_qh(ep, is_in, NULL);
 365                qh->hep->hcpriv = NULL;
 366
 367                switch (qh->type) {
 368
 369                case USB_ENDPOINT_XFER_CONTROL:
 370                case USB_ENDPOINT_XFER_BULK:
 371                        /* fifo policy for these lists, except that NAKing
 372                         * should rotate a qh to the end (for fairness).
 373                         */
 374                        if (qh->mux == 1) {
 375                                head = qh->ring.prev;
 376                                list_del(&qh->ring);
 377                                kfree(qh);
 378                                qh = first_qh(head);
 379                                break;
 380                        }
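                        /* FALLTHROUGH */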
 381
 382                case USB_ENDPOINT_XFER_ISOC:
 383                case USB_ENDPOINT_XFER_INT:
 384                        /* this is where periodic bandwidth should be
 385                         * de-allocated if it's tracked and allocated;
 386                         * and where we'd update the schedule tree...
 387                         */
 388                        kfree(qh);
 389                        qh = NULL;
 390                        break;
 391                }
 392        }
 393
 394        if (qh != NULL && qh->is_ready) {
 395                musb_dbg(musb, "... next ep%d %cX urb %p",
 396                    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
 397                musb_start_urb(musb, is_in, qh);
 398        }
 399}
 400
 401static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
 402{
 403        /* we don't want fifo to fill itself again;
 404         * ignore dma (various models),
 405         * leave toggle alone (may not have been saved yet)
 406         */
 407        csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
 408        csr &= ~(MUSB_RXCSR_H_REQPKT
 409                | MUSB_RXCSR_H_AUTOREQ
 410                | MUSB_RXCSR_AUTOCLEAR);
 411
 412        /* write 2x to allow double buffering */
 413        musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
 414        musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
 415
 416        /* flush writebuffer */
 417        return musb_readw(hw_ep->regs, MUSB_RXCSR);
 418}
 419
 420/*
 421 * PIO RX for a packet (or part of it).
 422 */
 423static bool
 424musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
 425{
 426        u16                     rx_count;
 427        u8                      *buf;
 428        u16                     csr;
 429        bool                    done = false;
 430        u32                     length;
 431        int                     do_flush = 0;
 432        struct musb_hw_ep       *hw_ep = musb->endpoints + epnum;
 433        void __iomem            *epio = hw_ep->regs;
 434        struct musb_qh          *qh = hw_ep->in_qh;
 435        int                     pipe = urb->pipe;
 436        void                    *buffer = urb->transfer_buffer;
 437
 438        /* musb_ep_select(mbase, epnum); */
 439        rx_count = musb_readw(epio, MUSB_RXCOUNT);
 440        musb_dbg(musb, "RX%d count %d, buffer %p len %d/%d", epnum, rx_count,
 441                        urb->transfer_buffer, qh->offset,
 442                        urb->transfer_buffer_length);
 443
 444        /* unload FIFO */
 445        if (usb_pipeisoc(pipe)) {
 446                int                                     status = 0;
 447                struct usb_iso_packet_descriptor        *d;
 448
 449                if (iso_err) {
 450                        status = -EILSEQ;
 451                        urb->error_count++;
 452                }
 453
 454                d = urb->iso_frame_desc + qh->iso_idx;
 455                buf = buffer + d->offset;
 456                length = d->length;
 457                if (rx_count > length) {
 458                        if (status == 0) {
 459                                status = -EOVERFLOW;
 460                                urb->error_count++;
 461                        }
 462                        musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
 463                        do_flush = 1;
 464                } else
 465                        length = rx_count;
 466                urb->actual_length += length;
 467                d->actual_length = length;
 468
 469                d->status = status;
 470
 471                /* see if we are done */
 472                done = (++qh->iso_idx >= urb->number_of_packets);
 473        } else {
 474                /* non-isoch */
 475                buf = buffer + qh->offset;
 476                length = urb->transfer_buffer_length - qh->offset;
 477                if (rx_count > length) {
 478                        if (urb->status == -EINPROGRESS)
 479                                urb->status = -EOVERFLOW;
 480                        musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
 481                        do_flush = 1;
 482                } else
 483                        length = rx_count;
 484                urb->actual_length += length;
 485                qh->offset += length;
 486
 487                /* see if we are done */
 488                done = (urb->actual_length == urb->transfer_buffer_length)
 489                        || (rx_count < qh->maxpacket)
 490                        || (urb->status != -EINPROGRESS);
 491                if (done
 492                                && (urb->status == -EINPROGRESS)
 493                                && (urb->transfer_flags & URB_SHORT_NOT_OK)
 494                                && (urb->actual_length
 495                                        < urb->transfer_buffer_length))
 496                        urb->status = -EREMOTEIO;
 497        }
 498
 499        musb_read_fifo(hw_ep, length, buf);
 500
 501        csr = musb_readw(epio, MUSB_RXCSR);
 502        csr |= MUSB_RXCSR_H_WZC_BITS;
 503        if (unlikely(do_flush))
 504                musb_h_flush_rxfifo(hw_ep, csr);
 505        else {
 506                /* REVISIT this assumes AUTOCLEAR is never set */
 507                csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
 508                if (!done)
 509                        csr |= MUSB_RXCSR_H_REQPKT;
 510                musb_writew(epio, MUSB_RXCSR, csr);
 511        }
 512
 513        return done;
 514}
 515
 516/* we don't always need to reinit a given side of an endpoint...
 517 * when we do, use tx/rx reinit routine and then construct a new CSR
 518 * to address data toggle, NYET, and DMA or PIO.
 519 *
 520 * it's possible that driver bugs (especially for DMA) or aborting a
 521 * transfer might have left the endpoint busier than it should be.
 522 * the busy/not-empty tests are basically paranoia.
 523 */
 524static void
 525musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
 526{
 527        struct musb_hw_ep *ep = musb->endpoints + epnum;
 528        u16     csr;
 529
 530        /* NOTE:  we know the "rx" fifo reinit never triggers for ep0.
 531         * That always uses tx_reinit since ep0 repurposes TX register
 532         * offsets; the initial SETUP packet is also a kind of OUT.
 533         */
 534
 535        /* if programmed for Tx, put it in RX mode */
 536        if (ep->is_shared_fifo) {
 537                csr = musb_readw(ep->regs, MUSB_TXCSR);
 538                if (csr & MUSB_TXCSR_MODE) {
 539                        musb_h_tx_flush_fifo(ep);
 540                        csr = musb_readw(ep->regs, MUSB_TXCSR);
 541                        musb_writew(ep->regs, MUSB_TXCSR,
 542                                    csr | MUSB_TXCSR_FRCDATATOG);
 543                }
 544
 545                /*
 546                 * Clear the MODE bit (and everything else) to enable Rx.
 547                 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
 548                 */
 549                if (csr & MUSB_TXCSR_DMAMODE)
 550                        musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
 551                musb_writew(ep->regs, MUSB_TXCSR, 0);
 552
 553        /* scrub all previous state, clearing toggle */
 554        }
 555        csr = musb_readw(ep->regs, MUSB_RXCSR);
 556        if (csr & MUSB_RXCSR_RXPKTRDY)
 557                WARNING("rx%d, packet/%d ready?\n", ep->epnum,
 558                        musb_readw(ep->regs, MUSB_RXCOUNT));
 559
 560        musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
 561
 562        /* target addr and (for multipoint) hub addr/port */
 563        if (musb->is_multipoint) {
 564                musb_write_rxfunaddr(musb, epnum, qh->addr_reg);
 565                musb_write_rxhubaddr(musb, epnum, qh->h_addr_reg);
 566                musb_write_rxhubport(musb, epnum, qh->h_port_reg);
 567        } else
 568                musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);
 569
 570        /* protocol/endpoint, interval/NAKlimit, i/o size */
 571        musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
 572        musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
 573        /* NOTE: bulk combining rewrites high bits of maxpacket */
 574        /* Set RXMAXP with the FIFO size of the endpoint
 575         * to disable double buffer mode.
 576         */
 577        musb_writew(ep->regs, MUSB_RXMAXP,
 578                        qh->maxpacket | ((qh->hb_mult - 1) << 11));
 579
 580        ep->rx_reinit = 0;
 581}
 582
 583static void musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
 584                struct musb_hw_ep *hw_ep, struct musb_qh *qh,
 585                struct urb *urb, u32 offset,
 586                u32 *length, u8 *mode)
 587{
 588        struct dma_channel      *channel = hw_ep->tx_channel;
 589        void __iomem            *epio = hw_ep->regs;
 590        u16                     pkt_size = qh->maxpacket;
 591        u16                     csr;
 592
 593        if (*length > channel->max_len)
 594                *length = channel->max_len;
 595
 596        csr = musb_readw(epio, MUSB_TXCSR);
 597        if (*length > pkt_size) {
 598                *mode = 1;
 599                csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
 600                /* autoset shouldn't be set in high bandwidth */
 601                /*
 602                 * Enable Autoset according to table
 603                 * below
 604                 * bulk_split hb_mult   Autoset_Enable
 605                 *      0       1       Yes(Normal)
 606                 *      0       >1      No(High BW ISO)
 607                 *      1       1       Yes(HS bulk)
 608                 *      1       >1      Yes(FS bulk)
 609                 */
 610                if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
 611                                        can_bulk_split(hw_ep->musb, qh->type)))
 612                        csr |= MUSB_TXCSR_AUTOSET;
 613        } else {
 614                *mode = 0;
 615                csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
 616                csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
 617        }
 618        channel->desired_mode = *mode;
 619        musb_writew(epio, MUSB_TXCSR, csr);
 620}
 621
 622static void musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
 623                                           struct musb_hw_ep *hw_ep,
 624                                           struct musb_qh *qh,
 625                                           struct urb *urb,
 626                                           u32 offset,
 627                                           u32 *length,
 628                                           u8 *mode)
 629{
 630        struct dma_channel *channel = hw_ep->tx_channel;
 631
 632        channel->actual_len = 0;
 633
 634        /*
 635         * TX uses "RNDIS" mode automatically but needs help
 636         * to identify the zero-length-final-packet case.
 637         */
 638        *mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
 639}
 640
 641static bool musb_tx_dma_program(struct dma_controller *dma,
 642                struct musb_hw_ep *hw_ep, struct musb_qh *qh,
 643                struct urb *urb, u32 offset, u32 length)
 644{
 645        struct dma_channel      *channel = hw_ep->tx_channel;
 646        u16                     pkt_size = qh->maxpacket;
 647        u8                      mode;
 648
 649        if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
 650                musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset,
 651                                            &length, &mode);
 652        else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
 653                musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset,
 654                                               &length, &mode);
 655        else
 656                return false;
 657
 658        qh->segsize = length;
 659
 660        /*
  661         * Ensure the data reaches main memory before starting the
  662         * DMA transfer
 663         */
 664        wmb();
 665
 666        if (!dma->channel_program(channel, pkt_size, mode,
 667                        urb->transfer_dma + offset, length)) {
 668                void __iomem *epio = hw_ep->regs;
 669                u16 csr;
 670
 671                dma->channel_release(channel);
 672                hw_ep->tx_channel = NULL;
 673
 674                csr = musb_readw(epio, MUSB_TXCSR);
 675                csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
 676                musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
 677                return false;
 678        }
 679        return true;
 680}
 681
 682/*
 683 * Program an HDRC endpoint as per the given URB
 684 * Context: irqs blocked, controller lock held
 685 */
 686static void musb_ep_program(struct musb *musb, u8 epnum,
 687                        struct urb *urb, int is_out,
 688                        u8 *buf, u32 offset, u32 len)
 689{
 690        struct dma_controller   *dma_controller;
 691        struct dma_channel      *dma_channel;
 692        u8                      dma_ok;
 693        void __iomem            *mbase = musb->mregs;
 694        struct musb_hw_ep       *hw_ep = musb->endpoints + epnum;
 695        void __iomem            *epio = hw_ep->regs;
 696        struct musb_qh          *qh = musb_ep_get_qh(hw_ep, !is_out);
 697        u16                     packet_sz = qh->maxpacket;
 698        u8                      use_dma = 1;
 699        u16                     csr;
 700
 701        musb_dbg(musb, "%s hw%d urb %p spd%d dev%d ep%d%s "
 702                                "h_addr%02x h_port%02x bytes %d",
 703                        is_out ? "-->" : "<--",
 704                        epnum, urb, urb->dev->speed,
 705                        qh->addr_reg, qh->epnum, is_out ? "out" : "in",
 706                        qh->h_addr_reg, qh->h_port_reg,
 707                        len);
 708
 709        musb_ep_select(mbase, epnum);
 710
 711        if (is_out && !len) {
 712                use_dma = 0;
 713                csr = musb_readw(epio, MUSB_TXCSR);
 714                csr &= ~MUSB_TXCSR_DMAENAB;
 715                musb_writew(epio, MUSB_TXCSR, csr);
 716                hw_ep->tx_channel = NULL;
 717        }
 718
 719        /* candidate for DMA? */
 720        dma_controller = musb->dma_controller;
 721        if (use_dma && is_dma_capable() && epnum && dma_controller) {
 722                dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
 723                if (!dma_channel) {
 724                        dma_channel = dma_controller->channel_alloc(
 725                                        dma_controller, hw_ep, is_out);
 726                        if (is_out)
 727                                hw_ep->tx_channel = dma_channel;
 728                        else
 729                                hw_ep->rx_channel = dma_channel;
 730                }
 731        } else
 732                dma_channel = NULL;
 733
 734        /* make sure we clear DMAEnab, autoSet bits from previous run */
 735
 736        /* OUT/transmit/EP0 or IN/receive? */
 737        if (is_out) {
 738                u16     csr;
 739                u16     int_txe;
 740                u16     load_count;
 741
 742                csr = musb_readw(epio, MUSB_TXCSR);
 743
 744                /* disable interrupt in case we flush */
 745                int_txe = musb->intrtxe;
 746                musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
 747
 748                /* general endpoint setup */
 749                if (epnum) {
 750                        /* flush all old state, set default */
 751                        /*
 752                         * We could be flushing valid
  753                         * packets in the double buffering
  754                         * case
 755                         */
 756                        if (!hw_ep->tx_double_buffered)
 757                                musb_h_tx_flush_fifo(hw_ep);
 758
 759                        /*
 760                         * We must not clear the DMAMODE bit before or in
 761                         * the same cycle with the DMAENAB bit, so we clear
 762                         * the latter first...
 763                         */
 764                        csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
 765                                        | MUSB_TXCSR_AUTOSET
 766                                        | MUSB_TXCSR_DMAENAB
 767                                        | MUSB_TXCSR_FRCDATATOG
 768                                        | MUSB_TXCSR_H_RXSTALL
 769                                        | MUSB_TXCSR_H_ERROR
 770                                        | MUSB_TXCSR_TXPKTRDY
 771                                        );
 772                        csr |= MUSB_TXCSR_MODE;
 773
 774                        if (!hw_ep->tx_double_buffered) {
 775                                if (usb_gettoggle(urb->dev, qh->epnum, 1))
 776                                        csr |= MUSB_TXCSR_H_WR_DATATOGGLE
 777                                                | MUSB_TXCSR_H_DATATOGGLE;
 778                                else
 779                                        csr |= MUSB_TXCSR_CLRDATATOG;
 780                        }
 781
 782                        musb_writew(epio, MUSB_TXCSR, csr);
 783                        /* REVISIT may need to clear FLUSHFIFO ... */
 784                        csr &= ~MUSB_TXCSR_DMAMODE;
 785                        musb_writew(epio, MUSB_TXCSR, csr);
 786                        csr = musb_readw(epio, MUSB_TXCSR);
 787                } else {
 788                        /* endpoint 0: just flush */
 789                        musb_h_ep0_flush_fifo(hw_ep);
 790                }
 791
 792                /* target addr and (for multipoint) hub addr/port */
 793                if (musb->is_multipoint) {
 794                        musb_write_txfunaddr(musb, epnum, qh->addr_reg);
 795                        musb_write_txhubaddr(musb, epnum, qh->h_addr_reg);
 796                        musb_write_txhubport(musb, epnum, qh->h_port_reg);
 797/* FIXME if !epnum, do the same for RX ... */
 798                } else
 799                        musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
 800
 801                /* protocol/endpoint/interval/NAKlimit */
 802                if (epnum) {
 803                        musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
 804                        if (can_bulk_split(musb, qh->type)) {
 805                                qh->hb_mult = hw_ep->max_packet_sz_tx
 806                                                / packet_sz;
 807                                musb_writew(epio, MUSB_TXMAXP, packet_sz
 808                                        | ((qh->hb_mult) - 1) << 11);
 809                        } else {
 810                                musb_writew(epio, MUSB_TXMAXP,
 811                                                qh->maxpacket |
 812                                                ((qh->hb_mult - 1) << 11));
 813                        }
 814                        musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
 815                } else {
 816                        musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
 817                        if (musb->is_multipoint)
 818                                musb_writeb(epio, MUSB_TYPE0,
 819                                                qh->type_reg);
 820                }
 821
 822                if (can_bulk_split(musb, qh->type))
 823                        load_count = min((u32) hw_ep->max_packet_sz_tx,
 824                                                len);
 825                else
 826                        load_count = min((u32) packet_sz, len);
 827
 828                if (dma_channel && musb_tx_dma_program(dma_controller,
 829                                        hw_ep, qh, urb, offset, len))
 830                        load_count = 0;
 831
 832                if (load_count) {
 833                        /* PIO to load FIFO */
 834                        qh->segsize = load_count;
 835                        if (!buf) {
 836                                sg_miter_start(&qh->sg_miter, urb->sg, 1,
 837                                                SG_MITER_ATOMIC
 838                                                | SG_MITER_FROM_SG);
 839                                if (!sg_miter_next(&qh->sg_miter)) {
 840                                        dev_err(musb->controller,
  841                                                        "error: sg "
 842                                                        "list empty\n");
 843                                        sg_miter_stop(&qh->sg_miter);
 844                                        goto finish;
 845                                }
 846                                buf = qh->sg_miter.addr + urb->sg->offset +
 847                                        urb->actual_length;
 848                                load_count = min_t(u32, load_count,
 849                                                qh->sg_miter.length);
 850                                musb_write_fifo(hw_ep, load_count, buf);
 851                                qh->sg_miter.consumed = load_count;
 852                                sg_miter_stop(&qh->sg_miter);
 853                        } else
 854                                musb_write_fifo(hw_ep, load_count, buf);
 855                }
 856finish:
 857                /* re-enable interrupt */
 858                musb_writew(mbase, MUSB_INTRTXE, int_txe);
 859
 860        /* IN/receive */
 861        } else {
 862                u16     csr;
 863
 864                if (hw_ep->rx_reinit) {
 865                        musb_rx_reinit(musb, qh, epnum);
 866
 867                        /* init new state: toggle and NYET, maybe DMA later */
 868                        if (usb_gettoggle(urb->dev, qh->epnum, 0))
 869                                csr = MUSB_RXCSR_H_WR_DATATOGGLE
 870                                        | MUSB_RXCSR_H_DATATOGGLE;
 871                        else
 872                                csr = 0;
 873                        if (qh->type == USB_ENDPOINT_XFER_INT)
 874                                csr |= MUSB_RXCSR_DISNYET;
 875
 876                } else {
 877                        csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
 878
 879                        if (csr & (MUSB_RXCSR_RXPKTRDY
 880                                        | MUSB_RXCSR_DMAENAB
 881                                        | MUSB_RXCSR_H_REQPKT))
 882                                ERR("broken !rx_reinit, ep%d csr %04x\n",
 883                                                hw_ep->epnum, csr);
 884
 885                        /* scrub any stale state, leaving toggle alone */
 886                        csr &= MUSB_RXCSR_DISNYET;
 887                }
 888
 889                /* kick things off */
 890
 891                if ((is_cppi_enabled(musb) || tusb_dma_omap(musb)) && dma_channel) {
 892                        /* Candidate for DMA */
 893                        dma_channel->actual_len = 0L;
 894                        qh->segsize = len;
 895
 896                        /* AUTOREQ is in a DMA register */
 897                        musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
 898                        csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
 899
 900                        /*
 901                         * Unless caller treats short RX transfers as
 902                         * errors, we dare not queue multiple transfers.
 903                         */
 904                        dma_ok = dma_controller->channel_program(dma_channel,
 905                                        packet_sz, !(urb->transfer_flags &
 906                                                     URB_SHORT_NOT_OK),
 907                                        urb->transfer_dma + offset,
 908                                        qh->segsize);
 909                        if (!dma_ok) {
 910                                dma_controller->channel_release(dma_channel);
 911                                hw_ep->rx_channel = dma_channel = NULL;
 912                        } else
 913                                csr |= MUSB_RXCSR_DMAENAB;
 914                }
 915
 916                csr |= MUSB_RXCSR_H_REQPKT;
 917                musb_dbg(musb, "RXCSR%d := %04x", epnum, csr);
 918                musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
 919                csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
 920        }
 921}
 922
 923/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
 924 * the end; avoids starvation for other endpoints.
 925 */
 926static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
 927        int is_in)
 928{
 929        struct dma_channel      *dma;
 930        struct urb              *urb;
 931        void __iomem            *mbase = musb->mregs;
 932        void __iomem            *epio = ep->regs;
 933        struct musb_qh          *cur_qh, *next_qh;
 934        u16                     rx_csr, tx_csr;
 935
 936        musb_ep_select(mbase, ep->epnum);
 937        if (is_in) {
 938                dma = is_dma_capable() ? ep->rx_channel : NULL;
 939
 940                /*
  941                 * Need to stop the transaction by clearing REQPKT first,
  942                 * then the NAK Timeout bit; see the MUSBMHDRC USB 2.0 HIGH-SPEED
  943                 * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2.
 944                 */
 945                rx_csr = musb_readw(epio, MUSB_RXCSR);
 946                rx_csr |= MUSB_RXCSR_H_WZC_BITS;
 947                rx_csr &= ~MUSB_RXCSR_H_REQPKT;
 948                musb_writew(epio, MUSB_RXCSR, rx_csr);
 949                rx_csr &= ~MUSB_RXCSR_DATAERROR;
 950                musb_writew(epio, MUSB_RXCSR, rx_csr);
 951
 952                cur_qh = first_qh(&musb->in_bulk);
 953        } else {
 954                dma = is_dma_capable() ? ep->tx_channel : NULL;
 955
 956                /* clear nak timeout bit */
 957                tx_csr = musb_readw(epio, MUSB_TXCSR);
 958                tx_csr |= MUSB_TXCSR_H_WZC_BITS;
 959                tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
 960                musb_writew(epio, MUSB_TXCSR, tx_csr);
 961
 962                cur_qh = first_qh(&musb->out_bulk);
 963        }
 964        if (cur_qh) {
 965                urb = next_urb(cur_qh);
 966                if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
 967                        dma->status = MUSB_DMA_STATUS_CORE_ABORT;
 968                        musb->dma_controller->channel_abort(dma);
 969                        urb->actual_length += dma->actual_len;
 970                        dma->actual_len = 0L;
 971                }
 972                musb_save_toggle(cur_qh, is_in, urb);
 973
 974                if (is_in) {
 975                        /* move cur_qh to end of queue */
 976                        list_move_tail(&cur_qh->ring, &musb->in_bulk);
 977
 978                        /* get the next qh from musb->in_bulk */
 979                        next_qh = first_qh(&musb->in_bulk);
 980
 981                        /* set rx_reinit and schedule the next qh */
 982                        ep->rx_reinit = 1;
 983                } else {
 984                        /* move cur_qh to end of queue */
 985                        list_move_tail(&cur_qh->ring, &musb->out_bulk);
 986
 987                        /* get the next qh from musb->out_bulk */
 988                        next_qh = first_qh(&musb->out_bulk);
 989
 990                        /* set tx_reinit and schedule the next qh */
 991                        ep->tx_reinit = 1;
 992                }
 993
 994                if (next_qh)
 995                        musb_start_urb(musb, is_in, next_qh);
 996        }
 997}
 998
 999/*
1000 * Service the default endpoint (ep0) as host.
1001 * Return true until it's time to start the status stage.
1002 */
1003static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
1004{
1005        bool                     more = false;
1006        u8                      *fifo_dest = NULL;
1007        u16                     fifo_count = 0;
1008        struct musb_hw_ep       *hw_ep = musb->control_ep;
1009        struct musb_qh          *qh = hw_ep->in_qh;
1010        struct usb_ctrlrequest  *request;
1011
1012        switch (musb->ep0_stage) {
1013        case MUSB_EP0_IN:
1014                fifo_dest = urb->transfer_buffer + urb->actual_length;
1015                fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
1016                                   urb->actual_length);
1017                if (fifo_count < len)
1018                        urb->status = -EOVERFLOW;
1019
1020                musb_read_fifo(hw_ep, fifo_count, fifo_dest);
1021
1022                urb->actual_length += fifo_count;
1023                if (len < qh->maxpacket) {
1024                        /* always terminate on short read; it's
1025                         * rarely reported as an error.
1026                         */
1027                } else if (urb->actual_length <
1028                                urb->transfer_buffer_length)
1029                        more = true;
1030                break;
1031        case MUSB_EP0_START:
1032                request = (struct usb_ctrlrequest *) urb->setup_packet;
1033
1034                if (!request->wLength) {
1035                        musb_dbg(musb, "start no-DATA");
1036                        break;
1037                } else if (request->bRequestType & USB_DIR_IN) {
1038                        musb_dbg(musb, "start IN-DATA");
1039                        musb->ep0_stage = MUSB_EP0_IN;
1040                        more = true;
1041                        break;
1042                } else {
1043                        musb_dbg(musb, "start OUT-DATA");
1044                        musb->ep0_stage = MUSB_EP0_OUT;
1045                        more = true;
1046                }
1047                /* FALLTHROUGH */
1048        case MUSB_EP0_OUT:
1049                fifo_count = min_t(size_t, qh->maxpacket,
1050                                   urb->transfer_buffer_length -
1051                                   urb->actual_length);
1052                if (fifo_count) {
1053                        fifo_dest = (u8 *) (urb->transfer_buffer
1054                                        + urb->actual_length);
1055                        musb_dbg(musb, "Sending %d byte%s to ep0 fifo %p",
1056                                        fifo_count,
1057                                        (fifo_count == 1) ? "" : "s",
1058                                        fifo_dest);
1059                        musb_write_fifo(hw_ep, fifo_count, fifo_dest);
1060
1061                        urb->actual_length += fifo_count;
1062                        more = true;
1063                }
1064                break;
1065        default:
1066                ERR("bogus ep0 stage %d\n", musb->ep0_stage);
1067                break;
1068        }
1069
1070        return more;
1071}
1072
1073/*
1074 * Handle default endpoint interrupt as host. Only called in IRQ time
1075 * from musb_interrupt().
1076 *
1077 * called with controller irqlocked
1078 */
1079irqreturn_t musb_h_ep0_irq(struct musb *musb)
1080{
1081        struct urb              *urb;
1082        u16                     csr, len;
1083        int                     status = 0;
1084        void __iomem            *mbase = musb->mregs;
1085        struct musb_hw_ep       *hw_ep = musb->control_ep;
1086        void __iomem            *epio = hw_ep->regs;
1087        struct musb_qh          *qh = hw_ep->in_qh;
1088        bool                    complete = false;
1089        irqreturn_t             retval = IRQ_NONE;
1090
1091        /* ep0 only has one queue, "in" */
1092        urb = next_urb(qh);
1093
1094        musb_ep_select(mbase, 0);
1095        csr = musb_readw(epio, MUSB_CSR0);
1096        len = (csr & MUSB_CSR0_RXPKTRDY)
1097                        ? musb_readb(epio, MUSB_COUNT0)
1098                        : 0;
1099
1100        musb_dbg(musb, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d",
1101                csr, qh, len, urb, musb->ep0_stage);
1102
1103        /* if we just did status stage, we are done */
1104        if (MUSB_EP0_STATUS == musb->ep0_stage) {
1105                retval = IRQ_HANDLED;
1106                complete = true;
1107        }
1108
1109        /* prepare status */
1110        if (csr & MUSB_CSR0_H_RXSTALL) {
1111                musb_dbg(musb, "STALLING ENDPOINT");
1112                status = -EPIPE;
1113
1114        } else if (csr & MUSB_CSR0_H_ERROR) {
1115                musb_dbg(musb, "no response, csr0 %04x", csr);
1116                status = -EPROTO;
1117
1118        } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
1119                musb_dbg(musb, "control NAK timeout");
1120
1121                /* NOTE:  this code path would be a good place to PAUSE a
1122                 * control transfer, if another one is queued, so that
1123                 * ep0 is more likely to stay busy.  That's already done
1124                 * for bulk RX transfers.
1125                 *
1126                 * if (qh->ring.next != &musb->control), then
1127                 * we have a candidate... NAKing is *NOT* an error
1128                 */
1129                musb_writew(epio, MUSB_CSR0, 0);
1130                retval = IRQ_HANDLED;
1131        }
1132
1133        if (status) {
1134                musb_dbg(musb, "aborting");
1135                retval = IRQ_HANDLED;
1136                if (urb)
1137                        urb->status = status;
1138                complete = true;
1139
1140                /* use the proper sequence to abort the transfer */
1141                if (csr & MUSB_CSR0_H_REQPKT) {
1142                        csr &= ~MUSB_CSR0_H_REQPKT;
1143                        musb_writew(epio, MUSB_CSR0, csr);
1144                        csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
1145                        musb_writew(epio, MUSB_CSR0, csr);
1146                } else {
1147                        musb_h_ep0_flush_fifo(hw_ep);
1148                }
1149
1150                musb_writeb(epio, MUSB_NAKLIMIT0, 0);
1151
1152                /* clear it */
1153                musb_writew(epio, MUSB_CSR0, 0);
1154        }
1155
1156        if (unlikely(!urb)) {
1157                /* stop endpoint since we have no place for its data, this
1158                 * SHOULD NEVER HAPPEN! */
1159                ERR("no URB for end 0\n");
1160
1161                musb_h_ep0_flush_fifo(hw_ep);
1162                goto done;
1163        }
1164
1165        if (!complete) {
1166                /* call common logic and prepare response */
1167                if (musb_h_ep0_continue(musb, len, urb)) {
1168                        /* more packets required */
1169                        csr = (MUSB_EP0_IN == musb->ep0_stage)
1170                                ?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
1171                } else {
1172                        /* data transfer complete; perform status phase */
1173                        if (usb_pipeout(urb->pipe)
1174                                        || !urb->transfer_buffer_length)
1175                                csr = MUSB_CSR0_H_STATUSPKT
1176                                        | MUSB_CSR0_H_REQPKT;
1177                        else
1178                                csr = MUSB_CSR0_H_STATUSPKT
1179                                        | MUSB_CSR0_TXPKTRDY;
1180
1181                        /* disable ping token in status phase */
1182                        csr |= MUSB_CSR0_H_DIS_PING;
1183
1184                        /* flag status stage */
1185                        musb->ep0_stage = MUSB_EP0_STATUS;
1186
1187                        musb_dbg(musb, "ep0 STATUS, csr %04x", csr);
1188
1189                }
1190                musb_writew(epio, MUSB_CSR0, csr);
1191                retval = IRQ_HANDLED;
1192        } else
1193                musb->ep0_stage = MUSB_EP0_IDLE;
1194
1195        /* call completion handler if done */
1196        if (complete)
1197                musb_advance_schedule(musb, urb, hw_ep, 1);
1198done:
1199        return retval;
1200}
1201
1202
1203#ifdef CONFIG_USB_INVENTRA_DMA
1204
1205/* Host side TX (OUT) using Mentor DMA works as follows:
1206        submit_urb ->
1207                - if queue was empty, Program Endpoint
1208                - ... which starts DMA to fifo in mode 1 or 0
1209
1210        DMA Isr (transfer complete) -> TxAvail()
1211                - Stop DMA (~DmaEnab)   (<--- Alert ... currently happens
1212                                        only in musb_cleanup_urb)
1213                - TxPktRdy has to be set in mode 0 or for
1214                        short packets in mode 1.
1215*/
1216
1217#endif
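/*
 * Hedged illustration of the TxPktRdy rule above (a hypothetical helper,
 * not called anywhere in this driver): in DMA mode 0 the CPU must set
 * TXPKTRDY for every packet, while in mode 1 it is only needed for a final
 * short packet (shorter than the endpoint's max packet size, including a
 * zero-length packet).
 */
static inline bool musb_tx_cpu_sets_txpktrdy(u8 dma_mode, size_t pkt_len,
                                             u16 maxpacket)
{
        return dma_mode == 0 || pkt_len < maxpacket;
}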
1218
1219/* Service a Tx-Available or dma completion irq for the endpoint */
1220void musb_host_tx(struct musb *musb, u8 epnum)
1221{
1222        int                     pipe;
1223        bool                    done = false;
1224        u16                     tx_csr;
1225        size_t                  length = 0;
1226        size_t                  offset = 0;
1227        struct musb_hw_ep       *hw_ep = musb->endpoints + epnum;
1228        void __iomem            *epio = hw_ep->regs;
1229        struct musb_qh          *qh = hw_ep->out_qh;
1230        struct urb              *urb = next_urb(qh);
1231        u32                     status = 0;
1232        void __iomem            *mbase = musb->mregs;
1233        struct dma_channel      *dma;
1234        bool                    transfer_pending = false;
1235
1236        musb_ep_select(mbase, epnum);
1237        tx_csr = musb_readw(epio, MUSB_TXCSR);
1238
1239        /* with CPPI, DMA sometimes triggers "extra" irqs */
1240        if (!urb) {
1241                musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
1242                return;
1243        }
1244
1245        pipe = urb->pipe;
1246        dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
1247        trace_musb_urb_tx(musb, urb);
1248        musb_dbg(musb, "OUT/TX%d end, csr %04x%s", epnum, tx_csr,
1249                        dma ? ", dma" : "");
1250
1251        /* check for errors */
1252        if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
1253                /* dma was disabled, fifo flushed */
1254                musb_dbg(musb, "TX end %d stall", epnum);
1255
1256                /* stall; record URB status */
1257                status = -EPIPE;
1258
1259        } else if (tx_csr & MUSB_TXCSR_H_ERROR) {
1260                /* (NON-ISO) dma was disabled, fifo flushed */
1261                musb_dbg(musb, "TX 3strikes on ep=%d", epnum);
1262
1263                status = -ETIMEDOUT;
1264
1265        } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
1266                if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
1267                                && !list_is_singular(&musb->out_bulk)) {
1268                        musb_dbg(musb, "NAK timeout on TX%d ep", epnum);
1269                        musb_bulk_nak_timeout(musb, hw_ep, 0);
1270                } else {
1271                        musb_dbg(musb, "TX ep%d device not responding", epnum);
1272                        /* NOTE:  this code path would be a good place to PAUSE a
1273                         * transfer, if there's some other (nonperiodic) tx urb
1274                         * that could use this fifo.  (dma complicates it...)
1275                         * That's already done for bulk RX transfers.
1276                         *
1277                         * if (bulk && qh->ring.next != &musb->out_bulk), then
1278                         * we have a candidate... NAKing is *NOT* an error
1279                         */
1280                        musb_ep_select(mbase, epnum);
1281                        musb_writew(epio, MUSB_TXCSR,
1282                                        MUSB_TXCSR_H_WZC_BITS
1283                                        | MUSB_TXCSR_TXPKTRDY);
1284                }
 1285                return;
1286        }
1287
1288done:
1289        if (status) {
1290                if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1291                        dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1292                        musb->dma_controller->channel_abort(dma);
1293                }
1294
1295                /* do the proper sequence to abort the transfer in the
1296                 * usb core; the dma engine should already be stopped.
1297                 */
1298                musb_h_tx_flush_fifo(hw_ep);
1299                tx_csr &= ~(MUSB_TXCSR_AUTOSET
1300                                | MUSB_TXCSR_DMAENAB
1301                                | MUSB_TXCSR_H_ERROR
1302                                | MUSB_TXCSR_H_RXSTALL
1303                                | MUSB_TXCSR_H_NAKTIMEOUT
1304                                );
1305
1306                musb_ep_select(mbase, epnum);
1307                musb_writew(epio, MUSB_TXCSR, tx_csr);
1308                /* REVISIT may need to clear FLUSHFIFO ... */
1309                musb_writew(epio, MUSB_TXCSR, tx_csr);
1310                musb_writeb(epio, MUSB_TXINTERVAL, 0);
1311
1312                done = true;
1313        }
1314
1315        /* second cppi case */
1316        if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1317                musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
1318                return;
1319        }
1320
1321        if (is_dma_capable() && dma && !status) {
1322                /*
1323                 * DMA has completed.  But if we're using DMA mode 1 (multi
1324                 * packet DMA), we need a terminal TXPKTRDY interrupt before
1325                 * we can consider this transfer completed, lest we trash
1326                 * its last packet when writing the next URB's data.  So we
1327                 * switch back to mode 0 to get that interrupt; we'll come
1328                 * back here once it happens.
1329                 */
1330                if (tx_csr & MUSB_TXCSR_DMAMODE) {
1331                        /*
1332                         * We shouldn't clear DMAMODE with DMAENAB set; so
1333                         * clear them in a safe order.  That should be OK
1334                         * once TXPKTRDY has been set (and I've never seen
1335                         * it being 0 at this moment -- DMA interrupt latency
1336                         * is significant) but if it hasn't been then we have
1337                         * no choice but to stop being polite and ignore the
1338                         * programmer's guide... :-)
1339                         *
1340                         * Note that we must write TXCSR with TXPKTRDY cleared
1341                         * in order not to re-trigger the packet send (this bit
1342                         * can't be cleared by CPU), and there's another caveat:
1343                         * TXPKTRDY may be set shortly and then cleared in the
1344                         * double-buffered FIFO mode, so we do an extra TXCSR
1345                         * read for debouncing...
1346                         */
1347                        tx_csr &= musb_readw(epio, MUSB_TXCSR);
1348                        if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
1349                                tx_csr &= ~(MUSB_TXCSR_DMAENAB |
1350                                            MUSB_TXCSR_TXPKTRDY);
1351                                musb_writew(epio, MUSB_TXCSR,
1352                                            tx_csr | MUSB_TXCSR_H_WZC_BITS);
1353                        }
1354                        tx_csr &= ~(MUSB_TXCSR_DMAMODE |
1355                                    MUSB_TXCSR_TXPKTRDY);
1356                        musb_writew(epio, MUSB_TXCSR,
1357                                    tx_csr | MUSB_TXCSR_H_WZC_BITS);
1358
1359                        /*
1360                         * There is no guarantee that we'll get an interrupt
1361                         * after clearing DMAMODE as we might have done this
1362                         * too late (after TXPKTRDY was cleared by controller).
1363                         * Re-read TXCSR as we have spoiled its previous value.
1364                         */
1365                        tx_csr = musb_readw(epio, MUSB_TXCSR);
1366                }
1367
1368                /*
1369                 * We may get here from a DMA completion or TXPKTRDY interrupt.
1370                 * In any case, we must check the FIFO status here and bail out
1371                 * only if the FIFO still has data -- that should prevent the
1372                 * "missed" TXPKTRDY interrupts and deal with double-buffered
1373                 * FIFO mode too...
1374                 */
1375                if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
1376                        musb_dbg(musb,
1377                                "DMA complete but FIFO not empty, CSR %04x",
1378                                tx_csr);
1379                        return;
1380                }
1381        }
1382
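            /*
             * Even when the transfer faulted, fall through here if DMA was
             * in use or this is an ISO URB: the bytes the DMA engine
             * actually moved still need to be credited to qh->offset, and
             * ISO frames need their per-packet status recorded before the
             * URB is given back.
             */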
1383        if (!status || dma || usb_pipeisoc(pipe)) {
1384                if (dma)
1385                        length = dma->actual_len;
1386                else
1387                        length = qh->segsize;
1388                qh->offset += length;
1389
1390                if (usb_pipeisoc(pipe)) {
1391                        struct usb_iso_packet_descriptor        *d;
1392
1393                        d = urb->iso_frame_desc + qh->iso_idx;
1394                        d->actual_length = length;
1395                        d->status = status;
1396                        if (++qh->iso_idx >= urb->number_of_packets) {
1397                                done = true;
1398                        } else {
1399                                d++;
1400                                offset = d->offset;
1401                                length = d->length;
1402                        }
1403                } else if (dma && urb->transfer_buffer_length == qh->offset) {
1404                        done = true;
1405                } else {
1406                        /* see if we need to send more data, or ZLP */
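                            /*
                             * E.g. a 1024-byte OUT with a 512-byte maxpacket
                             * ends exactly on a packet boundary; a trailing
                             * zero-length packet is queued only when the URB
                             * was submitted with URB_ZERO_PACKET, otherwise
                             * the URB completes here.
                             */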
1407                        if (qh->segsize < qh->maxpacket)
1408                                done = true;
1409                        else if (qh->offset == urb->transfer_buffer_length
1410                                        && !(urb->transfer_flags
1411                                                & URB_ZERO_PACKET))
1412                                done = true;
1413                        if (!done) {
1414                                offset = qh->offset;
1415                                length = urb->transfer_buffer_length - offset;
1416                                transfer_pending = true;
1417                        }
1418                }
1419        }
1420
1421        /* urb->status != -EINPROGRESS means request has been faulted,
1422         * so we must abort this transfer after cleanup
1423         */
1424        if (urb->status != -EINPROGRESS) {
1425                done = true;
1426                if (status == 0)
1427                        status = urb->status;
1428        }
1429
1430        if (done) {
1431                /* set status */
1432                urb->status = status;
1433                urb->actual_length = qh->offset;
1434                musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
1435                return;
1436        } else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
1437                if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
1438                                offset, length)) {
1439                        if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
1440                                musb_h_tx_dma_start(hw_ep);
1441                        return;
1442                }
1443        } else  if (tx_csr & MUSB_TXCSR_DMAENAB) {
1444                musb_dbg(musb, "not complete, but DMA enabled?");
1445                return;
1446        }
1447
1448        /*
1449         * PIO: start next packet in this URB.
1450         *
1451         * REVISIT: some docs say that when hw_ep->tx_double_buffered,
1452         * (and presumably, FIFO is not half-full) we should write *two*
1453         * packets before updating TXCSR; other docs disagree...
1454         */
1455        if (length > qh->maxpacket)
1456                length = qh->maxpacket;
1457        /* Unmap the buffer so that CPU can use it */
1458        usb_hcd_unmap_urb_for_dma(musb->hcd, urb);
1459
1460        /*
1461         * We need to map sg if the transfer_buffer is
1462         * NULL.
1463         */
1464        if (!urb->transfer_buffer)
1465                qh->use_sg = true;
1466
1467        if (qh->use_sg) {
1468                /* sg_miter_start is already done in musb_ep_program */
1469                if (!sg_miter_next(&qh->sg_miter)) {
1470                        dev_err(musb->controller, "error: sg list empty\n");
1471                        sg_miter_stop(&qh->sg_miter);
1472                        status = -EINVAL;
1473                        goto done;
1474                }
1475                urb->transfer_buffer = qh->sg_miter.addr;
1476                length = min_t(u32, length, qh->sg_miter.length);
1477                musb_write_fifo(hw_ep, length, urb->transfer_buffer);
1478                qh->sg_miter.consumed = length;
1479                sg_miter_stop(&qh->sg_miter);
1480        } else {
1481                musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
1482        }
1483
1484        qh->segsize = length;
1485
1486        if (qh->use_sg) {
1487                if (offset + length >= urb->transfer_buffer_length)
1488                        qh->use_sg = false;
1489        }
1490
1491        musb_ep_select(mbase, epnum);
1492        musb_writew(epio, MUSB_TXCSR,
1493                        MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
1494}
1495
1496#ifdef CONFIG_USB_TI_CPPI41_DMA
1497/* Seems to set up ISO for cppi41 and not advance len. See commit c57c41d */
1498static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
1499                                  struct musb_hw_ep *hw_ep,
1500                                  struct musb_qh *qh,
1501                                  struct urb *urb,
1502                                  size_t len)
1503{
1504        struct dma_channel *channel = hw_ep->rx_channel;
1505        void __iomem *epio = hw_ep->regs;
1506        dma_addr_t *buf;
1507        u32 length;
1508        u16 val;
1509
1510        buf = (void *)urb->iso_frame_desc[qh->iso_idx].offset +
1511                (u32)urb->transfer_dma;
1512
1513        length = urb->iso_frame_desc[qh->iso_idx].length;
1514
1515        val = musb_readw(epio, MUSB_RXCSR);
1516        val |= MUSB_RXCSR_DMAENAB;
1517        musb_writew(hw_ep->regs, MUSB_RXCSR, val);
1518
1519        return dma->channel_program(channel, qh->maxpacket, 0,
1520                                   (u32)buf, length);
1521}
1522#else
1523static inline int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
1524                                         struct musb_hw_ep *hw_ep,
1525                                         struct musb_qh *qh,
1526                                         struct urb *urb,
1527                                         size_t len)
1528{
1529        return false;
1530}
1531#endif
1532
1533#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
1534        defined(CONFIG_USB_TI_CPPI41_DMA)
1535/* Host side RX (IN) using Mentor DMA works as follows:
1536        submit_urb ->
1537                - if queue was empty, ProgramEndpoint
1538                - first IN token is sent out (by setting ReqPkt)
1539        LinuxIsr -> RxReady()
1540        /\      => first packet is received
1541        |       - Set in mode 0 (DmaEnab, ~ReqPkt)
1542        |               -> DMA Isr (transfer complete) -> RxReady()
1543        |                   - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
1544        |                   - if urb not complete, send next IN token (ReqPkt)
1545        |                          |            else complete urb.
1546        |                          |
1547        ---------------------------
1548 *
1549 * Nuances of mode 1:
1550 *      For short packets, no ack (+RxPktRdy) is sent automatically
1551 *      (even if AutoClear is ON)
1552 *      For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
1553 *      automatically => major problem, as collecting the next packet becomes
1554 *      difficult. Hence mode 1 is not used.
1555 *
1556 * REVISIT
1557 *      All we care about at this driver level is that
1558 *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
1559 *       (b) termination conditions are: short RX, or buffer full;
1560 *       (c) fault modes include
1561 *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
1562 *             (and that endpoint's dma queue stops immediately)
1563 *           - overflow (full, PLUS more bytes in the terminal packet)
1564 *
1565 *      So for example, usb-storage sets URB_SHORT_NOT_OK, and would
1566 *      thus be a great candidate for using mode 1 ... for all but the
1567 *      last packet of one URB's transfer.
1568 */
1569static int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
1570                                       struct musb_hw_ep *hw_ep,
1571                                       struct musb_qh *qh,
1572                                       struct urb *urb,
1573                                       size_t len)
1574{
1575        struct dma_channel *channel = hw_ep->rx_channel;
1576        void __iomem *epio = hw_ep->regs;
1577        u16 val;
1578        int pipe;
1579        bool done;
1580
1581        pipe = urb->pipe;
1582
1583        if (usb_pipeisoc(pipe)) {
1584                struct usb_iso_packet_descriptor *d;
1585
1586                d = urb->iso_frame_desc + qh->iso_idx;
1587                d->actual_length = len;
1588
1589                /* even if there was an error, we did the dma
1590                 * for iso_frame_desc->length
1591                 */
1592                if (d->status != -EILSEQ && d->status != -EOVERFLOW)
1593                        d->status = 0;
1594
1595                if (++qh->iso_idx >= urb->number_of_packets) {
1596                        done = true;
1597                } else {
1598                        /* REVISIT: Why ignore return value here? */
1599                        if (musb_dma_cppi41(hw_ep->musb))
1600                                done = musb_rx_dma_iso_cppi41(dma, hw_ep, qh,
1601                                                              urb, len);
1602                        done = false;
1603                }
1604
1605        } else  {
1606                /* done if urb buffer is full or short packet is recd */
1607                done = (urb->actual_length + len >=
1608                        urb->transfer_buffer_length
1609                        || channel->actual_len < qh->maxpacket
1610                        || channel->rx_packet_done);
1611        }
1612
1613        /* send IN token for next packet, without AUTOREQ */
1614        if (!done) {
1615                val = musb_readw(epio, MUSB_RXCSR);
1616                val |= MUSB_RXCSR_H_REQPKT;
1617                musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
1618        }
1619
1620        return done;
1621}
1622
1623/* Disadvantage of using mode 1:
1624 *      It's basically usable only for mass storage class; essentially all
1625 *      other protocols also terminate transfers on short packets.
1626 *
1627 * Details:
1628 *      An extra IN token is sent at the end of the transfer (due to AUTOREQ)
1629 *      If you try to use mode 1 for (transfer_buffer_length - 512), and try
1630 *      to use the extra IN token to grab the last packet using mode 0, then
1631 *      the problem is that you cannot be sure when the device will send the
1632 *      last packet and RxPktRdy set. Sometimes the packet is recd too soon
1633 *      such that it gets lost when RxCSR is re-set at the end of the mode 1
1634 *      transfer, while sometimes it is recd just a little late so that if you
1635 *      try to configure for mode 0 soon after the mode 1 transfer is
1636 *      completed, you will find rxcount 0. Okay, so you might think why not
1637 *      wait for an interrupt when the pkt is recd. Well, you won't get any!
1638 */
1639static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
1640                                          struct musb_hw_ep *hw_ep,
1641                                          struct musb_qh *qh,
1642                                          struct urb *urb,
1643                                          size_t len,
1644                                          u8 iso_err)
1645{
1646        struct musb *musb = hw_ep->musb;
1647        void __iomem *epio = hw_ep->regs;
1648        struct dma_channel *channel = hw_ep->rx_channel;
1649        u16 rx_count, val;
1650        int length, pipe, done;
1651        dma_addr_t buf;
1652
1653        rx_count = musb_readw(epio, MUSB_RXCOUNT);
1654        pipe = urb->pipe;
1655
1656        if (usb_pipeisoc(pipe)) {
1657                int d_status = 0;
1658                struct usb_iso_packet_descriptor *d;
1659
1660                d = urb->iso_frame_desc + qh->iso_idx;
1661
1662                if (iso_err) {
1663                        d_status = -EILSEQ;
1664                        urb->error_count++;
1665                }
1666                if (rx_count > d->length) {
1667                        if (d_status == 0) {
1668                                d_status = -EOVERFLOW;
1669                                urb->error_count++;
1670                        }
1671                        musb_dbg(musb, "** OVERFLOW %d into %d",
1672                                rx_count, d->length);
1673
1674                        length = d->length;
1675                } else
1676                        length = rx_count;
1677                d->status = d_status;
1678                buf = urb->transfer_dma + d->offset;
1679        } else {
1680                length = rx_count;
1681                buf = urb->transfer_dma + urb->actual_length;
1682        }
1683
1684        channel->desired_mode = 0;
1685#ifdef USE_MODE1
1686        /* because of the issue below, mode 1 will
1687         * only rarely behave with correct semantics.
1688         */
1689        if ((urb->transfer_flags & URB_SHORT_NOT_OK)
1690            && (urb->transfer_buffer_length - urb->actual_length)
1691            > qh->maxpacket)
1692                channel->desired_mode = 1;
1693        if (rx_count < hw_ep->max_packet_sz_rx) {
1694                length = rx_count;
1695                channel->desired_mode = 0;
1696        } else {
1697                length = urb->transfer_buffer_length;
1698        }
1699#endif
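            /*
             * USE_MODE1 does not appear to be defined anywhere in the
             * driver, so the block above is normally compiled out and
             * channel->desired_mode stays 0 (single-packet DMA), consistent
             * with the mode 1 caveats documented above.
             */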
1700
1701        /* See comments above on disadvantages of using mode 1 */
1702        val = musb_readw(epio, MUSB_RXCSR);
1703        val &= ~MUSB_RXCSR_H_REQPKT;
1704
1705        if (channel->desired_mode == 0)
1706                val &= ~MUSB_RXCSR_H_AUTOREQ;
1707        else
1708                val |= MUSB_RXCSR_H_AUTOREQ;
1709        val |= MUSB_RXCSR_DMAENAB;
1710
1711        /* autoclear shouldn't be set in high bandwidth */
1712        if (qh->hb_mult == 1)
1713                val |= MUSB_RXCSR_AUTOCLEAR;
1714
1715        musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
1716
1717        /* REVISIT if when actual_length != 0,
1718         * transfer_buffer_length needs to be
1719         * adjusted first...
1720         */
1721        done = dma->channel_program(channel, qh->maxpacket,
1722                                   channel->desired_mode,
1723                                   buf, length);
1724
1725        if (!done) {
1726                dma->channel_release(channel);
1727                hw_ep->rx_channel = NULL;
1728                channel = NULL;
1729                val = musb_readw(epio, MUSB_RXCSR);
1730                val &= ~(MUSB_RXCSR_DMAENAB
1731                         | MUSB_RXCSR_H_AUTOREQ
1732                         | MUSB_RXCSR_AUTOCLEAR);
1733                musb_writew(epio, MUSB_RXCSR, val);
1734        }
1735
1736        return done;
1737}
1738#else
1739static inline int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
1740                                              struct musb_hw_ep *hw_ep,
1741                                              struct musb_qh *qh,
1742                                              struct urb *urb,
1743                                              size_t len)
1744{
1745        return false;
1746}
1747
1748static inline int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
1749                                                 struct musb_hw_ep *hw_ep,
1750                                                 struct musb_qh *qh,
1751                                                 struct urb *urb,
1752                                                 size_t len,
1753                                                 u8 iso_err)
1754{
1755        return false;
1756}
1757#endif
1758
1759/*
1760 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
1761 * and high-bandwidth IN transfer cases.
1762 */
1763void musb_host_rx(struct musb *musb, u8 epnum)
1764{
1765        struct urb              *urb;
1766        struct musb_hw_ep       *hw_ep = musb->endpoints + epnum;
1767        struct dma_controller   *c = musb->dma_controller;
1768        void __iomem            *epio = hw_ep->regs;
1769        struct musb_qh          *qh = hw_ep->in_qh;
1770        size_t                  xfer_len;
1771        void __iomem            *mbase = musb->mregs;
1772        u16                     rx_csr, val;
1773        bool                    iso_err = false;
1774        bool                    done = false;
1775        u32                     status;
1776        struct dma_channel      *dma;
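            /*
             * This is the receive path, so SG_MITER_TO_SG marks the
             * iterator as writing into the scatterlist pages, and
             * SG_MITER_ATOMIC is used since this runs in interrupt
             * context.
             */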
1777        unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
1778
1779        musb_ep_select(mbase, epnum);
1780
1781        urb = next_urb(qh);
1782        dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
1783        status = 0;
1784        xfer_len = 0;
1785
1786        rx_csr = musb_readw(epio, MUSB_RXCSR);
1787        val = rx_csr;
1788
1789        if (unlikely(!urb)) {
1790                /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
1791                 * usbtest #11 (unlinks) triggers it regularly, sometimes
1792                 * with fifo full.  (Only with DMA??)
1793                 */
1794                musb_dbg(musb, "BOGUS RX%d ready, csr %04x, count %d",
1795                        epnum, val, musb_readw(epio, MUSB_RXCOUNT));
1796                musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1797                return;
1798        }
1799
1800        trace_musb_urb_rx(musb, urb);
1801
1802        /* check for errors, concurrent stall & unlink is not really
1803         * handled yet! */
1804        if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
1805                musb_dbg(musb, "RX end %d STALL", epnum);
1806
1807                /* stall; record URB status */
1808                status = -EPIPE;
1809
1810        } else if (rx_csr & MUSB_RXCSR_H_ERROR) {
1811                musb_dbg(musb, "end %d RX proto error", epnum);
1812
1813                status = -EPROTO;
1814                musb_writeb(epio, MUSB_RXINTERVAL, 0);
1815
1816                rx_csr &= ~MUSB_RXCSR_H_ERROR;
1817                musb_writew(epio, MUSB_RXCSR, rx_csr);
1818
1819        } else if (rx_csr & MUSB_RXCSR_DATAERROR) {
1820
1821                if (USB_ENDPOINT_XFER_ISOC != qh->type) {
1822                        musb_dbg(musb, "RX end %d NAK timeout", epnum);
1823
1824                        /* NOTE: NAKing is *NOT* an error, so we want to
1825                         * continue.  Except ... if there's a request for
1826                         * another QH, use that instead of starving it.
1827                         *
1828                         * Devices like Ethernet and serial adapters keep
1829                         * reads posted at all times, which will starve
1830                         * other devices without this logic.
1831                         */
1832                        if (usb_pipebulk(urb->pipe)
1833                                        && qh->mux == 1
1834                                        && !list_is_singular(&musb->in_bulk)) {
1835                                musb_bulk_nak_timeout(musb, hw_ep, 1);
1836                                return;
1837                        }
1838                        musb_ep_select(mbase, epnum);
1839                        rx_csr |= MUSB_RXCSR_H_WZC_BITS;
1840                        rx_csr &= ~MUSB_RXCSR_DATAERROR;
1841                        musb_writew(epio, MUSB_RXCSR, rx_csr);
1842
1843                        goto finish;
1844                } else {
1845                        musb_dbg(musb, "RX end %d ISO data error", epnum);
1846                        /* packet error reported later */
1847                        iso_err = true;
1848                }
1849        } else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
1850                musb_dbg(musb, "end %d high bandwidth incomplete ISO packet RX",
1851                                epnum);
1852                status = -EPROTO;
1853        }
1854
1855        /* faults abort the transfer */
1856        if (status) {
1857                /* clean up dma and collect transfer count */
1858                if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1859                        dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1860                        musb->dma_controller->channel_abort(dma);
1861                        xfer_len = dma->actual_len;
1862                }
1863                musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1864                musb_writeb(epio, MUSB_RXINTERVAL, 0);
1865                done = true;
1866                goto finish;
1867        }
1868
1869        if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
1870                /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
1871                ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
1872                goto finish;
1873        }
1874
1875        /* thorough shutdown for now ... given more precise fault handling
1876         * and better queueing support, we might keep a DMA pipeline going
1877         * while processing this irq for earlier completions.
1878         */
1879
1880        /* FIXME this is _way_ too much in-line logic for Mentor DMA */
1881        if (!musb_dma_inventra(musb) && !musb_dma_ux500(musb) &&
1882            (rx_csr & MUSB_RXCSR_H_REQPKT)) {
1883                /* REVISIT this happened for a while on some short reads...
1884                 * the cleanup still needs investigation... looks bad...
1885                 * and also duplicates dma cleanup code above ... plus,
1886                 * shouldn't this be the "half full" double buffer case?
1887                 */
1888                if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1889                        dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1890                        musb->dma_controller->channel_abort(dma);
1891                        xfer_len = dma->actual_len;
1892                        done = true;
1893                }
1894
1895                musb_dbg(musb, "RXCSR%d %04x, reqpkt, len %zu%s", epnum, rx_csr,
1896                                xfer_len, dma ? ", dma" : "");
1897                rx_csr &= ~MUSB_RXCSR_H_REQPKT;
1898
1899                musb_ep_select(mbase, epnum);
1900                musb_writew(epio, MUSB_RXCSR,
1901                                MUSB_RXCSR_H_WZC_BITS | rx_csr);
1902        }
1903
1904        if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
1905                xfer_len = dma->actual_len;
1906
1907                val &= ~(MUSB_RXCSR_DMAENAB
1908                        | MUSB_RXCSR_H_AUTOREQ
1909                        | MUSB_RXCSR_AUTOCLEAR
1910                        | MUSB_RXCSR_RXPKTRDY);
1911                musb_writew(hw_ep->regs, MUSB_RXCSR, val);
1912
1913                if (musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
1914                    musb_dma_cppi41(musb)) {
1915                        done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len);
1916                        musb_dbg(hw_ep->musb,
1917                                "ep %d dma %s, rxcsr %04x, rxcount %d",
1918                                epnum, done ? "off" : "reset",
1919                                musb_readw(epio, MUSB_RXCSR),
1920                                musb_readw(epio, MUSB_RXCOUNT));
1921                } else {
1922                        done = true;
1923                }
1924
1925        } else if (urb->status == -EINPROGRESS) {
1926                /* if no errors, be sure a packet is ready for unloading */
1927                if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
1928                        status = -EPROTO;
1929                        ERR("Rx interrupt with no errors or packet!\n");
1930
1931                        /* FIXME this is another "SHOULD NEVER HAPPEN" */
1932
1933/* SCRUB (RX) */
1934                        /* do the proper sequence to abort the transfer */
1935                        musb_ep_select(mbase, epnum);
1936                        val &= ~MUSB_RXCSR_H_REQPKT;
1937                        musb_writew(epio, MUSB_RXCSR, val);
1938                        goto finish;
1939                }
1940
1941                /* we are expecting IN packets */
1942                if ((musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
1943                    musb_dma_cppi41(musb)) && dma) {
1944                        musb_dbg(hw_ep->musb,
1945                                "RX%d count %d, buffer 0x%llx len %d/%d",
1946                                epnum, musb_readw(epio, MUSB_RXCOUNT),
1947                                (unsigned long long) urb->transfer_dma
1948                                + urb->actual_length,
1949                                qh->offset,
1950                                urb->transfer_buffer_length);
1951
1952                        if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
1953                                                           xfer_len, iso_err))
1954                                goto finish;
1955                        else
1956                                dev_err(musb->controller, "error: rx_dma failed\n");
1957                }
1958
1959                if (!dma) {
1960                        unsigned int received_len;
1961
1962                        /* Unmap the buffer so that CPU can use it */
1963                        usb_hcd_unmap_urb_for_dma(musb->hcd, urb);
1964
1965                        /*
1966                         * We need to map sg if the transfer_buffer is
1967                         * NULL.
1968                         */
1969                        if (!urb->transfer_buffer) {
1970                                qh->use_sg = true;
1971                                sg_miter_start(&qh->sg_miter, urb->sg, 1,
1972                                                sg_flags);
1973                        }
1974
1975                        if (qh->use_sg) {
1976                                if (!sg_miter_next(&qh->sg_miter)) {
1977                                        dev_err(musb->controller, "error: sg list empty\n");
1978                                        sg_miter_stop(&qh->sg_miter);
1979                                        status = -EINVAL;
1980                                        done = true;
1981                                        goto finish;
1982                                }
1983                                urb->transfer_buffer = qh->sg_miter.addr;
1984                                received_len = urb->actual_length;
1985                                qh->offset = 0x0;
1986                                done = musb_host_packet_rx(musb, urb, epnum,
1987                                                iso_err);
1988                                /* Calculate the number of bytes received */
1989                                received_len = urb->actual_length -
1990                                        received_len;
1991                                qh->sg_miter.consumed = received_len;
1992                                sg_miter_stop(&qh->sg_miter);
1993                        } else {
1994                                done = musb_host_packet_rx(musb, urb,
1995                                                epnum, iso_err);
1996                        }
1997                        musb_dbg(musb, "read %spacket", done ? "last " : "");
1998                }
1999        }
2000
2001finish:
2002        urb->actual_length += xfer_len;
2003        qh->offset += xfer_len;
2004        if (done) {
2005                if (qh->use_sg)
2006                        qh->use_sg = false;
2007
2008                if (urb->status == -EINPROGRESS)
2009                        urb->status = status;
2010                musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
2011        }
2012}
2013
2014/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
2015 * the software schedule associates multiple such nodes with a given
2016 * host side hardware endpoint + direction; scheduling may activate
2017 * that hardware endpoint.
2018 */
2019static int musb_schedule(
2020        struct musb             *musb,
2021        struct musb_qh          *qh,
2022        int                     is_in)
2023{
2024        int                     idle = 0;
2025        int                     best_diff;
2026        int                     best_end, epnum;
2027        struct musb_hw_ep       *hw_ep = NULL;
2028        struct list_head        *head = NULL;
2029        u8                      toggle;
2030        u8                      txtype;
2031        struct urb              *urb = next_urb(qh);
2032
2033        /* use fixed hardware for control and bulk */
2034        if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
2035                head = &musb->control;
2036                hw_ep = musb->control_ep;
2037                goto success;
2038        }
2039
2040        /* else, periodic transfers get muxed to other endpoints */
2041
2042        /*
2043         * We know this qh hasn't been scheduled, so all we need to do
2044         * is choose which hardware endpoint to put it on ...
2045         *
2046         * REVISIT what we really want here is a regular schedule tree
2047         * like e.g. OHCI uses.
2048         */
2049        best_diff = 4096;
2050        best_end = -1;
2051
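            /*
             * Best-fit search: among the idle hardware endpoints, pick the
             * one whose FIFO exceeds the required maxpacket * hb_mult by
             * the smallest margin, keeping larger FIFOs free for transfers
             * that need them.
             */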
2052        for (epnum = 1, hw_ep = musb->endpoints + 1;
2053                        epnum < musb->nr_endpoints;
2054                        epnum++, hw_ep++) {
2055                int     diff;
2056
2057                if (musb_ep_get_qh(hw_ep, is_in) != NULL)
2058                        continue;
2059
2060                if (hw_ep == musb->bulk_ep)
2061                        continue;
2062
2063                if (is_in)
2064                        diff = hw_ep->max_packet_sz_rx;
2065                else
2066                        diff = hw_ep->max_packet_sz_tx;
2067                diff -= (qh->maxpacket * qh->hb_mult);
2068
2069                if (diff >= 0 && best_diff > diff) {
2070
2071                        /*
2072                         * Mentor controller has a bug in that if we schedule
2073                         * a BULK Tx transfer on an endpoint that had earlier
2074                         * handled ISOC then the BULK transfer has to start on
2075                         * a zero toggle.  If the BULK transfer starts on a 1
2076                         * toggle then this transfer will fail as the mentor
2077                         * controller starts the Bulk transfer on a 0 toggle
2078                         * irrespective of the programming of the toggle bits
2079                         * in the TXCSR register.  Check for this condition
2080                         * while allocating the EP for a Tx Bulk transfer.  If
2081                         * so skip this EP.
2082                         */
2083                        hw_ep = musb->endpoints + epnum;
2084                        toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
2085                        txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
2086                                        >> 4) & 0x3;
2087                        if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
2088                                toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
2089                                continue;
2090
2091                        best_diff = diff;
2092                        best_end = epnum;
2093                }
2094        }
2095        /* use bulk reserved ep1 if no other ep is free */
2096        if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
2097                hw_ep = musb->bulk_ep;
2098                if (is_in)
2099                        head = &musb->in_bulk;
2100                else
2101                        head = &musb->out_bulk;
2102
2103                /* Enable bulk RX/TX NAK timeout scheme when bulk requests are
2104                 * multiplexed. This scheme does not work in high speed to full
2105                 * speed scenario as NAK interrupts are not coming from a
2106                 * full speed device connected to a high speed device.
2107                 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
2108                 * 4 (8 frame or 8ms) for FS device.
2109                 */
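                    /*
                     * These values follow the 2^(m-1) (micro)frame NAK-limit
                     * encoding: 2^(8-1) = 128 microframes * 125 us = 16 ms at
                     * high speed, and 2^(4-1) = 8 frames * 1 ms = 8 ms at
                     * full speed, matching the figures above.
                     */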
2110                if (qh->dev)
2111                        qh->intv_reg =
2112                                (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
2113                goto success;
2114        } else if (best_end < 0) {
2115                dev_err(musb->controller,
2116                                "%s hwep alloc failed for %dx%d\n",
2117                                musb_ep_xfertype_string(qh->type),
2118                                qh->hb_mult, qh->maxpacket);
2119                return -ENOSPC;
2120        }
2121
2122        idle = 1;
2123        qh->mux = 0;
2124        hw_ep = musb->endpoints + best_end;
2125        musb_dbg(musb, "qh %p periodic slot %d", qh, best_end);
2126success:
2127        if (head) {
2128                idle = list_empty(head);
2129                list_add_tail(&qh->ring, head);
2130                qh->mux = 1;
2131        }
2132        qh->hw_ep = hw_ep;
2133        qh->hep->hcpriv = qh;
2134        if (idle)
2135                musb_start_urb(musb, is_in, qh);
2136        return 0;
2137}
2138
2139static int musb_urb_enqueue(
2140        struct usb_hcd                  *hcd,
2141        struct urb                      *urb,
2142        gfp_t                           mem_flags)
2143{
2144        unsigned long                   flags;
2145        struct musb                     *musb = hcd_to_musb(hcd);
2146        struct usb_host_endpoint        *hep = urb->ep;
2147        struct musb_qh                  *qh;
2148        struct usb_endpoint_descriptor  *epd = &hep->desc;
2149        int                             ret;
2150        unsigned                        type_reg;
2151        unsigned                        interval;
2152
2153        /* host role must be active */
2154        if (!is_host_active(musb) || !musb->is_active)
2155                return -ENODEV;
2156
2157        trace_musb_urb_enq(musb, urb);
2158
2159        spin_lock_irqsave(&musb->lock, flags);
2160        ret = usb_hcd_link_urb_to_ep(hcd, urb);
2161        qh = ret ? NULL : hep->hcpriv;
2162        if (qh)
2163                urb->hcpriv = qh;
2164        spin_unlock_irqrestore(&musb->lock, flags);
2165
2166        /* DMA mapping was already done, if needed, and this urb is on
2167         * hep->urb_list now ... so we're done, unless hep wasn't yet
2168         * scheduled onto a live qh.
2169         *
2170         * REVISIT best to keep hep->hcpriv valid until the endpoint gets
2171         * disabled, testing for empty qh->ring and avoiding qh setup costs
2172         * except for the first urb queued after a config change.
2173         */
2174        if (qh || ret)
2175                return ret;
2176
2177        /* Allocate and initialize qh, minimizing the work done each time
2178         * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
2179         *
2180         * REVISIT consider a dedicated qh kmem_cache, so it's harder
2181         * for bugs in other kernel code to break this driver...
2182         */
2183        qh = kzalloc(sizeof *qh, mem_flags);
2184        if (!qh) {
2185                spin_lock_irqsave(&musb->lock, flags);
2186                usb_hcd_unlink_urb_from_ep(hcd, urb);
2187                spin_unlock_irqrestore(&musb->lock, flags);
2188                return -ENOMEM;
2189        }
2190
2191        qh->hep = hep;
2192        qh->dev = urb->dev;
2193        INIT_LIST_HEAD(&qh->ring);
2194        qh->is_ready = 1;
2195
2196        qh->maxpacket = usb_endpoint_maxp(epd);
2197        qh->type = usb_endpoint_type(epd);
2198
2199        /* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
2200         * Some musb cores don't support high bandwidth ISO transfers; and
2201         * we don't (yet!) support high bandwidth interrupt transfers.
2202         */
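            /*
             * For example, wMaxPacketSize 0x1400 on a high-speed ISO
             * endpoint yields maxpacket 0x400 (1024) from bits 10:0 and
             * hb_mult 3 from bits 12:11, i.e. up to three 1024-byte
             * transactions per microframe.
             */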
2203        qh->hb_mult = usb_endpoint_maxp_mult(epd);
2204        if (qh->hb_mult > 1) {
2205                int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
2206
2207                if (ok)
2208                        ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
2209                                || (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
2210                if (!ok) {
2211                        dev_err(musb->controller,
2212                                "high bandwidth %s (%dx%d) not supported\n",
2213                                musb_ep_xfertype_string(qh->type),
2214                                qh->hb_mult, qh->maxpacket & 0x7ff);
2215                        ret = -EMSGSIZE;
2216                        goto done;
2217                }
2218                qh->maxpacket &= 0x7ff;
2219        }
2220
2221        qh->epnum = usb_endpoint_num(epd);
2222
2223        /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
2224        qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
2225
2226        /* precompute rxtype/txtype/type0 register */
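            /*
             * The value built below packs the transfer type into bits 5:4
             * and the target endpoint number into bits 3:0; the speed code
             * ORed in by the switch lands in bits 7:6 (0xc0 low speed,
             * 0x80 full speed, 0x40 high speed).
             */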
2227        type_reg = (qh->type << 4) | qh->epnum;
2228        switch (urb->dev->speed) {
2229        case USB_SPEED_LOW:
2230                type_reg |= 0xc0;
2231                break;
2232        case USB_SPEED_FULL:
2233                type_reg |= 0x80;
2234                break;
2235        default:
2236                type_reg |= 0x40;
2237        }
2238        qh->type_reg = type_reg;
2239
2240        /* Precompute RXINTERVAL/TXINTERVAL register */
2241        switch (qh->type) {
2242        case USB_ENDPOINT_XFER_INT:
2243                /*
2244                 * Full/low speeds use the  linear encoding,
2245                 * high speed uses the logarithmic encoding.
2246                 */
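                    /*
                     * E.g. a full-speed interrupt endpoint with bInterval 10
                     * is polled every 10 frames (10 ms), while at high speed
                     * bInterval 4 means a period of 2^(4-1) = 8 microframes
                     * (1 ms).
                     */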
2247                if (urb->dev->speed <= USB_SPEED_FULL) {
2248                        interval = max_t(u8, epd->bInterval, 1);
2249                        break;
2250                }
2251                /* FALLTHROUGH */
2252        case USB_ENDPOINT_XFER_ISOC:
2253                /* ISO always uses logarithmic encoding */
2254                interval = min_t(u8, epd->bInterval, 16);
2255                break;
2256        default:
2257                /* REVISIT we actually want to use NAK limits, hinting to the
2258                 * transfer scheduling logic to try some other qh, e.g. try
2259                 * for 2 msec first:
2260                 *
2261                 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
2262                 *
2263                 * The downside of disabling this is that transfer scheduling
2264                 * gets VERY unfair for nonperiodic transfers; a misbehaving
2265                 * peripheral could make that hurt.  That's perfectly normal
2266                 * for reads from network or serial adapters ... so we have
2267                 * partial NAKlimit support for bulk RX.
2268                 *
2269                 * The upside of disabling it is simpler transfer scheduling.
2270                 */
2271                interval = 0;
2272        }
2273        qh->intv_reg = interval;
2274
2275        /* precompute addressing for external hub/tt ports */
2276        if (musb->is_multipoint) {
2277                struct usb_device       *parent = urb->dev->parent;
2278
2279                if (parent != hcd->self.root_hub) {
2280                        qh->h_addr_reg = (u8) parent->devnum;
2281
2282                        /* set up tt info if needed */
2283                        if (urb->dev->tt) {
2284                                qh->h_port_reg = (u8) urb->dev->ttport;
2285                                if (urb->dev->tt->hub)
2286                                        qh->h_addr_reg =
2287                                                (u8) urb->dev->tt->hub->devnum;
2288                                if (urb->dev->tt->multi)
2289                                        qh->h_addr_reg |= 0x80;
2290                        }
2291                }
2292        }
2293
2294        /* invariant: hep->hcpriv is null OR the qh that's already scheduled.
2295         * until we get real dma queues (with an entry for each urb/buffer),
2296         * we only have work to do in the former case.
2297         */
2298        spin_lock_irqsave(&musb->lock, flags);
2299        if (hep->hcpriv || !next_urb(qh)) {
2300                /* some concurrent activity submitted another urb to hep...
2301                 * odd, rare, error prone, but legal.
2302                 */
2303                kfree(qh);
2304                qh = NULL;
2305                ret = 0;
2306        } else
2307                ret = musb_schedule(musb, qh,
2308                                epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
2309
2310        if (ret == 0) {
2311                urb->hcpriv = qh;
2312                /* FIXME set urb->start_frame for iso/intr, it's tested in
2313                 * musb_start_urb(), but otherwise only konicawc cares ...
2314                 */
2315        }
2316        spin_unlock_irqrestore(&musb->lock, flags);
2317
2318done:
2319        if (ret != 0) {
2320                spin_lock_irqsave(&musb->lock, flags);
2321                usb_hcd_unlink_urb_from_ep(hcd, urb);
2322                spin_unlock_irqrestore(&musb->lock, flags);
2323                kfree(qh);
2324        }
2325        return ret;
2326}
2327
2328
2329/*
2330 * abort a transfer that's at the head of a hardware queue.
2331 * called with controller locked, irqs blocked
2332 * that hardware queue advances to the next transfer, unless prevented
2333 */
2334static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
2335{
2336        struct musb_hw_ep       *ep = qh->hw_ep;
2337        struct musb             *musb = ep->musb;
2338        void __iomem            *epio = ep->regs;
2339        unsigned                hw_end = ep->epnum;
2340        void __iomem            *regs = ep->musb->mregs;
2341        int                     is_in = usb_pipein(urb->pipe);
2342        int                     status = 0;
2343        u16                     csr;
2344        struct dma_channel      *dma = NULL;
2345
2346        musb_ep_select(regs, hw_end);
2347
2348        if (is_dma_capable()) {
2349                dma = is_in ? ep->rx_channel : ep->tx_channel;
2350                if (dma) {
2351                        status = ep->musb->dma_controller->channel_abort(dma);
2352                        musb_dbg(musb, "abort %cX%d DMA for urb %p --> %d",
2353                                is_in ? 'R' : 'T', ep->epnum,
2354                                urb, status);
2355                        urb->actual_length += dma->actual_len;
2356                }
2357        }
2358
2359        /* turn off DMA requests, discard state, stop polling ... */
2360        if (ep->epnum && is_in) {
2361                /* giveback saves bulk toggle */
2362                csr = musb_h_flush_rxfifo(ep, 0);
2363
2364                /* clear the endpoint's irq status here to avoid bogus irqs */
2365                if (is_dma_capable() && dma)
2366                        musb_platform_clear_ep_rxintr(musb, ep->epnum);
2367        } else if (ep->epnum) {
2368                musb_h_tx_flush_fifo(ep);
2369                csr = musb_readw(epio, MUSB_TXCSR);
2370                csr &= ~(MUSB_TXCSR_AUTOSET
2371                        | MUSB_TXCSR_DMAENAB
2372                        | MUSB_TXCSR_H_RXSTALL
2373                        | MUSB_TXCSR_H_NAKTIMEOUT
2374                        | MUSB_TXCSR_H_ERROR
2375                        | MUSB_TXCSR_TXPKTRDY);
2376                musb_writew(epio, MUSB_TXCSR, csr);
2377                /* REVISIT may need to clear FLUSHFIFO ... */
2378                musb_writew(epio, MUSB_TXCSR, csr);
2379                /* flush cpu writebuffer */
2380                csr = musb_readw(epio, MUSB_TXCSR);
2381        } else  {
2382                musb_h_ep0_flush_fifo(ep);
2383        }
2384        if (status == 0)
2385                musb_advance_schedule(ep->musb, urb, ep, is_in);
2386        return status;
2387}
2388
2389static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
2390{
2391        struct musb             *musb = hcd_to_musb(hcd);
2392        struct musb_qh          *qh;
2393        unsigned long           flags;
2394        int                     is_in  = usb_pipein(urb->pipe);
2395        int                     ret;
2396
2397        trace_musb_urb_deq(musb, urb);
2398
2399        spin_lock_irqsave(&musb->lock, flags);
2400        ret = usb_hcd_check_unlink_urb(hcd, urb, status);
2401        if (ret)
2402                goto done;
2403
2404        qh = urb->hcpriv;
2405        if (!qh)
2406                goto done;
2407
2408        /*
2409         * Any URB not actively programmed into endpoint hardware can be
2410         * immediately given back; that's any URB not at the head of an
2411         * endpoint queue, unless someday we get real DMA queues.  And even
2412         * if it's at the head, it might not be known to the hardware...
2413         *
2414         * Otherwise abort current transfer, pending DMA, etc.; urb->status
2415         * has already been updated.  This is a synchronous abort; it'd be
2416         * OK to hold off until after some IRQ, though.
2417         *
2418         * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
2419         */
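            /*
             * The test below boils down to: is this URB the one currently
             * programmed into the hardware endpoint (qh ready, URB at the
             * head of its list, and qh owning hw_ep)?  Only then is the
             * hardware-level abort in musb_cleanup_urb() required.
             */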
2420        if (!qh->is_ready
2421                        || urb->urb_list.prev != &qh->hep->urb_list
2422                        || musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
2423                int     ready = qh->is_ready;
2424
2425                qh->is_ready = 0;
2426                musb_giveback(musb, urb, 0);
2427                qh->is_ready = ready;
2428
2429                /* If nothing else (usually musb_giveback) is using it
2430                 * and its URB list has emptied, recycle this qh.
2431                 */
2432                if (ready && list_empty(&qh->hep->urb_list)) {
2433                        qh->hep->hcpriv = NULL;
2434                        list_del(&qh->ring);
2435                        kfree(qh);
2436                }
2437        } else
2438                ret = musb_cleanup_urb(urb, qh);
2439done:
2440        spin_unlock_irqrestore(&musb->lock, flags);
2441        return ret;
2442}
2443
2444/* disable an endpoint */
2445static void
2446musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
2447{
2448        u8                      is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
2449        unsigned long           flags;
2450        struct musb             *musb = hcd_to_musb(hcd);
2451        struct musb_qh          *qh;
2452        struct urb              *urb;
2453
2454        spin_lock_irqsave(&musb->lock, flags);
2455
2456        qh = hep->hcpriv;
2457        if (qh == NULL)
2458                goto exit;
2459
2460        /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
2461
2462        /* Kick the first URB off the hardware, if needed */
2463        qh->is_ready = 0;
2464        if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
2465                urb = next_urb(qh);
2466
2467                /* make software (then hardware) stop ASAP */
2468                if (!urb->unlinked)
2469                        urb->status = -ESHUTDOWN;
2470
2471                /* cleanup */
2472                musb_cleanup_urb(urb, qh);
2473
2474                /* Then nuke all the others ... and advance the
2475                 * queue on hw_ep (e.g. bulk ring) when we're done.
2476                 */
2477                while (!list_empty(&hep->urb_list)) {
2478                        urb = next_urb(qh);
2479                        urb->status = -ESHUTDOWN;
2480                        musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
2481                }
2482        } else {
2483                /* Just empty the queue; the hardware is busy with
2484                 * other transfers, and since !qh->is_ready nothing
2485                 * will activate any of these as it advances.
2486                 */
2487                while (!list_empty(&hep->urb_list))
2488                        musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
2489
2490                hep->hcpriv = NULL;
2491                list_del(&qh->ring);
2492                kfree(qh);
2493        }
2494exit:
2495        spin_unlock_irqrestore(&musb->lock, flags);
2496}
2497
2498static int musb_h_get_frame_number(struct usb_hcd *hcd)
2499{
2500        struct musb     *musb = hcd_to_musb(hcd);
2501
2502        return musb_readw(musb->mregs, MUSB_FRAME);
2503}
2504
2505static int musb_h_start(struct usb_hcd *hcd)
2506{
2507        struct musb     *musb = hcd_to_musb(hcd);
2508
2509        /* NOTE: musb_start() is called when the hub driver turns
2510         * on port power, or when (OTG) peripheral starts.
2511         */
2512        hcd->state = HC_STATE_RUNNING;
2513        musb->port1_status = 0;
2514        return 0;
2515}
2516
2517static void musb_h_stop(struct usb_hcd *hcd)
2518{
2519        musb_stop(hcd_to_musb(hcd));
2520        hcd->state = HC_STATE_HALT;
2521}
2522
2523static int musb_bus_suspend(struct usb_hcd *hcd)
2524{
2525        struct musb     *musb = hcd_to_musb(hcd);
2526        u8              devctl;
2527        int             ret;
2528
2529        ret = musb_port_suspend(musb, true);
2530        if (ret)
2531                return ret;
2532
2533        if (!is_host_active(musb))
2534                return 0;
2535
2536        switch (musb->xceiv->otg->state) {
2537        case OTG_STATE_A_SUSPEND:
2538                return 0;
2539        case OTG_STATE_A_WAIT_VRISE:
2540                /* ID could be grounded even if there's no device
2541                 * on the other end of the cable.  NOTE that the
2542                 * A_WAIT_VRISE timers are messy with MUSB...
2543                 */
2544                devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2545                if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
2546                        musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
2547                break;
2548        default:
2549                break;
2550        }
2551
2552        if (musb->is_active) {
2553                WARNING("trying to suspend as %s while active\n",
2554                                usb_otg_state_string(musb->xceiv->otg->state));
2555                return -EBUSY;
2556        } else
2557                return 0;
2558}
2559
2560static int musb_bus_resume(struct usb_hcd *hcd)
2561{
2562        struct musb *musb = hcd_to_musb(hcd);
2563
2564        if (musb->config &&
2565            musb->config->host_port_deassert_reset_at_resume)
2566                musb_port_reset(musb, false);
2567
2568        return 0;
2569}
2570
2571#ifndef CONFIG_MUSB_PIO_ONLY
2572
2573#define MUSB_USB_DMA_ALIGN 4
2574
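    /*
     * Bounce buffer used when a caller's transfer_buffer is not 4-byte
     * aligned: kmalloc_ptr keeps the raw allocation for kfree(),
     * old_xfer_buffer remembers the caller's buffer so received data can
     * be copied back, and data[] is the aligned area actually used for
     * the transfer.
     */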
2575struct musb_temp_buffer {
2576        void *kmalloc_ptr;
2577        void *old_xfer_buffer;
2578        u8 data[0];
2579};
2580
2581static void musb_free_temp_buffer(struct urb *urb)
2582{
2583        enum dma_data_direction dir;
2584        struct musb_temp_buffer *temp;
2585        size_t length;
2586
2587        if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
2588                return;
2589
2590        dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2591
2592        temp = container_of(urb->transfer_buffer, struct musb_temp_buffer,
2593                            data);
2594
2595        if (dir == DMA_FROM_DEVICE) {
2596                if (usb_pipeisoc(urb->pipe))
2597                        length = urb->transfer_buffer_length;
2598                else
2599                        length = urb->actual_length;
2600
2601                memcpy(temp->old_xfer_buffer, temp->data, length);
2602        }
2603        urb->transfer_buffer = temp->old_xfer_buffer;
2604        kfree(temp->kmalloc_ptr);
2605
2606        urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
2607}
2608
2609static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
2610{
2611        enum dma_data_direction dir;
2612        struct musb_temp_buffer *temp;
2613        void *kmalloc_ptr;
2614        size_t kmalloc_size;
2615
2616        if (urb->num_sgs || urb->sg ||
2617            urb->transfer_buffer_length == 0 ||
2618            !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1)))
2619                return 0;
2620
2621        dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2622
2623        /* Allocate a buffer with enough padding for alignment */
2624        kmalloc_size = urb->transfer_buffer_length +
2625                sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1;
2626
2627        kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
2628        if (!kmalloc_ptr)
2629                return -ENOMEM;
2630
2631        /* Position our struct musb_temp_buffer so that data is aligned */
2632        temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN);
2633
2634
2635        temp->kmalloc_ptr = kmalloc_ptr;
2636        temp->old_xfer_buffer = urb->transfer_buffer;
2637        if (dir == DMA_TO_DEVICE)
2638                memcpy(temp->data, urb->transfer_buffer,
2639                       urb->transfer_buffer_length);
2640        urb->transfer_buffer = temp->data;
2641
2642        urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
2643
2644        return 0;
2645}
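
/*
 * Illustrative sketch, not part of the build: the layout invariant the
 * bounce buffer above depends on.  PTR_ALIGN() puts the struct on a
 * MUSB_USB_DMA_ALIGN boundary, and since the two preceding members are
 * pointers, data[] lands on that boundary as well, which is what the
 * RTL >= 1.8 DMA engine (see musb_map_urb_for_dma() below) requires.
 */
#if 0
static void musb_temp_buffer_layout_check(void)
{
        BUILD_BUG_ON(offsetof(struct musb_temp_buffer, data) %
                     MUSB_USB_DMA_ALIGN);
}
#endif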
2646
2647static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
2648                                      gfp_t mem_flags)
2649{
2650        struct musb     *musb = hcd_to_musb(hcd);
2651        int ret;
2652
2653        /*
2654         * The DMA engine in RTL1.8 and above cannot handle
2655         * DMA addresses that are not aligned to a 4 byte boundary.
2656         * For such engines we implement the (un)map_urb_for_dma hooks.
2657         * Do not use these hooks for RTL < 1.8.
2658         */
2659        if (musb->hwvers < MUSB_HWVERS_1800)
2660                return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2661
2662        ret = musb_alloc_temp_buffer(urb, mem_flags);
2663        if (ret)
2664                return ret;
2665
2666        ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2667        if (ret)
2668                musb_free_temp_buffer(urb);
2669
2670        return ret;
2671}
2672
2673static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
2674{
2675        struct musb     *musb = hcd_to_musb(hcd);
2676
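        /* Unmap first so the bounce buffer contents are coherent before
         * musb_free_temp_buffer() copies them back to the caller's buffer.
         */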
2677        usb_hcd_unmap_urb_for_dma(hcd, urb);
2678
2679        /* Do not use this hook for RTL<1.8 (see description above) */
2680        if (musb->hwvers < MUSB_HWVERS_1800)
2681                return;
2682
2683        musb_free_temp_buffer(urb);
2684}
2685#endif /* !CONFIG_MUSB_PIO_ONLY */
2686
2687static const struct hc_driver musb_hc_driver = {
2688        .description            = "musb-hcd",
2689        .product_desc           = "MUSB HDRC host driver",
2690        .hcd_priv_size          = sizeof(struct musb *),
2691        .flags                  = HCD_USB2 | HCD_MEMORY,
2692
2693        /* not using irq handler or reset hooks from usbcore, since
2694         * those must be shared with peripheral code for OTG configs
2695         */
2696
2697        .start                  = musb_h_start,
2698        .stop                   = musb_h_stop,
2699
2700        .get_frame_number       = musb_h_get_frame_number,
2701
2702        .urb_enqueue            = musb_urb_enqueue,
2703        .urb_dequeue            = musb_urb_dequeue,
2704        .endpoint_disable       = musb_h_disable,
2705
2706#ifndef CONFIG_MUSB_PIO_ONLY
2707        .map_urb_for_dma        = musb_map_urb_for_dma,
2708        .unmap_urb_for_dma      = musb_unmap_urb_for_dma,
2709#endif
2710
2711        .hub_status_data        = musb_hub_status_data,
2712        .hub_control            = musb_hub_control,
2713        .bus_suspend            = musb_bus_suspend,
2714        .bus_resume             = musb_bus_resume,
2715        /* .start_port_reset    = NULL, */
2716        /* .hub_irq_enable      = NULL, */
2717};
2718
2719int musb_host_alloc(struct musb *musb)
2720{
2721        struct device   *dev = musb->controller;
2722
2723        /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
2724        musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
2725        if (!musb->hcd)
2726                return -EINVAL;
2727
2728        *musb->hcd->hcd_priv = (unsigned long) musb;
2729        musb->hcd->self.uses_pio_for_control = 1;
2730        musb->hcd->uses_new_polling = 1;
2731        musb->hcd->has_tt = 1;
2732
2733        return 0;
2734}
2735
2736void musb_host_cleanup(struct musb *musb)
2737{
2738        if (musb->port_mode == MUSB_PORT_MODE_GADGET)
2739                return;
2740        usb_remove_hcd(musb->hcd);
2741}
2742
2743void musb_host_free(struct musb *musb)
2744{
2745        usb_put_hcd(musb->hcd);
2746}
2747
2748int musb_host_setup(struct musb *musb, int power_budget)
2749{
2750        int ret;
2751        struct usb_hcd *hcd = musb->hcd;
2752
2753        if (musb->port_mode == MUSB_PORT_MODE_HOST) {
2754                MUSB_HST_MODE(musb);
2755                musb->xceiv->otg->default_a = 1;
2756                musb->xceiv->otg->state = OTG_STATE_A_IDLE;
2757        }
2758        otg_set_host(musb->xceiv->otg, &hcd->self);
2759        hcd->self.otg_port = 1;
2760        musb->xceiv->otg->host = &hcd->self;
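        /* power_budget comes in 2 mA units (bMaxPower style); scale to mA
         * and default to 500 mA if the glue layer passed 0.
         */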
2761        hcd->power_budget = 2 * (power_budget ? : 250);
2762        hcd->skip_phy_initialization = 1;
2763
2764        ret = usb_add_hcd(hcd, 0, 0);
2765        if (ret < 0)
2766                return ret;
2767
2768        device_wakeup_enable(hcd->self.controller);
2769        return 0;
2770}
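
/*
 * Illustrative sketch, not part of the build: the rough order in which
 * musb_core is expected to drive the helpers above.  Error paths and
 * locking are omitted, and "pdata" is a stand-in for the glue layer's
 * struct musb_hdrc_platform_data (whose ->power field carries the 2 mA
 * power units passed to musb_host_setup()).
 */
#if 0
static int example_host_bringup(struct musb *musb,
                                struct musb_hdrc_platform_data *pdata)
{
        int ret;

        ret = musb_host_alloc(musb);            /* create the hcd shell */
        if (ret)
                return ret;

        ret = musb_host_setup(musb, pdata->power); /* register with usbcore */
        if (ret)
                musb_host_free(musb);

        return ret;
}
#endif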
2771
2772void musb_host_resume_root_hub(struct musb *musb)
2773{
2774        usb_hcd_resume_root_hub(musb->hcd);
2775}
2776
2777void musb_host_poke_root_hub(struct musb *musb)
2778{
2779        MUSB_HST_MODE(musb);
2780        if (musb->hcd->status_urb)
2781                usb_hcd_poll_rh_status(musb->hcd);
2782        else
2783                usb_hcd_resume_root_hub(musb->hcd);
2784}
2785