uboot/drivers/usb/musb-new/musb_gadget.c
/*
 * MUSB OTG driver peripheral support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef __UBOOT__
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#else
#include <common.h>
#include <linux/usb/ch9.h>
#include "linux-compat.h"
#endif

#include "musb_core.h"


/* MUSB PERIPHERAL status 3-mar-2006:
 *
 * - EP0 seems solid.  It passes both USBCV and usbtest control cases.
 *   Minor glitches:
 *
 *     + remote wakeup to Linux hosts works, but USBCV failures were
 *       seen in one test run (operator error?)
 *     + endpoint halt tests -- in both usbtest and usbcv -- seem
 *       to break when dma is enabled ... is something wrongly
 *       clearing SENDSTALL?
 *
 * - Mass storage behaved ok when last tested.  Network traffic patterns
 *   (with lots of short transfers etc) need retesting; they turn up the
 *   worst cases of the DMA, since short packets are typical but are not
 *   required.
 *
 * - TX/IN
 *     + both pio and dma behave well with network and g_zero tests
 *     + no cppi throughput issues other than no-hw-queueing
 *     + failed with FLAT_REG (DaVinci)
 *     + seems to behave with double buffering, PIO -and- CPPI
 *     + with gadgetfs + AIO, requests got lost?
 *
 * - RX/OUT
 *     + both pio and dma behave well with network and g_zero tests
 *     + dma is slow in the typical case (short_not_ok is clear)
 *     + double buffering ok with PIO
 *     + double buffering *FAILS* with CPPI, wrong data bytes sometimes
 *     + request loss observed with gadgetfs
 *
 * - ISO not tested ... might work, but only weakly isochronous
 *
 * - Gadget driver disabling of softconnect during bind() is ignored; so
 *   drivers can't hold off host requests until userspace is ready.
 *   (Workaround: they can turn it off later.)
 *
 * - PORTABILITY (assumes PIO works):
 *     + DaVinci, basically works with cppi dma
 *     + OMAP 2430, ditto with mentor dma
 *     + TUSB 6010, platform-specific dma in the works
 */

/* ----------------------------------------------------------------------- */

#define is_buffer_mapped(req) (is_dma_capable() && \
					(req->map_state != UN_MAPPED))
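
/*
 * A request's map_state tracks who currently owns the buffer:
 *   UN_MAPPED   - CPU address only; the transfer will go out via PIO.
 *   MUSB_MAPPED - mapped here with dma_map_single() and unmapped on
 *                 completion.
 *   PRE_MAPPED  - the caller supplied request.dma, so only cache
 *                 synchronization is done here.
 */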

#ifndef CONFIG_USB_MUSB_PIO_ONLY
/* Map the buffer for DMA */

static inline void map_dma_buffer(struct musb_request *request,
			struct musb *musb, struct musb_ep *musb_ep)
{
	int compatible = true;
	struct dma_controller *dma = musb->dma_controller;

	request->map_state = UN_MAPPED;

	if (!is_dma_capable() || !musb_ep->dma)
		return;

	/* Check if DMA engine can handle this request.
	 * DMA code must reject the USB request explicitly.
	 * Default behaviour is to map the request.
	 */
	if (dma->is_compatible)
		compatible = dma->is_compatible(musb_ep->dma,
				musb_ep->packet_sz, request->request.buf,
				request->request.length);
	if (!compatible)
		return;

	if (request->request.dma == DMA_ADDR_INVALID) {
		request->request.dma = dma_map_single(
				musb->controller,
				request->request.buf,
				request->request.length,
				request->tx
					? DMA_TO_DEVICE
					: DMA_FROM_DEVICE);
		request->map_state = MUSB_MAPPED;
	} else {
		dma_sync_single_for_device(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		request->map_state = PRE_MAPPED;
	}
}

/* Unmap the buffer from DMA and map it back to the CPU */
static inline void unmap_dma_buffer(struct musb_request *request,
				struct musb *musb)
{
	if (!is_buffer_mapped(request))
		return;

	if (request->request.dma == DMA_ADDR_INVALID) {
		dev_vdbg(musb->controller,
				"not unmapping a never mapped buffer\n");
		return;
	}
	if (request->map_state == MUSB_MAPPED) {
		dma_unmap_single(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		request->request.dma = DMA_ADDR_INVALID;
	} else { /* PRE_MAPPED */
		dma_sync_single_for_cpu(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
	}
	request->map_state = UN_MAPPED;
}
#else
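/* PIO-only build: the DMA mapping helpers compile to no-ops. */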
static inline void map_dma_buffer(struct musb_request *request,
			struct musb *musb, struct musb_ep *musb_ep)
{
}

static inline void unmap_dma_buffer(struct musb_request *request,
				struct musb *musb)
{
}
#endif

/*
 * Immediately complete a request.
 *
 * @param ep the endpoint the request was queued on
 * @param request the request to complete
 * @param status the status to complete the request with
 *
 * Context: controller locked, IRQs blocked.
 */
void musb_g_giveback(
	struct musb_ep		*ep,
	struct usb_request	*request,
	int			status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
	struct musb_request	*req;
	struct musb		*musb;
	int			busy = ep->busy;

	req = to_musb_request(request);

	list_del(&req->list);
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;
	musb = req->musb;

	ep->busy = 1;
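	/*
	 * Drop the controller lock around the completion callback; the
	 * gadget driver may re-queue from complete().  ep->busy stays
	 * set meanwhile so nothing restarts I/O on this endpoint
	 * underneath us.
	 */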
	spin_unlock(&musb->lock);
	unmap_dma_buffer(req, musb);
	if (request->status == 0)
		dev_dbg(musb->controller, "%s done request %p, %d/%d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length);
	else
		dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length,
				request->status);
	req->request.complete(&req->ep->end_point, &req->request);
	spin_lock(&musb->lock);
	ep->busy = busy;
}

/* ----------------------------------------------------------------------- */

/*
 * Abort all requests queued to an endpoint, completing them with the
 * given status.  Synchronous.  The caller has locked the controller,
 * blocked IRQs, and selected this endpoint.
 */
static void nuke(struct musb_ep *ep, const int status)
{
	struct musb		*musb = ep->musb;
	struct musb_request	*req = NULL;
	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;

	ep->busy = 1;

	if (is_dma_capable() && ep->dma) {
		struct dma_controller	*c = ep->musb->dma_controller;
		int value;

		if (ep->is_in) {
			/*
			 * The programming guide says that we must not clear
			 * the DMAMODE bit before DMAENAB, so we only
			 * clear it in the second write...
			 */
			musb_writew(epio, MUSB_TXCSR,
				    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_TXCSR,
					0 | MUSB_TXCSR_FLUSHFIFO);
		} else {
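			/*
			 * FLUSHFIFO is written twice here; with a
			 * double-buffered RX FIFO both packet buffers
			 * presumably need flushing (mirrors the TX
			 * sequence above).
			 */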
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
		}

		value = c->channel_abort(ep->dma);
		dev_dbg(musb->controller, "%s: abort DMA --> %d\n",
				ep->name, value);
		c->channel_release(ep->dma);
		ep->dma = NULL;
	}

	while (!list_empty(&ep->req_list)) {
		req = list_first_entry(&ep->req_list, struct musb_request, list);
		musb_g_giveback(ep, &req->request, status);
	}
}

/* ----------------------------------------------------------------------- */

/* Data transfers - pure PIO, pure DMA, or mixed mode */

/*
 * This assumes the separate CPPI engine is responding to DMA requests
 * from the usb core ... sequenced a bit differently from mentor dma.
 */

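/*
 * Upper bound on what txstate() may write to the FIFO in one go: the
 * whole hardware FIFO if bulk splitting is allowed, else one packet.
 */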
static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
	if (can_bulk_split(musb, ep->type))
		return ep->hw_ep->max_packet_sz_tx;
	else
		return ep->packet_sz;
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Peripheral tx (IN) using Mentor DMA works as follows:
	Only mode 0 is used for transfers <= wPktSize,
	mode 1 is used for larger transfers,

	One of the following happens:
	- Host sends IN token which causes an endpoint interrupt
		-> TxAvail
			-> if DMA is currently busy, exit.
			-> if queue is non-empty, txstate().

	- Request is queued by the gadget driver.
		-> if queue was previously empty, txstate()

	txstate()
		-> start
		  /\    -> setup DMA
		  |     (data is transferred to the FIFO, then sent out when
		  |     IN token(s) are received from the Host.)
		  |             -> DMA interrupt on completion
		  |                calls TxAvail.
		  |                   -> stop DMA, ~DMAENAB,
		  |                   -> set TxPktRdy for last short pkt or zlp
		  |                   -> Complete Request
		  |                   -> Continue next request (call txstate)
		  |___________________________________|

 * Non-Mentor DMA engines can of course work differently, such as by
 * upleveling from irq-per-packet to irq-per-buffer.
 */

#endif

/*
 * An endpoint is transmitting data. This can be called either from
 * the IRQ routine or from ep.queue() to kickstart a request on an
 * endpoint.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void txstate(struct musb *musb, struct musb_request *req)
{
	u8			epnum = req->epnum;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct usb_request	*request;
	u16			fifo_count = 0, csr;
	int			use_dma = 0;

	musb_ep = req->ep;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
						musb_ep->end_point.name);
		return;
	}

	/* we shouldn't get here while DMA is active ... but we do ... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "dma pending...\n");
		return;
	}

	/* read TXCSR before */
	csr = musb_readw(epio, MUSB_TXCSR);

	request = &req->request;
	fifo_count = min(max_ep_writesize(musb, musb_ep),
			(int)(request->length - request->actual));

	if (csr & MUSB_TXCSR_TXPKTRDY) {
		dev_dbg(musb->controller, "%s old packet still ready, txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_SENDSTALL) {
		dev_dbg(musb->controller, "%s stalling, txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
			epnum, musb_ep->packet_sz, fifo_count,
			csr);

#ifndef CONFIG_USB_MUSB_PIO_ONLY
	if (is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		size_t request_size;

		/* setup DMA, then program endpoint CSR */
		request_size = min_t(size_t, request->length - request->actual,
					musb_ep->dma->max_len);

		use_dma = (request->dma != DMA_ADDR_INVALID);

		/* MUSB_TXCSR_P_ISO is still set correctly */

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
		{
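			/*
			 * Mode 0 moves one packet per DMA request and
			 * interrupts per packet; mode 1 lets the
			 * controller split a large buffer into packets
			 * itself, so it is used once at least a full
			 * packet remains.
			 */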
			if (request_size < musb_ep->packet_sz)
				musb_ep->dma->desired_mode = 0;
			else
				musb_ep->dma->desired_mode = 1;

			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					musb_ep->dma->desired_mode,
					request->dma + request->actual, request_size);
			if (use_dma) {
				if (musb_ep->dma->desired_mode == 0) {
					/*
					 * We must not clear the DMAMODE bit
					 * before the DMAENAB bit -- and the
					 * latter doesn't always get cleared
					 * before we get here...
					 */
					csr &= ~(MUSB_TXCSR_AUTOSET
						| MUSB_TXCSR_DMAENAB);
					musb_writew(epio, MUSB_TXCSR, csr
						| MUSB_TXCSR_P_WZC_BITS);
					csr &= ~MUSB_TXCSR_DMAMODE;
					csr |= (MUSB_TXCSR_DMAENAB |
							MUSB_TXCSR_MODE);
					/* against programming guide */
				} else {
					csr |= (MUSB_TXCSR_DMAENAB
							| MUSB_TXCSR_DMAMODE
							| MUSB_TXCSR_MODE);
					if (!musb_ep->hb_mult)
						csr |= MUSB_TXCSR_AUTOSET;
				}
				csr &= ~MUSB_TXCSR_P_UNDERRUN;

				musb_writew(epio, MUSB_TXCSR, csr);
			}
		}

#elif defined(CONFIG_USB_TI_CPPI_DMA)
		/* program endpoint CSR first, then setup DMA */
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
		       MUSB_TXCSR_MODE;
		musb_writew(epio, MUSB_TXCSR,
			(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
				| csr);

		/* ensure writebuffer is empty */
		csr = musb_readw(epio, MUSB_TXCSR);

		/* NOTE host side sets DMAENAB later than this; both are
		 * OK since the transfer dma glue (between CPPI and Mentor
		 * fifos) just tells CPPI it could start.  Data only moves
		 * to the USB TX fifo when both fifos are ready.
		 */

		/* "mode" is irrelevant here; handle terminating ZLPs like
		 * PIO does, since the hardware RNDIS mode seems unreliable
		 * except for the last-packet-is-already-short case.
		 */
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				0,
				request->dma + request->actual,
				request_size);
		if (!use_dma) {
			c->channel_release(musb_ep->dma);
			musb_ep->dma = NULL;
			csr &= ~MUSB_TXCSR_DMAENAB;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* invariant: request->buf is non-null */
		}
#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				request->zero,
				request->dma + request->actual,
				request_size);
#endif
	}
#endif

	if (!use_dma) {
		/*
		 * Unmap the DMA buffer back to the CPU if DMA channel
		 * programming failed; fall back to PIO.
		 */
		unmap_dma_buffer(req, musb);

		musb_write_fifo(musb_ep->hw_ep, fifo_count,
				(u8 *) (request->buf + request->actual));
		request->actual += fifo_count;
		csr |= MUSB_TXCSR_TXPKTRDY;
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(epio, MUSB_TXCSR, csr);
	}

	/* host may already have the data when this message shows... */
	dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
			musb_ep->end_point.name, use_dma ? "dma" : "pio",
			request->actual, request->length,
			musb_readw(epio, MUSB_TXCSR),
			fifo_count,
			musb_readw(epio, MUSB_TXMAXP));
}

/*
 * FIFO state update (e.g. data ready).
 * Called from IRQ, with controller locked.
 */
void musb_g_tx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	u8 __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);
	req = next_request(musb_ep);
	request = &req->request;
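	/*
	 * NOTE: req may be NULL here when the queue is empty; "request"
	 * is only dereferenced after the "if (request)" check below (the
	 * usb_request is the first member of struct musb_request, so
	 * taking its address from a NULL req is a no-op in practice).
	 */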

	csr = musb_readw(epio, MUSB_TXCSR);
	dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);

	dma = is_dma_capable() ? musb_ep->dma : NULL;

	/*
	 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
	 * probably deserves reporting as a host error.
	 */
	if (csr & MUSB_TXCSR_P_SENTSTALL) {
		csr |= MUSB_TXCSR_P_WZC_BITS;
		csr &= ~MUSB_TXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_TXCSR, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_UNDERRUN) {
		/* We NAKed, no big deal... little reason to care. */
		csr |= MUSB_TXCSR_P_WZC_BITS;
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
				epnum, request);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/*
		 * SHOULD NOT HAPPEN... has with CPPI though, after
		 * changing SENDSTALL (and other cases); harmless?
		 */
		dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
		return;
	}

	if (request) {
		u8	is_dma = 0;

		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
			is_dma = 1;
			csr |= MUSB_TXCSR_P_WZC_BITS;
			csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
				 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
			musb_writew(epio, MUSB_TXCSR, csr);
			/* Ensure writebuffer is empty. */
			csr = musb_readw(epio, MUSB_TXCSR);
			request->actual += musb_ep->dma->actual_len;
			dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
				epnum, csr, musb_ep->dma->actual_len, request);
		}

		/*
		 * First, maybe a terminating short packet. Some DMA
		 * engines might handle this by themselves.
		 */
		if ((request->zero && request->length
			&& (request->length % musb_ep->packet_sz == 0)
			&& (request->actual == request->length))
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
			|| (is_dma && (!dma->desired_mode ||
				(request->actual &
					(musb_ep->packet_sz - 1))))
#endif
		) {
			/*
			 * On DMA completion, FIFO may not be
			 * available yet...
			 */
			if (csr & MUSB_TXCSR_TXPKTRDY)
				return;

			dev_dbg(musb->controller, "sending zero pkt\n");
			musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
					| MUSB_TXCSR_TXPKTRDY);
			request->zero = 0;
		}

		if (request->actual == request->length) {
			musb_g_giveback(musb_ep, request, 0);
			/*
			 * In the giveback function the MUSB lock is
			 * released and re-acquired some time later. During
			 * this window the INDEX register could get
			 * changed by the gadget_queue function, especially
			 * on SMP systems. Reselect INDEX to be sure
			 * we are reading/modifying the right registers.
			 */
			musb_ep_select(mbase, epnum);
			req = musb_ep->desc ? next_request(musb_ep) : NULL;
			if (!req) {
				dev_dbg(musb->controller, "%s idle now\n",
					musb_ep->end_point.name);
				return;
			}
		}

		txstate(musb, req);
	}
}

/* ------------------------------------------------------------ */

#ifdef CONFIG_USB_INVENTRA_DMA

/* Peripheral rx (OUT) using Mentor DMA works as follows:
	- Only mode 0 is used.

	- Request is queued by the gadget class driver.
		-> if queue was previously empty, rxstate()

	- Host sends OUT token which causes an endpoint interrupt
	  /\      -> RxReady
	  |           -> if request queued, call rxstate
	  |             /\      -> setup DMA
	  |             |            -> DMA interrupt on completion
	  |             |               -> RxReady
	  |             |                     -> stop DMA
	  |             |                     -> ack the read
	  |             |                     -> if data received = max expected
	  |             |                               by the request, or host
	  |             |                               sent a short packet,
	  |             |                               complete the request,
	  |             |                               and start the next one.
	  |             |_____________________________________|
	  |                                      else just wait for the host
	  |                                         to send the next OUT token.
	  |__________________________________________________|

 * Non-Mentor DMA engines can of course work differently.
 */

#endif

/*
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
	const u8		epnum = req->epnum;
	struct usb_request	*request = &req->request;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	unsigned		fifo_count = 0;
	u16			len;
	u16			csr = musb_readw(epio, MUSB_RXCSR);
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
	u8			use_mode_1;

	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	len = musb_ep->packet_sz;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
						musb_ep->end_point.name);
		return;
	}

	/* We shouldn't get here while DMA is active, but we do... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "DMA pending...\n");
		return;
	}

	if (csr & MUSB_RXCSR_P_SENDSTALL) {
		dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n",
		    musb_ep->end_point.name, csr);
		return;
	}

	if (is_cppi_enabled() && is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		struct dma_channel	*channel = musb_ep->dma;

		/* NOTE:  CPPI won't actually stop advancing the DMA
		 * queue after short packet transfers, so this is almost
		 * always going to run as IRQ-per-packet DMA so that
		 * faults will be handled correctly.
		 */
		if (c->channel_program(channel,
				musb_ep->packet_sz,
				!request->short_not_ok,
				request->dma + request->actual,
				request->length - request->actual)) {

			/* make sure that if an rxpkt arrived after the irq,
			 * the cppi engine will be ready to take it as soon
			 * as DMA is enabled
			 */
			csr &= ~(MUSB_RXCSR_AUTOCLEAR
					| MUSB_RXCSR_DMAMODE);
			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
			musb_writew(epio, MUSB_RXCSR, csr);
			return;
		}
	}

	if (csr & MUSB_RXCSR_RXPKTRDY) {
		len = musb_readw(epio, MUSB_RXCOUNT);

		/*
		 * Enable Mode 1 on RX transfers only when the short_not_ok
		 * flag is set. Currently short_not_ok is set only by the
		 * file_storage and f_mass_storage drivers.
		 */

		if (request->short_not_ok && len == musb_ep->packet_sz)
			use_mode_1 = 1;
		else
			use_mode_1 = 0;

		if (request->actual < request->length) {
#ifdef CONFIG_USB_INVENTRA_DMA
			if (is_buffer_mapped(req)) {
				struct dma_controller	*c;
				struct dma_channel	*channel;
				int			use_dma = 0;

				c = musb->dma_controller;
				channel = musb_ep->dma;

	/* We use DMA Req mode 0 in rx_csr, and the DMA controller operates in
	 * mode 0 only. So we do not get endpoint interrupts due to DMA
	 * completion. We only get interrupts from the DMA controller.
	 *
	 * We could operate in DMA mode 1 if we knew the size of the transfer
	 * in advance. For mass storage class, request->length = what the host
	 * sends, so that'd work.  But for pretty much everything else,
	 * request->length is routinely more than what the host sends. For
	 * most of these gadgets, end of transfer is signified either by a
	 * short packet, or filling the last byte of the buffer.  (Sending
	 * extra data in that last packet should trigger an overflow fault.)
	 * But in mode 1, we don't get a DMA completion interrupt for short
	 * packets.
	 *
	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
	 * to get an endpoint interrupt on every DMA req, but that didn't seem
	 * to work reliably.
	 *
	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
	 * then becomes usable as a runtime "use mode 1" hint...
	 */

				/* Experimental: Mode1 works with mass storage use cases */
				if (use_mode_1) {
					csr |= MUSB_RXCSR_AUTOCLEAR;
					musb_writew(epio, MUSB_RXCSR, csr);
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);

					/*
					 * this special sequence (enabling and then
					 * disabling MUSB_RXCSR_DMAMODE) is required
					 * to get DMAReq to activate
					 */
					musb_writew(epio, MUSB_RXCSR,
						csr | MUSB_RXCSR_DMAMODE);
					musb_writew(epio, MUSB_RXCSR, csr);

				} else {
					if (!musb_ep->hb_mult &&
						musb_ep->hw_ep->rx_double_buffered)
						csr |= MUSB_RXCSR_AUTOCLEAR;
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);
				}

				if (request->actual < request->length) {
					int transfer_size = 0;
					if (use_mode_1) {
						transfer_size = min(request->length - request->actual,
								channel->max_len);
						musb_ep->dma->desired_mode = 1;
					} else {
						transfer_size = min(request->length - request->actual,
								(unsigned)len);
						musb_ep->dma->desired_mode = 0;
					}

					use_dma = c->channel_program(
							channel,
							musb_ep->packet_sz,
							channel->desired_mode,
							request->dma
							+ request->actual,
							transfer_size);
				}

				if (use_dma)
					return;
			}
#elif defined(CONFIG_USB_UX500_DMA)
			if ((is_buffer_mapped(req)) &&
				(request->actual < request->length)) {

				struct dma_controller *c;
				struct dma_channel *channel;
				int transfer_size = 0;

				c = musb->dma_controller;
				channel = musb_ep->dma;

				/* In case first packet is short */
				if (len < musb_ep->packet_sz)
					transfer_size = len;
				else if (request->short_not_ok)
					transfer_size = min(request->length -
							request->actual,
							channel->max_len);
				else
					transfer_size = min(request->length -
							request->actual,
							(unsigned)len);

				csr &= ~MUSB_RXCSR_DMAMODE;
				csr |= (MUSB_RXCSR_DMAENAB |
					MUSB_RXCSR_AUTOCLEAR);

				musb_writew(epio, MUSB_RXCSR, csr);

				if (transfer_size <= musb_ep->packet_sz) {
					musb_ep->dma->desired_mode = 0;
				} else {
					musb_ep->dma->desired_mode = 1;
					/* Mode must be set after DMAENAB */
					csr |= MUSB_RXCSR_DMAMODE;
					musb_writew(epio, MUSB_RXCSR, csr);
				}

				if (c->channel_program(channel,
							musb_ep->packet_sz,
							channel->desired_mode,
							request->dma
							+ request->actual,
							transfer_size))
					return;
			}
#endif	/* Mentor's DMA */

			fifo_count = request->length - request->actual;
			dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
					musb_ep->end_point.name,
					len, fifo_count,
					musb_ep->packet_sz);

			fifo_count = min_t(unsigned, len, fifo_count);

#ifdef	CONFIG_USB_TUSB_OMAP_DMA
			if (tusb_dma_omap() && is_buffer_mapped(req)) {
				struct dma_controller *c = musb->dma_controller;
				struct dma_channel *channel = musb_ep->dma;
				u32 dma_addr = request->dma + request->actual;
				int ret;

				ret = c->channel_program(channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						dma_addr,
						fifo_count);
				if (ret)
					return;
			}
#endif
			/*
			 * Unmap the DMA buffer back to the CPU if DMA channel
			 * programming failed. This buffer is mapped if the
			 * channel allocation was successful.
			 */
			if (is_buffer_mapped(req)) {
				unmap_dma_buffer(req, musb);

				/*
				 * Clear DMAENAB and AUTOCLEAR for the
				 * PIO mode transfer
				 */
				csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
				musb_writew(epio, MUSB_RXCSR, csr);
			}

			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
					(request->buf + request->actual));
			request->actual += fifo_count;

			/* REVISIT if we left anything in the fifo, flush
			 * it and report -EOVERFLOW
			 */

			/* ack the read! */
			csr |= MUSB_RXCSR_P_WZC_BITS;
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	/* reached the end, or short packet detected */
	if (request->actual == request->length || len < musb_ep->packet_sz)
		musb_g_giveback(musb_ep, request, 0);
}

/*
 * Data ready for a request; called from IRQ
 */
void musb_g_rx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	void __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];

	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	musb_ep_select(mbase, epnum);

	req = next_request(musb_ep);
	if (!req)
		return;

	request = &req->request;

	csr = musb_readw(epio, MUSB_RXCSR);
	dma = is_dma_capable() ? musb_ep->dma : NULL;

	dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
			csr, dma ? " (dma)" : "", request);

	if (csr & MUSB_RXCSR_P_SENTSTALL) {
		csr |= MUSB_RXCSR_P_WZC_BITS;
		csr &= ~MUSB_RXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_RXCSR, csr);
		return;
	}

	if (csr & MUSB_RXCSR_P_OVERRUN) {
		/* csr |= MUSB_RXCSR_P_WZC_BITS; */
		csr &= ~MUSB_RXCSR_P_OVERRUN;
		musb_writew(epio, MUSB_RXCSR, csr);

		dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request);
		if (request->status == -EINPROGRESS)
			request->status = -EOVERFLOW;
	}
	if (csr & MUSB_RXCSR_INCOMPRX) {
		/* REVISIT not necessarily an error */
		dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/* "should not happen"; likely RXPKTRDY pending for DMA */
		dev_dbg(musb->controller, "%s busy, csr %04x\n",
			musb_ep->end_point.name, csr);
		return;
	}

	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
		csr &= ~(MUSB_RXCSR_AUTOCLEAR
				| MUSB_RXCSR_DMAENAB
				| MUSB_RXCSR_DMAMODE);
		musb_writew(epio, MUSB_RXCSR,
			MUSB_RXCSR_P_WZC_BITS | csr);

		request->actual += musb_ep->dma->actual_len;

		dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
			epnum, csr,
			musb_readw(epio, MUSB_RXCSR),
			musb_ep->dma->actual_len, request);

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
		/* Autoclear doesn't clear RxPktRdy for short packets */
		if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
				|| (dma->actual_len
					& (musb_ep->packet_sz - 1))) {
			/* ack the read! */
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}

		/* incomplete, and not short? wait for next IN packet */
		if ((request->actual < request->length)
				&& (musb_ep->dma->actual_len
					== musb_ep->packet_sz)) {
			/* In the double buffer case, continue to unload
			 * the fifo if there is an Rx packet in the FIFO.
			 */
			csr = musb_readw(epio, MUSB_RXCSR);
			if ((csr & MUSB_RXCSR_RXPKTRDY) &&
				hw_ep->rx_double_buffered)
				goto exit;
			return;
		}
#endif
		musb_g_giveback(musb_ep, request, 0);
		/*
		 * In the giveback function the MUSB lock is
		 * released and re-acquired some time later. During
		 * this window the INDEX register could get
		 * changed by the gadget_queue function, especially
		 * on SMP systems. Reselect INDEX to be sure
		 * we are reading/modifying the right registers.
		 */
		musb_ep_select(mbase, epnum);

		req = next_request(musb_ep);
		if (!req)
			return;
	}
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
exit:
#endif
	/* Analyze request */
	rxstate(musb, req);
}

/* ------------------------------------------------------------ */

static int musb_gadget_enable(struct usb_ep *ep,
			const struct usb_endpoint_descriptor *desc)
{
	unsigned long		flags;
	struct musb_ep		*musb_ep;
	struct musb_hw_ep	*hw_ep;
	void __iomem		*regs;
	struct musb		*musb;
	void __iomem	*mbase;
	u8		epnum;
	u16		csr;
	unsigned	tmp;
	int		status = -EINVAL;

	if (!ep || !desc)
		return -EINVAL;

	musb_ep = to_musb_ep(ep);
	hw_ep = musb_ep->hw_ep;
	regs = hw_ep->regs;
	musb = musb_ep->musb;
	mbase = musb->mregs;
	epnum = musb_ep->current_epnum;

	spin_lock_irqsave(&musb->lock, flags);

	if (musb_ep->desc) {
		status = -EBUSY;
		goto fail;
	}
	musb_ep->type = usb_endpoint_type(desc);

	/* check direction and (later) maxpacket size against endpoint */
	if (usb_endpoint_num(desc) != epnum)
		goto fail;

	/* REVISIT this rules out high bandwidth periodic transfers */
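	/*
	 * wMaxPacketSize: bits 10..0 hold the packet size proper; for
	 * periodic endpoints, bits 12..11 give the number of additional
	 * (high bandwidth) transactions per microframe.
	 */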
	tmp = usb_endpoint_maxp(desc);
	if (tmp & ~0x07ff) {
		int ok;

		if (usb_endpoint_dir_in(desc))
			ok = musb->hb_iso_tx;
		else
			ok = musb->hb_iso_rx;

		if (!ok) {
			dev_dbg(musb->controller, "no support for high bandwidth ISO\n");
			goto fail;
		}
		musb_ep->hb_mult = (tmp >> 11) & 3;
	} else {
		musb_ep->hb_mult = 0;
	}

	musb_ep->packet_sz = tmp & 0x7ff;
	tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);
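	/* tmp is now the full per-(micro)frame payload; it must fit the FIFO */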

	/* enable the interrupts for the endpoint, set the endpoint
	 * packet size (or fail), set the mode, clear the fifo
	 */
	musb_ep_select(mbase, epnum);
	if (usb_endpoint_dir_in(desc)) {
		u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 1;
		if (!musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_tx) {
			dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
			goto fail;
		}

		int_txe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

		/* REVISIT if can_bulk_split(), use by updating "tmp";
		 * likewise high bandwidth periodic tx
		 */
		/* Set TXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		if (musb->double_buffer_not_ok)
			musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
		else
			musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
					| (musb_ep->hb_mult << 11));

		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
		if (musb_readw(regs, MUSB_TXCSR)
				& MUSB_TXCSR_FIFONOTEMPTY)
			csr |= MUSB_TXCSR_FLUSHFIFO;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_TXCSR_P_ISO;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_TXCSR, csr);
		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
		musb_writew(regs, MUSB_TXCSR, csr);

	} else {
		u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 0;
		if (musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_rx) {
			dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
			goto fail;
		}

		int_rxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRRXE, int_rxe);

		/* REVISIT if can_bulk_combine() use by updating "tmp"
		 * likewise high bandwidth periodic rx
		 */
		/* Set RXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		if (musb->double_buffer_not_ok)
			musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_rx);
		else
			musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
					| (musb_ep->hb_mult << 11));

		/* force shared fifo to OUT-only mode */
		if (hw_ep->is_shared_fifo) {
			csr = musb_readw(regs, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
			musb_writew(regs, MUSB_TXCSR, csr);
		}

		csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_RXCSR_P_ISO;
		else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
			csr |= MUSB_RXCSR_DISNYET;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_RXCSR, csr);
		musb_writew(regs, MUSB_RXCSR, csr);
	}

	/* NOTE:  all the I/O code _should_ work fine without DMA, in case
	 * for some reason you run out of channels here.
	 */
	if (is_dma_capable() && musb->dma_controller) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep->dma = c->channel_alloc(c, hw_ep,
				(desc->bEndpointAddress & USB_DIR_IN));
	} else
		musb_ep->dma = NULL;

	musb_ep->desc = desc;
	musb_ep->busy = 0;
	musb_ep->wedged = 0;
	status = 0;

	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
			musb_driver_name, musb_ep->end_point.name,
			({ char *s; switch (musb_ep->type) {
			case USB_ENDPOINT_XFER_BULK:	s = "bulk"; break;
			case USB_ENDPOINT_XFER_INT:	s = "int"; break;
			default:			s = "iso"; break;
			}; s; }),
			musb_ep->is_in ? "IN" : "OUT",
			musb_ep->dma ? "dma, " : "",
			musb_ep->packet_sz);

	schedule_work(&musb->irq_work);

fail:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
1232
1233/*
1234 * Disable an endpoint flushing all requests queued.
1235 */
1236static int musb_gadget_disable(struct usb_ep *ep)
1237{
1238        unsigned long   flags;
1239        struct musb     *musb;
1240        u8              epnum;
1241        struct musb_ep  *musb_ep;
1242        void __iomem    *epio;
1243        int             status = 0;
1244
1245        musb_ep = to_musb_ep(ep);
1246        musb = musb_ep->musb;
1247        epnum = musb_ep->current_epnum;
1248        epio = musb->endpoints[epnum].regs;
1249
1250        spin_lock_irqsave(&musb->lock, flags);
1251        musb_ep_select(musb->mregs, epnum);
1252
1253        /* zero the endpoint sizes */
1254        if (musb_ep->is_in) {
1255                u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
1256                int_txe &= ~(1 << epnum);
1257                musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
1258                musb_writew(epio, MUSB_TXMAXP, 0);
1259        } else {
1260                u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
1261                int_rxe &= ~(1 << epnum);
1262                musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
1263                musb_writew(epio, MUSB_RXMAXP, 0);
1264        }
1265
1266        musb_ep->desc = NULL;
1267#ifndef __UBOOT__
1268        musb_ep->end_point.desc = NULL;
1269#endif
1270
1271        /* abort all pending DMA and requests */
1272        nuke(musb_ep, -ESHUTDOWN);
1273
1274        schedule_work(&musb->irq_work);
1275
1276        spin_unlock_irqrestore(&musb->lock, flags);
1277
1278        dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name);
1279
1280        return status;
1281}
1282
1283/*
1284 * Allocate a request for an endpoint.
1285 * Reused by ep0 code.
1286 */
1287struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1288{
1289        struct musb_ep          *musb_ep = to_musb_ep(ep);
1290        struct musb             *musb = musb_ep->musb;
1291        struct musb_request     *request = NULL;
1292
1293        request = kzalloc(sizeof *request, gfp_flags);
1294        if (!request) {
1295                dev_dbg(musb->controller, "not enough memory\n");
1296                return NULL;
1297        }
1298
1299        request->request.dma = DMA_ADDR_INVALID;
1300        request->epnum = musb_ep->current_epnum;
1301        request->ep = musb_ep;
1302
1303        return &request->request;
1304}
1305
1306/*
1307 * Free a request
1308 * Reused by ep0 code.
1309 */
1310void musb_free_request(struct usb_ep *ep, struct usb_request *req)
1311{
1312        kfree(to_musb_request(req));
1313}
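/*
 * For illustration: these two entry points back the usb_ep_alloc_request()
 * and usb_ep_free_request() wrappers; a request is always allocated and
 * freed against the same endpoint.  A hedged sketch:
 */
#if 0
        struct usb_request *req;

        req = usb_ep_alloc_request(ep, GFP_KERNEL);     /* musb_alloc_request() */
        if (!req)
                return -ENOMEM;
        /* ... fill and queue it, see musb_gadget_queue() below ... */
        usb_ep_free_request(ep, req);                   /* musb_free_request() */
#endif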
1314
1315static LIST_HEAD(buffers);
1316
1317struct free_record {
1318        struct list_head        list;
1319        struct device           *dev;
1320        unsigned                bytes;
1321        dma_addr_t              dma;
1322};
1323
1324/*
1325 * Context: controller locked, IRQs blocked.
1326 */
1327void musb_ep_restart(struct musb *musb, struct musb_request *req)
1328{
1329        dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n",
1330                req->tx ? "TX/IN" : "RX/OUT",
1331                &req->request, req->request.length, req->epnum);
1332
1333        musb_ep_select(musb->mregs, req->epnum);
1334        if (req->tx)
1335                txstate(musb, req);
1336        else
1337                rxstate(musb, req);
1338}
1339
1340static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1341                        gfp_t gfp_flags)
1342{
1343        struct musb_ep          *musb_ep;
1344        struct musb_request     *request;
1345        struct musb             *musb;
1346        int                     status = 0;
1347        unsigned long           lockflags;
1348
1349        if (!ep || !req)
1350                return -EINVAL;
1351        if (!req->buf)
1352                return -ENODATA;
1353
1354        musb_ep = to_musb_ep(ep);
1355        musb = musb_ep->musb;
1356
1357        request = to_musb_request(req);
1358        request->musb = musb;
1359
1360        if (request->ep != musb_ep)
1361                return -EINVAL;
1362
1363        dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req);
1364
1365        /* request is mine now... */
1366        request->request.actual = 0;
1367        request->request.status = -EINPROGRESS;
1368        request->epnum = musb_ep->current_epnum;
1369        request->tx = musb_ep->is_in;
1370
1371        map_dma_buffer(request, musb, musb_ep);
1372
1373        spin_lock_irqsave(&musb->lock, lockflags);
1374
1375        /* don't queue if the ep is down */
1376        if (!musb_ep->desc) {
1377                dev_dbg(musb->controller, "req %p queued to %s while ep %s\n",
1378                                req, ep->name, "disabled");
1379                status = -ESHUTDOWN;
1380                goto cleanup;
1381        }
1382
1383        /* add request to the list */
1384        list_add_tail(&request->list, &musb_ep->req_list);
1385
1386        /* if this is the head of the queue, start i/o ... */
1387        if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
1388                musb_ep_restart(musb, request);
1389
1390cleanup:
1391        spin_unlock_irqrestore(&musb->lock, lockflags);
1392        return status;
1393}
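/*
 * For illustration, a hedged sketch of how a function driver queues I/O
 * through the wrapper above; all names here are hypothetical.  The
 * completion callback runs once musb_g_giveback() has filled in the
 * request's final ->status and ->actual byte count.
 */
#if 0
static void example_complete(struct usb_ep *ep, struct usb_request *req)
{
        if (req->status == 0)
                printf("transferred %u of %u bytes\n",
                       req->actual, req->length);
}

        req->buf = buffer;
        req->length = len;
        req->complete = example_complete;
        status = usb_ep_queue(ep, req, GFP_ATOMIC);     /* musb_gadget_queue() */
#endif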
1394
1395static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
1396{
1397        struct musb_ep          *musb_ep = to_musb_ep(ep);
1398        struct musb_request     *req = to_musb_request(request);
1399        struct musb_request     *r;
1400        unsigned long           flags;
1401        int                     status = 0;
1402        struct musb             *musb;
1403
1404        if (!ep || !request || req->ep != musb_ep)
1405                return -EINVAL;
1406        musb = musb_ep->musb;
1407        spin_lock_irqsave(&musb->lock, flags);
1408
1409        list_for_each_entry(r, &musb_ep->req_list, list) {
1410                if (r == req)
1411                        break;
1412        }
1413        if (r != req) {
1414                dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name);
1415                status = -EINVAL;
1416                goto done;
1417        }
1418
1419        /* if the hardware doesn't have the request, easy ... */
1420        if (musb_ep->req_list.next != &req->list || musb_ep->busy)
1421                musb_g_giveback(musb_ep, request, -ECONNRESET);
1422
1423        /* ... else abort the dma transfer ... */
1424        else if (is_dma_capable() && musb_ep->dma) {
1425                struct dma_controller   *c = musb->dma_controller;
1426
1427                musb_ep_select(musb->mregs, musb_ep->current_epnum);
1428                if (c->channel_abort)
1429                        status = c->channel_abort(musb_ep->dma);
1430                else
1431                        status = -EBUSY;
1432                if (status == 0)
1433                        musb_g_giveback(musb_ep, request, -ECONNRESET);
1434        } else {
1435                /* NOTE: by sticking to easily tested hardware/driver states,
1436                 * we leave counting of in-flight packets imprecise.
1437                 */
1438                musb_g_giveback(musb_ep, request, -ECONNRESET);
1439        }
1440
1441done:
1442        spin_unlock_irqrestore(&musb->lock, flags);
1443        return status;
1444}
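/*
 * For illustration: a driver dequeues only to cancel a request that has
 * not completed yet (e.g. on a timeout); the request is then given back
 * with status -ECONNRESET.  Sketch, with a hypothetical condition:
 */
#if 0
        if (timed_out)
                usb_ep_dequeue(ep, req);        /* musb_gadget_dequeue() */
#endif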
1445
1446/*
1447 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx
1448 * any data, but new requests may still be queued.
1449 *
1450 * exported to ep0 code
1451 */
1452static int musb_gadget_set_halt(struct usb_ep *ep, int value)
1453{
1454        struct musb_ep          *musb_ep = to_musb_ep(ep);
1455        u8                      epnum = musb_ep->current_epnum;
1456        struct musb             *musb = musb_ep->musb;
1457        void __iomem            *epio = musb->endpoints[epnum].regs;
1458        void __iomem            *mbase;
1459        unsigned long           flags;
1460        u16                     csr;
1461        struct musb_request     *request;
1462        int                     status = 0;
1463
1464        if (!ep)
1465                return -EINVAL;
1466        mbase = musb->mregs;
1467
1468        spin_lock_irqsave(&musb->lock, flags);
1469
1470        if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) {
1471                status = -EINVAL;
1472                goto done;
1473        }
1474
1475        musb_ep_select(mbase, epnum);
1476
1477        request = next_request(musb_ep);
1478        if (value) {
1479                if (request) {
1480                        dev_dbg(musb->controller, "request in progress, cannot halt %s\n",
1481                            ep->name);
1482                        status = -EAGAIN;
1483                        goto done;
1484                }
1485                /* Cannot portably stall with non-empty FIFO */
1486                if (musb_ep->is_in) {
1487                        csr = musb_readw(epio, MUSB_TXCSR);
1488                        if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1489                                dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name);
1490                                status = -EAGAIN;
1491                                goto done;
1492                        }
1493                }
1494        } else
1495                musb_ep->wedged = 0;
1496
1497        /* set/clear the stall and toggle bits */
1498        dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear");
1499        if (musb_ep->is_in) {
1500                csr = musb_readw(epio, MUSB_TXCSR);
1501                csr |= MUSB_TXCSR_P_WZC_BITS
1502                        | MUSB_TXCSR_CLRDATATOG;
1503                if (value)
1504                        csr |= MUSB_TXCSR_P_SENDSTALL;
1505                else
1506                        csr &= ~(MUSB_TXCSR_P_SENDSTALL
1507                                | MUSB_TXCSR_P_SENTSTALL);
1508                csr &= ~MUSB_TXCSR_TXPKTRDY;
1509                musb_writew(epio, MUSB_TXCSR, csr);
1510        } else {
1511                csr = musb_readw(epio, MUSB_RXCSR);
1512                csr |= MUSB_RXCSR_P_WZC_BITS
1513                        | MUSB_RXCSR_FLUSHFIFO
1514                        | MUSB_RXCSR_CLRDATATOG;
1515                if (value)
1516                        csr |= MUSB_RXCSR_P_SENDSTALL;
1517                else
1518                        csr &= ~(MUSB_RXCSR_P_SENDSTALL
1519                                | MUSB_RXCSR_P_SENTSTALL);
1520                musb_writew(epio, MUSB_RXCSR, csr);
1521        }
1522
1523        /* maybe start the first request in the queue */
1524        if (!musb_ep->busy && !value && request) {
1525                dev_dbg(musb->controller, "restarting the request\n");
1526                musb_ep_restart(musb, request);
1527        }
1528
1529done:
1530        spin_unlock_irqrestore(&musb->lock, flags);
1531        return status;
1532}
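/*
 * For illustration: function drivers reach the handler above through the
 * usb_ep_set_halt()/usb_ep_clear_halt() wrappers, e.g. to signal a
 * protocol error on a bulk endpoint and later resume it:
 */
#if 0
        usb_ep_set_halt(ep);            /* stall: musb_gadget_set_halt(ep, 1) */
        /* ... host clears the stall, or the driver does ... */
        usb_ep_clear_halt(ep);          /* resume: musb_gadget_set_halt(ep, 0) */
#endif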
1533
1534#ifndef __UBOOT__
1535/*
1536 * Set the halt feature; subsequent clear-halt requests from the host are ignored.
1537 */
1538static int musb_gadget_set_wedge(struct usb_ep *ep)
1539{
1540        struct musb_ep          *musb_ep = to_musb_ep(ep);
1541
1542        if (!ep)
1543                return -EINVAL;
1544
1545        musb_ep->wedged = 1;
1546
1547        return usb_ep_set_halt(ep);
1548}
1549#endif
1550
1551static int musb_gadget_fifo_status(struct usb_ep *ep)
1552{
1553        struct musb_ep          *musb_ep = to_musb_ep(ep);
1554        void __iomem            *epio = musb_ep->hw_ep->regs;
1555        int                     retval = -EINVAL;
1556
1557        if (musb_ep->desc && !musb_ep->is_in) {
1558                struct musb             *musb = musb_ep->musb;
1559                int                     epnum = musb_ep->current_epnum;
1560                void __iomem            *mbase = musb->mregs;
1561                unsigned long           flags;
1562
1563                spin_lock_irqsave(&musb->lock, flags);
1564
1565                musb_ep_select(mbase, epnum);
1566                /* FIXME return zero unless RXPKTRDY is set */
1567                retval = musb_readw(epio, MUSB_RXCOUNT);
1568
1569                spin_unlock_irqrestore(&musb->lock, flags);
1570        }
1571        return retval;
1572}
1573
1574static void musb_gadget_fifo_flush(struct usb_ep *ep)
1575{
1576        struct musb_ep  *musb_ep = to_musb_ep(ep);
1577        struct musb     *musb = musb_ep->musb;
1578        u8              epnum = musb_ep->current_epnum;
1579        void __iomem    *epio = musb->endpoints[epnum].regs;
1580        void __iomem    *mbase;
1581        unsigned long   flags;
1582        u16             csr, int_txe;
1583
1584        mbase = musb->mregs;
1585
1586        spin_lock_irqsave(&musb->lock, flags);
1587        musb_ep_select(mbase, (u8) epnum);
1588
1589        /* disable interrupts */
1590        int_txe = musb_readw(mbase, MUSB_INTRTXE);
1591        musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
1592
1593        if (musb_ep->is_in) {
1594                csr = musb_readw(epio, MUSB_TXCSR);
1595                if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1596                        csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
1597                        /*
1598                         * Setting both TXPKTRDY and FLUSHFIFO makes the controller
1599                         * interrupt the FIFO load in progress without flushing
1600                         * the packets already loaded, so clear TXPKTRDY first.
1601                         */
1602                        csr &= ~MUSB_TXCSR_TXPKTRDY;
1603                        musb_writew(epio, MUSB_TXCSR, csr);
1604                        /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1605                        musb_writew(epio, MUSB_TXCSR, csr);
1606                }
1607        } else {
1608                csr = musb_readw(epio, MUSB_RXCSR);
1609                csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
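                /* write twice; with double buffering, the second write
                 * is presumably what flushes the other buffered packet
                 * (compare the doubled RXCSR write in musb_gadget_enable)
                 */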
1610                musb_writew(epio, MUSB_RXCSR, csr);
1611                musb_writew(epio, MUSB_RXCSR, csr);
1612        }
1613
1614        /* re-enable interrupt */
1615        musb_writew(mbase, MUSB_INTRTXE, int_txe);
1616        spin_unlock_irqrestore(&musb->lock, flags);
1617}
1618
1619static const struct usb_ep_ops musb_ep_ops = {
1620        .enable         = musb_gadget_enable,
1621        .disable        = musb_gadget_disable,
1622        .alloc_request  = musb_alloc_request,
1623        .free_request   = musb_free_request,
1624        .queue          = musb_gadget_queue,
1625        .dequeue        = musb_gadget_dequeue,
1626        .set_halt       = musb_gadget_set_halt,
1627#ifndef __UBOOT__
1628        .set_wedge      = musb_gadget_set_wedge,
1629#endif
1630        .fifo_status    = musb_gadget_fifo_status,
1631        .fifo_flush     = musb_gadget_fifo_flush
1632};
1633
1634/* ----------------------------------------------------------------------- */
1635
1636static int musb_gadget_get_frame(struct usb_gadget *gadget)
1637{
1638        struct musb     *musb = gadget_to_musb(gadget);
1639
1640        return (int)musb_readw(musb->mregs, MUSB_FRAME);
1641}
1642
1643static int musb_gadget_wakeup(struct usb_gadget *gadget)
1644{
1645#ifndef __UBOOT__
1646        struct musb     *musb = gadget_to_musb(gadget);
1647        void __iomem    *mregs = musb->mregs;
1648        unsigned long   flags;
1649        int             status = -EINVAL;
1650        u8              power, devctl;
1651        int             retries;
1652
1653        spin_lock_irqsave(&musb->lock, flags);
1654
1655        switch (musb->xceiv->state) {
1656        case OTG_STATE_B_PERIPHERAL:
1657                /* NOTE:  OTG state machine doesn't include B_SUSPENDED;
1658                 * that's part of the standard usb 1.1 state machine, and
1659                 * doesn't affect OTG transitions.
1660                 */
1661                if (musb->may_wakeup && musb->is_suspended)
1662                        break;
1663                goto done;
1664        case OTG_STATE_B_IDLE:
1665                /* Start SRP ... OTG not required. */
1666                devctl = musb_readb(mregs, MUSB_DEVCTL);
1667                dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl);
1668                devctl |= MUSB_DEVCTL_SESSION;
1669                musb_writeb(mregs, MUSB_DEVCTL, devctl);
1670                devctl = musb_readb(mregs, MUSB_DEVCTL);
1671                retries = 100;
1672                while (!(devctl & MUSB_DEVCTL_SESSION)) {
1673                        devctl = musb_readb(mregs, MUSB_DEVCTL);
1674                        if (retries-- < 1)
1675                                break;
1676                }
1677                retries = 10000;
1678                while (devctl & MUSB_DEVCTL_SESSION) {
1679                        devctl = musb_readb(mregs, MUSB_DEVCTL);
1680                        if (retries-- < 1)
1681                                break;
1682                }
1683
1684                spin_unlock_irqrestore(&musb->lock, flags);
1685                otg_start_srp(musb->xceiv->otg);
1686                spin_lock_irqsave(&musb->lock, flags);
1687
1688                /* Block idling for at least 1s */
1689                musb_platform_try_idle(musb,
1690                        jiffies + msecs_to_jiffies(1000));
1691
1692                status = 0;
1693                goto done;
1694        default:
1695                dev_dbg(musb->controller, "Unhandled wake: %s\n",
1696                        otg_state_string(musb->xceiv->state));
1697                goto done;
1698        }
1699
1700        status = 0;
1701
1702        power = musb_readb(mregs, MUSB_POWER);
1703        power |= MUSB_POWER_RESUME;
1704        musb_writeb(mregs, MUSB_POWER, power);
1705        dev_dbg(musb->controller, "issue wakeup\n");
1706
1707        /* FIXME do this next chunk in a timer callback, no udelay */
1708        mdelay(2);
1709
1710        power = musb_readb(mregs, MUSB_POWER);
1711        power &= ~MUSB_POWER_RESUME;
1712        musb_writeb(mregs, MUSB_POWER, power);
1713done:
1714        spin_unlock_irqrestore(&musb->lock, flags);
1715        return status;
1716#else
1717        return 0;
1718#endif
1719}
1720
1721static int
1722musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
1723{
1724        struct musb     *musb = gadget_to_musb(gadget);
1725
1726        musb->is_self_powered = !!is_selfpowered;
1727        return 0;
1728}
1729
1730static void musb_pullup(struct musb *musb, int is_on)
1731{
1732        u8 power;
1733
1734        power = musb_readb(musb->mregs, MUSB_POWER);
1735        if (is_on)
1736                power |= MUSB_POWER_SOFTCONN;
1737        else
1738                power &= ~MUSB_POWER_SOFTCONN;
1739
1740        /* FIXME if on, HdrcStart; if off, HdrcStop */
1741
1742        dev_dbg(musb->controller, "gadget D+ pullup %s\n",
1743                is_on ? "on" : "off");
1744        musb_writeb(musb->mregs, MUSB_POWER, power);
1745}
1746
1747#if 0
1748static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
1749{
1750        dev_dbg(musb->controller, "<= %s =>\n", __func__);
1751
1752        /*
1753         * FIXME iff driver's softconnect flag is set (as it is during probe,
1754         * though that can clear it), just musb_pullup().
1755         */
1756
1757        return -EINVAL;
1758}
1759#endif
1760
1761static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1762{
1763#ifndef __UBOOT__
1764        struct musb     *musb = gadget_to_musb(gadget);
1765
1766        if (!musb->xceiv->set_power)
1767                return -EOPNOTSUPP;
1768        return usb_phy_set_power(musb->xceiv, mA);
1769#else
1770        return 0;
1771#endif
1772}
1773
1774static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1775{
1776        struct musb     *musb = gadget_to_musb(gadget);
1777        unsigned long   flags;
1778
1779        is_on = !!is_on;
1780
1781        pm_runtime_get_sync(musb->controller);
1782
1783        /* NOTE: this assumes we are sensing vbus; we'd rather
1784         * not pullup unless the B-session is active.
1785         */
1786        spin_lock_irqsave(&musb->lock, flags);
1787        if (is_on != musb->softconnect) {
1788                musb->softconnect = is_on;
1789                musb_pullup(musb, is_on);
1790        }
1791        spin_unlock_irqrestore(&musb->lock, flags);
1792
1793        pm_runtime_put(musb->controller);
1794
1795        return 0;
1796}
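/*
 * For illustration: the pullup is normally toggled through the
 * usb_gadget_connect()/usb_gadget_disconnect() wrappers once a gadget
 * driver is bound, making the device appear to (or vanish from) the host:
 */
#if 0
        usb_gadget_connect(&musb->g);           /* musb_gadget_pullup(g, 1) */
        /* ... */
        usb_gadget_disconnect(&musb->g);        /* musb_gadget_pullup(g, 0) */
#endif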
1797
1798#ifndef __UBOOT__
1799static int musb_gadget_start(struct usb_gadget *g,
1800                struct usb_gadget_driver *driver);
1801static int musb_gadget_stop(struct usb_gadget *g,
1802                struct usb_gadget_driver *driver);
1803#endif
1804
1805static const struct usb_gadget_ops musb_gadget_operations = {
1806        .get_frame              = musb_gadget_get_frame,
1807        .wakeup                 = musb_gadget_wakeup,
1808        .set_selfpowered        = musb_gadget_set_self_powered,
1809        /* .vbus_session                = musb_gadget_vbus_session, */
1810        .vbus_draw              = musb_gadget_vbus_draw,
1811        .pullup                 = musb_gadget_pullup,
1812#ifndef __UBOOT__
1813        .udc_start              = musb_gadget_start,
1814        .udc_stop               = musb_gadget_stop,
1815#endif
1816};
1817
1818/* ----------------------------------------------------------------------- */
1819
1820/* Registration */
1821
1822/* Only this registration code "knows" the rule (from USB standards)
1823 * about there being only one external upstream port.  It assumes
1824 * all peripheral ports are external...
1825 */
1826
1827#ifndef __UBOOT__
1828static void musb_gadget_release(struct device *dev)
1829{
1830        /* kref_put(WHAT) */
1831        dev_dbg(dev, "%s\n", __func__);
1832}
1833#endif
1834
1835
1836static void __devinit
1837init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
1838{
1839        struct musb_hw_ep       *hw_ep = musb->endpoints + epnum;
1840
1841        memset(ep, 0, sizeof *ep);
1842
1843        ep->current_epnum = epnum;
1844        ep->musb = musb;
1845        ep->hw_ep = hw_ep;
1846        ep->is_in = is_in;
1847
1848        INIT_LIST_HEAD(&ep->req_list);
1849
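        /* results in "ep0", "ep1in", "ep2out", ...; shared-FIFO
         * endpoints get a direction-less name such as "ep1"
         */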
1850        sprintf(ep->name, "ep%d%s", epnum,
1851                        (!epnum || hw_ep->is_shared_fifo) ? "" : (
1852                                is_in ? "in" : "out"));
1853        ep->end_point.name = ep->name;
1854        INIT_LIST_HEAD(&ep->end_point.ep_list);
1855        if (!epnum) {
1856                ep->end_point.maxpacket = 64;
1857                ep->end_point.ops = &musb_g_ep0_ops;
1858                musb->g.ep0 = &ep->end_point;
1859        } else {
1860                if (is_in)
1861                        ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
1862                else
1863                        ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
1864                ep->end_point.ops = &musb_ep_ops;
1865                list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
1866        }
1867}
1868
1869/*
1870 * Initialize the endpoints exposed to peripheral drivers, with backlinks
1871 * to the rest of the driver state.
1872 */
1873static inline void __devinit musb_g_init_endpoints(struct musb *musb)
1874{
1875        u8                      epnum;
1876        struct musb_hw_ep       *hw_ep;
1877        unsigned                count = 0;
1878
1879        /* initialize endpoint list just once */
1880        INIT_LIST_HEAD(&musb->g.ep_list);
1881
1882        for (epnum = 0, hw_ep = musb->endpoints;
1883                        epnum < musb->nr_endpoints;
1884                        epnum++, hw_ep++) {
1885                if (hw_ep->is_shared_fifo /* || !epnum */) {
1886                        init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
1887                        count++;
1888                } else {
1889                        if (hw_ep->max_packet_sz_tx) {
1890                                init_peripheral_ep(musb, &hw_ep->ep_in,
1891                                                        epnum, 1);
1892                                count++;
1893                        }
1894                        if (hw_ep->max_packet_sz_rx) {
1895                                init_peripheral_ep(musb, &hw_ep->ep_out,
1896                                                        epnum, 0);
1897                                count++;
1898                        }
1899                }
1900        }
1901}
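/*
 * For illustration: the endpoints initialized above appear on
 * musb->g.ep_list, where a function driver claims them during bind.
 * A hedged sketch of picking one by name (error handling omitted):
 */
#if 0
        struct usb_ep *ep;

        list_for_each_entry(ep, &gadget->ep_list, ep_list) {
                if (!strcmp(ep->name, "ep1in")) {
                        ep->driver_data = my_function;  /* mark as claimed */
                        break;
                }
        }
#endif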
1902
1903/* called once during driver setup to initialize and link into
1904 * the driver model; memory is zeroed.
1905 */
1906int __devinit musb_gadget_setup(struct musb *musb)
1907{
1908        int status;
1909
1910        /* REVISIT minor race:  if (erroneously) setting up two
1911         * musb peripherals at the same time, only the bus lock
1912         * is probably held.
1913         */
1914
1915        musb->g.ops = &musb_gadget_operations;
1916#ifndef __UBOOT__
1917        musb->g.max_speed = USB_SPEED_HIGH;
1918#endif
1919        musb->g.speed = USB_SPEED_UNKNOWN;
1920
1921#ifndef __UBOOT__
1922        /* this "gadget" abstracts/virtualizes the controller */
1923        dev_set_name(&musb->g.dev, "gadget");
1924        musb->g.dev.parent = musb->controller;
1925        musb->g.dev.dma_mask = musb->controller->dma_mask;
1926        musb->g.dev.release = musb_gadget_release;
1927#endif
1928        musb->g.name = musb_driver_name;
1929
1930#ifndef __UBOOT__
1931        if (is_otg_enabled(musb))
1932                musb->g.is_otg = 1;
1933#endif
1934
1935        musb_g_init_endpoints(musb);
1936
1937        musb->is_active = 0;
1938        musb_platform_try_idle(musb, 0);
1939
1940#ifndef __UBOOT__
1941        status = device_register(&musb->g.dev);
1942        if (status != 0) {
1943                put_device(&musb->g.dev);
1944                return status;
1945        }
1946        status = usb_add_gadget_udc(musb->controller, &musb->g);
1947        if (status)
1948                goto err;
1949#endif
1950
1951        return 0;
1952#ifndef __UBOOT__
1953err:
1954        musb->g.dev.parent = NULL;
1955        device_unregister(&musb->g.dev);
1956        return status;
1957#endif
1958}
1959
1960void musb_gadget_cleanup(struct musb *musb)
1961{
1962#ifndef __UBOOT__
1963        usb_del_gadget_udc(&musb->g);
1964        if (musb->g.dev.parent)
1965                device_unregister(&musb->g.dev);
1966#endif
1967}
1968
1969/*
1970 * Register the gadget driver. Used by gadget drivers when
1971 * registering themselves with the controller.
1972 *
1973 * -EINVAL something went wrong (e.g. unsupported driver speed)
1974 * -EBUSY another gadget is already using the controller
1975 * -ENOMEM no memory to perform the operation
1976 *
1977 * @param driver the gadget driver
1978 * @return <0 if error, 0 if everything is fine
1979 */
1980#ifndef __UBOOT__
1981static int musb_gadget_start(struct usb_gadget *g,
1982                struct usb_gadget_driver *driver)
1983#else
1984int musb_gadget_start(struct usb_gadget *g,
1985                struct usb_gadget_driver *driver)
1986#endif
1987{
1988        struct musb             *musb = gadget_to_musb(g);
1989#ifndef __UBOOT__
1990        struct usb_otg          *otg = musb->xceiv->otg;
1991#endif
1992        unsigned long           flags;
1993        int                     retval = -EINVAL;
1994
1995#ifndef __UBOOT__
1996        if (driver->max_speed < USB_SPEED_HIGH)
1997                goto err0;
1998#endif
1999
2000        pm_runtime_get_sync(musb->controller);
2001
2002#ifndef __UBOOT__
2003        dev_dbg(musb->controller, "registering driver %s\n", driver->function);
2004#endif
2005
2006        musb->softconnect = 0;
2007        musb->gadget_driver = driver;
2008
2009        spin_lock_irqsave(&musb->lock, flags);
2010        musb->is_active = 1;
2011
2012#ifndef __UBOOT__
2013        otg_set_peripheral(otg, &musb->g);
2014        musb->xceiv->state = OTG_STATE_B_IDLE;
2015
2016        /*
2017         * FIXME this ignores the softconnect flag.  Drivers are
2018         * allowed to hold the peripheral inactive until, for example,
2019         * userspace hooks up printer hardware or DSP codecs, so
2020         * hosts only see fully functional devices.
2021         */
2022
2023        if (!is_otg_enabled(musb))
2024#endif
2025                musb_start(musb);
2026
2027        spin_unlock_irqrestore(&musb->lock, flags);
2028
2029#ifndef __UBOOT__
2030        if (is_otg_enabled(musb)) {
2031                struct usb_hcd  *hcd = musb_to_hcd(musb);
2032
2033                dev_dbg(musb->controller, "OTG startup...\n");
2034
2035                /* REVISIT:  funcall to other code, which also
2036                 * handles power budgeting ... this way also
2037                 * ensures HdrcStart is indirectly called.
2038                 */
2039                retval = usb_add_hcd(hcd, 0, 0);
2040                if (retval < 0) {
2041                        dev_dbg(musb->controller, "add_hcd failed, %d\n", retval);
2042                        goto err2;
2043                }
2044
2045                if ((musb->xceiv->last_event == USB_EVENT_ID)
2046                                        && otg->set_vbus)
2047                        otg_set_vbus(otg, 1);
2048
2049                hcd->self.uses_pio_for_control = 1;
2050        }
2051        if (musb->xceiv->last_event == USB_EVENT_NONE)
2052                pm_runtime_put(musb->controller);
2053#endif
2054
2055        return 0;
2056
2057#ifndef __UBOOT__
2058err2:
2059        if (!is_otg_enabled(musb))
2060                musb_stop(musb);
2061err0:
2062        return retval;
2063#endif
2064}
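/*
 * For illustration: in the U-Boot build musb_gadget_start() is exported
 * directly (see the #ifndef above), so gadget or board code binds a
 * driver roughly like this; "example_driver" is hypothetical:
 */
#if 0
        ret = musb_gadget_start(&musb->g, &example_driver);
        if (ret)
                printf("binding gadget driver failed: %d\n", ret);
#endif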
2065
2066#ifndef __UBOOT__
2067static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
2068{
2069        int                     i;
2070        struct musb_hw_ep       *hw_ep;
2071
2072        /* don't disconnect if it's not connected */
2073        if (musb->g.speed == USB_SPEED_UNKNOWN)
2074                driver = NULL;
2075        else
2076                musb->g.speed = USB_SPEED_UNKNOWN;
2077
2078        /* deactivate the hardware */
2079        if (musb->softconnect) {
2080                musb->softconnect = 0;
2081                musb_pullup(musb, 0);
2082        }
2083        musb_stop(musb);
2084
2085        /* killing any outstanding requests will quiesce the driver;
2086         * then report disconnect
2087         */
2088        if (driver) {
2089                for (i = 0, hw_ep = musb->endpoints;
2090                                i < musb->nr_endpoints;
2091                                i++, hw_ep++) {
2092                        musb_ep_select(musb->mregs, i);
2093                        if (hw_ep->is_shared_fifo /* || !epnum */) {
2094                                nuke(&hw_ep->ep_in, -ESHUTDOWN);
2095                        } else {
2096                                if (hw_ep->max_packet_sz_tx)
2097                                        nuke(&hw_ep->ep_in, -ESHUTDOWN);
2098                                if (hw_ep->max_packet_sz_rx)
2099                                        nuke(&hw_ep->ep_out, -ESHUTDOWN);
2100                        }
2101                }
2102        }
2103}
2104
2105/*
2106 * Unregister the gadget driver. Used by gadget drivers when
2107 * unregistering themselves from the controller.
2108 *
2109 * @param driver the gadget driver to unregister
2110 */
2111static int musb_gadget_stop(struct usb_gadget *g,
2112                struct usb_gadget_driver *driver)
2113{
2114        struct musb     *musb = gadget_to_musb(g);
2115        unsigned long   flags;
2116
2117        if (musb->xceiv->last_event == USB_EVENT_NONE)
2118                pm_runtime_get_sync(musb->controller);
2119
2120        /*
2121         * REVISIT always use otg_set_peripheral() here too;
2122         * this needs to shut down the OTG engine.
2123         */
2124
2125        spin_lock_irqsave(&musb->lock, flags);
2126
2127        musb_hnp_stop(musb);
2128
2129        (void) musb_gadget_vbus_draw(&musb->g, 0);
2130
2131        musb->xceiv->state = OTG_STATE_UNDEFINED;
2132        stop_activity(musb, driver);
2133        otg_set_peripheral(musb->xceiv->otg, NULL);
2134
2135        dev_dbg(musb->controller, "unregistering driver %s\n", driver->function);
2136
2137        musb->is_active = 0;
2138        musb_platform_try_idle(musb, 0);
2139        spin_unlock_irqrestore(&musb->lock, flags);
2140
2141        if (is_otg_enabled(musb)) {
2142                usb_remove_hcd(musb_to_hcd(musb));
2143                /* FIXME we need to be able to register another
2144                 * gadget driver here and have everything work;
2145                 * that currently misbehaves.
2146                 */
2147        }
2148
2149        if (!is_otg_enabled(musb))
2150                musb_stop(musb);
2151
2152        pm_runtime_put(musb->controller);
2153
2154        return 0;
2155}
2156#endif
2157
2158/* ----------------------------------------------------------------------- */
2159
2160/* lifecycle operations called through plat_uds.c */
2161
2162void musb_g_resume(struct musb *musb)
2163{
2164#ifndef __UBOOT__
2165        musb->is_suspended = 0;
2166        switch (musb->xceiv->state) {
2167        case OTG_STATE_B_IDLE:
2168                break;
2169        case OTG_STATE_B_WAIT_ACON:
2170        case OTG_STATE_B_PERIPHERAL:
2171                musb->is_active = 1;
2172                if (musb->gadget_driver && musb->gadget_driver->resume) {
2173                        spin_unlock(&musb->lock);
2174                        musb->gadget_driver->resume(&musb->g);
2175                        spin_lock(&musb->lock);
2176                }
2177                break;
2178        default:
2179                WARNING("unhandled RESUME transition (%s)\n",
2180                                otg_state_string(musb->xceiv->state));
2181        }
2182#endif
2183}
2184
2185/* called when SOF packets stop for 3+ msec */
2186void musb_g_suspend(struct musb *musb)
2187{
2188#ifndef __UBOOT__
2189        u8      devctl;
2190
2191        devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2192        dev_dbg(musb->controller, "devctl %02x\n", devctl);
2193
2194        switch (musb->xceiv->state) {
2195        case OTG_STATE_B_IDLE:
2196                if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
2197                        musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
2198                break;
2199        case OTG_STATE_B_PERIPHERAL:
2200                musb->is_suspended = 1;
2201                if (musb->gadget_driver && musb->gadget_driver->suspend) {
2202                        spin_unlock(&musb->lock);
2203                        musb->gadget_driver->suspend(&musb->g);
2204                        spin_lock(&musb->lock);
2205                }
2206                break;
2207        default:
2208                /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
2209                 * A_PERIPHERAL may need care too
2210                 */
2211                WARNING("unhandled SUSPEND transition (%s)\n",
2212                                otg_state_string(musb->xceiv->state));
2213        }
2214#endif
2215}
2216
2217/* Called during SRP */
2218void musb_g_wakeup(struct musb *musb)
2219{
2220        musb_gadget_wakeup(&musb->g);
2221}
2222
2223/* called when VBUS drops below session threshold, and in other cases */
2224void musb_g_disconnect(struct musb *musb)
2225{
2226        void __iomem    *mregs = musb->mregs;
2227        u8      devctl = musb_readb(mregs, MUSB_DEVCTL);
2228
2229        dev_dbg(musb->controller, "devctl %02x\n", devctl);
2230
2231        /* clear HR */
2232        musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
2233
2234        /* don't draw vbus until new b-default session */
2235        (void) musb_gadget_vbus_draw(&musb->g, 0);
2236
2237        musb->g.speed = USB_SPEED_UNKNOWN;
2238        if (musb->gadget_driver && musb->gadget_driver->disconnect) {
2239                spin_unlock(&musb->lock);
2240                musb->gadget_driver->disconnect(&musb->g);
2241                spin_lock(&musb->lock);
2242        }
2243
2244#ifndef __UBOOT__
2245        switch (musb->xceiv->state) {
2246        default:
2247                dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
2248                        otg_state_string(musb->xceiv->state));
2249                musb->xceiv->state = OTG_STATE_A_IDLE;
2250                MUSB_HST_MODE(musb);
2251                break;
2252        case OTG_STATE_A_PERIPHERAL:
2253                musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
2254                MUSB_HST_MODE(musb);
2255                break;
2256        case OTG_STATE_B_WAIT_ACON:
2257        case OTG_STATE_B_HOST:
2258        case OTG_STATE_B_PERIPHERAL:
2259        case OTG_STATE_B_IDLE:
2260                musb->xceiv->state = OTG_STATE_B_IDLE;
2261                break;
2262        case OTG_STATE_B_SRP_INIT:
2263                break;
2264        }
2265#endif
2266
2267        musb->is_active = 0;
2268}
2269
2270void musb_g_reset(struct musb *musb)
2271__releases(musb->lock)
2272__acquires(musb->lock)
2273{
2274        void __iomem    *mbase = musb->mregs;
2275        u8              devctl = musb_readb(mbase, MUSB_DEVCTL);
2276        u8              power;
2277
2278#ifndef __UBOOT__
2279        dev_dbg(musb->controller, "<== %s addr=%x driver '%s'\n",
2280                        (devctl & MUSB_DEVCTL_BDEVICE)
2281                                ? "B-Device" : "A-Device",
2282                        musb_readb(mbase, MUSB_FADDR),
2283                        musb->gadget_driver
2284                                ? musb->gadget_driver->driver.name
2285                                : NULL
2286                        );
2287#endif
2288
2289        /* report disconnect, if we didn't already (flushing EP state) */
2290        if (musb->g.speed != USB_SPEED_UNKNOWN)
2291                musb_g_disconnect(musb);
2292
2293        /* clear HR */
2294        else if (devctl & MUSB_DEVCTL_HR)
2295                musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
2296
2297
2298        /* what speed did we negotiate? */
2299        power = musb_readb(mbase, MUSB_POWER);
2300        musb->g.speed = (power & MUSB_POWER_HSMODE)
2301                        ? USB_SPEED_HIGH : USB_SPEED_FULL;
2302
2303        /* start in USB_STATE_DEFAULT */
2304        musb->is_active = 1;
2305        musb->is_suspended = 0;
2306        MUSB_DEV_MODE(musb);
2307        musb->address = 0;
2308        musb->ep0_state = MUSB_EP0_STAGE_SETUP;
2309
2310        musb->may_wakeup = 0;
2311        musb->g.b_hnp_enable = 0;
2312        musb->g.a_alt_hnp_support = 0;
2313        musb->g.a_hnp_support = 0;
2314
2315#ifndef __UBOOT__
2316        /* Normal reset, as B-Device;
2317         * or else after HNP, as A-Device
2318         */
2319        if (devctl & MUSB_DEVCTL_BDEVICE) {
2320                musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
2321                musb->g.is_a_peripheral = 0;
2322        } else if (is_otg_enabled(musb)) {
2323                musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
2324                musb->g.is_a_peripheral = 1;
2325        } else
2326                WARN_ON(1);
2327
2328        /* start with default limits on VBUS power draw */
2329        (void) musb_gadget_vbus_draw(&musb->g,
2330                        is_otg_enabled(musb) ? 8 : 100);
2331#endif
2332}
2333