linux/drivers/usb/musb/musb_gadget.c
/*
 * MUSB OTG driver peripheral support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "musb_core.h"


/* ----------------------------------------------------------------------- */

#define is_buffer_mapped(req) (is_dma_capable() && \
					(req->map_state != UN_MAPPED))

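/*
 * A note on request mapping state (a summary inferred from the code
 * below; the enum itself lives alongside struct musb_request):
 * UN_MAPPED means no DMA mapping is active, MUSB_MAPPED means this
 * driver created the mapping and must unmap it at giveback time, and
 * PRE_MAPPED means the gadget driver supplied an already-mapped
 * buffer, so we only sync it for the device or the CPU.
 */
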
/* Map the buffer for DMA */

static inline void map_dma_buffer(struct musb_request *request,
			struct musb *musb, struct musb_ep *musb_ep)
{
	int compatible = true;
	struct dma_controller *dma = musb->dma_controller;

	request->map_state = UN_MAPPED;

	if (!is_dma_capable() || !musb_ep->dma)
		return;

	/* Check if DMA engine can handle this request.
	 * DMA code must reject the USB request explicitly.
	 * Default behaviour is to map the request.
	 */
	if (dma->is_compatible)
		compatible = dma->is_compatible(musb_ep->dma,
				musb_ep->packet_sz, request->request.buf,
				request->request.length);
	if (!compatible)
		return;

	if (request->request.dma == DMA_ADDR_INVALID) {
		request->request.dma = dma_map_single(
				musb->controller,
				request->request.buf,
				request->request.length,
				request->tx
					? DMA_TO_DEVICE
					: DMA_FROM_DEVICE);
		request->map_state = MUSB_MAPPED;
	} else {
		dma_sync_single_for_device(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		request->map_state = PRE_MAPPED;
	}
}
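
/*
 * Example (a sketch, not part of this driver): a gadget driver that
 * manages its own streaming mapping can hand in a pre-mapped buffer,
 * which takes the PRE_MAPPED branch above and is only synced here:
 *
 *	req->dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	req->buf = buf;
 *	req->length = len;
 *	usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * Leaving req->dma as DMA_ADDR_INVALID instead makes this driver own
 * the mapping (MUSB_MAPPED) and unmap it when the request completes.
 */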

/* Unmap the buffer from DMA and map it back to the CPU */
static inline void unmap_dma_buffer(struct musb_request *request,
				struct musb *musb)
{
	struct musb_ep *musb_ep = request->ep;

	if (!is_buffer_mapped(request) || !musb_ep->dma)
		return;

	if (request->request.dma == DMA_ADDR_INVALID) {
		dev_vdbg(musb->controller,
				"not unmapping a never mapped buffer\n");
		return;
	}
	if (request->map_state == MUSB_MAPPED) {
		dma_unmap_single(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		request->request.dma = DMA_ADDR_INVALID;
	} else { /* PRE_MAPPED */
		dma_sync_single_for_cpu(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
	}
	request->map_state = UN_MAPPED;
}
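
/*
 * For the PRE_MAPPED case, dma_sync_single_for_cpu() hands buffer
 * ownership back to the CPU so a completion handler can safely read
 * received data; the gadget driver that created the mapping stays
 * responsible for the eventual dma_unmap_single().
 */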

/*
 * Immediately complete a request.
 *
 * @param ep the endpoint whose queue holds the request
 * @param request the request to complete
 * @param status the status to complete the request with
 * Context: controller locked, IRQs blocked.
 */
void musb_g_giveback(
	struct musb_ep		*ep,
	struct usb_request	*request,
	int			status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
	struct musb_request	*req;
	struct musb		*musb;
	int			busy = ep->busy;

	req = to_musb_request(request);

	list_del(&req->list);
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;
	musb = req->musb;

	ep->busy = 1;
	spin_unlock(&musb->lock);

	if (!dma_mapping_error(&musb->g.dev, request->dma))
		unmap_dma_buffer(req, musb);

	if (request->status == 0)
		dev_dbg(musb->controller, "%s done request %p,  %d/%d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length);
	else
		dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length,
				request->status);
	req->request.complete(&req->ep->end_point, &req->request);
	spin_lock(&musb->lock);
	ep->busy = busy;
}
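
/*
 * Example (a sketch with illustrative names): because the lock is
 * dropped around complete(), a gadget's completion handler may legally
 * requeue from inside the callback without deadlocking:
 *
 *	static void loopback_complete(struct usb_ep *ep,
 *			struct usb_request *req)
 *	{
 *		if (req->status == 0)
 *			usb_ep_queue(ep, req, GFP_ATOMIC);
 *	}
 */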

/* ----------------------------------------------------------------------- */

/*
 * Abort requests queued to an endpoint, using the given status.
 * Synchronous: the caller has locked the controller, blocked IRQs, and
 * selected this endpoint.
 */
static void nuke(struct musb_ep *ep, const int status)
{
	struct musb		*musb = ep->musb;
	struct musb_request	*req = NULL;
	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;

	ep->busy = 1;

	if (is_dma_capable() && ep->dma) {
		struct dma_controller	*c = ep->musb->dma_controller;
		int value;

		if (ep->is_in) {
			/*
			 * The programming guide says that we must not clear
			 * the DMAMODE bit before DMAENAB, so we only
			 * clear it in the second write...
			 */
			musb_writew(epio, MUSB_TXCSR,
				    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_TXCSR,
					0 | MUSB_TXCSR_FLUSHFIFO);
		} else {
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
		}
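
		/*
		 * Note (inferred from the "set twice in case of double
		 * buffering" pattern used elsewhere in this file):
		 * FLUSHFIFO is written twice so that, with double
		 * buffering enabled, both queued packets get flushed,
		 * one per write.
		 */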

		value = c->channel_abort(ep->dma);
		dev_dbg(musb->controller, "%s: abort DMA --> %d\n",
				ep->name, value);
		c->channel_release(ep->dma);
		ep->dma = NULL;
	}

	while (!list_empty(&ep->req_list)) {
		req = list_first_entry(&ep->req_list, struct musb_request, list);
		musb_g_giveback(ep, &req->request, status);
	}
}

/* ----------------------------------------------------------------------- */

/* Data transfers - pure PIO, pure DMA, or mixed mode */

/*
 * This assumes the separate CPPI engine is responding to DMA requests
 * from the usb core ... sequenced a bit differently from mentor dma.
 */

static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
	if (can_bulk_split(musb, ep->type))
		return ep->hw_ep->max_packet_sz_tx;
	else
		return ep->packet_sz;
}

/*
 * An endpoint is transmitting data. This can be called either from
 * the IRQ routine or from ep.queue() to kickstart a request on an
 * endpoint.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void txstate(struct musb *musb, struct musb_request *req)
{
	u8			epnum = req->epnum;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct usb_request	*request;
	u16			fifo_count = 0, csr;
	int			use_dma = 0;

	musb_ep = req->ep;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
						musb_ep->end_point.name);
		return;
	}

	/* we shouldn't get here while DMA is active ... but we do ... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "dma pending...\n");
		return;
	}

	/* read TXCSR before */
	csr = musb_readw(epio, MUSB_TXCSR);

	request = &req->request;
	fifo_count = min(max_ep_writesize(musb, musb_ep),
			(int)(request->length - request->actual));

	if (csr & MUSB_TXCSR_TXPKTRDY) {
		dev_dbg(musb->controller, "%s old packet still ready, txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_SENDSTALL) {
		dev_dbg(musb->controller, "%s stalling, txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
			epnum, musb_ep->packet_sz, fifo_count,
			csr);

#ifndef CONFIG_MUSB_PIO_ONLY
	if (is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		size_t request_size;

		/* setup DMA, then program endpoint CSR */
		request_size = min_t(size_t, request->length - request->actual,
					musb_ep->dma->max_len);

		use_dma = (request->dma != DMA_ADDR_INVALID && request_size);

		/* MUSB_TXCSR_P_ISO is still set correctly */

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
		{
			if (request_size < musb_ep->packet_sz)
				musb_ep->dma->desired_mode = 0;
			else
				musb_ep->dma->desired_mode = 1;

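			/*
			 * Mentor DMA background (a summary, not from the
			 * original sources): mode 0 moves at most one
			 * packet per channel_program() call and
			 * interrupts per packet, while mode 1 streams a
			 * whole multi-packet transfer, which is why a
			 * short final packet needs the manual TXPKTRDY
			 * handling in musb_g_tx().
			 */
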
			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					musb_ep->dma->desired_mode,
					request->dma + request->actual, request_size);
			if (use_dma) {
				if (musb_ep->dma->desired_mode == 0) {
					/*
					 * We must not clear the DMAMODE bit
					 * before the DMAENAB bit -- and the
					 * latter doesn't always get cleared
					 * before we get here...
					 */
					csr &= ~(MUSB_TXCSR_AUTOSET
						| MUSB_TXCSR_DMAENAB);
					musb_writew(epio, MUSB_TXCSR, csr
						| MUSB_TXCSR_P_WZC_BITS);
					csr &= ~MUSB_TXCSR_DMAMODE;
					csr |= (MUSB_TXCSR_DMAENAB |
							MUSB_TXCSR_MODE);
					/* against programming guide */
				} else {
					csr |= (MUSB_TXCSR_DMAENAB
							| MUSB_TXCSR_DMAMODE
							| MUSB_TXCSR_MODE);
					/*
					 * Enable Autoset according to table
					 * below
					 * bulk_split hb_mult	Autoset_Enable
					 *	0	0	Yes(Normal)
					 *	0	>0	No(High BW ISO)
					 *	1	0	Yes(HS bulk)
					 *	1	>0	Yes(FS bulk)
					 */
					if (!musb_ep->hb_mult ||
						(musb_ep->hb_mult &&
						 can_bulk_split(musb,
						    musb_ep->type)))
						csr |= MUSB_TXCSR_AUTOSET;
				}
				csr &= ~MUSB_TXCSR_P_UNDERRUN;

				musb_writew(epio, MUSB_TXCSR, csr);
			}
		}

#elif defined(CONFIG_USB_TI_CPPI_DMA)
		/* program endpoint CSR first, then setup DMA */
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
		       MUSB_TXCSR_MODE;
		musb_writew(epio, MUSB_TXCSR,
			(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
				| csr);

		/* ensure writebuffer is empty */
		csr = musb_readw(epio, MUSB_TXCSR);

		/* NOTE host side sets DMAENAB later than this; both are
		 * OK since the transfer dma glue (between CPPI and Mentor
		 * fifos) just tells CPPI it could start.  Data only moves
		 * to the USB TX fifo when both fifos are ready.
		 */

		/* "mode" is irrelevant here; handle terminating ZLPs like
		 * PIO does, since the hardware RNDIS mode seems unreliable
		 * except for the last-packet-is-already-short case.
		 */
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				0,
				request->dma + request->actual,
				request_size);
		if (!use_dma) {
			c->channel_release(musb_ep->dma);
			musb_ep->dma = NULL;
			csr &= ~MUSB_TXCSR_DMAENAB;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* invariant: request->buf is non-null */
		}
#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				request->zero,
				request->dma + request->actual,
				request_size);
#endif
	}
#endif

	if (!use_dma) {
		/*
		 * Unmap the dma buffer back to cpu if dma channel
		 * programming fails
		 */
		unmap_dma_buffer(req, musb);

		musb_write_fifo(musb_ep->hw_ep, fifo_count,
				(u8 *) (request->buf + request->actual));
		request->actual += fifo_count;
		csr |= MUSB_TXCSR_TXPKTRDY;
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(epio, MUSB_TXCSR, csr);
	}

	/* host may already have the data when this message shows... */
	dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
			musb_ep->end_point.name, use_dma ? "dma" : "pio",
			request->actual, request->length,
			musb_readw(epio, MUSB_TXCSR),
			fifo_count,
			musb_readw(epio, MUSB_TXMAXP));
}

/*
 * FIFO state update (e.g. data ready).
 * Called from IRQ, with controller locked.
 */
void musb_g_tx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	u8 __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);
	req = next_request(musb_ep);
	request = &req->request;

	csr = musb_readw(epio, MUSB_TXCSR);
	dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);

	dma = is_dma_capable() ? musb_ep->dma : NULL;

	/*
	 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
	 * probably rates reporting as a host error.
	 */
	if (csr & MUSB_TXCSR_P_SENTSTALL) {
		csr |= MUSB_TXCSR_P_WZC_BITS;
		csr &= ~MUSB_TXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_TXCSR, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_UNDERRUN) {
		/* We NAKed, no big deal... little reason to care. */
		csr |= MUSB_TXCSR_P_WZC_BITS;
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
				epnum, request);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/*
		 * SHOULD NOT HAPPEN... has with CPPI though, after
		 * changing SENDSTALL (and other cases); harmless?
		 */
		dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
		return;
	}

	if (request) {
		u8	is_dma = 0;

		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
			is_dma = 1;
			csr |= MUSB_TXCSR_P_WZC_BITS;
			csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
				 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
			musb_writew(epio, MUSB_TXCSR, csr);
			/* Ensure writebuffer is empty. */
			csr = musb_readw(epio, MUSB_TXCSR);
			request->actual += musb_ep->dma->actual_len;
			dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
				epnum, csr, musb_ep->dma->actual_len, request);
		}

		/*
		 * First, maybe a terminating short packet. Some DMA
		 * engines might handle this by themselves.
		 */
		if ((request->zero && request->length
			&& (request->length % musb_ep->packet_sz == 0)
			&& (request->actual == request->length))
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
			|| (is_dma && (!dma->desired_mode ||
				(request->actual &
					(musb_ep->packet_sz - 1))))
#endif
		) {
			/*
			 * On DMA completion, FIFO may not be
			 * available yet...
			 */
			if (csr & MUSB_TXCSR_TXPKTRDY)
				return;

			dev_dbg(musb->controller, "sending zero pkt\n");
			musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
					| MUSB_TXCSR_TXPKTRDY);
			request->zero = 0;
		}

		if (request->actual == request->length) {
			musb_g_giveback(musb_ep, request, 0);
			/*
			 * In the giveback function the MUSB lock is
			 * released and acquired after some time. During
			 * this time period the INDEX register could get
			 * changed by the gadget_queue function especially
			 * on SMP systems. Reselect the INDEX to be sure
			 * we are reading/modifying the right registers
			 */
			musb_ep_select(mbase, epnum);
			req = musb_ep->desc ? next_request(musb_ep) : NULL;
			if (!req) {
				dev_dbg(musb->controller, "%s idle now\n",
					musb_ep->end_point.name);
				return;
			}
		}

		txstate(musb, req);
	}
}

/* ------------------------------------------------------------ */

/*
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
	const u8		epnum = req->epnum;
	struct usb_request	*request = &req->request;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	unsigned		len = 0;
	u16			fifo_count;
	u16			csr = musb_readw(epio, MUSB_RXCSR);
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
	u8			use_mode_1;

	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	fifo_count = musb_ep->packet_sz;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
						musb_ep->end_point.name);
		return;
	}

	/* We shouldn't get here while DMA is active, but we do... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "DMA pending...\n");
		return;
	}

	if (csr & MUSB_RXCSR_P_SENDSTALL) {
		dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n",
		    musb_ep->end_point.name, csr);
		return;
	}

	if (is_cppi_enabled() && is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		struct dma_channel	*channel = musb_ep->dma;

		/* NOTE:  CPPI won't actually stop advancing the DMA
		 * queue after short packet transfers, so this is almost
		 * always going to run as IRQ-per-packet DMA so that
		 * faults will be handled correctly.
		 */
		if (c->channel_program(channel,
				musb_ep->packet_sz,
				!request->short_not_ok,
				request->dma + request->actual,
				request->length - request->actual)) {

			/* make sure that if an rxpkt arrived after the irq,
			 * the cppi engine will be ready to take it as soon
			 * as DMA is enabled
			 */
			csr &= ~(MUSB_RXCSR_AUTOCLEAR
					| MUSB_RXCSR_DMAMODE);
			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
			musb_writew(epio, MUSB_RXCSR, csr);
			return;
		}
	}

	if (csr & MUSB_RXCSR_RXPKTRDY) {
		fifo_count = musb_readw(epio, MUSB_RXCOUNT);

		/*
		 * Enable Mode 1 on RX transfers only when short_not_ok flag
		 * is set. Currently short_not_ok flag is set only from
		 * file_storage and f_mass_storage drivers
		 */

		if (request->short_not_ok && fifo_count == musb_ep->packet_sz)
			use_mode_1 = 1;
		else
			use_mode_1 = 0;

		if (request->actual < request->length) {
#ifdef CONFIG_USB_INVENTRA_DMA
			if (is_buffer_mapped(req)) {
				struct dma_controller	*c;
				struct dma_channel	*channel;
				int			use_dma = 0;
				unsigned int transfer_size;

				c = musb->dma_controller;
				channel = musb_ep->dma;

	/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
	 * mode 0 only. So we do not get endpoint interrupts due to DMA
	 * completion. We only get interrupts from DMA controller.
	 *
	 * We could operate in DMA mode 1 if we knew the size of the transfer
	 * in advance. For mass storage class, request->length = what the host
	 * sends, so that'd work.  But for pretty much everything else,
	 * request->length is routinely more than what the host sends. For
	 * most of these gadgets, the end of the transfer is signified either
	 * by a short packet, or by filling the last byte of the buffer.
	 * (Sending extra data in that last packet should trigger an overflow
	 * fault.)  But in mode 1, we don't get DMA completion interrupt for
	 * short packets.
	 *
	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
	 * to get endpoint interrupt on every DMA req, but that didn't seem
	 * to work reliably.
	 *
	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
	 * then becomes usable as a runtime "use mode 1" hint...
	 */

				/* Experimental: Mode1 works with mass storage use cases */
				if (use_mode_1) {
					csr |= MUSB_RXCSR_AUTOCLEAR;
					musb_writew(epio, MUSB_RXCSR, csr);
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);

					/*
					 * this special sequence (enabling and then
					 * disabling MUSB_RXCSR_DMAMODE) is required
					 * to get DMAReq to activate
					 */
					musb_writew(epio, MUSB_RXCSR,
						csr | MUSB_RXCSR_DMAMODE);
					musb_writew(epio, MUSB_RXCSR, csr);

					transfer_size = min_t(unsigned int,
							request->length -
							request->actual,
							channel->max_len);
					musb_ep->dma->desired_mode = 1;
				} else {
					if (!musb_ep->hb_mult &&
						musb_ep->hw_ep->rx_double_buffered)
						csr |= MUSB_RXCSR_AUTOCLEAR;
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);

					transfer_size = min(request->length - request->actual,
							(unsigned)fifo_count);
					musb_ep->dma->desired_mode = 0;
				}

				use_dma = c->channel_program(
						channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						request->dma
						+ request->actual,
						transfer_size);

				if (use_dma)
					return;
			}
#elif defined(CONFIG_USB_UX500_DMA)
			if ((is_buffer_mapped(req)) &&
				(request->actual < request->length)) {

				struct dma_controller *c;
				struct dma_channel *channel;
				unsigned int transfer_size = 0;

				c = musb->dma_controller;
				channel = musb_ep->dma;

				/* In case first packet is short */
				if (fifo_count < musb_ep->packet_sz)
					transfer_size = fifo_count;
				else if (request->short_not_ok)
					transfer_size = min_t(unsigned int,
							request->length -
							request->actual,
							channel->max_len);
				else
					transfer_size = min_t(unsigned int,
							request->length -
							request->actual,
							(unsigned)fifo_count);

				csr &= ~MUSB_RXCSR_DMAMODE;
				csr |= (MUSB_RXCSR_DMAENAB |
					MUSB_RXCSR_AUTOCLEAR);

				musb_writew(epio, MUSB_RXCSR, csr);

				if (transfer_size <= musb_ep->packet_sz) {
					musb_ep->dma->desired_mode = 0;
				} else {
					musb_ep->dma->desired_mode = 1;
					/* Mode must be set after DMAENAB */
					csr |= MUSB_RXCSR_DMAMODE;
					musb_writew(epio, MUSB_RXCSR, csr);
				}

				if (c->channel_program(channel,
							musb_ep->packet_sz,
							channel->desired_mode,
							request->dma
							+ request->actual,
							transfer_size))

					return;
			}
#endif	/* Mentor's DMA */

			len = request->length - request->actual;
			dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
					musb_ep->end_point.name,
					fifo_count, len,
					musb_ep->packet_sz);

			fifo_count = min_t(unsigned, len, fifo_count);

#ifdef	CONFIG_USB_TUSB_OMAP_DMA
			if (tusb_dma_omap() && is_buffer_mapped(req)) {
				struct dma_controller *c = musb->dma_controller;
				struct dma_channel *channel = musb_ep->dma;
				u32 dma_addr = request->dma + request->actual;
				int ret;

				ret = c->channel_program(channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						dma_addr,
						fifo_count);
				if (ret)
					return;
			}
#endif
			/*
			 * Unmap the dma buffer back to cpu if dma channel
			 * programming fails. This buffer is mapped if the
			 * channel allocation is successful
			 */
			if (is_buffer_mapped(req)) {
				unmap_dma_buffer(req, musb);

				/*
				 * Clear DMAENAB and AUTOCLEAR for the
				 * PIO mode transfer
				 */
				csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
				musb_writew(epio, MUSB_RXCSR, csr);
			}

			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
					(request->buf + request->actual));
			request->actual += fifo_count;

			/* REVISIT if we left anything in the fifo, flush
			 * it and report -EOVERFLOW
			 */

			/* ack the read! */
			csr |= MUSB_RXCSR_P_WZC_BITS;
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	/* reached the end, or short packet detected */
	if (request->actual == request->length ||
	    fifo_count < musb_ep->packet_sz)
		musb_g_giveback(musb_ep, request, 0);
}
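
/*
 * Summary (an editorial note, not from the original sources): RX DMA
 * mode 1 is only worthwhile when the gadget has promised there will be
 * no short packets (short_not_ok), because mode 1 raises no DMA
 * completion interrupt for a short packet; everything else falls back
 * to IRQ-per-packet mode 0 or plain PIO, as the code above shows.
 */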

/*
 * Data ready for a request; called from IRQ
 */
void musb_g_rx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	void __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];

	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	musb_ep_select(mbase, epnum);

	req = next_request(musb_ep);
	if (!req)
		return;

	request = &req->request;

	csr = musb_readw(epio, MUSB_RXCSR);
	dma = is_dma_capable() ? musb_ep->dma : NULL;

	dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
			csr, dma ? " (dma)" : "", request);

	if (csr & MUSB_RXCSR_P_SENTSTALL) {
		csr |= MUSB_RXCSR_P_WZC_BITS;
		csr &= ~MUSB_RXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_RXCSR, csr);
		return;
	}

	if (csr & MUSB_RXCSR_P_OVERRUN) {
		/* csr |= MUSB_RXCSR_P_WZC_BITS; */
		csr &= ~MUSB_RXCSR_P_OVERRUN;
		musb_writew(epio, MUSB_RXCSR, csr);

		dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request);
		if (request->status == -EINPROGRESS)
			request->status = -EOVERFLOW;
	}
	if (csr & MUSB_RXCSR_INCOMPRX) {
		/* REVISIT not necessarily an error */
		dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/* "should not happen"; likely RXPKTRDY pending for DMA */
		dev_dbg(musb->controller, "%s busy, csr %04x\n",
			musb_ep->end_point.name, csr);
		return;
	}

	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
		csr &= ~(MUSB_RXCSR_AUTOCLEAR
				| MUSB_RXCSR_DMAENAB
				| MUSB_RXCSR_DMAMODE);
		musb_writew(epio, MUSB_RXCSR,
			MUSB_RXCSR_P_WZC_BITS | csr);

		request->actual += musb_ep->dma->actual_len;

		dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
			epnum, csr,
			musb_readw(epio, MUSB_RXCSR),
			musb_ep->dma->actual_len, request);

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
		/* Autoclear doesn't clear RxPktRdy for short packets */
		if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
				|| (dma->actual_len
					& (musb_ep->packet_sz - 1))) {
			/* ack the read! */
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}

		/* incomplete, and not short? wait for next IN packet */
		if ((request->actual < request->length)
				&& (musb_ep->dma->actual_len
					== musb_ep->packet_sz)) {
			/* In double buffer case, continue to unload fifo if
			 * there is Rx packet in FIFO.
			 */
			csr = musb_readw(epio, MUSB_RXCSR);
			if ((csr & MUSB_RXCSR_RXPKTRDY) &&
				hw_ep->rx_double_buffered)
				goto exit;
			return;
		}
#endif
		musb_g_giveback(musb_ep, request, 0);
		/*
		 * In the giveback function the MUSB lock is
		 * released and acquired after some time. During
		 * this time period the INDEX register could get
		 * changed by the gadget_queue function especially
		 * on SMP systems. Reselect the INDEX to be sure
		 * we are reading/modifying the right registers
		 */
		musb_ep_select(mbase, epnum);

		req = next_request(musb_ep);
		if (!req)
			return;
	}
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
exit:
#endif
	/* Analyze request */
	rxstate(musb, req);
}

/* ------------------------------------------------------------ */

static int musb_gadget_enable(struct usb_ep *ep,
			const struct usb_endpoint_descriptor *desc)
{
	unsigned long		flags;
	struct musb_ep		*musb_ep;
	struct musb_hw_ep	*hw_ep;
	void __iomem		*regs;
	struct musb		*musb;
	void __iomem	*mbase;
	u8		epnum;
	u16		csr;
	unsigned	tmp;
	int		status = -EINVAL;

	if (!ep || !desc)
		return -EINVAL;

	musb_ep = to_musb_ep(ep);
	hw_ep = musb_ep->hw_ep;
	regs = hw_ep->regs;
	musb = musb_ep->musb;
	mbase = musb->mregs;
	epnum = musb_ep->current_epnum;

	spin_lock_irqsave(&musb->lock, flags);

	if (musb_ep->desc) {
		status = -EBUSY;
		goto fail;
	}
	musb_ep->type = usb_endpoint_type(desc);

	/* check direction and (later) maxpacket size against endpoint */
	if (usb_endpoint_num(desc) != epnum)
		goto fail;

	/* REVISIT this rules out high bandwidth periodic transfers */
	tmp = usb_endpoint_maxp(desc);
	if (tmp & ~0x07ff) {
		int ok;

		if (usb_endpoint_dir_in(desc))
			ok = musb->hb_iso_tx;
		else
			ok = musb->hb_iso_rx;

		if (!ok) {
			dev_dbg(musb->controller, "no support for high bandwidth ISO\n");
			goto fail;
		}
		musb_ep->hb_mult = (tmp >> 11) & 3;
	} else {
		musb_ep->hb_mult = 0;
	}

	musb_ep->packet_sz = tmp & 0x7ff;
	tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);

	/* enable the interrupts for the endpoint, set the endpoint
	 * packet size (or fail), set the mode, clear the fifo
	 */
	musb_ep_select(mbase, epnum);
	if (usb_endpoint_dir_in(desc)) {

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 1;
		if (!musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_tx) {
			dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
			goto fail;
		}

		musb->intrtxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);

		/* REVISIT if can_bulk_split(), use by updating "tmp";
		 * likewise high bandwidth periodic tx
		 */
		/* Set TXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		if (musb->double_buffer_not_ok) {
			musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
		} else {
			if (can_bulk_split(musb, musb_ep->type))
				musb_ep->hb_mult = (hw_ep->max_packet_sz_tx /
							musb_ep->packet_sz) - 1;
			musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
					| (musb_ep->hb_mult << 11));
		}

		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
		if (musb_readw(regs, MUSB_TXCSR)
				& MUSB_TXCSR_FIFONOTEMPTY)
			csr |= MUSB_TXCSR_FLUSHFIFO;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_TXCSR_P_ISO;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_TXCSR, csr);
		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
		musb_writew(regs, MUSB_TXCSR, csr);

	} else {

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 0;
		if (musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_rx) {
			dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
			goto fail;
		}

		musb->intrrxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRRXE, musb->intrrxe);

		/* REVISIT if can_bulk_combine(), use by updating "tmp";
		 * likewise high bandwidth periodic rx
		 */
		/* Set RXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		if (musb->double_buffer_not_ok)
			musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_rx);
		else
			musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
					| (musb_ep->hb_mult << 11));

		/* force shared fifo to OUT-only mode */
		if (hw_ep->is_shared_fifo) {
			csr = musb_readw(regs, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
			musb_writew(regs, MUSB_TXCSR, csr);
		}

		csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_RXCSR_P_ISO;
		else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
			csr |= MUSB_RXCSR_DISNYET;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_RXCSR, csr);
		musb_writew(regs, MUSB_RXCSR, csr);
	}

	/* NOTE:  all the I/O code _should_ work fine without DMA, in case
	 * for some reason you run out of channels here.
	 */
	if (is_dma_capable() && musb->dma_controller) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep->dma = c->channel_alloc(c, hw_ep,
				(desc->bEndpointAddress & USB_DIR_IN));
	} else
		musb_ep->dma = NULL;

	musb_ep->desc = desc;
	musb_ep->busy = 0;
	musb_ep->wedged = 0;
	status = 0;

	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
			musb_driver_name, musb_ep->end_point.name,
			({ char *s; switch (musb_ep->type) {
			case USB_ENDPOINT_XFER_BULK:	s = "bulk"; break;
			case USB_ENDPOINT_XFER_INT:	s = "int"; break;
			default:			s = "iso"; break;
			}; s; }),
			musb_ep->is_in ? "IN" : "OUT",
			musb_ep->dma ? "dma, " : "",
			musb_ep->packet_sz);

	schedule_work(&musb->irq_work);

fail:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
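
/*
 * Example (a sketch; names like dev->in_ep are illustrative): function
 * drivers reach musb_gadget_enable() through the gadget core, typically
 * from their set_alt() handling:
 *
 *	ret = config_ep_by_speed(cdev->gadget, f, dev->in_ep);
 *	if (ret)
 *		return ret;
 *	ret = usb_ep_enable(dev->in_ep);
 */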

/*
 * Disable an endpoint, flushing all requests queued.
 */
static int musb_gadget_disable(struct usb_ep *ep)
{
	unsigned long	flags;
	struct musb	*musb;
	u8		epnum;
	struct musb_ep	*musb_ep;
	void __iomem	*epio;
	int		status = 0;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;
	epnum = musb_ep->current_epnum;
	epio = musb->endpoints[epnum].regs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(musb->mregs, epnum);

	/* zero the endpoint sizes */
	if (musb_ep->is_in) {
		musb->intrtxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
		musb_writew(epio, MUSB_TXMAXP, 0);
	} else {
		musb->intrrxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
		musb_writew(epio, MUSB_RXMAXP, 0);
	}

	musb_ep->desc = NULL;
	musb_ep->end_point.desc = NULL;

	/* abort all pending DMA and requests */
	nuke(musb_ep, -ESHUTDOWN);

	schedule_work(&musb->irq_work);

	spin_unlock_irqrestore(&musb->lock, flags);

	dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name);

	return status;
}

/*
 * Allocate a request for an endpoint.
 * Reused by ep0 code.
 */
struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct musb		*musb = musb_ep->musb;
	struct musb_request	*request = NULL;

	request = kzalloc(sizeof *request, gfp_flags);
	if (!request) {
		dev_dbg(musb->controller, "not enough memory\n");
		return NULL;
	}

	request->request.dma = DMA_ADDR_INVALID;
	request->epnum = musb_ep->current_epnum;
	request->ep = musb_ep;

	return &request->request;
}
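
/*
 * Example (a sketch with illustrative names): gadget drivers never call
 * musb_alloc_request() directly; they go through the usb_ep_* wrappers,
 * which dispatch here via the gadget core:
 *
 *	struct usb_request *req;
 *
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
 */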
1194
1195/*
1196 * Free a request
1197 * Reused by ep0 code.
1198 */
1199void musb_free_request(struct usb_ep *ep, struct usb_request *req)
1200{
1201        kfree(to_musb_request(req));
1202}
1203
1204static LIST_HEAD(buffers);
1205
1206struct free_record {
1207        struct list_head        list;
1208        struct device           *dev;
1209        unsigned                bytes;
1210        dma_addr_t              dma;
1211};
1212
1213/*
1214 * Context: controller locked, IRQs blocked.
1215 */
1216void musb_ep_restart(struct musb *musb, struct musb_request *req)
1217{
1218        dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n",
1219                req->tx ? "TX/IN" : "RX/OUT",
1220                &req->request, req->request.length, req->epnum);
1221
1222        musb_ep_select(musb->mregs, req->epnum);
1223        if (req->tx)
1224                txstate(musb, req);
1225        else
1226                rxstate(musb, req);
1227}
1228
1229static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1230                        gfp_t gfp_flags)
1231{
1232        struct musb_ep          *musb_ep;
1233        struct musb_request     *request;
1234        struct musb             *musb;
1235        int                     status = 0;
1236        unsigned long           lockflags;
1237
1238        if (!ep || !req)
1239                return -EINVAL;
1240        if (!req->buf)
1241                return -ENODATA;
1242
1243        musb_ep = to_musb_ep(ep);
1244        musb = musb_ep->musb;
1245
1246        request = to_musb_request(req);
1247        request->musb = musb;
1248
1249        if (request->ep != musb_ep)
1250                return -EINVAL;
1251
1252        dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req);
1253
1254        /* request is mine now... */
1255        request->request.actual = 0;
1256        request->request.status = -EINPROGRESS;
1257        request->epnum = musb_ep->current_epnum;
1258        request->tx = musb_ep->is_in;
1259
1260        map_dma_buffer(request, musb, musb_ep);
1261
1262        spin_lock_irqsave(&musb->lock, lockflags);
1263
1264        /* don't queue if the ep is down */
1265        if (!musb_ep->desc) {
1266                dev_dbg(musb->controller, "req %p queued to %s while ep %s\n",
1267                                req, ep->name, "disabled");
1268                status = -ESHUTDOWN;
1269                goto cleanup;
1270        }
1271
1272        /* add request to the list */
1273        list_add_tail(&request->list, &musb_ep->req_list);
1274
1275        /* if this is the head of the queue, start i/o ... */
1276        if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
1277                musb_ep_restart(musb, request);
1278
1279cleanup:
1280        spin_unlock_irqrestore(&musb->lock, lockflags);
1281        return status;
1282}
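
/*
 * Illustrative sketch, not part of this driver: function drivers submit
 * I/O through the usb_ep_queue() wrapper, which dispatches here.  On
 * completion req->status is 0 for success, -ECONNRESET after a dequeue,
 * or -ESHUTDOWN once the endpoint goes down; "my_complete", "buf" and
 * "len" below are hypothetical:
 *
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
 */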
1283
1284static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
1285{
1286        struct musb_ep          *musb_ep = to_musb_ep(ep);
1287        struct musb_request     *req = to_musb_request(request);
1288        struct musb_request     *r;
1289        unsigned long           flags;
1290        int                     status = 0;
1291        struct musb             *musb;
1292
1293        if (!ep || !request || req->ep != musb_ep)
1294                return -EINVAL;
1295        musb = musb_ep->musb;
1296        spin_lock_irqsave(&musb->lock, flags);
1297
1298        list_for_each_entry(r, &musb_ep->req_list, list) {
1299                if (r == req)
1300                        break;
1301        }
1302        if (r != req) {
1303                dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name);
1304                status = -EINVAL;
1305                goto done;
1306        }
1307
1308        /* if the hardware doesn't have the request, easy ... */
1309        if (musb_ep->req_list.next != &req->list || musb_ep->busy)
1310                musb_g_giveback(musb_ep, request, -ECONNRESET);
1311
1312        /* ... else abort the dma transfer ... */
1313        else if (is_dma_capable() && musb_ep->dma) {
1314                struct dma_controller   *c = musb->dma_controller;
1315
1316                musb_ep_select(musb->mregs, musb_ep->current_epnum);
1317                if (c->channel_abort)
1318                        status = c->channel_abort(musb_ep->dma);
1319                else
1320                        status = -EBUSY;
1321                if (status == 0)
1322                        musb_g_giveback(musb_ep, request, -ECONNRESET);
1323        } else {
1324                /* NOTE: by sticking to easily tested hardware/driver states,
1325                 * we leave counting of in-flight packets imprecise.
1326                 */
1327                musb_g_giveback(musb_ep, request, -ECONNRESET);
1328        }
1329
1330done:
1331        spin_unlock_irqrestore(&musb->lock, flags);
1332        return status;
1333}
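
/*
 * Illustrative sketch, not part of this driver: a queued request is
 * cancelled with the usb_ep_dequeue() wrapper; its completion handler
 * then runs with req->status == -ECONNRESET:
 *
 *	value = usb_ep_dequeue(ep, req);
 *	if (value)
 *		pr_debug("%s: request not queued, %d\n", __func__, value);
 */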
1334
1335/*
1336 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx
1337 * any data but will still accept queued requests.
1338 *
1339 * exported to ep0 code
1340 */
1341static int musb_gadget_set_halt(struct usb_ep *ep, int value)
1342{
1343        struct musb_ep          *musb_ep = to_musb_ep(ep);
1344        u8                      epnum = musb_ep->current_epnum;
1345        struct musb             *musb = musb_ep->musb;
1346        void __iomem            *epio = musb->endpoints[epnum].regs;
1347        void __iomem            *mbase;
1348        unsigned long           flags;
1349        u16                     csr;
1350        struct musb_request     *request;
1351        int                     status = 0;
1352
1353        if (!ep)
1354                return -EINVAL;
1355        mbase = musb->mregs;
1356
1357        spin_lock_irqsave(&musb->lock, flags);
1358
1359        if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) {
1360                status = -EINVAL;
1361                goto done;
1362        }
1363
1364        musb_ep_select(mbase, epnum);
1365
1366        request = next_request(musb_ep);
1367        if (value) {
1368                if (request) {
1369                        dev_dbg(musb->controller, "request in progress, cannot halt %s\n",
1370                            ep->name);
1371                        status = -EAGAIN;
1372                        goto done;
1373                }
1374                /* Cannot portably stall with non-empty FIFO */
1375                if (musb_ep->is_in) {
1376                        csr = musb_readw(epio, MUSB_TXCSR);
1377                        if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1378                                dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name);
1379                                status = -EAGAIN;
1380                                goto done;
1381                        }
1382                }
1383        } else
1384                musb_ep->wedged = 0;
1385
1386        /* set/clear the stall and toggle bits */
1387        dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear");
1388        if (musb_ep->is_in) {
1389                csr = musb_readw(epio, MUSB_TXCSR);
1390                csr |= MUSB_TXCSR_P_WZC_BITS
1391                        | MUSB_TXCSR_CLRDATATOG;
1392                if (value)
1393                        csr |= MUSB_TXCSR_P_SENDSTALL;
1394                else
1395                        csr &= ~(MUSB_TXCSR_P_SENDSTALL
1396                                | MUSB_TXCSR_P_SENTSTALL);
1397                csr &= ~MUSB_TXCSR_TXPKTRDY;
1398                musb_writew(epio, MUSB_TXCSR, csr);
1399        } else {
1400                csr = musb_readw(epio, MUSB_RXCSR);
1401                csr |= MUSB_RXCSR_P_WZC_BITS
1402                        | MUSB_RXCSR_FLUSHFIFO
1403                        | MUSB_RXCSR_CLRDATATOG;
1404                if (value)
1405                        csr |= MUSB_RXCSR_P_SENDSTALL;
1406                else
1407                        csr &= ~(MUSB_RXCSR_P_SENDSTALL
1408                                | MUSB_RXCSR_P_SENTSTALL);
1409                musb_writew(epio, MUSB_RXCSR, csr);
1410        }
1411
1412        /* maybe start the first request in the queue */
1413        if (!musb_ep->busy && !value && request) {
1414                dev_dbg(musb->controller, "restarting the request\n");
1415                musb_ep_restart(musb, request);
1416        }
1417
1418done:
1419        spin_unlock_irqrestore(&musb->lock, flags);
1420        return status;
1421}
1422
1423/*
1424 * Set the halt feature such that host clear-halt requests are ignored (wedge)
1425 */
1426static int musb_gadget_set_wedge(struct usb_ep *ep)
1427{
1428        struct musb_ep          *musb_ep = to_musb_ep(ep);
1429
1430        if (!ep)
1431                return -EINVAL;
1432
1433        musb_ep->wedged = 1;
1434
1435        return usb_ep_set_halt(ep);
1436}
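
/*
 * Illustrative sketch, not part of this driver: function drivers raise
 * protocol stalls through the usb_ep_set_halt()/usb_ep_set_wedge()
 * wrappers.  A wedged endpoint stays halted across host clear-halt
 * requests until the function itself clears it:
 *
 *	usb_ep_set_wedge(ep);		// halt; host ClearFeature ignored
 *	...
 *	usb_ep_clear_halt(ep);		// only this un-wedges the endpoint
 */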
1437
1438static int musb_gadget_fifo_status(struct usb_ep *ep)
1439{
1440        struct musb_ep          *musb_ep = to_musb_ep(ep);
1441        void __iomem            *epio = musb_ep->hw_ep->regs;
1442        int                     retval = -EINVAL;
1443
1444        if (musb_ep->desc && !musb_ep->is_in) {
1445                struct musb             *musb = musb_ep->musb;
1446                int                     epnum = musb_ep->current_epnum;
1447                void __iomem            *mbase = musb->mregs;
1448                unsigned long           flags;
1449
1450                spin_lock_irqsave(&musb->lock, flags);
1451
1452                musb_ep_select(mbase, epnum);
1453                /* FIXME return zero unless RXPKTRDY is set */
1454                retval = musb_readw(epio, MUSB_RXCOUNT);
1455
1456                spin_unlock_irqrestore(&musb->lock, flags);
1457        }
1458        return retval;
1459}
1460
1461static void musb_gadget_fifo_flush(struct usb_ep *ep)
1462{
1463        struct musb_ep  *musb_ep = to_musb_ep(ep);
1464        struct musb     *musb = musb_ep->musb;
1465        u8              epnum = musb_ep->current_epnum;
1466        void __iomem    *epio = musb->endpoints[epnum].regs;
1467        void __iomem    *mbase;
1468        unsigned long   flags;
1469        u16             csr;
1470
1471        mbase = musb->mregs;
1472
1473        spin_lock_irqsave(&musb->lock, flags);
1474        musb_ep_select(mbase, epnum);
1475
1476        /* disable interrupts */
1477        musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe & ~(1 << epnum));
1478
1479        if (musb_ep->is_in) {
1480                csr = musb_readw(epio, MUSB_TXCSR);
1481                if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1482                        csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
1483                        /*
1484                         * Setting both TXPKTRDY and FLUSHFIFO would make the
1485                         * controller interrupt the current FIFO loading but
1486                         * not flush the already-loaded packets.
1487                         */
1488                        csr &= ~MUSB_TXCSR_TXPKTRDY;
1489                        musb_writew(epio, MUSB_TXCSR, csr);
1490                        /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1491                        musb_writew(epio, MUSB_TXCSR, csr);
1492                }
1493        } else {
1494                csr = musb_readw(epio, MUSB_RXCSR);
1495                csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
1496                musb_writew(epio, MUSB_RXCSR, csr);
1497                musb_writew(epio, MUSB_RXCSR, csr);
1498        }
1499
1500        /* re-enable interrupt */
1501        musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
1502        spin_unlock_irqrestore(&musb->lock, flags);
1503}
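
/*
 * Illustrative sketch, not part of this driver: both FIFO helpers are
 * reached through their generic wrappers; usb_ep_fifo_status() reports
 * the bytes pending in an OUT FIFO and usb_ep_fifo_flush() discards them:
 *
 *	pending = usb_ep_fifo_status(ep);
 *	if (pending > 0)
 *		usb_ep_fifo_flush(ep);
 */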
1504
1505static const struct usb_ep_ops musb_ep_ops = {
1506        .enable         = musb_gadget_enable,
1507        .disable        = musb_gadget_disable,
1508        .alloc_request  = musb_alloc_request,
1509        .free_request   = musb_free_request,
1510        .queue          = musb_gadget_queue,
1511        .dequeue        = musb_gadget_dequeue,
1512        .set_halt       = musb_gadget_set_halt,
1513        .set_wedge      = musb_gadget_set_wedge,
1514        .fifo_status    = musb_gadget_fifo_status,
1515        .fifo_flush     = musb_gadget_fifo_flush
1516};
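
/*
 * Function drivers never call these ops directly; the gadget core invokes
 * them through the inline usb_ep_*() wrappers in <linux/usb/gadget.h>
 * (usb_ep_enable() -> .enable, usb_ep_queue() -> .queue, and so on).
 */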
1517
1518/* ----------------------------------------------------------------------- */
1519
1520static int musb_gadget_get_frame(struct usb_gadget *gadget)
1521{
1522        struct musb     *musb = gadget_to_musb(gadget);
1523
1524        return (int)musb_readw(musb->mregs, MUSB_FRAME);
1525}
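
/*
 * Exposed to function drivers as usb_gadget_frame_number(gadget), e.g.
 * for isochronous scheduling decisions.
 */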
1526
1527static int musb_gadget_wakeup(struct usb_gadget *gadget)
1528{
1529        struct musb     *musb = gadget_to_musb(gadget);
1530        void __iomem    *mregs = musb->mregs;
1531        unsigned long   flags;
1532        int             status = -EINVAL;
1533        u8              power, devctl;
1534        int             retries;
1535
1536        spin_lock_irqsave(&musb->lock, flags);
1537
1538        switch (musb->xceiv->state) {
1539        case OTG_STATE_B_PERIPHERAL:
1540                /* NOTE:  OTG state machine doesn't include B_SUSPENDED;
1541                 * that's part of the standard usb 1.1 state machine, and
1542                 * doesn't affect OTG transitions.
1543                 */
1544                if (musb->may_wakeup && musb->is_suspended)
1545                        break;
1546                goto done;
1547        case OTG_STATE_B_IDLE:
1548                /* Start SRP ... OTG not required. */
1549                devctl = musb_readb(mregs, MUSB_DEVCTL);
1550                dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl);
1551                devctl |= MUSB_DEVCTL_SESSION;
1552                musb_writeb(mregs, MUSB_DEVCTL, devctl);
1553                devctl = musb_readb(mregs, MUSB_DEVCTL);
1554                retries = 100;
1555                while (!(devctl & MUSB_DEVCTL_SESSION)) {
1556                        devctl = musb_readb(mregs, MUSB_DEVCTL);
1557                        if (retries-- < 1)
1558                                break;
1559                }
1560                retries = 10000;
1561                while (devctl & MUSB_DEVCTL_SESSION) {
1562                        devctl = musb_readb(mregs, MUSB_DEVCTL);
1563                        if (retries-- < 1)
1564                                break;
1565                }
1566
1567                spin_unlock_irqrestore(&musb->lock, flags);
1568                otg_start_srp(musb->xceiv->otg);
1569                spin_lock_irqsave(&musb->lock, flags);
1570
1571                /* Block idling for at least 1s */
1572                musb_platform_try_idle(musb,
1573                        jiffies + msecs_to_jiffies(1000));
1574
1575                status = 0;
1576                goto done;
1577        default:
1578                dev_dbg(musb->controller, "Unhandled wake: %s\n",
1579                        usb_otg_state_string(musb->xceiv->state));
1580                goto done;
1581        }
1582
1583        status = 0;
1584
1585        power = musb_readb(mregs, MUSB_POWER);
1586        power |= MUSB_POWER_RESUME;
1587        musb_writeb(mregs, MUSB_POWER, power);
1588        dev_dbg(musb->controller, "issue wakeup\n");
1589
1590        /* FIXME do this next chunk in a timer callback, no mdelay */
1591        mdelay(2);
1592
1593        power = musb_readb(mregs, MUSB_POWER);
1594        power &= ~MUSB_POWER_RESUME;
1595        musb_writeb(mregs, MUSB_POWER, power);
1596done:
1597        spin_unlock_irqrestore(&musb->lock, flags);
1598        return status;
1599}
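
/*
 * Illustrative sketch, not part of this driver: a suspended function
 * driver requests remote wakeup through the generic wrapper, and should
 * only do so after the host issued SetFeature(DEVICE_REMOTE_WAKEUP):
 *
 *	status = usb_gadget_wakeup(gadget);
 */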
1600
1601static int
1602musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
1603{
1604        struct musb     *musb = gadget_to_musb(gadget);
1605
1606        musb->is_self_powered = !!is_selfpowered;
1607        return 0;
1608}
1609
1610static void musb_pullup(struct musb *musb, int is_on)
1611{
1612        u8 power;
1613
1614        power = musb_readb(musb->mregs, MUSB_POWER);
1615        if (is_on)
1616                power |= MUSB_POWER_SOFTCONN;
1617        else
1618                power &= ~MUSB_POWER_SOFTCONN;
1619
1620        /* FIXME if on, HdrcStart; if off, HdrcStop */
1621
1622        dev_dbg(musb->controller, "gadget D+ pullup %s\n",
1623                is_on ? "on" : "off");
1624        musb_writeb(musb->mregs, MUSB_POWER, power);
1625}
1626
1627#if 0
1628static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
1629{
1630        dev_dbg(gadget_to_musb(gadget)->controller, "<= %s =>\n", __func__);
1631
1632        /*
1633         * FIXME iff driver's softconnect flag is set (as it is during probe,
1634         * though that can clear it), just musb_pullup().
1635         */
1636
1637        return -EINVAL;
1638}
1639#endif
1640
1641static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1642{
1643        struct musb     *musb = gadget_to_musb(gadget);
1644
1645        if (!musb->xceiv->set_power)
1646                return -EOPNOTSUPP;
1647        return usb_phy_set_power(musb->xceiv, mA);
1648}
1649
1650static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1651{
1652        struct musb     *musb = gadget_to_musb(gadget);
1653        unsigned long   flags;
1654
1655        is_on = !!is_on;
1656
1657        pm_runtime_get_sync(musb->controller);
1658
1659        /* NOTE: this assumes we are sensing vbus; we'd rather
1660         * not pullup unless the B-session is active.
1661         */
1662        spin_lock_irqsave(&musb->lock, flags);
1663        if (is_on != musb->softconnect) {
1664                musb->softconnect = is_on;
1665                musb_pullup(musb, is_on);
1666        }
1667        spin_unlock_irqrestore(&musb->lock, flags);
1668
1669        pm_runtime_put(musb->controller);
1670
1671        return 0;
1672}
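
/*
 * Illustrative sketch, not part of this driver: the soft-connect pullup is
 * toggled via the usb_gadget_connect()/usb_gadget_disconnect() wrappers,
 * which call .pullup with 1 and 0 respectively:
 *
 *	usb_gadget_disconnect(&musb->g);	// drop the D+ pullup
 *	usb_gadget_connect(&musb->g);		// pull up D+, re-enumerate
 */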
1673
1674static int musb_gadget_start(struct usb_gadget *g,
1675                struct usb_gadget_driver *driver);
1676static int musb_gadget_stop(struct usb_gadget *g,
1677                struct usb_gadget_driver *driver);
1678
1679static const struct usb_gadget_ops musb_gadget_operations = {
1680        .get_frame              = musb_gadget_get_frame,
1681        .wakeup                 = musb_gadget_wakeup,
1682        .set_selfpowered        = musb_gadget_set_self_powered,
1683        /* .vbus_session                = musb_gadget_vbus_session, */
1684        .vbus_draw              = musb_gadget_vbus_draw,
1685        .pullup                 = musb_gadget_pullup,
1686        .udc_start              = musb_gadget_start,
1687        .udc_stop               = musb_gadget_stop,
1688};
1689
1690/* ----------------------------------------------------------------------- */
1691
1692/* Registration */
1693
1694/* Only this registration code "knows" the rule (from USB standards)
1695 * about there being only one external upstream port.  It assumes
1696 * all peripheral ports are external...
1697 */
1698
1699static void
1700init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
1701{
1702        struct musb_hw_ep       *hw_ep = musb->endpoints + epnum;
1703
1704        memset(ep, 0, sizeof *ep);
1705
1706        ep->current_epnum = epnum;
1707        ep->musb = musb;
1708        ep->hw_ep = hw_ep;
1709        ep->is_in = is_in;
1710
1711        INIT_LIST_HEAD(&ep->req_list);
1712
1713        sprintf(ep->name, "ep%d%s", epnum,
1714                        (!epnum || hw_ep->is_shared_fifo) ? "" : (
1715                                is_in ? "in" : "out"));
1716        ep->end_point.name = ep->name;
1717        INIT_LIST_HEAD(&ep->end_point.ep_list);
1718        if (!epnum) {
1719                ep->end_point.maxpacket = 64;
1720                ep->end_point.ops = &musb_g_ep0_ops;
1721                musb->g.ep0 = &ep->end_point;
1722        } else {
1723                if (is_in)
1724                        ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
1725                else
1726                        ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
1727                ep->end_point.ops = &musb_ep_ops;
1728                list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
1729        }
1730}
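
/*
 * The names built above follow the usual gadget convention: "ep0" for the
 * control endpoint, "ep1in"/"ep1out" where TX and RX have separate FIFOs,
 * and a bare "ep1" (etc.) when the hardware endpoint shares one FIFO for
 * both directions.
 */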
1731
1732/*
1733 * Initialize the endpoints exposed to peripheral drivers, with backlinks
1734 * to the rest of the driver state.
1735 */
1736static inline void musb_g_init_endpoints(struct musb *musb)
1737{
1738        u8                      epnum;
1739        struct musb_hw_ep       *hw_ep;
1740        unsigned                count = 0;
1741
1742        /* initialize endpoint list just once */
1743        INIT_LIST_HEAD(&(musb->g.ep_list));
1744
1745        for (epnum = 0, hw_ep = musb->endpoints;
1746                        epnum < musb->nr_endpoints;
1747                        epnum++, hw_ep++) {
1748                if (hw_ep->is_shared_fifo /* || !epnum */) {
1749                        init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
1750                        count++;
1751                } else {
1752                        if (hw_ep->max_packet_sz_tx) {
1753                                init_peripheral_ep(musb, &hw_ep->ep_in,
1754                                                        epnum, 1);
1755                                count++;
1756                        }
1757                        if (hw_ep->max_packet_sz_rx) {
1758                                init_peripheral_ep(musb, &hw_ep->ep_out,
1759                                                        epnum, 0);
1760                                count++;
1761                        }
1762                }
1763        }
1764}
1765
1766/* called once during driver setup to initialize and link into
1767 * the driver model; memory is zeroed.
1768 */
1769int musb_gadget_setup(struct musb *musb)
1770{
1771        int status;
1772
1773        /* REVISIT minor race:  if (erroneously) setting up two
1774         * musb peripherals at the same time, only the bus lock
1775         * is probably held.
1776         */
1777
1778        musb->g.ops = &musb_gadget_operations;
1779        musb->g.max_speed = USB_SPEED_HIGH;
1780        musb->g.speed = USB_SPEED_UNKNOWN;
1781
1782        /* this "gadget" abstracts/virtualizes the controller */
1783        musb->g.name = musb_driver_name;
1784        musb->g.is_otg = 1;
1785
1786        musb_g_init_endpoints(musb);
1787
1788        musb->is_active = 0;
1789        musb_platform_try_idle(musb, 0);
1790
1791        status = usb_add_gadget_udc(musb->controller, &musb->g);
1792        if (status)
1793                goto err;
1794
1795        return 0;
1796err:
1797        musb->g.dev.parent = NULL;
1798        device_unregister(&musb->g.dev);
1799        return status;
1800}
1801
1802void musb_gadget_cleanup(struct musb *musb)
1803{
1804        usb_del_gadget_udc(&musb->g);
1805}
1806
1807/*
1808 * Register the gadget driver. Used by gadget drivers when
1809 * registering themselves with the controller.
1810 *
1811 * -EINVAL the driver cannot be used (e.g. it is not high speed capable)
1812 * -EBUSY another gadget is already using the controller
1813 * -ENOMEM no memory to perform the operation
1814 *
1815 * @param driver the gadget driver
1816 * @return <0 if error, 0 if everything is fine
1817 */
1818static int musb_gadget_start(struct usb_gadget *g,
1819                struct usb_gadget_driver *driver)
1820{
1821        struct musb             *musb = gadget_to_musb(g);
1822        struct usb_otg          *otg = musb->xceiv->otg;
1823        struct usb_hcd          *hcd = musb_to_hcd(musb);
1824        unsigned long           flags;
1825        int                     retval = 0;
1826
1827        if (driver->max_speed < USB_SPEED_HIGH) {
1828                retval = -EINVAL;
1829                goto err;
1830        }
1831
1832        pm_runtime_get_sync(musb->controller);
1833
1834        dev_dbg(musb->controller, "registering driver %s\n", driver->function);
1835
1836        musb->softconnect = 0;
1837        musb->gadget_driver = driver;
1838
1839        spin_lock_irqsave(&musb->lock, flags);
1840        musb->is_active = 1;
1841
1842        otg_set_peripheral(otg, &musb->g);
1843        musb->xceiv->state = OTG_STATE_B_IDLE;
1844        spin_unlock_irqrestore(&musb->lock, flags);
1845
1846        /* REVISIT:  funcall to other code, which also
1847         * handles power budgeting ... this way also
1848         * ensures HdrcStart is indirectly called.
1849         */
1850        retval = usb_add_hcd(hcd, 0, 0);
1851        if (retval < 0) {
1852                dev_dbg(musb->controller, "add_hcd failed, %d\n", retval);
1853                goto err;
1854        }
1855
1856        if (musb->xceiv->last_event == USB_EVENT_ID)
1857                musb_platform_set_vbus(musb, 1);
1858
1859        hcd->self.uses_pio_for_control = 1;
1860
1861        if (musb->xceiv->last_event == USB_EVENT_NONE)
1862                pm_runtime_put(musb->controller);
1863
1864        return 0;
1865
1866err:
1867        return retval;
1868}
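
/*
 * Illustrative sketch, not part of this driver: a gadget driver binds to
 * this UDC through the gadget core (e.g. usb_gadget_probe_driver());
 * drivers that are not high speed capable are rejected above.  All names
 * here are hypothetical:
 *
 *	static struct usb_gadget_driver my_driver = {
 *		.function	= "my function",
 *		.max_speed	= USB_SPEED_HIGH,
 *		.bind		= my_bind,
 *		.unbind		= my_unbind,
 *		.setup		= my_setup,
 *		.disconnect	= my_disconnect,
 *	};
 *
 *	status = usb_gadget_probe_driver(&my_driver);
 */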
1869
1870static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
1871{
1872        int                     i;
1873        struct musb_hw_ep       *hw_ep;
1874
1875        /* don't disconnect if it's not connected */
1876        if (musb->g.speed == USB_SPEED_UNKNOWN)
1877                driver = NULL;
1878        else
1879                musb->g.speed = USB_SPEED_UNKNOWN;
1880
1881        /* deactivate the hardware */
1882        if (musb->softconnect) {
1883                musb->softconnect = 0;
1884                musb_pullup(musb, 0);
1885        }
1886        musb_stop(musb);
1887
1888        /* killing any outstanding requests will quiesce the driver;
1889         * then report disconnect
1890         */
1891        if (driver) {
1892                for (i = 0, hw_ep = musb->endpoints;
1893                                i < musb->nr_endpoints;
1894                                i++, hw_ep++) {
1895                        musb_ep_select(musb->mregs, i);
1896                        if (hw_ep->is_shared_fifo /* || !epnum */) {
1897                                nuke(&hw_ep->ep_in, -ESHUTDOWN);
1898                        } else {
1899                                if (hw_ep->max_packet_sz_tx)
1900                                        nuke(&hw_ep->ep_in, -ESHUTDOWN);
1901                                if (hw_ep->max_packet_sz_rx)
1902                                        nuke(&hw_ep->ep_out, -ESHUTDOWN);
1903                        }
1904                }
1905        }
1906}
1907
1908/*
1909 * Unregister the gadget driver. Used by gadget drivers when
1910 * unregistering themselves from the controller.
1911 *
1912 * @param driver the gadget driver to unregister
1913 */
1914static int musb_gadget_stop(struct usb_gadget *g,
1915                struct usb_gadget_driver *driver)
1916{
1917        struct musb     *musb = gadget_to_musb(g);
1918        unsigned long   flags;
1919
1920        if (musb->xceiv->last_event == USB_EVENT_NONE)
1921                pm_runtime_get_sync(musb->controller);
1922
1923        /*
1924         * REVISIT always use otg_set_peripheral() here too;
1925         * this needs to shut down the OTG engine.
1926         */
1927
1928        spin_lock_irqsave(&musb->lock, flags);
1929
1930        musb_hnp_stop(musb);
1931
1932        (void) musb_gadget_vbus_draw(&musb->g, 0);
1933
1934        musb->xceiv->state = OTG_STATE_UNDEFINED;
1935        stop_activity(musb, driver);
1936        otg_set_peripheral(musb->xceiv->otg, NULL);
1937
1938        dev_dbg(musb->controller, "unregistering driver %s\n", driver->function);
1939
1940        musb->is_active = 0;
1941        musb->gadget_driver = NULL;
1942        musb_platform_try_idle(musb, 0);
1943        spin_unlock_irqrestore(&musb->lock, flags);
1944
1945        usb_remove_hcd(musb_to_hcd(musb));
1946        /*
1947         * FIXME we need to be able to register another
1948         * gadget driver here and have everything work;
1949         * that currently misbehaves.
1950         */
1951
1952        pm_runtime_put(musb->controller);
1953
1954        return 0;
1955}
1956
1957/* ----------------------------------------------------------------------- */
1958
1959/* lifecycle operations called through musb_core.c */
1960
1961void musb_g_resume(struct musb *musb)
1962{
1963        musb->is_suspended = 0;
1964        switch (musb->xceiv->state) {
1965        case OTG_STATE_B_IDLE:
1966                break;
1967        case OTG_STATE_B_WAIT_ACON:
1968        case OTG_STATE_B_PERIPHERAL:
1969                musb->is_active = 1;
1970                if (musb->gadget_driver && musb->gadget_driver->resume) {
1971                        spin_unlock(&musb->lock);
1972                        musb->gadget_driver->resume(&musb->g);
1973                        spin_lock(&musb->lock);
1974                }
1975                break;
1976        default:
1977                WARNING("unhandled RESUME transition (%s)\n",
1978                                usb_otg_state_string(musb->xceiv->state));
1979        }
1980}
1981
1982/* called when SOF packets stop for 3+ msec */
1983void musb_g_suspend(struct musb *musb)
1984{
1985        u8      devctl;
1986
1987        devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1988        dev_dbg(musb->controller, "devctl %02x\n", devctl);
1989
1990        switch (musb->xceiv->state) {
1991        case OTG_STATE_B_IDLE:
1992                if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
1993                        musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
1994                break;
1995        case OTG_STATE_B_PERIPHERAL:
1996                musb->is_suspended = 1;
1997                if (musb->gadget_driver && musb->gadget_driver->suspend) {
1998                        spin_unlock(&musb->lock);
1999                        musb->gadget_driver->suspend(&musb->g);
2000                        spin_lock(&musb->lock);
2001                }
2002                break;
2003        default:
2004                /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
2005                 * A_PERIPHERAL may need care too
2006                 */
2007                WARNING("unhandled SUSPEND transition (%s)\n",
2008                                usb_otg_state_string(musb->xceiv->state));
2009        }
2010}
2011
2012/* Called during SRP */
2013void musb_g_wakeup(struct musb *musb)
2014{
2015        musb_gadget_wakeup(&musb->g);
2016}
2017
2018/* called when VBUS drops below session threshold, and in other cases */
2019void musb_g_disconnect(struct musb *musb)
2020{
2021        void __iomem    *mregs = musb->mregs;
2022        u8      devctl = musb_readb(mregs, MUSB_DEVCTL);
2023
2024        dev_dbg(musb->controller, "devctl %02x\n", devctl);
2025
2026        /* clear HR */
2027        musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
2028
2029        /* don't draw vbus until new b-default session */
2030        (void) musb_gadget_vbus_draw(&musb->g, 0);
2031
2032        musb->g.speed = USB_SPEED_UNKNOWN;
2033        if (musb->gadget_driver && musb->gadget_driver->disconnect) {
2034                spin_unlock(&musb->lock);
2035                musb->gadget_driver->disconnect(&musb->g);
2036                spin_lock(&musb->lock);
2037        }
2038
2039        switch (musb->xceiv->state) {
2040        default:
2041                dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
2042                        usb_otg_state_string(musb->xceiv->state));
2043                musb->xceiv->state = OTG_STATE_A_IDLE;
2044                MUSB_HST_MODE(musb);
2045                break;
2046        case OTG_STATE_A_PERIPHERAL:
2047                musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
2048                MUSB_HST_MODE(musb);
2049                break;
2050        case OTG_STATE_B_WAIT_ACON:
2051        case OTG_STATE_B_HOST:
2052        case OTG_STATE_B_PERIPHERAL:
2053        case OTG_STATE_B_IDLE:
2054                musb->xceiv->state = OTG_STATE_B_IDLE;
2055                break;
2056        case OTG_STATE_B_SRP_INIT:
2057                break;
2058        }
2059
2060        musb->is_active = 0;
2061}
2062
2063void musb_g_reset(struct musb *musb)
2064__releases(musb->lock)
2065__acquires(musb->lock)
2066{
2067        void __iomem    *mbase = musb->mregs;
2068        u8              devctl = musb_readb(mbase, MUSB_DEVCTL);
2069        u8              power;
2070
2071        dev_dbg(musb->controller, "<== %s driver '%s'\n",
2072                        (devctl & MUSB_DEVCTL_BDEVICE)
2073                                ? "B-Device" : "A-Device",
2074                        musb->gadget_driver
2075                                ? musb->gadget_driver->driver.name
2076                                : NULL
2077                        );
2078
2079        /* report disconnect, if we didn't already (flushing EP state) */
2080        if (musb->g.speed != USB_SPEED_UNKNOWN)
2081                musb_g_disconnect(musb);
2082
2083        /* clear HR */
2084        else if (devctl & MUSB_DEVCTL_HR)
2085                musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
2086
2087
2088        /* what speed did we negotiate? */
2089        power = musb_readb(mbase, MUSB_POWER);
2090        musb->g.speed = (power & MUSB_POWER_HSMODE)
2091                        ? USB_SPEED_HIGH : USB_SPEED_FULL;
2092
2093        /* start in USB_STATE_DEFAULT */
2094        musb->is_active = 1;
2095        musb->is_suspended = 0;
2096        MUSB_DEV_MODE(musb);
2097        musb->address = 0;
2098        musb->ep0_state = MUSB_EP0_STAGE_SETUP;
2099
2100        musb->may_wakeup = 0;
2101        musb->g.b_hnp_enable = 0;
2102        musb->g.a_alt_hnp_support = 0;
2103        musb->g.a_hnp_support = 0;
2104
2105        /* Normal reset, as B-Device;
2106         * or else after HNP, as A-Device
2107         */
2108        if (devctl & MUSB_DEVCTL_BDEVICE) {
2109                musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
2110                musb->g.is_a_peripheral = 0;
2111        } else {
2112                musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
2113                musb->g.is_a_peripheral = 1;
2114        }
2115
2116        /* start with default limits on VBUS power draw */
2117        (void) musb_gadget_vbus_draw(&musb->g, 8);
2118}
2119