linux/drivers/usb/musb/musb_gadget.c
// SPDX-License-Identifier: GPL-2.0
/*
 * MUSB OTG driver peripheral support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "musb_core.h"
#include "musb_trace.h"


/* ----------------------------------------------------------------------- */

#define is_buffer_mapped(req) (is_dma_capable() && \
                                        (req->map_state != UN_MAPPED))

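/*
 * request->map_state tracks who owns the DMA mapping of request->buf:
 *   UN_MAPPED   - not mapped for DMA (PIO path, or mapping failed/rejected)
 *   MUSB_MAPPED - mapped here in map_dma_buffer(); we must unmap it
 *   PRE_MAPPED  - the gadget driver supplied request->dma; we only sync it
 */
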
/* Map the buffer for DMA */

static inline void map_dma_buffer(struct musb_request *request,
                        struct musb *musb, struct musb_ep *musb_ep)
{
        int compatible = true;
        struct dma_controller *dma = musb->dma_controller;

        request->map_state = UN_MAPPED;

        if (!is_dma_capable() || !musb_ep->dma)
                return;

        /* Check if DMA engine can handle this request.
         * DMA code must reject the USB request explicitly.
         * Default behaviour is to map the request.
         */
        if (dma->is_compatible)
                compatible = dma->is_compatible(musb_ep->dma,
                                musb_ep->packet_sz, request->request.buf,
                                request->request.length);
        if (!compatible)
                return;

        if (request->request.dma == DMA_ADDR_INVALID) {
                dma_addr_t dma_addr;
                int ret;

                dma_addr = dma_map_single(
                                musb->controller,
                                request->request.buf,
                                request->request.length,
                                request->tx
                                        ? DMA_TO_DEVICE
                                        : DMA_FROM_DEVICE);
                ret = dma_mapping_error(musb->controller, dma_addr);
                if (ret)
                        return;

                request->request.dma = dma_addr;
                request->map_state = MUSB_MAPPED;
        } else {
                dma_sync_single_for_device(musb->controller,
                        request->request.dma,
                        request->request.length,
                        request->tx
                                ? DMA_TO_DEVICE
                                : DMA_FROM_DEVICE);
                request->map_state = PRE_MAPPED;
        }
}
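
/*
 * Illustrative sketch (not part of this driver): a gadget driver that wants
 * the PRE_MAPPED path above maps its own buffer and fills in request->dma
 * before queueing, along these lines:
 *
 *         req->dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *         if (!dma_mapping_error(dev, req->dma))
 *                 usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * map_dma_buffer() then only syncs the existing mapping for the device.
 */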

/* Unmap the buffer from DMA and map it back to the CPU */
static inline void unmap_dma_buffer(struct musb_request *request,
                                struct musb *musb)
{
        struct musb_ep *musb_ep = request->ep;

        if (!is_buffer_mapped(request) || !musb_ep->dma)
                return;

        if (request->request.dma == DMA_ADDR_INVALID) {
                dev_vdbg(musb->controller,
                                "not unmapping a never mapped buffer\n");
                return;
        }
        if (request->map_state == MUSB_MAPPED) {
                dma_unmap_single(musb->controller,
                        request->request.dma,
                        request->request.length,
                        request->tx
                                ? DMA_TO_DEVICE
                                : DMA_FROM_DEVICE);
                request->request.dma = DMA_ADDR_INVALID;
        } else { /* PRE_MAPPED */
                dma_sync_single_for_cpu(musb->controller,
                        request->request.dma,
                        request->request.length,
                        request->tx
                                ? DMA_TO_DEVICE
                                : DMA_FROM_DEVICE);
        }
        request->map_state = UN_MAPPED;
}

/*
 * Immediately complete a request.
 *
 * @param ep the endpoint to complete the request on
 * @param request the request to complete
 * @param status the status to complete the request with
 * Context: controller locked, IRQs blocked.
 */
void musb_g_giveback(
        struct musb_ep          *ep,
        struct usb_request      *request,
        int                     status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
        struct musb_request     *req;
        struct musb             *musb;
        int                     busy = ep->busy;

        req = to_musb_request(request);

        list_del(&req->list);
        if (req->request.status == -EINPROGRESS)
                req->request.status = status;
        musb = req->musb;

        ep->busy = 1;
        spin_unlock(&musb->lock);

        if (!dma_mapping_error(&musb->g.dev, request->dma))
                unmap_dma_buffer(req, musb);

        trace_musb_req_gb(req);
        usb_gadget_giveback_request(&req->ep->end_point, &req->request);
        spin_lock(&musb->lock);
        ep->busy = busy;
}

/* ----------------------------------------------------------------------- */

/*
 * Abort requests queued to an endpoint, completing each with the given
 * status. Synchronous. The caller has locked the controller, blocked IRQs,
 * and selected this endpoint.
 */
static void nuke(struct musb_ep *ep, const int status)
{
        struct musb             *musb = ep->musb;
        struct musb_request     *req = NULL;
        void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;

        ep->busy = 1;

        if (is_dma_capable() && ep->dma) {
                struct dma_controller   *c = ep->musb->dma_controller;
                int value;

                if (ep->is_in) {
                        /*
                         * The programming guide says that we must not clear
                         * the DMAMODE bit before DMAENAB, so we only
                         * clear it in the second write...
                         */
                        musb_writew(epio, MUSB_TXCSR,
                                    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
                        musb_writew(epio, MUSB_TXCSR,
                                        0 | MUSB_TXCSR_FLUSHFIFO);
                } else {
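                        /* flush twice in case the RX FIFO is double-buffered */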
                        musb_writew(epio, MUSB_RXCSR,
                                        0 | MUSB_RXCSR_FLUSHFIFO);
                        musb_writew(epio, MUSB_RXCSR,
                                        0 | MUSB_RXCSR_FLUSHFIFO);
                }

                value = c->channel_abort(ep->dma);
                musb_dbg(musb, "%s: abort DMA --> %d", ep->name, value);
                c->channel_release(ep->dma);
                ep->dma = NULL;
        }

        while (!list_empty(&ep->req_list)) {
                req = list_first_entry(&ep->req_list, struct musb_request, list);
                musb_g_giveback(ep, &req->request, status);
        }
}

/* ----------------------------------------------------------------------- */

/* Data transfers - pure PIO, pure DMA, or mixed mode */

/*
 * This assumes the separate CPPI engine is responding to DMA requests
 * from the usb core ... sequenced a bit differently from the Mentor
 * DMA engine.
 */

static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
        if (can_bulk_split(musb, ep->type))
                return ep->hw_ep->max_packet_sz_tx;
        else
                return ep->packet_sz;
}

/*
 * An endpoint is transmitting data. This can be called either from
 * the IRQ routine or from ep.queue() to kickstart a request on an
 * endpoint.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void txstate(struct musb *musb, struct musb_request *req)
{
        u8                      epnum = req->epnum;
        struct musb_ep          *musb_ep;
        void __iomem            *epio = musb->endpoints[epnum].regs;
        struct usb_request      *request;
        u16                     fifo_count = 0, csr;
        int                     use_dma = 0;

        musb_ep = req->ep;

        /* Check if EP is disabled */
        if (!musb_ep->desc) {
                musb_dbg(musb, "ep:%s disabled - ignore request",
                                                musb_ep->end_point.name);
                return;
        }

        /* we shouldn't get here while DMA is active ... but we do ... */
        if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
                musb_dbg(musb, "dma pending...");
                return;
        }

        /* read TXCSR before checking FIFO readiness and stall state */
        csr = musb_readw(epio, MUSB_TXCSR);

        request = &req->request;
        fifo_count = min(max_ep_writesize(musb, musb_ep),
                        (int)(request->length - request->actual));

        if (csr & MUSB_TXCSR_TXPKTRDY) {
                musb_dbg(musb, "%s old packet still ready, txcsr %03x",
                                musb_ep->end_point.name, csr);
                return;
        }

        if (csr & MUSB_TXCSR_P_SENDSTALL) {
                musb_dbg(musb, "%s stalling, txcsr %03x",
                                musb_ep->end_point.name, csr);
                return;
        }

        musb_dbg(musb, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x",
                        epnum, musb_ep->packet_sz, fifo_count,
                        csr);

#ifndef CONFIG_MUSB_PIO_ONLY
        if (is_buffer_mapped(req)) {
                struct dma_controller   *c = musb->dma_controller;
                size_t request_size;

                /* setup DMA, then program endpoint CSR */
                request_size = min_t(size_t, request->length - request->actual,
                                        musb_ep->dma->max_len);

                use_dma = (request->dma != DMA_ADDR_INVALID && request_size);

                /* MUSB_TXCSR_P_ISO is still set correctly */

                if (musb_dma_inventra(musb) || musb_dma_ux500(musb)) {
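                        /* mode 0 moves one packet per DMA request; mode 1
                         * streams the whole transfer
                         */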
                        if (request_size < musb_ep->packet_sz)
                                musb_ep->dma->desired_mode = 0;
                        else
                                musb_ep->dma->desired_mode = 1;

                        use_dma = use_dma && c->channel_program(
                                        musb_ep->dma, musb_ep->packet_sz,
                                        musb_ep->dma->desired_mode,
                                        request->dma + request->actual, request_size);
                        if (use_dma) {
                                if (musb_ep->dma->desired_mode == 0) {
                                        /*
                                         * We must not clear the DMAMODE bit
                                         * before the DMAENAB bit -- and the
                                         * latter doesn't always get cleared
                                         * before we get here...
                                         */
                                        csr &= ~(MUSB_TXCSR_AUTOSET
                                                | MUSB_TXCSR_DMAENAB);
                                        musb_writew(epio, MUSB_TXCSR, csr
                                                | MUSB_TXCSR_P_WZC_BITS);
                                        csr &= ~MUSB_TXCSR_DMAMODE;
                                        csr |= (MUSB_TXCSR_DMAENAB |
                                                        MUSB_TXCSR_MODE);
                                        /* this ordering is against the programming guide */
                                } else {
                                        csr |= (MUSB_TXCSR_DMAENAB
                                                        | MUSB_TXCSR_DMAMODE
                                                        | MUSB_TXCSR_MODE);
                                        /*
                                         * Enable Autoset according to table
                                         * below
                                         * bulk_split hb_mult   Autoset_Enable
                                         *      0       0       Yes(Normal)
                                         *      0       >0      No(High BW ISO)
                                         *      1       0       Yes(HS bulk)
                                         *      1       >0      Yes(FS bulk)
                                         */
                                        if (!musb_ep->hb_mult ||
                                            can_bulk_split(musb,
                                                           musb_ep->type))
                                                csr |= MUSB_TXCSR_AUTOSET;
                                }
                                csr &= ~MUSB_TXCSR_P_UNDERRUN;

                                musb_writew(epio, MUSB_TXCSR, csr);
                        }
                }

                if (is_cppi_enabled(musb)) {
                        /* program endpoint CSR first, then setup DMA */
                        csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
                        csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
                                MUSB_TXCSR_MODE;
                        musb_writew(epio, MUSB_TXCSR, (MUSB_TXCSR_P_WZC_BITS &
                                                ~MUSB_TXCSR_P_UNDERRUN) | csr);

                        /* ensure writebuffer is empty */
                        csr = musb_readw(epio, MUSB_TXCSR);

                        /*
                         * NOTE host side sets DMAENAB later than this; both are
                         * OK since the transfer dma glue (between CPPI and
                         * Mentor fifos) just tells CPPI it could start. Data
                         * only moves to the USB TX fifo when both fifos are
                         * ready.
                         */
                        /*
                         * "mode" is irrelevant here; handle terminating ZLPs
                         * like PIO does, since the hardware RNDIS mode seems
                         * unreliable except for the
                         * last-packet-is-already-short case.
                         */
                        use_dma = use_dma && c->channel_program(
                                        musb_ep->dma, musb_ep->packet_sz,
                                        0,
                                        request->dma + request->actual,
                                        request_size);
                        if (!use_dma) {
                                c->channel_release(musb_ep->dma);
                                musb_ep->dma = NULL;
                                csr &= ~MUSB_TXCSR_DMAENAB;
                                musb_writew(epio, MUSB_TXCSR, csr);
                                /* invariant: request->buf is non-null */
                        }
                } else if (tusb_dma_omap(musb))
                        use_dma = use_dma && c->channel_program(
                                        musb_ep->dma, musb_ep->packet_sz,
                                        request->zero,
                                        request->dma + request->actual,
                                        request_size);
        }
#endif

        if (!use_dma) {
                /*
                 * Unmap the DMA buffer back to the CPU if DMA channel
                 * programming failed
                 */
                unmap_dma_buffer(req, musb);

                musb_write_fifo(musb_ep->hw_ep, fifo_count,
                                (u8 *) (request->buf + request->actual));
                request->actual += fifo_count;
                csr |= MUSB_TXCSR_TXPKTRDY;
                csr &= ~MUSB_TXCSR_P_UNDERRUN;
                musb_writew(epio, MUSB_TXCSR, csr);
        }

        /* host may already have the data when this message shows... */
        musb_dbg(musb, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d",
                        musb_ep->end_point.name, use_dma ? "dma" : "pio",
                        request->actual, request->length,
                        musb_readw(epio, MUSB_TXCSR),
                        fifo_count,
                        musb_readw(epio, MUSB_TXMAXP));
}

/*
 * FIFO state update (e.g. data ready).
 * Called from IRQ, with controller locked.
 */
void musb_g_tx(struct musb *musb, u8 epnum)
{
        u16                     csr;
        struct musb_request     *req;
        struct usb_request      *request;
        u8 __iomem              *mbase = musb->mregs;
        struct musb_ep          *musb_ep = &musb->endpoints[epnum].ep_in;
        void __iomem            *epio = musb->endpoints[epnum].regs;
        struct dma_channel      *dma;

        musb_ep_select(mbase, epnum);
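        /*
         * next_request() may return NULL here; usb_request is the first
         * member of struct musb_request, so &req->request is then NULL too
         * and the "if (request)" test below stays safe.
         */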
        req = next_request(musb_ep);
        request = &req->request;

        csr = musb_readw(epio, MUSB_TXCSR);
        musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr);

        dma = is_dma_capable() ? musb_ep->dma : NULL;

        /*
         * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
         * probably rates reporting as a host error.
         */
        if (csr & MUSB_TXCSR_P_SENTSTALL) {
                csr |=  MUSB_TXCSR_P_WZC_BITS;
                csr &= ~MUSB_TXCSR_P_SENTSTALL;
                musb_writew(epio, MUSB_TXCSR, csr);
                return;
        }

        if (csr & MUSB_TXCSR_P_UNDERRUN) {
                /* We NAKed, no big deal... little reason to care. */
                csr |=   MUSB_TXCSR_P_WZC_BITS;
                csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
                musb_writew(epio, MUSB_TXCSR, csr);
                dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
                                epnum, request);
        }

        if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
                /*
                 * SHOULD NOT HAPPEN... has with CPPI though, after
                 * changing SENDSTALL (and other cases); harmless?
                 */
                musb_dbg(musb, "%s dma still busy?", musb_ep->end_point.name);
                return;
        }

        if (request) {
                u8      is_dma = 0;
                bool    short_packet = false;

                trace_musb_req_tx(req);

                if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
                        is_dma = 1;
                        csr |= MUSB_TXCSR_P_WZC_BITS;
                        csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
                                 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
                        musb_writew(epio, MUSB_TXCSR, csr);
                        /* Ensure writebuffer is empty. */
                        csr = musb_readw(epio, MUSB_TXCSR);
                        request->actual += musb_ep->dma->actual_len;
                        musb_dbg(musb, "TXCSR%d %04x, DMA off, len %zu, req %p",
                                epnum, csr, musb_ep->dma->actual_len, request);
                }

                /*
                 * First, maybe a terminating short packet. Some DMA
                 * engines might handle this by themselves.
                 */
                if ((request->zero && request->length)
                        && (request->length % musb_ep->packet_sz == 0)
                        && (request->actual == request->length))
                                short_packet = true;

                if ((musb_dma_inventra(musb) || musb_dma_ux500(musb)) &&
                        (is_dma && (!dma->desired_mode ||
                                (request->actual &
                                        (musb_ep->packet_sz - 1)))))
                                short_packet = true;

                if (short_packet) {
                        /*
                         * On DMA completion, FIFO may not be
                         * available yet...
                         */
                        if (csr & MUSB_TXCSR_TXPKTRDY)
                                return;

                        musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
                                        | MUSB_TXCSR_TXPKTRDY);
                        request->zero = 0;
                }

                if (request->actual == request->length) {
                        musb_g_giveback(musb_ep, request, 0);
                        /*
                         * In the giveback function the MUSB lock is
                         * released and acquired after sometime. During
                         * this time period the INDEX register could get
                         * changed by the gadget_queue function especially
                         * on SMP systems. Reselect the INDEX to be sure
                         * we are reading/modifying the right registers
                         */
                        musb_ep_select(mbase, epnum);
                        req = musb_ep->desc ? next_request(musb_ep) : NULL;
                        if (!req) {
                                musb_dbg(musb, "%s idle now",
                                        musb_ep->end_point.name);
                                return;
                        }
                }

                txstate(musb, req);
        }
}

/* ------------------------------------------------------------ */

/*
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
        const u8                epnum = req->epnum;
        struct usb_request      *request = &req->request;
        struct musb_ep          *musb_ep;
        void __iomem            *epio = musb->endpoints[epnum].regs;
        unsigned                len = 0;
        u16                     fifo_count;
        u16                     csr = musb_readw(epio, MUSB_RXCSR);
        struct musb_hw_ep       *hw_ep = &musb->endpoints[epnum];
        u8                      use_mode_1;

        if (hw_ep->is_shared_fifo)
                musb_ep = &hw_ep->ep_in;
        else
                musb_ep = &hw_ep->ep_out;

        fifo_count = musb_ep->packet_sz;

        /* Check if EP is disabled */
        if (!musb_ep->desc) {
                musb_dbg(musb, "ep:%s disabled - ignore request",
                                                musb_ep->end_point.name);
                return;
        }

        /* We shouldn't get here while DMA is active, but we do... */
        if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
                musb_dbg(musb, "DMA pending...");
                return;
        }

        if (csr & MUSB_RXCSR_P_SENDSTALL) {
                musb_dbg(musb, "%s stalling, RXCSR %04x",
                    musb_ep->end_point.name, csr);
                return;
        }

        if (is_cppi_enabled(musb) && is_buffer_mapped(req)) {
                struct dma_controller   *c = musb->dma_controller;
                struct dma_channel      *channel = musb_ep->dma;

                /* NOTE:  CPPI won't actually stop advancing the DMA
                 * queue after short packet transfers, so this is almost
                 * always going to run as IRQ-per-packet DMA so that
                 * faults will be handled correctly.
                 */
                if (c->channel_program(channel,
                                musb_ep->packet_sz,
                                !request->short_not_ok,
                                request->dma + request->actual,
                                request->length - request->actual)) {

                        /* make sure that if an rxpkt arrived after the irq,
                         * the cppi engine will be ready to take it as soon
                         * as DMA is enabled
                         */
                        csr &= ~(MUSB_RXCSR_AUTOCLEAR
                                        | MUSB_RXCSR_DMAMODE);
                        csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
                        musb_writew(epio, MUSB_RXCSR, csr);
                        return;
                }
        }

        if (csr & MUSB_RXCSR_RXPKTRDY) {
                fifo_count = musb_readw(epio, MUSB_RXCOUNT);

                /*
                 * Enable Mode 1 on RX transfers only when the short_not_ok
                 * flag is set. Currently short_not_ok is set only by the
                 * file_storage and f_mass_storage drivers.
                 */

                if (request->short_not_ok && fifo_count == musb_ep->packet_sz)
                        use_mode_1 = 1;
                else
                        use_mode_1 = 0;

                if (request->actual < request->length) {
                        if (!is_buffer_mapped(req))
                                goto buffer_aint_mapped;

                        if (musb_dma_inventra(musb)) {
                                struct dma_controller   *c;
                                struct dma_channel      *channel;
                                int                     use_dma = 0;
                                unsigned int transfer_size;

                                c = musb->dma_controller;
                                channel = musb_ep->dma;

        /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
         * mode 0 only. So we do not get endpoint interrupts due to DMA
         * completion. We only get interrupts from DMA controller.
         *
         * We could operate in DMA mode 1 if we knew the size of the transfer
         * in advance. For mass storage class, request->length = what the host
         * sends, so that'd work.  But for pretty much everything else,
         * request->length is routinely more than what the host sends. For
         * most of these gadgets, the end of the transfer is signified either
         * by a short packet, or by filling the last byte of the buffer.
         * (Sending extra data in that last packet should trigger an overflow
         * fault.)  But in mode 1, we don't get a DMA completion interrupt
         * for short packets.
         *
         * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
         * to get endpoint interrupt on every DMA req, but that didn't seem
         * to work reliably.
         *
         * REVISIT an updated g_file_storage can set req->short_not_ok, which
         * then becomes usable as a runtime "use mode 1" hint...
         */

                                /* Experimental: Mode1 works with mass storage use cases */
                                if (use_mode_1) {
                                        csr |= MUSB_RXCSR_AUTOCLEAR;
                                        musb_writew(epio, MUSB_RXCSR, csr);
                                        csr |= MUSB_RXCSR_DMAENAB;
                                        musb_writew(epio, MUSB_RXCSR, csr);

                                        /*
                                         * this special sequence (enabling and then
                                         * disabling MUSB_RXCSR_DMAMODE) is required
                                         * to get DMAReq to activate
                                         */
                                        musb_writew(epio, MUSB_RXCSR,
                                                csr | MUSB_RXCSR_DMAMODE);
                                        musb_writew(epio, MUSB_RXCSR, csr);

                                        transfer_size = min_t(unsigned int,
                                                        request->length -
                                                        request->actual,
                                                        channel->max_len);
                                        musb_ep->dma->desired_mode = 1;
                                } else {
                                        if (!musb_ep->hb_mult &&
                                                musb_ep->hw_ep->rx_double_buffered)
                                                csr |= MUSB_RXCSR_AUTOCLEAR;
                                        csr |= MUSB_RXCSR_DMAENAB;
                                        musb_writew(epio, MUSB_RXCSR, csr);

                                        transfer_size = min(request->length - request->actual,
                                                        (unsigned)fifo_count);
                                        musb_ep->dma->desired_mode = 0;
                                }

                                use_dma = c->channel_program(
                                                channel,
                                                musb_ep->packet_sz,
                                                channel->desired_mode,
                                                request->dma
                                                + request->actual,
                                                transfer_size);

                                if (use_dma)
                                        return;
                        }

                        if ((musb_dma_ux500(musb)) &&
                                (request->actual < request->length)) {

                                struct dma_controller *c;
                                struct dma_channel *channel;
                                unsigned int transfer_size = 0;

                                c = musb->dma_controller;
                                channel = musb_ep->dma;

                                /* In case first packet is short */
                                if (fifo_count < musb_ep->packet_sz)
                                        transfer_size = fifo_count;
                                else if (request->short_not_ok)
                                        transfer_size = min_t(unsigned int,
                                                        request->length -
                                                        request->actual,
                                                        channel->max_len);
                                else
                                        transfer_size = min_t(unsigned int,
                                                        request->length -
                                                        request->actual,
                                                        (unsigned)fifo_count);

                                csr &= ~MUSB_RXCSR_DMAMODE;
                                csr |= (MUSB_RXCSR_DMAENAB |
                                        MUSB_RXCSR_AUTOCLEAR);

                                musb_writew(epio, MUSB_RXCSR, csr);

                                if (transfer_size <= musb_ep->packet_sz) {
                                        musb_ep->dma->desired_mode = 0;
                                } else {
                                        musb_ep->dma->desired_mode = 1;
                                        /* Mode must be set after DMAENAB */
                                        csr |= MUSB_RXCSR_DMAMODE;
                                        musb_writew(epio, MUSB_RXCSR, csr);
                                }

                                if (c->channel_program(channel,
                                                        musb_ep->packet_sz,
                                                        channel->desired_mode,
                                                        request->dma
                                                        + request->actual,
                                                        transfer_size))

                                        return;
                        }

                        len = request->length - request->actual;
                        musb_dbg(musb, "%s OUT/RX pio fifo %d/%d, maxpacket %d",
                                        musb_ep->end_point.name,
                                        fifo_count, len,
                                        musb_ep->packet_sz);

                        fifo_count = min_t(unsigned, len, fifo_count);

                        if (tusb_dma_omap(musb)) {
                                struct dma_controller *c = musb->dma_controller;
                                struct dma_channel *channel = musb_ep->dma;
                                u32 dma_addr = request->dma + request->actual;
                                int ret;

                                ret = c->channel_program(channel,
                                                musb_ep->packet_sz,
                                                channel->desired_mode,
                                                dma_addr,
                                                fifo_count);
                                if (ret)
                                        return;
                        }

                        /*
                         * Unmap the DMA buffer back to the CPU if DMA channel
                         * programming failed. This buffer is mapped if the
                         * channel allocation was successful
                         */
                        unmap_dma_buffer(req, musb);

                        /*
                         * Clear DMAENAB and AUTOCLEAR for the
                         * PIO mode transfer
                         */
                        csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
                        musb_writew(epio, MUSB_RXCSR, csr);

buffer_aint_mapped:
                        musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
                                        (request->buf + request->actual));
                        request->actual += fifo_count;

                        /* REVISIT if we left anything in the fifo, flush
                         * it and report -EOVERFLOW
                         */

                        /* ack the read! */
                        csr |= MUSB_RXCSR_P_WZC_BITS;
                        csr &= ~MUSB_RXCSR_RXPKTRDY;
                        musb_writew(epio, MUSB_RXCSR, csr);
                }
        }

        /* reached the end, or short packet detected */
        if (request->actual == request->length ||
            fifo_count < musb_ep->packet_sz)
                musb_g_giveback(musb_ep, request, 0);
}

/*
 * Data ready for a request; called from IRQ
 */
void musb_g_rx(struct musb *musb, u8 epnum)
{
        u16                     csr;
        struct musb_request     *req;
        struct usb_request      *request;
        void __iomem            *mbase = musb->mregs;
        struct musb_ep          *musb_ep;
        void __iomem            *epio = musb->endpoints[epnum].regs;
        struct dma_channel      *dma;
        struct musb_hw_ep       *hw_ep = &musb->endpoints[epnum];

        if (hw_ep->is_shared_fifo)
                musb_ep = &hw_ep->ep_in;
        else
                musb_ep = &hw_ep->ep_out;

        musb_ep_select(mbase, epnum);

        req = next_request(musb_ep);
        if (!req)
                return;

        trace_musb_req_rx(req);
        request = &req->request;

        csr = musb_readw(epio, MUSB_RXCSR);
        dma = is_dma_capable() ? musb_ep->dma : NULL;

        musb_dbg(musb, "<== %s, rxcsr %04x%s %p", musb_ep->end_point.name,
                        csr, dma ? " (dma)" : "", request);

        if (csr & MUSB_RXCSR_P_SENTSTALL) {
                csr |= MUSB_RXCSR_P_WZC_BITS;
                csr &= ~MUSB_RXCSR_P_SENTSTALL;
                musb_writew(epio, MUSB_RXCSR, csr);
                return;
        }

        if (csr & MUSB_RXCSR_P_OVERRUN) {
                /* csr |= MUSB_RXCSR_P_WZC_BITS; */
                csr &= ~MUSB_RXCSR_P_OVERRUN;
                musb_writew(epio, MUSB_RXCSR, csr);

                musb_dbg(musb, "%s iso overrun on %p", musb_ep->name, request);
                if (request->status == -EINPROGRESS)
                        request->status = -EOVERFLOW;
        }
        if (csr & MUSB_RXCSR_INCOMPRX) {
                /* REVISIT not necessarily an error */
                musb_dbg(musb, "%s, incomprx", musb_ep->end_point.name);
        }

        if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
                /* "should not happen"; likely RXPKTRDY pending for DMA */
                musb_dbg(musb, "%s busy, csr %04x",
                        musb_ep->end_point.name, csr);
                return;
        }

        if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
                csr &= ~(MUSB_RXCSR_AUTOCLEAR
                                | MUSB_RXCSR_DMAENAB
                                | MUSB_RXCSR_DMAMODE);
                musb_writew(epio, MUSB_RXCSR,
                        MUSB_RXCSR_P_WZC_BITS | csr);

                request->actual += musb_ep->dma->actual_len;

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
        defined(CONFIG_USB_UX500_DMA)
                /* Autoclear doesn't clear RxPktRdy for short packets */
                if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
                                || (dma->actual_len
                                        & (musb_ep->packet_sz - 1))) {
                        /* ack the read! */
                        csr &= ~MUSB_RXCSR_RXPKTRDY;
                        musb_writew(epio, MUSB_RXCSR, csr);
                }

                /* incomplete, and not short? wait for next IN packet */
                if ((request->actual < request->length)
                                && (musb_ep->dma->actual_len
                                        == musb_ep->packet_sz)) {
                        /* In the double-buffered case, continue to unload
                         * the FIFO if another RX packet is ready in it.
                         */
                        csr = musb_readw(epio, MUSB_RXCSR);
                        if ((csr & MUSB_RXCSR_RXPKTRDY) &&
                                hw_ep->rx_double_buffered)
                                goto exit;
                        return;
                }
#endif
                musb_g_giveback(musb_ep, request, 0);
                /*
                 * In the giveback function the MUSB lock is
                 * released and acquired after sometime. During
                 * this time period the INDEX register could get
                 * changed by the gadget_queue function especially
                 * on SMP systems. Reselect the INDEX to be sure
                 * we are reading/modifying the right registers
                 */
                musb_ep_select(mbase, epnum);

                req = next_request(musb_ep);
                if (!req)
                        return;
        }
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
        defined(CONFIG_USB_UX500_DMA)
exit:
#endif
        /* Analyze request */
        rxstate(musb, req);
}

/* ------------------------------------------------------------ */

static int musb_gadget_enable(struct usb_ep *ep,
                        const struct usb_endpoint_descriptor *desc)
{
        unsigned long           flags;
        struct musb_ep          *musb_ep;
        struct musb_hw_ep       *hw_ep;
        void __iomem            *regs;
        struct musb             *musb;
        void __iomem    *mbase;
        u8              epnum;
        u16             csr;
        unsigned        tmp;
        int             status = -EINVAL;

        if (!ep || !desc)
                return -EINVAL;

        musb_ep = to_musb_ep(ep);
        hw_ep = musb_ep->hw_ep;
        regs = hw_ep->regs;
        musb = musb_ep->musb;
        mbase = musb->mregs;
        epnum = musb_ep->current_epnum;

        spin_lock_irqsave(&musb->lock, flags);

        if (musb_ep->desc) {
                status = -EBUSY;
                goto fail;
        }
        musb_ep->type = usb_endpoint_type(desc);

        /* check direction and (later) maxpacket size against endpoint */
        if (usb_endpoint_num(desc) != epnum)
                goto fail;

        /* REVISIT this rules out high bandwidth periodic transfers */
        tmp = usb_endpoint_maxp_mult(desc) - 1;
        if (tmp) {
                int ok;

                if (usb_endpoint_dir_in(desc))
                        ok = musb->hb_iso_tx;
                else
                        ok = musb->hb_iso_rx;

                if (!ok) {
                        musb_dbg(musb, "no support for high bandwidth ISO");
                        goto fail;
                }
                musb_ep->hb_mult = tmp;
        } else {
                musb_ep->hb_mult = 0;
        }

        musb_ep->packet_sz = usb_endpoint_maxp(desc);
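        /* worst case per-transfer FIFO demand: maxpacket times the HB multiplier */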
        tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);

        /* enable the interrupts for the endpoint, set the endpoint
         * packet size (or fail), set the mode, clear the fifo
         */
        musb_ep_select(mbase, epnum);
        if (usb_endpoint_dir_in(desc)) {

                if (hw_ep->is_shared_fifo)
                        musb_ep->is_in = 1;
                if (!musb_ep->is_in)
                        goto fail;

                if (tmp > hw_ep->max_packet_sz_tx) {
                        musb_dbg(musb, "packet size beyond hardware FIFO size");
                        goto fail;
                }

                musb->intrtxe |= (1 << epnum);
                musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);

                /* REVISIT: if can_bulk_split(), adjust by updating "tmp";
                 * likewise for high bandwidth periodic TX
                 */
                /* Set TXMAXP with the FIFO size of the endpoint
                 * to disable double buffering mode.
                 */
                if (can_bulk_split(musb, musb_ep->type))
                        musb_ep->hb_mult = (hw_ep->max_packet_sz_tx /
                                                musb_ep->packet_sz) - 1;
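                /* TXMAXP: max payload in the low bits, packet multiplier
                 * (minus one) in the bits above
                 */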
                musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
                                | (musb_ep->hb_mult << 11));

                csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
                if (musb_readw(regs, MUSB_TXCSR)
                                & MUSB_TXCSR_FIFONOTEMPTY)
                        csr |= MUSB_TXCSR_FLUSHFIFO;
                if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
                        csr |= MUSB_TXCSR_P_ISO;

                /* set twice in case of double buffering */
                musb_writew(regs, MUSB_TXCSR, csr);
                /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
                musb_writew(regs, MUSB_TXCSR, csr);

        } else {

                if (hw_ep->is_shared_fifo)
                        musb_ep->is_in = 0;
                if (musb_ep->is_in)
                        goto fail;

                if (tmp > hw_ep->max_packet_sz_rx) {
                        musb_dbg(musb, "packet size beyond hardware FIFO size");
                        goto fail;
                }

                musb->intrrxe |= (1 << epnum);
                musb_writew(mbase, MUSB_INTRRXE, musb->intrrxe);

                /* REVISIT: if can_bulk_combine(), adjust by updating "tmp";
                 * likewise for high bandwidth periodic RX
                 */
                /* Set RXMAXP with the FIFO size of the endpoint
                 * to disable double buffering mode.
                 */
                musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
                                | (musb_ep->hb_mult << 11));

                /* force shared fifo to OUT-only mode */
                if (hw_ep->is_shared_fifo) {
                        csr = musb_readw(regs, MUSB_TXCSR);
                        csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
                        musb_writew(regs, MUSB_TXCSR, csr);
                }

                csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
                if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
                        csr |= MUSB_RXCSR_P_ISO;
                else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
                        csr |= MUSB_RXCSR_DISNYET;

                /* set twice in case of double buffering */
                musb_writew(regs, MUSB_RXCSR, csr);
                musb_writew(regs, MUSB_RXCSR, csr);
        }

        /* NOTE:  all the I/O code _should_ work fine without DMA, in case
         * for some reason you run out of channels here.
         */
        if (is_dma_capable() && musb->dma_controller) {
                struct dma_controller   *c = musb->dma_controller;

                musb_ep->dma = c->channel_alloc(c, hw_ep,
                                (desc->bEndpointAddress & USB_DIR_IN));
        } else
                musb_ep->dma = NULL;

        musb_ep->desc = desc;
        musb_ep->busy = 0;
        musb_ep->wedged = 0;
        status = 0;

        pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
                        musb_driver_name, musb_ep->end_point.name,
                        musb_ep_xfertype_string(musb_ep->type),
                        musb_ep->is_in ? "IN" : "OUT",
                        musb_ep->dma ? "dma, " : "",
                        musb_ep->packet_sz);

        schedule_delayed_work(&musb->irq_work, 0);

fail:
        spin_unlock_irqrestore(&musb->lock, flags);
        return status;
}

/*
 * Disable an endpoint, flushing all queued requests.
 */
static int musb_gadget_disable(struct usb_ep *ep)
{
        unsigned long   flags;
        struct musb     *musb;
        u8              epnum;
        struct musb_ep  *musb_ep;
        void __iomem    *epio;
        int             status = 0;

        musb_ep = to_musb_ep(ep);
        musb = musb_ep->musb;
        epnum = musb_ep->current_epnum;
        epio = musb->endpoints[epnum].regs;

        spin_lock_irqsave(&musb->lock, flags);
        musb_ep_select(musb->mregs, epnum);

        /* zero the endpoint sizes */
        if (musb_ep->is_in) {
                musb->intrtxe &= ~(1 << epnum);
                musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
                musb_writew(epio, MUSB_TXMAXP, 0);
        } else {
                musb->intrrxe &= ~(1 << epnum);
                musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
                musb_writew(epio, MUSB_RXMAXP, 0);
        }

        /* abort all pending DMA and requests */
        nuke(musb_ep, -ESHUTDOWN);

        musb_ep->desc = NULL;
        musb_ep->end_point.desc = NULL;

        schedule_delayed_work(&musb->irq_work, 0);

        spin_unlock_irqrestore(&musb->lock, flags);

        musb_dbg(musb, "%s", musb_ep->end_point.name);

        return status;
}

/*
 * Allocate a request for an endpoint.
 * Reused by ep0 code.
 */
struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
        struct musb_ep          *musb_ep = to_musb_ep(ep);
        struct musb_request     *request = NULL;

        request = kzalloc(sizeof(*request), gfp_flags);
        if (!request)
                return NULL;

        request->request.dma = DMA_ADDR_INVALID;
        request->epnum = musb_ep->current_epnum;
        request->ep = musb_ep;

        trace_musb_req_alloc(request);
        return &request->request;
}

/*
 * Free a request.
 * Reused by ep0 code.
 */
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
        struct musb_request *request = to_musb_request(req);

        trace_musb_req_free(request);
        kfree(request);
}

static LIST_HEAD(buffers);

struct free_record {
        struct list_head        list;
        struct device           *dev;
        unsigned                bytes;
        dma_addr_t              dma;
};

/*
 * Context: controller locked, IRQs blocked.
 */
void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
        trace_musb_req_start(req);
        musb_ep_select(musb->mregs, req->epnum);
        if (req->tx)
                txstate(musb, req);
        else
                rxstate(musb, req);
}

static int musb_ep_restart_resume_work(struct musb *musb, void *data)
{
        struct musb_request *req = data;

        musb_ep_restart(musb, req);

        return 0;
}

static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
                        gfp_t gfp_flags)
{
        struct musb_ep          *musb_ep;
        struct musb_request     *request;
        struct musb             *musb;
        int                     status;
        unsigned long           lockflags;

        if (!ep || !req)
                return -EINVAL;
        if (!req->buf)
                return -ENODATA;

        musb_ep = to_musb_ep(ep);
        musb = musb_ep->musb;

        request = to_musb_request(req);
        request->musb = musb;

        if (request->ep != musb_ep)
                return -EINVAL;

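        /*
         * pm_runtime_get() may return -EINPROGRESS while the resume is still
         * in flight; that is fine here.  Any other negative value means the
         * resume failed, so drop the usage count we just took and bail out.
         */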
        status = pm_runtime_get(musb->controller);
        if ((status != -EINPROGRESS) && status < 0) {
                dev_err(musb->controller,
                        "pm runtime get failed in %s\n",
                        __func__);
                pm_runtime_put_noidle(musb->controller);

                return status;
        }
        status = 0;

        trace_musb_req_enq(request);

        /* request is mine now... */
        request->request.actual = 0;
        request->request.status = -EINPROGRESS;
        request->epnum = musb_ep->current_epnum;
        request->tx = musb_ep->is_in;

        map_dma_buffer(request, musb, musb_ep);

        spin_lock_irqsave(&musb->lock, lockflags);

        /* don't queue if the ep is down */
        if (!musb_ep->desc) {
                musb_dbg(musb, "req %p queued to %s while ep %s",
                                req, ep->name, "disabled");
                status = -ESHUTDOWN;
                unmap_dma_buffer(request, musb);
                goto unlock;
        }

        /* add request to the list */
        list_add_tail(&request->list, &musb_ep->req_list);

        /* if this is the head of the queue, start i/o ... */
        if (!musb_ep->busy && &request->list == musb_ep->req_list.next) {
                status = musb_queue_resume_work(musb,
                                                musb_ep_restart_resume_work,
                                                request);
                if (status < 0)
                        dev_err(musb->controller, "%s resume work: %i\n",
                                __func__, status);
        }

unlock:
        spin_unlock_irqrestore(&musb->lock, lockflags);
        pm_runtime_mark_last_busy(musb->controller);
        pm_runtime_put_autosuspend(musb->controller);

        return status;
}

static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
{
        struct musb_ep          *musb_ep = to_musb_ep(ep);
        struct musb_request     *req = to_musb_request(request);
        struct musb_request     *r;
        unsigned long           flags;
        int                     status = 0;
        struct musb             *musb = musb_ep->musb;

        if (!ep || !request || req->ep != musb_ep)
                return -EINVAL;

        trace_musb_req_deq(req);

        spin_lock_irqsave(&musb->lock, flags);

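        /* make sure the request is actually queued on this endpoint */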
1291        list_for_each_entry(r, &musb_ep->req_list, list) {
1292                if (r == req)
1293                        break;
1294        }
1295        if (r != req) {
1296                dev_err(musb->controller, "request %p not queued to %s\n",
1297                                request, ep->name);
1298                status = -EINVAL;
1299                goto done;
1300        }
1301
1302        /* if the hardware doesn't have the request, easy ... */
1303        if (musb_ep->req_list.next != &req->list || musb_ep->busy)
1304                musb_g_giveback(musb_ep, request, -ECONNRESET);
1305
1306        /* ... else abort the dma transfer ... */
1307        else if (is_dma_capable() && musb_ep->dma) {
1308                struct dma_controller   *c = musb->dma_controller;
1309
1310                musb_ep_select(musb->mregs, musb_ep->current_epnum);
1311                if (c->channel_abort)
1312                        status = c->channel_abort(musb_ep->dma);
1313                else
1314                        status = -EBUSY;
1315                if (status == 0)
1316                        musb_g_giveback(musb_ep, request, -ECONNRESET);
1317        } else {
1318                /* NOTE: by sticking to easily tested hardware/driver states,
1319                 * we leave counting of in-flight packets imprecise.
1320                 */
1321                musb_g_giveback(musb_ep, request, -ECONNRESET);
1322        }
1323
1324done:
1325        spin_unlock_irqrestore(&musb->lock, flags);
1326        return status;
1327}
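
/*
 * For illustration only: a function driver cancels a pending request via
 * usb_ep_dequeue(), which lands in musb_gadget_dequeue() above; the request's
 * completion callback then runs with status -ECONNRESET.
 */
#if 0
        if (usb_ep_dequeue(ep, req))            /* musb_gadget_dequeue() */
                pr_debug("request not queued, or DMA abort failed\n");
#endif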
1328
1329/*
1330 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
1331 * data but will queue requests.
1332 *
1333 * exported to ep0 code
1334 */
1335static int musb_gadget_set_halt(struct usb_ep *ep, int value)
1336{
1337        struct musb_ep          *musb_ep = to_musb_ep(ep);
1338        u8                      epnum = musb_ep->current_epnum;
1339        struct musb             *musb = musb_ep->musb;
1340        void __iomem            *epio = musb->endpoints[epnum].regs;
1341        void __iomem            *mbase;
1342        unsigned long           flags;
1343        u16                     csr;
1344        struct musb_request     *request;
1345        int                     status = 0;
1346
1347        if (!ep)
1348                return -EINVAL;
1349        mbase = musb->mregs;
1350
1351        spin_lock_irqsave(&musb->lock, flags);
1352
1353        if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) {
1354                status = -EINVAL;
1355                goto done;
1356        }
1357
1358        musb_ep_select(mbase, epnum);
1359
1360        request = next_request(musb_ep);
1361        if (value) {
1362                if (request) {
1363                        musb_dbg(musb, "request in progress, cannot halt %s",
1364                            ep->name);
1365                        status = -EAGAIN;
1366                        goto done;
1367                }
1368                /* Cannot portably stall with non-empty FIFO */
1369                if (musb_ep->is_in) {
1370                        csr = musb_readw(epio, MUSB_TXCSR);
1371                        if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1372                                musb_dbg(musb, "FIFO busy, cannot halt %s",
1373                                                ep->name);
1374                                status = -EAGAIN;
1375                                goto done;
1376                        }
1377                }
1378        } else
1379                musb_ep->wedged = 0;
1380
1381        /* set/clear the stall and toggle bits */
1382        musb_dbg(musb, "%s: %s stall", ep->name, value ? "set" : "clear");
1383        if (musb_ep->is_in) {
1384                csr = musb_readw(epio, MUSB_TXCSR);
1385                csr |= MUSB_TXCSR_P_WZC_BITS
1386                        | MUSB_TXCSR_CLRDATATOG;
1387                if (value)
1388                        csr |= MUSB_TXCSR_P_SENDSTALL;
1389                else
1390                        csr &= ~(MUSB_TXCSR_P_SENDSTALL
1391                                | MUSB_TXCSR_P_SENTSTALL);
1392                csr &= ~MUSB_TXCSR_TXPKTRDY;
1393                musb_writew(epio, MUSB_TXCSR, csr);
1394        } else {
1395                csr = musb_readw(epio, MUSB_RXCSR);
1396                csr |= MUSB_RXCSR_P_WZC_BITS
1397                        | MUSB_RXCSR_FLUSHFIFO
1398                        | MUSB_RXCSR_CLRDATATOG;
1399                if (value)
1400                        csr |= MUSB_RXCSR_P_SENDSTALL;
1401                else
1402                        csr &= ~(MUSB_RXCSR_P_SENDSTALL
1403                                | MUSB_RXCSR_P_SENTSTALL);
1404                musb_writew(epio, MUSB_RXCSR, csr);
1405        }
1406
1407        /* maybe start the first request in the queue */
1408        if (!musb_ep->busy && !value && request) {
1409                musb_dbg(musb, "restarting the request");
1410                musb_ep_restart(musb, request);
1411        }
1412
1413done:
1414        spin_unlock_irqrestore(&musb->lock, flags);
1415        return status;
1416}
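
/*
 * For illustration only: the function-driver view of the halt logic above.
 * usb_ep_set_halt() maps to musb_gadget_set_halt(ep, 1) and can return
 * -EAGAIN while a request is in progress or the TX FIFO still holds data.
 */
#if 0
        int ret = usb_ep_set_halt(ep);

        if (ret == -EAGAIN) {
                /* a request is in flight or the FIFO isn't empty; retry later */
        }
        /* later: the host clears it via CLEAR_FEATURE(HALT), or the driver: */
        usb_ep_clear_halt(ep);                  /* musb_gadget_set_halt(ep, 0) */
#endif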
1417
1418/*
1419 * Set the halt feature such that subsequent clear-halt requests are ignored
1420 */
1421static int musb_gadget_set_wedge(struct usb_ep *ep)
1422{
1423        struct musb_ep          *musb_ep = to_musb_ep(ep);
1424
1425        if (!ep)
1426                return -EINVAL;
1427
1428        musb_ep->wedged = 1;
1429
1430        return usb_ep_set_halt(ep);
1431}
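
/*
 * For illustration only: mass-storage-style use of the wedge; the endpoint
 * then stays halted across host CLEAR_FEATURE(HALT) requests until the
 * driver clears it itself. "bulk_in" is a hypothetical endpoint.
 */
#if 0
        usb_ep_set_wedge(bulk_in);              /* musb_gadget_set_wedge() */
#endif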
1432
1433static int musb_gadget_fifo_status(struct usb_ep *ep)
1434{
1435        struct musb_ep          *musb_ep = to_musb_ep(ep);
1436        void __iomem            *epio = musb_ep->hw_ep->regs;
1437        int                     retval = -EINVAL;
1438
1439        if (musb_ep->desc && !musb_ep->is_in) {
1440                struct musb             *musb = musb_ep->musb;
1441                int                     epnum = musb_ep->current_epnum;
1442                void __iomem            *mbase = musb->mregs;
1443                unsigned long           flags;
1444
1445                spin_lock_irqsave(&musb->lock, flags);
1446
1447                musb_ep_select(mbase, epnum);
1448                /* FIXME return zero unless RXPKTRDY is set */
1449                retval = musb_readw(epio, MUSB_RXCOUNT);
1450
1451                spin_unlock_irqrestore(&musb->lock, flags);
1452        }
1453        return retval;
1454}
1455
1456static void musb_gadget_fifo_flush(struct usb_ep *ep)
1457{
1458        struct musb_ep  *musb_ep = to_musb_ep(ep);
1459        struct musb     *musb = musb_ep->musb;
1460        u8              epnum = musb_ep->current_epnum;
1461        void __iomem    *epio = musb->endpoints[epnum].regs;
1462        void __iomem    *mbase;
1463        unsigned long   flags;
1464        u16             csr;
1465
1466        mbase = musb->mregs;
1467
1468        spin_lock_irqsave(&musb->lock, flags);
1469        musb_ep_select(mbase, (u8) epnum);
1470
1471        /* disable interrupts */
1472        musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe & ~(1 << epnum));
1473
1474        if (musb_ep->is_in) {
1475                csr = musb_readw(epio, MUSB_TXCSR);
1476                if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1477                        csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
1478                        /*
1479                         * Setting both TXPKTRDY and FLUSHFIFO would make the
1480                         * controller interrupt the current FIFO load without
1481                         * flushing the packets already loaded, so clear TXPKTRDY.
1482                         */
1483                        csr &= ~MUSB_TXCSR_TXPKTRDY;
1484                        musb_writew(epio, MUSB_TXCSR, csr);
1485                        /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1486                        musb_writew(epio, MUSB_TXCSR, csr);
1487                }
1488        } else {
1489                csr = musb_readw(epio, MUSB_RXCSR);
1490                csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
1491                musb_writew(epio, MUSB_RXCSR, csr);
1492                musb_writew(epio, MUSB_RXCSR, csr);
1493        }
1494
1495        /* re-enable interrupt */
1496        musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
1497        spin_unlock_irqrestore(&musb->lock, flags);
1498}
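
/*
 * For illustration only: the matching function-driver entry points.
 * usb_ep_fifo_status() reports bytes pending in an OUT FIFO (RXCOUNT above);
 * usb_ep_fifo_flush() discards buffered data, e.g. on interface teardown.
 */
#if 0
        int pending = usb_ep_fifo_status(ep);   /* musb_gadget_fifo_status() */

        if (pending > 0)
                usb_ep_fifo_flush(ep);          /* musb_gadget_fifo_flush() */
#endif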
1499
1500static const struct usb_ep_ops musb_ep_ops = {
1501        .enable         = musb_gadget_enable,
1502        .disable        = musb_gadget_disable,
1503        .alloc_request  = musb_alloc_request,
1504        .free_request   = musb_free_request,
1505        .queue          = musb_gadget_queue,
1506        .dequeue        = musb_gadget_dequeue,
1507        .set_halt       = musb_gadget_set_halt,
1508        .set_wedge      = musb_gadget_set_wedge,
1509        .fifo_status    = musb_gadget_fifo_status,
1510        .fifo_flush     = musb_gadget_fifo_flush
1511};
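
/*
 * For illustration only: the gadget core dispatches the generic usb_ep_*()
 * helpers through this ops table; a simplified sketch of that indirection
 * (the real udc core adds checks and tracing):
 */
#if 0
int usb_ep_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags)
{
        return ep->ops->queue(ep, req, gfp_flags);      /* musb_gadget_queue */
}
#endif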
1512
1513/* ----------------------------------------------------------------------- */
1514
1515static int musb_gadget_get_frame(struct usb_gadget *gadget)
1516{
1517        struct musb     *musb = gadget_to_musb(gadget);
1518
1519        return (int)musb_readw(musb->mregs, MUSB_FRAME);
1520}
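
/*
 * For illustration only: function drivers read the current (micro)frame
 * through the gadget core, ending up in musb_gadget_get_frame() above.
 */
#if 0
        int frame = usb_gadget_frame_number(gadget);    /* MUSB_FRAME */
#endif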
1521
1522static int musb_gadget_wakeup(struct usb_gadget *gadget)
1523{
1524        struct musb     *musb = gadget_to_musb(gadget);
1525        void __iomem    *mregs = musb->mregs;
1526        unsigned long   flags;
1527        int             status = -EINVAL;
1528        u8              power, devctl;
1529        int             retries;
1530
1531        spin_lock_irqsave(&musb->lock, flags);
1532
1533        switch (musb->xceiv->otg->state) {
1534        case OTG_STATE_B_PERIPHERAL:
1535                /* NOTE:  OTG state machine doesn't include B_SUSPENDED;
1536                 * that's part of the standard usb 1.1 state machine, and
1537                 * doesn't affect OTG transitions.
1538                 */
1539                if (musb->may_wakeup && musb->is_suspended)
1540                        break;
1541                goto done;
1542        case OTG_STATE_B_IDLE:
1543                /* Start SRP ... OTG not required. */
1544                devctl = musb_readb(mregs, MUSB_DEVCTL);
1545                musb_dbg(musb, "Sending SRP: devctl: %02x", devctl);
1546                devctl |= MUSB_DEVCTL_SESSION;
1547                musb_writeb(mregs, MUSB_DEVCTL, devctl);
1548                devctl = musb_readb(mregs, MUSB_DEVCTL);
1549                retries = 100;
1550                while (!(devctl & MUSB_DEVCTL_SESSION)) {
1551                        devctl = musb_readb(mregs, MUSB_DEVCTL);
1552                        if (retries-- < 1)
1553                                break;
1554                }
1555                retries = 10000;
1556                while (devctl & MUSB_DEVCTL_SESSION) {
1557                        devctl = musb_readb(mregs, MUSB_DEVCTL);
1558                        if (retries-- < 1)
1559                                break;
1560                }
1561
1562                spin_unlock_irqrestore(&musb->lock, flags);
1563                otg_start_srp(musb->xceiv->otg);
1564                spin_lock_irqsave(&musb->lock, flags);
1565
1566                /* Block idling for at least 1s */
1567                musb_platform_try_idle(musb,
1568                        jiffies + msecs_to_jiffies(1000));
1569
1570                status = 0;
1571                goto done;
1572        default:
1573                musb_dbg(musb, "Unhandled wake: %s",
1574                        usb_otg_state_string(musb->xceiv->otg->state));
1575                goto done;
1576        }
1577
1578        status = 0;
1579
1580        power = musb_readb(mregs, MUSB_POWER);
1581        power |= MUSB_POWER_RESUME;
1582        musb_writeb(mregs, MUSB_POWER, power);
1583        musb_dbg(musb, "issue wakeup");
1584
1585        /* FIXME do this next chunk in a timer callback, without mdelay */
1586        mdelay(2);
1587
1588        power = musb_readb(mregs, MUSB_POWER);
1589        power &= ~MUSB_POWER_RESUME;
1590        musb_writeb(mregs, MUSB_POWER, power);
1591done:
1592        spin_unlock_irqrestore(&musb->lock, flags);
1593        return status;
1594}
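
/*
 * For illustration only: remote wakeup as requested by a function driver.
 * The call only succeeds above if the host armed remote wakeup
 * (musb->may_wakeup) and the link is actually suspended.
 */
#if 0
        if (usb_gadget_wakeup(gadget))          /* musb_gadget_wakeup() */
                pr_debug("remote wakeup not possible now\n");
#endif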
1595
1596static int
1597musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
1598{
1599        gadget->is_selfpowered = !!is_selfpowered;
1600        return 0;
1601}
1602
1603static void musb_pullup(struct musb *musb, int is_on)
1604{
1605        u8 power;
1606
1607        power = musb_readb(musb->mregs, MUSB_POWER);
1608        if (is_on)
1609                power |= MUSB_POWER_SOFTCONN;
1610        else
1611                power &= ~MUSB_POWER_SOFTCONN;
1612
1613        /* FIXME if on, HdrcStart; if off, HdrcStop */
1614
1615        musb_dbg(musb, "gadget D+ pullup %s",
1616                is_on ? "on" : "off");
1617        musb_writeb(musb->mregs, MUSB_POWER, power);
1618}
1619
1620#if 0
1621static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
1622{
1623        musb_dbg(gadget_to_musb(gadget), "<= %s =>", __func__);
1624
1625        /*
1626         * FIXME iff driver's softconnect flag is set (as it is during probe,
1627         * though that can clear it), just musb_pullup().
1628         */
1629
1630        return -EINVAL;
1631}
1632#endif
1633
1634static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1635{
1636        struct musb     *musb = gadget_to_musb(gadget);
1637
1638        if (!musb->xceiv->set_power)
1639                return -EOPNOTSUPP;
1640        return usb_phy_set_power(musb->xceiv, mA);
1641}
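
/*
 * For illustration only: the composite core calls this while handling
 * SET_CONFIGURATION, with the budget from the chosen configuration;
 * bMaxPower is in 2 mA units, hence the doubling in this hypothetical
 * sketch ("config" is a struct usb_configuration).
 */
#if 0
        usb_gadget_vbus_draw(gadget, config->desc.bMaxPower * 2);
#endif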
1642
1643static void musb_gadget_work(struct work_struct *work)
1644{
1645        struct musb *musb;
1646        unsigned long flags;
1647
1648        musb = container_of(work, struct musb, gadget_work.work);
1649        pm_runtime_get_sync(musb->controller);
1650        spin_lock_irqsave(&musb->lock, flags);
1651        musb_pullup(musb, musb->softconnect);
1652        spin_unlock_irqrestore(&musb->lock, flags);
1653        pm_runtime_mark_last_busy(musb->controller);
1654        pm_runtime_put_autosuspend(musb->controller);
1655}
1656
1657static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1658{
1659        struct musb     *musb = gadget_to_musb(gadget);
1660        unsigned long   flags;
1661
1662        is_on = !!is_on;
1663
1664        /* NOTE: this assumes we are sensing vbus; we'd rather
1665         * not pullup unless the B-session is active.
1666         */
1667        spin_lock_irqsave(&musb->lock, flags);
1668        if (is_on != musb->softconnect) {
1669                musb->softconnect = is_on;
1670                schedule_delayed_work(&musb->gadget_work, 0);
1671        }
1672        spin_unlock_irqrestore(&musb->lock, flags);
1673
1674        return 0;
1675}
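
/*
 * For illustration only: the soft-connect knob as seen from above the UDC
 * core; both helpers end up toggling MUSB_POWER_SOFTCONN via musb_pullup().
 */
#if 0
        usb_gadget_connect(gadget);     /* musb_gadget_pullup(gadget, 1) */
        usb_gadget_disconnect(gadget);  /* musb_gadget_pullup(gadget, 0) */
#endif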
1676
1677static int musb_gadget_start(struct usb_gadget *g,
1678                struct usb_gadget_driver *driver);
1679static int musb_gadget_stop(struct usb_gadget *g);
1680
1681static const struct usb_gadget_ops musb_gadget_operations = {
1682        .get_frame              = musb_gadget_get_frame,
1683        .wakeup                 = musb_gadget_wakeup,
1684        .set_selfpowered        = musb_gadget_set_self_powered,
1685        /* .vbus_session                = musb_gadget_vbus_session, */
1686        .vbus_draw              = musb_gadget_vbus_draw,
1687        .pullup                 = musb_gadget_pullup,
1688        .udc_start              = musb_gadget_start,
1689        .udc_stop               = musb_gadget_stop,
1690};
1691
1692/* ----------------------------------------------------------------------- */
1693
1694/* Registration */
1695
1696/* Only this registration code "knows" the rule (from USB standards)
1697 * about there being only one external upstream port.  It assumes
1698 * all peripheral ports are external...
1699 */
1700
1701static void
1702init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
1703{
1704        struct musb_hw_ep       *hw_ep = musb->endpoints + epnum;
1705
1706        memset(ep, 0, sizeof *ep);
1707
1708        ep->current_epnum = epnum;
1709        ep->musb = musb;
1710        ep->hw_ep = hw_ep;
1711        ep->is_in = is_in;
1712
1713        INIT_LIST_HEAD(&ep->req_list);
1714
1715        sprintf(ep->name, "ep%d%s", epnum,
1716                        (!epnum || hw_ep->is_shared_fifo) ? "" : (
1717                                is_in ? "in" : "out"));
1718        ep->end_point.name = ep->name;
1719        INIT_LIST_HEAD(&ep->end_point.ep_list);
1720        if (!epnum) {
1721                usb_ep_set_maxpacket_limit(&ep->end_point, 64);
1722                ep->end_point.caps.type_control = true;
1723                ep->end_point.ops = &musb_g_ep0_ops;
1724                musb->g.ep0 = &ep->end_point;
1725        } else {
1726                if (is_in)
1727                        usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_tx);
1728                else
1729                        usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_rx);
1730                ep->end_point.caps.type_iso = true;
1731                ep->end_point.caps.type_bulk = true;
1732                ep->end_point.caps.type_int = true;
1733                ep->end_point.ops = &musb_ep_ops;
1734                list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
1735        }
1736
1737        if (!epnum || hw_ep->is_shared_fifo) {
1738                ep->end_point.caps.dir_in = true;
1739                ep->end_point.caps.dir_out = true;
1740        } else if (is_in)
1741                ep->end_point.caps.dir_in = true;
1742        else
1743                ep->end_point.caps.dir_out = true;
1744}
1745
1746/*
1747 * Initialize the endpoints exposed to peripheral drivers, with backlinks
1748 * to the rest of the driver state.
1749 */
1750static inline void musb_g_init_endpoints(struct musb *musb)
1751{
1752        u8                      epnum;
1753        struct musb_hw_ep       *hw_ep;
1754        unsigned                count = 0;
1755
1756        /* initialize endpoint list just once */
1757        INIT_LIST_HEAD(&(musb->g.ep_list));
1758
1759        for (epnum = 0, hw_ep = musb->endpoints;
1760                        epnum < musb->nr_endpoints;
1761                        epnum++, hw_ep++) {
1762                if (hw_ep->is_shared_fifo /* || !epnum */) {
1763                        init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
1764                        count++;
1765                } else {
1766                        if (hw_ep->max_packet_sz_tx) {
1767                                init_peripheral_ep(musb, &hw_ep->ep_in,
1768                                                        epnum, 1);
1769                                count++;
1770                        }
1771                        if (hw_ep->max_packet_sz_rx) {
1772                                init_peripheral_ep(musb, &hw_ep->ep_out,
1773                                                        epnum, 0);
1774                                count++;
1775                        }
1776                }
1777        }
1778}
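
/*
 * For illustration only: after musb_g_init_endpoints() the gadget exposes
 * "ep0" plus "ep<N>in"/"ep<N>out" (just "ep<N>" on shared-FIFO hardware);
 * a hypothetical debug walk over the resulting list:
 */
#if 0
        struct usb_ep *uep;

        list_for_each_entry(uep, &musb->g.ep_list, ep_list)
                pr_info("%s: maxpacket %d\n", uep->name, uep->maxpacket);
#endif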
1779
1780/* called once during driver setup to initialize and link into
1781 * the driver model; memory is zeroed.
1782 */
1783int musb_gadget_setup(struct musb *musb)
1784{
1785        int status;
1786
1787        /* REVISIT minor race:  if (erroneously) setting up two
1788         * musb peripherals at the same time, only the bus lock
1789         * is probably held.
1790         */
1791
1792        musb->g.ops = &musb_gadget_operations;
1793        musb->g.max_speed = USB_SPEED_HIGH;
1794        musb->g.speed = USB_SPEED_UNKNOWN;
1795
1796        MUSB_DEV_MODE(musb);
1797        musb->xceiv->otg->state = OTG_STATE_B_IDLE;
1798
1799        /* this "gadget" abstracts/virtualizes the controller */
1800        musb->g.name = musb_driver_name;
1801        /* don't support otg protocols */
1802        musb->g.is_otg = 0;
1803        INIT_DELAYED_WORK(&musb->gadget_work, musb_gadget_work);
1804        musb_g_init_endpoints(musb);
1805
1806        musb->is_active = 0;
1807        musb_platform_try_idle(musb, 0);
1808
1809        status = usb_add_gadget_udc(musb->controller, &musb->g);
1810        if (status)
1811                goto err;
1812
1813        return 0;
1814err:
1815        musb->g.dev.parent = NULL;
1816        device_unregister(&musb->g.dev);
1817        return status;
1818}
1819
1820void musb_gadget_cleanup(struct musb *musb)
1821{
1822        if (musb->port_mode == MUSB_HOST)
1823                return;
1824
1825        cancel_delayed_work_sync(&musb->gadget_work);
1826        usb_del_gadget_udc(&musb->g);
1827}
1828
1829/*
1830 * Register the gadget driver. Used by gadget drivers when
1831 * registering themselves with the controller.
1832 *
1833 * -EINVAL driver not accepted (e.g. max_speed below high speed)
1834 * -EBUSY another gadget is already using the controller
1835 * -ENOMEM no memory to perform the operation
1836 *
1837 * @param driver the gadget driver
1838 * @return <0 if error, 0 if everything is fine
1839 */
1840static int musb_gadget_start(struct usb_gadget *g,
1841                struct usb_gadget_driver *driver)
1842{
1843        struct musb             *musb = gadget_to_musb(g);
1844        struct usb_otg          *otg = musb->xceiv->otg;
1845        unsigned long           flags;
1846        int                     retval = 0;
1847
1848        if (driver->max_speed < USB_SPEED_HIGH) {
1849                retval = -EINVAL;
1850                goto err;
1851        }
1852
1853        pm_runtime_get_sync(musb->controller);
1854
1855        musb->softconnect = 0;
1856        musb->gadget_driver = driver;
1857
1858        spin_lock_irqsave(&musb->lock, flags);
1859        musb->is_active = 1;
1860
1861        otg_set_peripheral(otg, &musb->g);
1862        musb->xceiv->otg->state = OTG_STATE_B_IDLE;
1863        spin_unlock_irqrestore(&musb->lock, flags);
1864
1865        musb_start(musb);
1866
1867        /* REVISIT:  funcall to other code, which also
1868         * handles power budgeting ... this way also
1869         * ensures HdrcStart is indirectly called.
1870         */
1871        if (musb->xceiv->last_event == USB_EVENT_ID)
1872                musb_platform_set_vbus(musb, 1);
1873
1874        pm_runtime_mark_last_busy(musb->controller);
1875        pm_runtime_put_autosuspend(musb->controller);
1876
1877        return 0;
1878
1879err:
1880        return retval;
1881}
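
/*
 * For illustration only: a minimal composite driver skeleton that would bind
 * through musb_gadget_start(); note .max_speed must be at least
 * USB_SPEED_HIGH or the check above rejects it. All "demo_*" names are
 * hypothetical.
 */
#if 0
static struct usb_composite_driver demo_driver = {
        .name           = "g_demo",
        .dev            = &demo_device_desc,
        .strings        = demo_strings,
        .max_speed      = USB_SPEED_HIGH,
        .bind           = demo_bind,
};
module_usb_composite_driver(demo_driver);
#endif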
1882
1883/*
1884 * Unregister the gadget driver. Used by gadget drivers when
1885 * unregistering themselves from the controller.
1886 *
1887 * @param g the gadget whose driver is being unregistered
1888 */
1889static int musb_gadget_stop(struct usb_gadget *g)
1890{
1891        struct musb     *musb = gadget_to_musb(g);
1892        unsigned long   flags;
1893
1894        pm_runtime_get_sync(musb->controller);
1895
1896        /*
1897         * REVISIT always use otg_set_peripheral() here too;
1898         * this needs to shut down the OTG engine.
1899         */
1900
1901        spin_lock_irqsave(&musb->lock, flags);
1902
1903        musb_hnp_stop(musb);
1904
1905        (void) musb_gadget_vbus_draw(&musb->g, 0);
1906
1907        musb->xceiv->otg->state = OTG_STATE_UNDEFINED;
1908        musb_stop(musb);
1909        otg_set_peripheral(musb->xceiv->otg, NULL);
1910
1911        musb->is_active = 0;
1912        musb->gadget_driver = NULL;
1913        musb_platform_try_idle(musb, 0);
1914        spin_unlock_irqrestore(&musb->lock, flags);
1915
1916        /*
1917         * FIXME we need to be able to register another
1918         * gadget driver here and have everything work;
1919         * that currently misbehaves.
1920         */
1921
1922        /* Force check of devctl register for PM runtime */
1923        schedule_delayed_work(&musb->irq_work, 0);
1924
1925        pm_runtime_mark_last_busy(musb->controller);
1926        pm_runtime_put_autosuspend(musb->controller);
1927
1928        return 0;
1929}
1930
1931/* ----------------------------------------------------------------------- */
1932
1933/* lifecycle operations called through plat_uds.c */
1934
1935void musb_g_resume(struct musb *musb)
1936{
1937        musb->is_suspended = 0;
1938        switch (musb->xceiv->otg->state) {
1939        case OTG_STATE_B_IDLE:
1940                break;
1941        case OTG_STATE_B_WAIT_ACON:
1942        case OTG_STATE_B_PERIPHERAL:
1943                musb->is_active = 1;
1944                if (musb->gadget_driver && musb->gadget_driver->resume) {
1945                        spin_unlock(&musb->lock);
1946                        musb->gadget_driver->resume(&musb->g);
1947                        spin_lock(&musb->lock);
1948                }
1949                break;
1950        default:
1951                WARNING("unhandled RESUME transition (%s)\n",
1952                                usb_otg_state_string(musb->xceiv->otg->state));
1953        }
1954}
1955
1956/* called when SOF packets stop for 3+ msec */
1957void musb_g_suspend(struct musb *musb)
1958{
1959        u8      devctl;
1960
1961        devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1962        musb_dbg(musb, "musb_g_suspend: devctl %02x", devctl);
1963
1964        switch (musb->xceiv->otg->state) {
1965        case OTG_STATE_B_IDLE:
1966                if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
1967                        musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
1968                break;
1969        case OTG_STATE_B_PERIPHERAL:
1970                musb->is_suspended = 1;
1971                if (musb->gadget_driver && musb->gadget_driver->suspend) {
1972                        spin_unlock(&musb->lock);
1973                        musb->gadget_driver->suspend(&musb->g);
1974                        spin_lock(&musb->lock);
1975                }
1976                break;
1977        default:
1978                /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
1979                 * A_PERIPHERAL may need care too
1980                 */
1981                WARNING("unhandled SUSPEND transition (%s)",
1982                                usb_otg_state_string(musb->xceiv->otg->state));
1983        }
1984}
1985
1986/* Called during SRP */
1987void musb_g_wakeup(struct musb *musb)
1988{
1989        musb_gadget_wakeup(&musb->g);
1990}
1991
1992/* called when VBUS drops below session threshold, and in other cases */
1993void musb_g_disconnect(struct musb *musb)
1994{
1995        void __iomem    *mregs = musb->mregs;
1996        u8      devctl = musb_readb(mregs, MUSB_DEVCTL);
1997
1998        musb_dbg(musb, "musb_g_disconnect: devctl %02x", devctl);
1999
2000        /* clear HR */
2001        musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
2002
2003        /* don't draw vbus until new b-default session */
2004        (void) musb_gadget_vbus_draw(&musb->g, 0);
2005
2006        musb->g.speed = USB_SPEED_UNKNOWN;
2007        if (musb->gadget_driver && musb->gadget_driver->disconnect) {
2008                spin_unlock(&musb->lock);
2009                musb->gadget_driver->disconnect(&musb->g);
2010                spin_lock(&musb->lock);
2011        }
2012
2013        switch (musb->xceiv->otg->state) {
2014        default:
2015                musb_dbg(musb, "Unhandled disconnect %s, setting a_idle",
2016                        usb_otg_state_string(musb->xceiv->otg->state));
2017                musb->xceiv->otg->state = OTG_STATE_A_IDLE;
2018                MUSB_HST_MODE(musb);
2019                break;
2020        case OTG_STATE_A_PERIPHERAL:
2021                musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
2022                MUSB_HST_MODE(musb);
2023                break;
2024        case OTG_STATE_B_WAIT_ACON:
2025        case OTG_STATE_B_HOST:
2026        case OTG_STATE_B_PERIPHERAL:
2027        case OTG_STATE_B_IDLE:
2028                musb->xceiv->otg->state = OTG_STATE_B_IDLE;
2029                break;
2030        case OTG_STATE_B_SRP_INIT:
2031                break;
2032        }
2033
2034        musb->is_active = 0;
2035}
2036
2037void musb_g_reset(struct musb *musb)
2038__releases(musb->lock)
2039__acquires(musb->lock)
2040{
2041        void __iomem    *mbase = musb->mregs;
2042        u8              devctl = musb_readb(mbase, MUSB_DEVCTL);
2043        u8              power;
2044
2045        musb_dbg(musb, "<== %s driver '%s'",
2046                        (devctl & MUSB_DEVCTL_BDEVICE)
2047                                ? "B-Device" : "A-Device",
2048                        musb->gadget_driver
2049                                ? musb->gadget_driver->driver.name
2050                                : NULL
2051                        );
2052
2053        /* report reset, if we didn't already (flushing EP state) */
2054        if (musb->gadget_driver && musb->g.speed != USB_SPEED_UNKNOWN) {
2055                spin_unlock(&musb->lock);
2056                usb_gadget_udc_reset(&musb->g, musb->gadget_driver);
2057                spin_lock(&musb->lock);
2058        } else if (devctl & MUSB_DEVCTL_HR) {
2059                /* clear HR (host request) */
2060                musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
2061        }
2062
2065        /* what speed did we negotiate? */
2066        power = musb_readb(mbase, MUSB_POWER);
2067        musb->g.speed = (power & MUSB_POWER_HSMODE)
2068                        ? USB_SPEED_HIGH : USB_SPEED_FULL;
2069
2070        /* start in USB_STATE_DEFAULT */
2071        musb->is_active = 1;
2072        musb->is_suspended = 0;
2073        MUSB_DEV_MODE(musb);
2074        musb->address = 0;
2075        musb->ep0_state = MUSB_EP0_STAGE_SETUP;
2076
2077        musb->may_wakeup = 0;
2078        musb->g.b_hnp_enable = 0;
2079        musb->g.a_alt_hnp_support = 0;
2080        musb->g.a_hnp_support = 0;
2081        musb->g.quirk_zlp_not_supp = 1;
2082
2083        /* Normal reset, as B-Device;
2084         * or else after HNP, as A-Device
2085         */
2086        if (!musb->g.is_otg) {
2087                /* USB device controllers that are not OTG compatible
2088                 * may not have DEVCTL register in silicon.
2089                 * In that case, do not rely on devctl for setting
2090                 * peripheral mode.
2091                 */
2092                musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
2093                musb->g.is_a_peripheral = 0;
2094        } else if (devctl & MUSB_DEVCTL_BDEVICE) {
2095                musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
2096                musb->g.is_a_peripheral = 0;
2097        } else {
2098                musb->xceiv->otg->state = OTG_STATE_A_PERIPHERAL;
2099                musb->g.is_a_peripheral = 1;
2100        }
2101
2102        /* start with default limits on VBUS power draw */
2103        (void) musb_gadget_vbus_draw(&musb->g, 8);
2104}
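
/*
 * For illustration only: after reset, musb->g.speed reflects POWER.HSMODE,
 * and function drivers typically branch on it when sizing bulk endpoints
 * ("demo_bulk_desc" is a hypothetical descriptor).
 */
#if 0
        demo_bulk_desc.wMaxPacketSize =
                cpu_to_le16(gadget->speed == USB_SPEED_HIGH ? 512 : 64);
#endif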
2105