linux/drivers/usb/musb/musb_gadget.c
// SPDX-License-Identifier: GPL-2.0
/*
 * MUSB OTG driver peripheral support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "musb_core.h"
#include "musb_trace.h"


/* ----------------------------------------------------------------------- */

#define is_buffer_mapped(req) (is_dma_capable() && \
                                        (req->map_state != UN_MAPPED))
/* Map the buffer for DMA */

static inline void map_dma_buffer(struct musb_request *request,
                        struct musb *musb, struct musb_ep *musb_ep)
{
        int compatible = true;
        struct dma_controller *dma = musb->dma_controller;

        request->map_state = UN_MAPPED;

        if (!is_dma_capable() || !musb_ep->dma)
                return;

        /* Check if DMA engine can handle this request.
         * DMA code must reject the USB request explicitly.
         * Default behaviour is to map the request.
         */
        if (dma->is_compatible)
                compatible = dma->is_compatible(musb_ep->dma,
                                musb_ep->packet_sz, request->request.buf,
                                request->request.length);
        if (!compatible)
                return;

        if (request->request.dma == DMA_ADDR_INVALID) {
                dma_addr_t dma_addr;
                int ret;

                dma_addr = dma_map_single(
                                musb->controller,
                                request->request.buf,
                                request->request.length,
                                request->tx
                                        ? DMA_TO_DEVICE
                                        : DMA_FROM_DEVICE);
                ret = dma_mapping_error(musb->controller, dma_addr);
                if (ret)
                        return;

                request->request.dma = dma_addr;
                request->map_state = MUSB_MAPPED;
        } else {
                dma_sync_single_for_device(musb->controller,
                        request->request.dma,
                        request->request.length,
                        request->tx
                                ? DMA_TO_DEVICE
                                : DMA_FROM_DEVICE);
                request->map_state = PRE_MAPPED;
        }
}

/* Unmap the buffer from DMA and map it back to the CPU */
static inline void unmap_dma_buffer(struct musb_request *request,
                                struct musb *musb)
{
        struct musb_ep *musb_ep = request->ep;

        if (!is_buffer_mapped(request) || !musb_ep->dma)
                return;

        if (request->request.dma == DMA_ADDR_INVALID) {
                dev_vdbg(musb->controller,
                                "not unmapping a never mapped buffer\n");
                return;
        }
        if (request->map_state == MUSB_MAPPED) {
                dma_unmap_single(musb->controller,
                        request->request.dma,
                        request->request.length,
                        request->tx
                                ? DMA_TO_DEVICE
                                : DMA_FROM_DEVICE);
                request->request.dma = DMA_ADDR_INVALID;
        } else { /* PRE_MAPPED */
                dma_sync_single_for_cpu(musb->controller,
                        request->request.dma,
                        request->request.length,
                        request->tx
                                ? DMA_TO_DEVICE
                                : DMA_FROM_DEVICE);
        }
        request->map_state = UN_MAPPED;
}
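
/*
 * Usage sketch (illustrative, not part of the driver): a request obtained
 * from usb_ep_alloc_request() reaches map_dma_buffer() with
 * request.dma == DMA_ADDR_INVALID (see musb_alloc_request() below), so it
 * takes the dma_map_single()/MUSB_MAPPED path and is unmapped again in
 * musb_g_giveback(). A function driver that maps the buffer itself and
 * fills in req->dma beforehand only gets the sync_for_device()/PRE_MAPPED
 * treatment. A hypothetical caller:
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;	// hypothetical completion callback
 *	usb_ep_queue(ep, req, GFP_ATOMIC);
 */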

/*
 * Immediately complete a request.
 *
 * @param ep the endpoint the request was queued to
 * @param request the request to complete
 * @param status the status to complete the request with
 * Context: controller locked, IRQs blocked.
 */
void musb_g_giveback(
        struct musb_ep          *ep,
        struct usb_request      *request,
        int                     status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
        struct musb_request     *req;
        struct musb             *musb;
        int                     busy = ep->busy;

        req = to_musb_request(request);

        list_del(&req->list);
        if (req->request.status == -EINPROGRESS)
                req->request.status = status;
        musb = req->musb;

        ep->busy = 1;
        spin_unlock(&musb->lock);

        if (!dma_mapping_error(&musb->g.dev, request->dma))
                unmap_dma_buffer(req, musb);

        trace_musb_req_gb(req);
        usb_gadget_giveback_request(&req->ep->end_point, &req->request);
        spin_lock(&musb->lock);
        ep->busy = busy;
}
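
/*
 * Note (summary of the code above, for clarity): ep->busy is forced to 1
 * around the completion callback because musb->lock is dropped while the
 * gadget's ->complete() runs; musb_gadget_queue() checks ep->busy before
 * kicking off I/O, so a request queued from inside the callback is not
 * started until giveback finishes and the saved busy state is restored.
 */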

/* ----------------------------------------------------------------------- */

/*
 * Abort requests queued to an endpoint, completing each with the given
 * status. Synchronous. The caller has locked the controller, blocked
 * IRQs, and selected this endpoint.
 */
static void nuke(struct musb_ep *ep, const int status)
{
        struct musb             *musb = ep->musb;
        struct musb_request     *req = NULL;
        void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;

        ep->busy = 1;

        if (is_dma_capable() && ep->dma) {
                struct dma_controller   *c = ep->musb->dma_controller;
                int value;

                if (ep->is_in) {
                        /*
                         * The programming guide says that we must not clear
                         * the DMAMODE bit before DMAENAB, so we only
                         * clear it in the second write...
                         */
                        musb_writew(epio, MUSB_TXCSR,
                                    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
                        musb_writew(epio, MUSB_TXCSR,
                                        0 | MUSB_TXCSR_FLUSHFIFO);
                } else {
                        musb_writew(epio, MUSB_RXCSR,
                                        0 | MUSB_RXCSR_FLUSHFIFO);
                        musb_writew(epio, MUSB_RXCSR,
                                        0 | MUSB_RXCSR_FLUSHFIFO);
                }

                value = c->channel_abort(ep->dma);
                musb_dbg(musb, "%s: abort DMA --> %d", ep->name, value);
                c->channel_release(ep->dma);
                ep->dma = NULL;
        }

        while (!list_empty(&ep->req_list)) {
                req = list_first_entry(&ep->req_list, struct musb_request, list);
                musb_g_giveback(ep, &req->request, status);
        }
}

/* ----------------------------------------------------------------------- */

/* Data transfers - pure PIO, pure DMA, or mixed mode */

/*
 * This assumes the separate CPPI engine is responding to DMA requests
 * from the usb core ... sequenced a bit differently from mentor dma.
 */

static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
        if (can_bulk_split(musb, ep->type))
                return ep->hw_ep->max_packet_sz_tx;
        else
                return ep->packet_sz;
}

/*
 * An endpoint is transmitting data. This can be called either from
 * the IRQ routine or from ep.queue() to kickstart a request on an
 * endpoint.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void txstate(struct musb *musb, struct musb_request *req)
{
        u8                      epnum = req->epnum;
        struct musb_ep          *musb_ep;
        void __iomem            *epio = musb->endpoints[epnum].regs;
        struct usb_request      *request;
        u16                     fifo_count = 0, csr;
        int                     use_dma = 0;

        musb_ep = req->ep;

        /* Check if EP is disabled */
        if (!musb_ep->desc) {
                musb_dbg(musb, "ep:%s disabled - ignore request",
                                                musb_ep->end_point.name);
                return;
        }

        /* we shouldn't get here while DMA is active ... but we do ... */
        if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
                musb_dbg(musb, "dma pending...");
                return;
        }

        /* read TXCSR before */
        csr = musb_readw(epio, MUSB_TXCSR);

        request = &req->request;
        fifo_count = min(max_ep_writesize(musb, musb_ep),
                        (int)(request->length - request->actual));

        if (csr & MUSB_TXCSR_TXPKTRDY) {
                musb_dbg(musb, "%s old packet still ready, txcsr %03x",
                                musb_ep->end_point.name, csr);
                return;
        }

        if (csr & MUSB_TXCSR_P_SENDSTALL) {
                musb_dbg(musb, "%s stalling, txcsr %03x",
                                musb_ep->end_point.name, csr);
                return;
        }

        musb_dbg(musb, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x",
                        epnum, musb_ep->packet_sz, fifo_count,
                        csr);

#ifndef CONFIG_MUSB_PIO_ONLY
        if (is_buffer_mapped(req)) {
                struct dma_controller   *c = musb->dma_controller;
                size_t request_size;

                /* setup DMA, then program endpoint CSR */
                request_size = min_t(size_t, request->length - request->actual,
                                        musb_ep->dma->max_len);

                use_dma = (request->dma != DMA_ADDR_INVALID && request_size);

                /* MUSB_TXCSR_P_ISO is still set correctly */

                if (musb_dma_inventra(musb) || musb_dma_ux500(musb)) {
                        if (request_size < musb_ep->packet_sz)
                                musb_ep->dma->desired_mode = 0;
                        else
                                musb_ep->dma->desired_mode = 1;

                        use_dma = use_dma && c->channel_program(
                                        musb_ep->dma, musb_ep->packet_sz,
                                        musb_ep->dma->desired_mode,
                                        request->dma + request->actual, request_size);
                        if (use_dma) {
                                if (musb_ep->dma->desired_mode == 0) {
                                        /*
                                         * We must not clear the DMAMODE bit
                                         * before the DMAENAB bit -- and the
                                         * latter doesn't always get cleared
                                         * before we get here...
                                         */
                                        csr &= ~(MUSB_TXCSR_AUTOSET
                                                | MUSB_TXCSR_DMAENAB);
                                        musb_writew(epio, MUSB_TXCSR, csr
                                                | MUSB_TXCSR_P_WZC_BITS);
                                        csr &= ~MUSB_TXCSR_DMAMODE;
                                        csr |= (MUSB_TXCSR_DMAENAB |
                                                        MUSB_TXCSR_MODE);
                                        /* against programming guide */
                                } else {
                                        csr |= (MUSB_TXCSR_DMAENAB
                                                        | MUSB_TXCSR_DMAMODE
                                                        | MUSB_TXCSR_MODE);
                                        /*
                                         * Enable Autoset according to table
                                         * below
                                         * bulk_split hb_mult   Autoset_Enable
                                         *      0       0       Yes(Normal)
                                         *      0       >0      No(High BW ISO)
                                         *      1       0       Yes(HS bulk)
                                         *      1       >0      Yes(FS bulk)
                                         */
                                        if (!musb_ep->hb_mult ||
                                            can_bulk_split(musb,
                                                           musb_ep->type))
                                                csr |= MUSB_TXCSR_AUTOSET;
                                }
                                csr &= ~MUSB_TXCSR_P_UNDERRUN;

                                musb_writew(epio, MUSB_TXCSR, csr);
                        }
                }

                if (is_cppi_enabled(musb)) {
                        /* program endpoint CSR first, then setup DMA */
                        csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
                        csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
                                MUSB_TXCSR_MODE;
                        musb_writew(epio, MUSB_TXCSR, (MUSB_TXCSR_P_WZC_BITS &
                                                ~MUSB_TXCSR_P_UNDERRUN) | csr);

                        /* ensure writebuffer is empty */
                        csr = musb_readw(epio, MUSB_TXCSR);

                        /*
                         * NOTE host side sets DMAENAB later than this; both are
                         * OK since the transfer dma glue (between CPPI and
                         * Mentor fifos) just tells CPPI it could start. Data
                         * only moves to the USB TX fifo when both fifos are
                         * ready.
                         */
                        /*
                         * "mode" is irrelevant here; handle terminating ZLPs
                         * like PIO does, since the hardware RNDIS mode seems
                         * unreliable except for the
                         * last-packet-is-already-short case.
                         */
                        use_dma = use_dma && c->channel_program(
                                        musb_ep->dma, musb_ep->packet_sz,
                                        0,
                                        request->dma + request->actual,
                                        request_size);
                        if (!use_dma) {
                                c->channel_release(musb_ep->dma);
                                musb_ep->dma = NULL;
                                csr &= ~MUSB_TXCSR_DMAENAB;
                                musb_writew(epio, MUSB_TXCSR, csr);
                                /* invariant: request->buf is non-null */
                        }
                } else if (tusb_dma_omap(musb))
                        use_dma = use_dma && c->channel_program(
                                        musb_ep->dma, musb_ep->packet_sz,
                                        request->zero,
                                        request->dma + request->actual,
                                        request_size);
        }
#endif

        if (!use_dma) {
                /*
                 * Unmap the dma buffer back to cpu if dma channel
                 * programming fails
                 */
                unmap_dma_buffer(req, musb);

                musb_write_fifo(musb_ep->hw_ep, fifo_count,
                                (u8 *) (request->buf + request->actual));
                request->actual += fifo_count;
                csr |= MUSB_TXCSR_TXPKTRDY;
                csr &= ~MUSB_TXCSR_P_UNDERRUN;
                musb_writew(epio, MUSB_TXCSR, csr);
        }

        /* host may already have the data when this message shows... */
        musb_dbg(musb, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d",
                        musb_ep->end_point.name, use_dma ? "dma" : "pio",
                        request->actual, request->length,
                        musb_readw(epio, MUSB_TXCSR),
                        fifo_count,
                        musb_readw(epio, MUSB_TXMAXP));
}
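
/*
 * Worked example of the mode selection above (illustrative numbers, not
 * from the original source): for an 8192-byte request on a 512-byte bulk
 * IN endpoint, request_size >= packet_sz, so Inventra/ux500 DMA runs in
 * mode 1 (multi-packet, DMAMODE set) and, per the Autoset table, AUTOSET
 * lets the controller raise TXPKTRDY by itself after each full packet.
 * A 100-byte request would use mode 0, where the DMAENAB/DMAMODE write
 * ordering above matters because DMAMODE may still be set from a
 * previous transfer.
 */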

/*
 * FIFO state update (e.g. data ready).
 * Called from IRQ, with controller locked.
 */
void musb_g_tx(struct musb *musb, u8 epnum)
{
        u16                     csr;
        struct musb_request     *req;
        struct usb_request      *request;
        u8 __iomem              *mbase = musb->mregs;
        struct musb_ep          *musb_ep = &musb->endpoints[epnum].ep_in;
        void __iomem            *epio = musb->endpoints[epnum].regs;
        struct dma_channel      *dma;

        musb_ep_select(mbase, epnum);
        req = next_request(musb_ep);
        request = &req->request;

        csr = musb_readw(epio, MUSB_TXCSR);
        musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr);

        dma = is_dma_capable() ? musb_ep->dma : NULL;

        /*
         * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
         * probably rates reporting as a host error.
         */
        if (csr & MUSB_TXCSR_P_SENTSTALL) {
                csr |= MUSB_TXCSR_P_WZC_BITS;
                csr &= ~MUSB_TXCSR_P_SENTSTALL;
                musb_writew(epio, MUSB_TXCSR, csr);
                return;
        }

        if (csr & MUSB_TXCSR_P_UNDERRUN) {
                /* We NAKed, no big deal... little reason to care. */
                csr |= MUSB_TXCSR_P_WZC_BITS;
                csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
                musb_writew(epio, MUSB_TXCSR, csr);
                dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
                                epnum, request);
        }

        if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
                /*
                 * SHOULD NOT HAPPEN... has with CPPI though, after
                 * changing SENDSTALL (and other cases); harmless?
                 */
                musb_dbg(musb, "%s dma still busy?", musb_ep->end_point.name);
                return;
        }

        if (req) {

                trace_musb_req_tx(req);

                if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
                        csr |= MUSB_TXCSR_P_WZC_BITS;
                        csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
                                 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
                        musb_writew(epio, MUSB_TXCSR, csr);
                        /* Ensure writebuffer is empty. */
                        csr = musb_readw(epio, MUSB_TXCSR);
                        request->actual += musb_ep->dma->actual_len;
                        musb_dbg(musb, "TXCSR%d %04x, DMA off, len %zu, req %p",
                                epnum, csr, musb_ep->dma->actual_len, request);
                }

                /*
                 * First, maybe a terminating short packet. Some DMA
                 * engines might handle this by themselves.
                 */
                if ((request->zero && request->length)
                        && (request->length % musb_ep->packet_sz == 0)
                        && (request->actual == request->length)) {

                        /*
                         * On DMA completion, FIFO may not be
                         * available yet...
                         */
                        if (csr & MUSB_TXCSR_TXPKTRDY)
                                return;

                        musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
                                        | MUSB_TXCSR_TXPKTRDY);
                        request->zero = 0;
                }

                if (request->actual == request->length) {
                        musb_g_giveback(musb_ep, request, 0);
                        /*
                         * In the giveback function the MUSB lock is
                         * released and re-acquired after some time. During
                         * this period the INDEX register could be
                         * changed by the gadget_queue function, especially
                         * on SMP systems. Reselect INDEX to be sure
                         * we are reading/modifying the right registers.
                         */
                        musb_ep_select(mbase, epnum);
                        req = musb_ep->desc ? next_request(musb_ep) : NULL;
                        if (!req) {
                                musb_dbg(musb, "%s idle now",
                                        musb_ep->end_point.name);
                                return;
                        }
                }

                txstate(musb, req);
        }
}
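
/*
 * Example of the ZLP path above (illustrative): a 1024-byte request with
 * req->zero set on a 512-byte bulk endpoint sends two full packets; since
 * length % packet_sz == 0, musb_g_tx() then loads one extra zero-length
 * packet by writing TXPKTRDY with an empty FIFO, so the host sees a short
 * (zero-length) packet terminating the transfer.
 */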

/* ------------------------------------------------------------ */

/*
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
        const u8                epnum = req->epnum;
        struct usb_request      *request = &req->request;
        struct musb_ep          *musb_ep;
        void __iomem            *epio = musb->endpoints[epnum].regs;
        unsigned                len = 0;
        u16                     fifo_count;
        u16                     csr = musb_readw(epio, MUSB_RXCSR);
        struct musb_hw_ep       *hw_ep = &musb->endpoints[epnum];
        u8                      use_mode_1;

        if (hw_ep->is_shared_fifo)
                musb_ep = &hw_ep->ep_in;
        else
                musb_ep = &hw_ep->ep_out;

        fifo_count = musb_ep->packet_sz;

        /* Check if EP is disabled */
        if (!musb_ep->desc) {
                musb_dbg(musb, "ep:%s disabled - ignore request",
                                                musb_ep->end_point.name);
                return;
        }

        /* We shouldn't get here while DMA is active, but we do... */
        if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
                musb_dbg(musb, "DMA pending...");
                return;
        }

        if (csr & MUSB_RXCSR_P_SENDSTALL) {
                musb_dbg(musb, "%s stalling, RXCSR %04x",
                    musb_ep->end_point.name, csr);
                return;
        }

        if (is_cppi_enabled(musb) && is_buffer_mapped(req)) {
                struct dma_controller   *c = musb->dma_controller;
                struct dma_channel      *channel = musb_ep->dma;

                /* NOTE:  CPPI won't actually stop advancing the DMA
                 * queue after short packet transfers, so this is almost
                 * always going to run as IRQ-per-packet DMA so that
                 * faults will be handled correctly.
                 */
                if (c->channel_program(channel,
                                musb_ep->packet_sz,
                                !request->short_not_ok,
                                request->dma + request->actual,
                                request->length - request->actual)) {

                        /* make sure that if an rxpkt arrived after the irq,
                         * the cppi engine will be ready to take it as soon
                         * as DMA is enabled
                         */
                        csr &= ~(MUSB_RXCSR_AUTOCLEAR
                                        | MUSB_RXCSR_DMAMODE);
                        csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
                        musb_writew(epio, MUSB_RXCSR, csr);
                        return;
                }
        }

        if (csr & MUSB_RXCSR_RXPKTRDY) {
                fifo_count = musb_readw(epio, MUSB_RXCOUNT);

                /*
                 * Enable Mode 1 on RX transfers only when short_not_ok flag
                 * is set. Currently short_not_ok flag is set only from
                 * file_storage and f_mass_storage drivers
                 */

                if (request->short_not_ok && fifo_count == musb_ep->packet_sz)
                        use_mode_1 = 1;
                else
                        use_mode_1 = 0;

                if (request->actual < request->length) {
                        if (!is_buffer_mapped(req))
                                goto buffer_aint_mapped;

                        if (musb_dma_inventra(musb)) {
                                struct dma_controller   *c;
                                struct dma_channel      *channel;
                                int                     use_dma = 0;
                                unsigned int transfer_size;

                                c = musb->dma_controller;
                                channel = musb_ep->dma;

        /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
         * mode 0 only. So we do not get endpoint interrupts due to DMA
         * completion. We only get interrupts from DMA controller.
         *
         * We could operate in DMA mode 1 if we knew the size of the transfer
         * in advance. For mass storage class, request->length = what the host
         * sends, so that'd work.  But for pretty much everything else,
         * request->length is routinely more than what the host sends. For
         * most of these gadgets, the end of transfer is signified either by
         * a short packet, or by filling the last byte of the buffer.
         * (Sending extra data in that last packet should trigger an overflow
         * fault.)  But in mode 1, we don't get a DMA completion interrupt
         * for short packets.
         *
         * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
         * to get endpoint interrupt on every DMA req, but that didn't seem
         * to work reliably.
         *
         * REVISIT an updated g_file_storage can set req->short_not_ok, which
         * then becomes usable as a runtime "use mode 1" hint...
         */
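
        /* Illustrative numbers for the trade-off above (not from the
         * original source): with a 512-byte maxpacket endpoint and a
         * 16 KiB request, mode 0 raises one DMA interrupt per packet
         * (32 interrupts), while mode 1 can move all the full packets in
         * one DMA job; the catch is that a short packet ending the
         * transfer early completes without a DMA interrupt in mode 1,
         * which is why mode 1 is only used when short_not_ok guarantees
         * full-size packets.
         */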

                                /* Experimental: Mode1 works with mass storage use cases */
                                if (use_mode_1) {
                                        csr |= MUSB_RXCSR_AUTOCLEAR;
                                        musb_writew(epio, MUSB_RXCSR, csr);
                                        csr |= MUSB_RXCSR_DMAENAB;
                                        musb_writew(epio, MUSB_RXCSR, csr);

                                        /*
                                         * this special sequence (enabling and then
                                         * disabling MUSB_RXCSR_DMAMODE) is required
                                         * to get DMAReq to activate
                                         */
                                        musb_writew(epio, MUSB_RXCSR,
                                                csr | MUSB_RXCSR_DMAMODE);
                                        musb_writew(epio, MUSB_RXCSR, csr);

                                        transfer_size = min_t(unsigned int,
                                                        request->length -
                                                        request->actual,
                                                        channel->max_len);
                                        musb_ep->dma->desired_mode = 1;
                                } else {
                                        if (!musb_ep->hb_mult &&
                                                musb_ep->hw_ep->rx_double_buffered)
                                                csr |= MUSB_RXCSR_AUTOCLEAR;
                                        csr |= MUSB_RXCSR_DMAENAB;
                                        musb_writew(epio, MUSB_RXCSR, csr);

                                        transfer_size = min(request->length - request->actual,
                                                        (unsigned)fifo_count);
                                        musb_ep->dma->desired_mode = 0;
                                }

                                use_dma = c->channel_program(
                                                channel,
                                                musb_ep->packet_sz,
                                                channel->desired_mode,
                                                request->dma
                                                + request->actual,
                                                transfer_size);

                                if (use_dma)
                                        return;
                        }

                        if ((musb_dma_ux500(musb)) &&
                                (request->actual < request->length)) {

                                struct dma_controller *c;
                                struct dma_channel *channel;
                                unsigned int transfer_size = 0;

                                c = musb->dma_controller;
                                channel = musb_ep->dma;

                                /* In case first packet is short */
                                if (fifo_count < musb_ep->packet_sz)
                                        transfer_size = fifo_count;
                                else if (request->short_not_ok)
                                        transfer_size = min_t(unsigned int,
                                                        request->length -
                                                        request->actual,
                                                        channel->max_len);
                                else
                                        transfer_size = min_t(unsigned int,
                                                        request->length -
                                                        request->actual,
                                                        (unsigned)fifo_count);

                                csr &= ~MUSB_RXCSR_DMAMODE;
                                csr |= (MUSB_RXCSR_DMAENAB |
                                        MUSB_RXCSR_AUTOCLEAR);

                                musb_writew(epio, MUSB_RXCSR, csr);

                                if (transfer_size <= musb_ep->packet_sz) {
                                        musb_ep->dma->desired_mode = 0;
                                } else {
                                        musb_ep->dma->desired_mode = 1;
                                        /* Mode must be set after DMAENAB */
                                        csr |= MUSB_RXCSR_DMAMODE;
                                        musb_writew(epio, MUSB_RXCSR, csr);
                                }

                                if (c->channel_program(channel,
                                                        musb_ep->packet_sz,
                                                        channel->desired_mode,
                                                        request->dma
                                                        + request->actual,
                                                        transfer_size))
                                        return;
                        }

                        len = request->length - request->actual;
                        musb_dbg(musb, "%s OUT/RX pio fifo %d/%d, maxpacket %d",
                                        musb_ep->end_point.name,
                                        fifo_count, len,
                                        musb_ep->packet_sz);

                        fifo_count = min_t(unsigned, len, fifo_count);

                        if (tusb_dma_omap(musb)) {
                                struct dma_controller *c = musb->dma_controller;
                                struct dma_channel *channel = musb_ep->dma;
                                u32 dma_addr = request->dma + request->actual;
                                int ret;

                                ret = c->channel_program(channel,
                                                musb_ep->packet_sz,
                                                channel->desired_mode,
                                                dma_addr,
                                                fifo_count);
                                if (ret)
                                        return;
                        }

                        /*
                         * Unmap the dma buffer back to cpu if dma channel
                         * programming fails. This buffer is mapped if the
                         * channel allocation is successful
                         */
                        unmap_dma_buffer(req, musb);

                        /*
                         * Clear DMAENAB and AUTOCLEAR for the
                         * PIO mode transfer
                         */
                        csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
                        musb_writew(epio, MUSB_RXCSR, csr);

buffer_aint_mapped:
                        musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
                                        (request->buf + request->actual));
                        request->actual += fifo_count;

                        /* REVISIT if we left anything in the fifo, flush
                         * it and report -EOVERFLOW
                         */

                        /* ack the read! */
                        csr |= MUSB_RXCSR_P_WZC_BITS;
                        csr &= ~MUSB_RXCSR_RXPKTRDY;
                        musb_writew(epio, MUSB_RXCSR, csr);
                }
        }
        /* reached the end, or a short packet was detected */
        if (request->actual == request->length ||
            fifo_count < musb_ep->packet_sz)
                musb_g_giveback(musb_ep, request, 0);
}
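
/*
 * Completion example for the check above (illustrative): with a 4096-byte
 * buffer on a 512-byte maxpacket OUT endpoint, a host that sends
 * 512 + 488 bytes completes the request at actual == 1000 because the
 * second packet is short; a host that sends exactly 4096 bytes completes
 * it via actual == length instead.
 */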

/*
 * Data ready for a request; called from IRQ
 */
void musb_g_rx(struct musb *musb, u8 epnum)
{
        u16                     csr;
        struct musb_request     *req;
        struct usb_request      *request;
        void __iomem            *mbase = musb->mregs;
        struct musb_ep          *musb_ep;
        void __iomem            *epio = musb->endpoints[epnum].regs;
        struct dma_channel      *dma;
        struct musb_hw_ep       *hw_ep = &musb->endpoints[epnum];

        if (hw_ep->is_shared_fifo)
                musb_ep = &hw_ep->ep_in;
        else
                musb_ep = &hw_ep->ep_out;

        musb_ep_select(mbase, epnum);

        req = next_request(musb_ep);
        if (!req)
                return;

        trace_musb_req_rx(req);
        request = &req->request;

        csr = musb_readw(epio, MUSB_RXCSR);
        dma = is_dma_capable() ? musb_ep->dma : NULL;

        musb_dbg(musb, "<== %s, rxcsr %04x%s %p", musb_ep->end_point.name,
                        csr, dma ? " (dma)" : "", request);

        if (csr & MUSB_RXCSR_P_SENTSTALL) {
                csr |= MUSB_RXCSR_P_WZC_BITS;
                csr &= ~MUSB_RXCSR_P_SENTSTALL;
                musb_writew(epio, MUSB_RXCSR, csr);
                return;
        }

        if (csr & MUSB_RXCSR_P_OVERRUN) {
                /* csr |= MUSB_RXCSR_P_WZC_BITS; */
                csr &= ~MUSB_RXCSR_P_OVERRUN;
                musb_writew(epio, MUSB_RXCSR, csr);

                musb_dbg(musb, "%s iso overrun on %p", musb_ep->name, request);
                if (request->status == -EINPROGRESS)
                        request->status = -EOVERFLOW;
        }
        if (csr & MUSB_RXCSR_INCOMPRX) {
                /* REVISIT not necessarily an error */
                musb_dbg(musb, "%s, incomprx", musb_ep->end_point.name);
        }

        if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
                /* "should not happen"; likely RXPKTRDY pending for DMA */
                musb_dbg(musb, "%s busy, csr %04x",
                        musb_ep->end_point.name, csr);
                return;
        }

        if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
                csr &= ~(MUSB_RXCSR_AUTOCLEAR
                                | MUSB_RXCSR_DMAENAB
                                | MUSB_RXCSR_DMAMODE);
                musb_writew(epio, MUSB_RXCSR,
                        MUSB_RXCSR_P_WZC_BITS | csr);

                request->actual += musb_ep->dma->actual_len;

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
        defined(CONFIG_USB_UX500_DMA)
                /* Autoclear doesn't clear RxPktRdy for short packets */
                if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
                                || (dma->actual_len
                                        & (musb_ep->packet_sz - 1))) {
                        /* ack the read! */
                        csr &= ~MUSB_RXCSR_RXPKTRDY;
                        musb_writew(epio, MUSB_RXCSR, csr);
                }

                /* incomplete, and not short? wait for next IN packet */
                if ((request->actual < request->length)
                                && (musb_ep->dma->actual_len
                                        == musb_ep->packet_sz)) {
                        /*
                         * In the double-buffered case, continue to unload
                         * the FIFO if there is an RX packet in it.
                         */
                        csr = musb_readw(epio, MUSB_RXCSR);
                        if ((csr & MUSB_RXCSR_RXPKTRDY) &&
                                hw_ep->rx_double_buffered)
                                goto exit;
                        return;
                }
#endif
                musb_g_giveback(musb_ep, request, 0);
                /*
                 * In the giveback function the MUSB lock is
                 * released and re-acquired after some time. During
                 * this period the INDEX register could be
                 * changed by the gadget_queue function, especially
                 * on SMP systems. Reselect INDEX to be sure
                 * we are reading/modifying the right registers.
                 */
                musb_ep_select(mbase, epnum);

                req = next_request(musb_ep);
                if (!req)
                        return;
        }
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
        defined(CONFIG_USB_UX500_DMA)
exit:
#endif
        /* Analyze request */
        rxstate(musb, req);
}

/* ------------------------------------------------------------ */

static int musb_gadget_enable(struct usb_ep *ep,
                        const struct usb_endpoint_descriptor *desc)
{
        unsigned long           flags;
        struct musb_ep          *musb_ep;
        struct musb_hw_ep       *hw_ep;
        void __iomem            *regs;
        struct musb             *musb;
        void __iomem    *mbase;
        u8              epnum;
        u16             csr;
        unsigned        tmp;
        int             status = -EINVAL;

        if (!ep || !desc)
                return -EINVAL;

        musb_ep = to_musb_ep(ep);
        hw_ep = musb_ep->hw_ep;
        regs = hw_ep->regs;
        musb = musb_ep->musb;
        mbase = musb->mregs;
        epnum = musb_ep->current_epnum;

        spin_lock_irqsave(&musb->lock, flags);

        if (musb_ep->desc) {
                status = -EBUSY;
                goto fail;
        }
        musb_ep->type = usb_endpoint_type(desc);

        /* check direction and (later) maxpacket size against endpoint */
        if (usb_endpoint_num(desc) != epnum)
                goto fail;

        /* REVISIT this rules out high bandwidth periodic transfers */
        tmp = usb_endpoint_maxp_mult(desc) - 1;
        if (tmp) {
                int ok;

                if (usb_endpoint_dir_in(desc))
                        ok = musb->hb_iso_tx;
                else
                        ok = musb->hb_iso_rx;

                if (!ok) {
                        musb_dbg(musb, "no support for high bandwidth ISO");
                        goto fail;
                }
                musb_ep->hb_mult = tmp;
        } else {
                musb_ep->hb_mult = 0;
        }

        musb_ep->packet_sz = usb_endpoint_maxp(desc);
        tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);
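
        /*
         * Worked example (illustrative): a high-bandwidth ISO endpoint
         * descriptor with a base maxpacket of 1024 and a mult field of 2
         * gives packet_sz = 1024, hb_mult = 2, and tmp = 3072 bytes per
         * microframe, which is then checked against the hardware FIFO
         * size below.
         */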

        /* enable the interrupts for the endpoint, set the endpoint
         * packet size (or fail), set the mode, clear the fifo
         */
        musb_ep_select(mbase, epnum);
        if (usb_endpoint_dir_in(desc)) {

                if (hw_ep->is_shared_fifo)
                        musb_ep->is_in = 1;
                if (!musb_ep->is_in)
                        goto fail;

                if (tmp > hw_ep->max_packet_sz_tx) {
                        musb_dbg(musb, "packet size beyond hardware FIFO size");
                        goto fail;
                }

                musb->intrtxe |= (1 << epnum);
                musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);

                /* REVISIT if can_bulk_split(), use by updating "tmp";
                 * likewise high bandwidth periodic tx
                 */
                /* Set TXMAXP with the FIFO size of the endpoint
                 * to disable double buffering mode.
                 */
                if (can_bulk_split(musb, musb_ep->type))
                        musb_ep->hb_mult = (hw_ep->max_packet_sz_tx /
                                                musb_ep->packet_sz) - 1;
                musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
                                | (musb_ep->hb_mult << 11));

                csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
                if (musb_readw(regs, MUSB_TXCSR)
                                & MUSB_TXCSR_FIFONOTEMPTY)
                        csr |= MUSB_TXCSR_FLUSHFIFO;
                if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
                        csr |= MUSB_TXCSR_P_ISO;

                /* set twice in case of double buffering */
                musb_writew(regs, MUSB_TXCSR, csr);
                /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
                musb_writew(regs, MUSB_TXCSR, csr);

        } else {

                if (hw_ep->is_shared_fifo)
                        musb_ep->is_in = 0;
                if (musb_ep->is_in)
                        goto fail;

                if (tmp > hw_ep->max_packet_sz_rx) {
                        musb_dbg(musb, "packet size beyond hardware FIFO size");
                        goto fail;
                }

                musb->intrrxe |= (1 << epnum);
                musb_writew(mbase, MUSB_INTRRXE, musb->intrrxe);

                /* REVISIT if can_bulk_combine() use by updating "tmp"
                 * likewise high bandwidth periodic rx
                 */
                /* Set RXMAXP with the FIFO size of the endpoint
                 * to disable double buffering mode.
                 */
                musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
                                | (musb_ep->hb_mult << 11));

                /* force shared fifo to OUT-only mode */
                if (hw_ep->is_shared_fifo) {
                        csr = musb_readw(regs, MUSB_TXCSR);
                        csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
                        musb_writew(regs, MUSB_TXCSR, csr);
                }

                csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
                if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
                        csr |= MUSB_RXCSR_P_ISO;
                else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
                        csr |= MUSB_RXCSR_DISNYET;

                /* set twice in case of double buffering */
                musb_writew(regs, MUSB_RXCSR, csr);
                musb_writew(regs, MUSB_RXCSR, csr);
        }

        /* NOTE:  all the I/O code _should_ work fine without DMA, in case
         * for some reason you run out of channels here.
         */
        if (is_dma_capable() && musb->dma_controller) {
                struct dma_controller   *c = musb->dma_controller;

                musb_ep->dma = c->channel_alloc(c, hw_ep,
                                (desc->bEndpointAddress & USB_DIR_IN));
        } else
                musb_ep->dma = NULL;

        musb_ep->desc = desc;
        musb_ep->busy = 0;
        musb_ep->wedged = 0;
        status = 0;

        pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
                        musb_driver_name, musb_ep->end_point.name,
                        musb_ep_xfertype_string(musb_ep->type),
                        musb_ep->is_in ? "IN" : "OUT",
                        musb_ep->dma ? "dma, " : "",
                        musb_ep->packet_sz);

        schedule_delayed_work(&musb->irq_work, 0);

fail:
        spin_unlock_irqrestore(&musb->lock, flags);
        return status;
}

/*
 * Disable an endpoint, flushing all queued requests.
 */
static int musb_gadget_disable(struct usb_ep *ep)
{
        unsigned long   flags;
        struct musb     *musb;
        u8              epnum;
        struct musb_ep  *musb_ep;
        void __iomem    *epio;

        musb_ep = to_musb_ep(ep);
        musb = musb_ep->musb;
        epnum = musb_ep->current_epnum;
        epio = musb->endpoints[epnum].regs;

        spin_lock_irqsave(&musb->lock, flags);
        musb_ep_select(musb->mregs, epnum);

        /* zero the endpoint sizes */
        if (musb_ep->is_in) {
                musb->intrtxe &= ~(1 << epnum);
                musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
                musb_writew(epio, MUSB_TXMAXP, 0);
        } else {
                musb->intrrxe &= ~(1 << epnum);
                musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
                musb_writew(epio, MUSB_RXMAXP, 0);
        }

        /* abort all pending DMA and requests */
        nuke(musb_ep, -ESHUTDOWN);

        musb_ep->desc = NULL;
        musb_ep->end_point.desc = NULL;

        schedule_delayed_work(&musb->irq_work, 0);

        spin_unlock_irqrestore(&musb->lock, flags);

        musb_dbg(musb, "%s", musb_ep->end_point.name);

        return 0;
}

/*
 * Allocate a request for an endpoint.
 * Reused by ep0 code.
 */
struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
        struct musb_ep          *musb_ep = to_musb_ep(ep);
        struct musb_request     *request = NULL;

        request = kzalloc(sizeof *request, gfp_flags);
        if (!request)
                return NULL;

        request->request.dma = DMA_ADDR_INVALID;
        request->epnum = musb_ep->current_epnum;
        request->ep = musb_ep;

        trace_musb_req_alloc(request);
        return &request->request;
}

/*
 * Free a request.
 * Reused by ep0 code.
 */
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
        struct musb_request *request = to_musb_request(req);

        trace_musb_req_free(request);
        kfree(request);
}

static LIST_HEAD(buffers);

struct free_record {
        struct list_head        list;
        struct device           *dev;
        unsigned                bytes;
        dma_addr_t              dma;
};

/*
 * Context: controller locked, IRQs blocked.
 */
void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
        trace_musb_req_start(req);
        musb_ep_select(musb->mregs, req->epnum);
        if (req->tx)
                txstate(musb, req);
        else
                rxstate(musb, req);
}

static int musb_ep_restart_resume_work(struct musb *musb, void *data)
{
        struct musb_request *req = data;

        musb_ep_restart(musb, req);

        return 0;
}

static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
                        gfp_t gfp_flags)
{
        struct musb_ep          *musb_ep;
        struct musb_request     *request;
        struct musb             *musb;
        int                     status;
        unsigned long           lockflags;

        if (!ep || !req)
                return -EINVAL;
        if (!req->buf)
                return -ENODATA;

        musb_ep = to_musb_ep(ep);
        musb = musb_ep->musb;

        request = to_musb_request(req);
        request->musb = musb;

        if (request->ep != musb_ep)
                return -EINVAL;

        status = pm_runtime_get(musb->controller);
        if ((status != -EINPROGRESS) && status < 0) {
                dev_err(musb->controller,
                        "pm runtime get failed in %s\n",
                        __func__);
                pm_runtime_put_noidle(musb->controller);

                return status;
        }
        status = 0;

        trace_musb_req_enq(request);

        /* request is mine now... */
        request->request.actual = 0;
        request->request.status = -EINPROGRESS;
        request->epnum = musb_ep->current_epnum;
        request->tx = musb_ep->is_in;

        map_dma_buffer(request, musb, musb_ep);

        spin_lock_irqsave(&musb->lock, lockflags);

        /* don't queue if the ep is down */
        if (!musb_ep->desc) {
                musb_dbg(musb, "req %p queued to %s while ep %s",
                                req, ep->name, "disabled");
                status = -ESHUTDOWN;
                unmap_dma_buffer(request, musb);
                goto unlock;
        }

        /* add request to the list */
        list_add_tail(&request->list, &musb_ep->req_list);

        /* if this is the head of the queue, start i/o ... */
1246        if (!musb_ep->busy && &request->list == musb_ep->req_list.next) {
1247                status = musb_queue_resume_work(musb,
1248                                                musb_ep_restart_resume_work,
1249                                                request);
1250                if (status < 0)
1251                        dev_err(musb->controller, "%s resume work: %i\n",
1252                                __func__, status);
1253        }
1254
1255unlock:
1256        spin_unlock_irqrestore(&musb->lock, lockflags);
1257        pm_runtime_mark_last_busy(musb->controller);
1258        pm_runtime_put_autosuspend(musb->controller);
1259
1260        return status;
1261}
1262
1263static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
1264{
1265        struct musb_ep          *musb_ep = to_musb_ep(ep);
1266        struct musb_request     *req = to_musb_request(request);
1267        struct musb_request     *r;
1268        unsigned long           flags;
1269        int                     status = 0;
1270        struct musb             *musb = musb_ep->musb;
1271
1272        if (!ep || !request || req->ep != musb_ep)
1273                return -EINVAL;
1274
1275        trace_musb_req_deq(req);
1276
1277        spin_lock_irqsave(&musb->lock, flags);
1278
1279        list_for_each_entry(r, &musb_ep->req_list, list) {
1280                if (r == req)
1281                        break;
1282        }
1283        if (r != req) {
1284                dev_err(musb->controller, "request %p not queued to %s\n",
1285                                request, ep->name);
1286                status = -EINVAL;
1287                goto done;
1288        }
1289
1290        /* if the hardware doesn't have the request, easy ... */
1291        if (musb_ep->req_list.next != &req->list || musb_ep->busy)
1292                musb_g_giveback(musb_ep, request, -ECONNRESET);
1293
1294        /* ... else abort the dma transfer ... */
1295        else if (is_dma_capable() && musb_ep->dma) {
1296                struct dma_controller   *c = musb->dma_controller;
1297
1298                musb_ep_select(musb->mregs, musb_ep->current_epnum);
1299                if (c->channel_abort)
1300                        status = c->channel_abort(musb_ep->dma);
1301                else
1302                        status = -EBUSY;
1303                if (status == 0)
1304                        musb_g_giveback(musb_ep, request, -ECONNRESET);
1305        } else {
1306                /* NOTE: by sticking to easily tested hardware/driver states,
1307                 * we leave counting of in-flight packets imprecise.
1308                 */
1309                musb_g_giveback(musb_ep, request, -ECONNRESET);
1310        }
1311
1312done:
1313        spin_unlock_irqrestore(&musb->lock, flags);
1314        return status;
1315}
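
/*
 * Example (illustrative sketch, not part of this file): cancellation
 * enters through usb_ep_dequeue(); on success the request's completion
 * callback runs with status -ECONNRESET, matching the musb_g_giveback()
 * calls above.
 *
 *	if (usb_ep_dequeue(ep, req))
 *		pr_warn("request %p was not queued\n", req);
 */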
1316
/*
 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx
 * any data, but new requests may still be queued to it.
 *
 * exported to ep0 code
 */
1323static int musb_gadget_set_halt(struct usb_ep *ep, int value)
1324{
        struct musb_ep          *musb_ep = to_musb_ep(ep);
        u8                      epnum;
        struct musb             *musb;
        void __iomem            *epio;
        void __iomem            *mbase;
        unsigned long           flags;
        u16                     csr;
        struct musb_request     *request;
        int                     status = 0;

        /* validate the argument before dereferencing it */
        if (!ep)
                return -EINVAL;

        epnum = musb_ep->current_epnum;
        musb = musb_ep->musb;
        epio = musb->endpoints[epnum].regs;
        mbase = musb->mregs;
1338
1339        spin_lock_irqsave(&musb->lock, flags);
1340
        if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) {
1342                status = -EINVAL;
1343                goto done;
1344        }
1345
1346        musb_ep_select(mbase, epnum);
1347
1348        request = next_request(musb_ep);
1349        if (value) {
1350                if (request) {
1351                        musb_dbg(musb, "request in progress, cannot halt %s",
1352                            ep->name);
1353                        status = -EAGAIN;
1354                        goto done;
1355                }
1356                /* Cannot portably stall with non-empty FIFO */
1357                if (musb_ep->is_in) {
1358                        csr = musb_readw(epio, MUSB_TXCSR);
1359                        if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1360                                musb_dbg(musb, "FIFO busy, cannot halt %s",
1361                                                ep->name);
1362                                status = -EAGAIN;
1363                                goto done;
1364                        }
1365                }
        } else {
                musb_ep->wedged = 0;
        }
1368
1369        /* set/clear the stall and toggle bits */
1370        musb_dbg(musb, "%s: %s stall", ep->name, value ? "set" : "clear");
1371        if (musb_ep->is_in) {
1372                csr = musb_readw(epio, MUSB_TXCSR);
1373                csr |= MUSB_TXCSR_P_WZC_BITS
1374                        | MUSB_TXCSR_CLRDATATOG;
1375                if (value)
1376                        csr |= MUSB_TXCSR_P_SENDSTALL;
1377                else
1378                        csr &= ~(MUSB_TXCSR_P_SENDSTALL
1379                                | MUSB_TXCSR_P_SENTSTALL);
1380                csr &= ~MUSB_TXCSR_TXPKTRDY;
1381                musb_writew(epio, MUSB_TXCSR, csr);
1382        } else {
1383                csr = musb_readw(epio, MUSB_RXCSR);
1384                csr |= MUSB_RXCSR_P_WZC_BITS
1385                        | MUSB_RXCSR_FLUSHFIFO
1386                        | MUSB_RXCSR_CLRDATATOG;
1387                if (value)
1388                        csr |= MUSB_RXCSR_P_SENDSTALL;
1389                else
1390                        csr &= ~(MUSB_RXCSR_P_SENDSTALL
1391                                | MUSB_RXCSR_P_SENTSTALL);
1392                musb_writew(epio, MUSB_RXCSR, csr);
1393        }
1394
1395        /* maybe start the first request in the queue */
1396        if (!musb_ep->busy && !value && request) {
1397                musb_dbg(musb, "restarting the request");
1398                musb_ep_restart(musb, request);
1399        }
1400
1401done:
1402        spin_unlock_irqrestore(&musb->lock, flags);
1403        return status;
1404}
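
/*
 * Example (illustrative sketch, not part of this file): function
 * drivers halt an endpoint via usb_ep_set_halt(), which lands here.
 * Mirroring the checks above, callers should expect -EAGAIN while a
 * request is pending or the TX FIFO is not empty:
 *
 *	ret = usb_ep_set_halt(ep);
 *	if (ret == -EAGAIN)
 *		return ret;	(caller retries after in-flight I/O drains)
 */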
1405
/*
 * Set the halt feature and ignore clear-halt requests from the host;
 * the endpoint stays halted until the gadget driver itself clears it.
 */
1409static int musb_gadget_set_wedge(struct usb_ep *ep)
1410{
1411        struct musb_ep          *musb_ep = to_musb_ep(ep);
1412
1413        if (!ep)
1414                return -EINVAL;
1415
1416        musb_ep->wedged = 1;
1417
1418        return usb_ep_set_halt(ep);
1419}
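
/*
 * Example (illustrative sketch, not part of this file): wedging is how
 * a function driver keeps an endpoint stalled across the host's
 * CLEAR_FEATURE(ENDPOINT_HALT); f_mass_storage does this on fatal
 * protocol errors, e.g.:
 *
 *	(void) usb_ep_set_wedge(bulk_in_ep);	(hypothetical endpoint)
 */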
1420
1421static int musb_gadget_fifo_status(struct usb_ep *ep)
1422{
1423        struct musb_ep          *musb_ep = to_musb_ep(ep);
1424        void __iomem            *epio = musb_ep->hw_ep->regs;
1425        int                     retval = -EINVAL;
1426
1427        if (musb_ep->desc && !musb_ep->is_in) {
1428                struct musb             *musb = musb_ep->musb;
1429                int                     epnum = musb_ep->current_epnum;
1430                void __iomem            *mbase = musb->mregs;
1431                unsigned long           flags;
1432
1433                spin_lock_irqsave(&musb->lock, flags);
1434
1435                musb_ep_select(mbase, epnum);
1436                /* FIXME return zero unless RXPKTRDY is set */
1437                retval = musb_readw(epio, MUSB_RXCOUNT);
1438
1439                spin_unlock_irqrestore(&musb->lock, flags);
1440        }
1441        return retval;
1442}
1443
1444static void musb_gadget_fifo_flush(struct usb_ep *ep)
1445{
1446        struct musb_ep  *musb_ep = to_musb_ep(ep);
1447        struct musb     *musb = musb_ep->musb;
1448        u8              epnum = musb_ep->current_epnum;
1449        void __iomem    *epio = musb->endpoints[epnum].regs;
1450        void __iomem    *mbase;
1451        unsigned long   flags;
1452        u16             csr;
1453
1454        mbase = musb->mregs;
1455
1456        spin_lock_irqsave(&musb->lock, flags);
        musb_ep_select(mbase, epnum);
1458
        /* mask this endpoint's TX interrupt while flushing */
1460        musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe & ~(1 << epnum));
1461
1462        if (musb_ep->is_in) {
1463                csr = musb_readw(epio, MUSB_TXCSR);
1464                if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1465                        csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
                        /*
                         * Setting both TXPKTRDY and FLUSHFIFO makes the
                         * controller interrupt the current FIFO loading,
                         * but not flush the packets already loaded, so
                         * make sure TXPKTRDY is cleared.
                         */
1471                        csr &= ~MUSB_TXCSR_TXPKTRDY;
1472                        musb_writew(epio, MUSB_TXCSR, csr);
1473                        /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1474                        musb_writew(epio, MUSB_TXCSR, csr);
1475                }
1476        } else {
1477                csr = musb_readw(epio, MUSB_RXCSR);
1478                csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
1479                musb_writew(epio, MUSB_RXCSR, csr);
1480                musb_writew(epio, MUSB_RXCSR, csr);
1481        }
1482
        /* restore the TX interrupt mask */
1484        musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
1485        spin_unlock_irqrestore(&musb->lock, flags);
1486}
1487
1488static const struct usb_ep_ops musb_ep_ops = {
1489        .enable         = musb_gadget_enable,
1490        .disable        = musb_gadget_disable,
1491        .alloc_request  = musb_alloc_request,
1492        .free_request   = musb_free_request,
1493        .queue          = musb_gadget_queue,
1494        .dequeue        = musb_gadget_dequeue,
1495        .set_halt       = musb_gadget_set_halt,
1496        .set_wedge      = musb_gadget_set_wedge,
1497        .fifo_status    = musb_gadget_fifo_status,
        .fifo_flush     = musb_gadget_fifo_flush,
1499};
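
/*
 * Example (illustrative sketch, not part of this file): the gadget core
 * dispatches the generic endpoint API to the ops table above, roughly:
 *
 *	int usb_ep_queue(struct usb_ep *ep, struct usb_request *req,
 *			 gfp_t gfp_flags)
 *	{
 *		return ep->ops->queue(ep, req, gfp_flags);
 *	}
 *
 * (the real wrapper in drivers/usb/gadget/udc/core.c also checks that
 * the endpoint is enabled and emits tracepoints)
 */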
1500
1501/* ----------------------------------------------------------------------- */
1502
1503static int musb_gadget_get_frame(struct usb_gadget *gadget)
1504{
1505        struct musb     *musb = gadget_to_musb(gadget);
1506
1507        return (int)musb_readw(musb->mregs, MUSB_FRAME);
1508}
1509
1510static int musb_gadget_wakeup(struct usb_gadget *gadget)
1511{
1512        struct musb     *musb = gadget_to_musb(gadget);
1513        void __iomem    *mregs = musb->mregs;
1514        unsigned long   flags;
1515        int             status = -EINVAL;
1516        u8              power, devctl;
1517        int             retries;
1518
1519        spin_lock_irqsave(&musb->lock, flags);
1520
1521        switch (musb->xceiv->otg->state) {
1522        case OTG_STATE_B_PERIPHERAL:
1523                /* NOTE:  OTG state machine doesn't include B_SUSPENDED;
1524                 * that's part of the standard usb 1.1 state machine, and
1525                 * doesn't affect OTG transitions.
1526                 */
1527                if (musb->may_wakeup && musb->is_suspended)
1528                        break;
1529                goto done;
1530        case OTG_STATE_B_IDLE:
1531                /* Start SRP ... OTG not required. */
1532                devctl = musb_readb(mregs, MUSB_DEVCTL);
1533                musb_dbg(musb, "Sending SRP: devctl: %02x", devctl);
1534                devctl |= MUSB_DEVCTL_SESSION;
1535                musb_writeb(mregs, MUSB_DEVCTL, devctl);
1536                devctl = musb_readb(mregs, MUSB_DEVCTL);
1537                retries = 100;
1538                while (!(devctl & MUSB_DEVCTL_SESSION)) {
1539                        devctl = musb_readb(mregs, MUSB_DEVCTL);
1540                        if (retries-- < 1)
1541                                break;
1542                }
1543                retries = 10000;
1544                while (devctl & MUSB_DEVCTL_SESSION) {
1545                        devctl = musb_readb(mregs, MUSB_DEVCTL);
1546                        if (retries-- < 1)
1547                                break;
1548                }
1549
1550                spin_unlock_irqrestore(&musb->lock, flags);
1551                otg_start_srp(musb->xceiv->otg);
1552                spin_lock_irqsave(&musb->lock, flags);
1553
                /* Block idling for at least 1s */
                musb_platform_try_idle(musb,
                        jiffies + msecs_to_jiffies(1000));
1557
1558                status = 0;
1559                goto done;
1560        default:
1561                musb_dbg(musb, "Unhandled wake: %s",
1562                        usb_otg_state_string(musb->xceiv->otg->state));
1563                goto done;
1564        }
1565
1566        status = 0;
1567
1568        power = musb_readb(mregs, MUSB_POWER);
1569        power |= MUSB_POWER_RESUME;
1570        musb_writeb(mregs, MUSB_POWER, power);
1571        musb_dbg(musb, "issue wakeup");
1572
        /* FIXME do this next chunk in a timer callback, without mdelay */
1574        mdelay(2);
1575
1576        power = musb_readb(mregs, MUSB_POWER);
1577        power &= ~MUSB_POWER_RESUME;
1578        musb_writeb(mregs, MUSB_POWER, power);
1579done:
1580        spin_unlock_irqrestore(&musb->lock, flags);
1581        return status;
1582}
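
/*
 * Example (illustrative sketch, not part of this file): a suspended
 * gadget driver with pending data can request resume signalling via
 *
 *	usb_gadget_wakeup(gadget);
 *
 * which reaches this function through the .wakeup op; it succeeds only
 * if the host armed remote wakeup (musb->may_wakeup) while the link is
 * suspended.
 */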
1583
1584static int
1585musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
1586{
1587        gadget->is_selfpowered = !!is_selfpowered;
1588        return 0;
1589}
1590
1591static void musb_pullup(struct musb *musb, int is_on)
1592{
1593        u8 power;
1594
1595        power = musb_readb(musb->mregs, MUSB_POWER);
1596        if (is_on)
1597                power |= MUSB_POWER_SOFTCONN;
1598        else
1599                power &= ~MUSB_POWER_SOFTCONN;
1600
1601        /* FIXME if on, HdrcStart; if off, HdrcStop */
1602
1603        musb_dbg(musb, "gadget D+ pullup %s",
1604                is_on ? "on" : "off");
1605        musb_writeb(musb->mregs, MUSB_POWER, power);
1606}
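
/*
 * Example (illustrative sketch, not part of this file): besides the
 * .pullup gadget op, the UDC core exposes this soft-connect control to
 * userspace through sysfs (the UDC instance name is an example):
 *
 *	echo disconnect > /sys/class/udc/musb-hdrc.0/soft_connect
 *	echo connect > /sys/class/udc/musb-hdrc.0/soft_connect
 */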
1607
1608#if 0
static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
{
        struct musb     *musb = gadget_to_musb(gadget);

        musb_dbg(musb, "<= %s =>", __func__);
1612
1613        /*
1614         * FIXME iff driver's softconnect flag is set (as it is during probe,
1615         * though that can clear it), just musb_pullup().
1616         */
1617
1618        return -EINVAL;
1619}
1620#endif
1621
1622static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1623{
1624        struct musb     *musb = gadget_to_musb(gadget);
1625
1626        if (!musb->xceiv->set_power)
1627                return -EOPNOTSUPP;
1628        return usb_phy_set_power(musb->xceiv, mA);
1629}
1630
1631static void musb_gadget_work(struct work_struct *work)
1632{
1633        struct musb *musb;
1634        unsigned long flags;
1635
1636        musb = container_of(work, struct musb, gadget_work.work);
1637        pm_runtime_get_sync(musb->controller);
1638        spin_lock_irqsave(&musb->lock, flags);
1639        musb_pullup(musb, musb->softconnect);
1640        spin_unlock_irqrestore(&musb->lock, flags);
1641        pm_runtime_mark_last_busy(musb->controller);
1642        pm_runtime_put_autosuspend(musb->controller);
1643}
1644
1645static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1646{
1647        struct musb     *musb = gadget_to_musb(gadget);
1648        unsigned long   flags;
1649
1650        is_on = !!is_on;
1651
1652        /* NOTE: this assumes we are sensing vbus; we'd rather
1653         * not pullup unless the B-session is active.
1654         */
1655        spin_lock_irqsave(&musb->lock, flags);
1656        if (is_on != musb->softconnect) {
1657                musb->softconnect = is_on;
1658                schedule_delayed_work(&musb->gadget_work, 0);
1659        }
1660        spin_unlock_irqrestore(&musb->lock, flags);
1661
1662        return 0;
1663}
1664
1665static int musb_gadget_start(struct usb_gadget *g,
1666                struct usb_gadget_driver *driver);
1667static int musb_gadget_stop(struct usb_gadget *g);
1668
1669static const struct usb_gadget_ops musb_gadget_operations = {
1670        .get_frame              = musb_gadget_get_frame,
1671        .wakeup                 = musb_gadget_wakeup,
1672        .set_selfpowered        = musb_gadget_set_self_powered,
1673        /* .vbus_session                = musb_gadget_vbus_session, */
1674        .vbus_draw              = musb_gadget_vbus_draw,
1675        .pullup                 = musb_gadget_pullup,
1676        .udc_start              = musb_gadget_start,
1677        .udc_stop               = musb_gadget_stop,
1678};
1679
1680/* ----------------------------------------------------------------------- */
1681
1682/* Registration */
1683
1684/* Only this registration code "knows" the rule (from USB standards)
1685 * about there being only one external upstream port.  It assumes
1686 * all peripheral ports are external...
1687 */
1688
1689static void
1690init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
1691{
1692        struct musb_hw_ep       *hw_ep = musb->endpoints + epnum;
1693
        memset(ep, 0, sizeof(*ep));
1695
1696        ep->current_epnum = epnum;
1697        ep->musb = musb;
1698        ep->hw_ep = hw_ep;
1699        ep->is_in = is_in;
1700
1701        INIT_LIST_HEAD(&ep->req_list);
1702
1703        sprintf(ep->name, "ep%d%s", epnum,
1704                        (!epnum || hw_ep->is_shared_fifo) ? "" : (
1705                                is_in ? "in" : "out"));
1706        ep->end_point.name = ep->name;
1707        INIT_LIST_HEAD(&ep->end_point.ep_list);
1708        if (!epnum) {
1709                usb_ep_set_maxpacket_limit(&ep->end_point, 64);
1710                ep->end_point.caps.type_control = true;
1711                ep->end_point.ops = &musb_g_ep0_ops;
1712                musb->g.ep0 = &ep->end_point;
1713        } else {
1714                if (is_in)
1715                        usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_tx);
1716                else
1717                        usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_rx);
1718                ep->end_point.caps.type_iso = true;
1719                ep->end_point.caps.type_bulk = true;
1720                ep->end_point.caps.type_int = true;
1721                ep->end_point.ops = &musb_ep_ops;
1722                list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
1723        }
1724
1725        if (!epnum || hw_ep->is_shared_fifo) {
1726                ep->end_point.caps.dir_in = true;
1727                ep->end_point.caps.dir_out = true;
1728        } else if (is_in)
1729                ep->end_point.caps.dir_in = true;
1730        else
1731                ep->end_point.caps.dir_out = true;
1732}
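
/*
 * Example (illustrative sketch, not part of this file): the sprintf()
 * above yields names like "ep1in"/"ep2out", or bare "ep1" for a
 * shared-FIFO endpoint. Function drivers usually don't pick endpoints
 * by name; they let the core match a descriptor against the caps set
 * here:
 *
 *	struct usb_ep *ep;
 *
 *	ep = usb_ep_autoconfig(gadget, &bulk_in_desc);	(hypothetical desc)
 *	if (!ep)
 *		goto autoconf_fail;
 */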
1733
1734/*
1735 * Initialize the endpoints exposed to peripheral drivers, with backlinks
1736 * to the rest of the driver state.
1737 */
1738static inline void musb_g_init_endpoints(struct musb *musb)
1739{
1740        u8                      epnum;
1741        struct musb_hw_ep       *hw_ep;
1743
1744        /* initialize endpoint list just once */
1745        INIT_LIST_HEAD(&(musb->g.ep_list));
1746
1747        for (epnum = 0, hw_ep = musb->endpoints;
1748                        epnum < musb->nr_endpoints;
1749                        epnum++, hw_ep++) {
                if (hw_ep->is_shared_fifo /* || !epnum */) {
                        init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
                } else {
                        if (hw_ep->max_packet_sz_tx)
                                init_peripheral_ep(musb, &hw_ep->ep_in,
                                                        epnum, 1);
                        if (hw_ep->max_packet_sz_rx)
                                init_peripheral_ep(musb, &hw_ep->ep_out,
                                                        epnum, 0);
                }
1765        }
1766}
1767
1768/* called once during driver setup to initialize and link into
1769 * the driver model; memory is zeroed.
1770 */
1771int musb_gadget_setup(struct musb *musb)
1772{
1773        int status;
1774
1775        /* REVISIT minor race:  if (erroneously) setting up two
1776         * musb peripherals at the same time, only the bus lock
1777         * is probably held.
1778         */
1779
1780        musb->g.ops = &musb_gadget_operations;
1781        musb->g.max_speed = USB_SPEED_HIGH;
1782        musb->g.speed = USB_SPEED_UNKNOWN;
1783
1784        MUSB_DEV_MODE(musb);
1785        musb->xceiv->otg->state = OTG_STATE_B_IDLE;
1786
1787        /* this "gadget" abstracts/virtualizes the controller */
1788        musb->g.name = musb_driver_name;
1789        /* don't support otg protocols */
1790        musb->g.is_otg = 0;
1791        INIT_DELAYED_WORK(&musb->gadget_work, musb_gadget_work);
1792        musb_g_init_endpoints(musb);
1793
1794        musb->is_active = 0;
1795        musb_platform_try_idle(musb, 0);
1796
1797        status = usb_add_gadget_udc(musb->controller, &musb->g);
1798        if (status)
1799                goto err;
1800
1801        return 0;
1802err:
1803        musb->g.dev.parent = NULL;
1804        device_unregister(&musb->g.dev);
1805        return status;
1806}
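
/*
 * Example (illustrative sketch, not part of this file): once
 * usb_add_gadget_udc() succeeds, the controller shows up in
 * /sys/class/udc and a configfs gadget can be bound to it, which in
 * turn invokes musb_gadget_start() below (instance and gadget names
 * are examples):
 *
 *	ls /sys/class/udc
 *	echo musb-hdrc.0 > /sys/kernel/config/usb_gadget/g1/UDC
 */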
1807
1808void musb_gadget_cleanup(struct musb *musb)
1809{
1810        if (musb->port_mode == MUSB_HOST)
1811                return;
1812
1813        cancel_delayed_work_sync(&musb->gadget_work);
1814        usb_del_gadget_udc(&musb->g);
1815}
1816
1817/*
1818 * Register the gadget driver. Used by gadget drivers when
1819 * registering themselves with the controller.
1820 *
 * -EINVAL the driver couldn't be bound, e.g. it doesn't support high speed
1822 * -EBUSY another gadget is already using the controller
1823 * -ENOMEM no memory to perform the operation
1824 *
1825 * @param driver the gadget driver
1826 * @return <0 if error, 0 if everything is fine
1827 */
1828static int musb_gadget_start(struct usb_gadget *g,
1829                struct usb_gadget_driver *driver)
1830{
1831        struct musb             *musb = gadget_to_musb(g);
1832        struct usb_otg          *otg = musb->xceiv->otg;
1833        unsigned long           flags;
1834        int                     retval = 0;
1835
1836        if (driver->max_speed < USB_SPEED_HIGH) {
1837                retval = -EINVAL;
1838                goto err;
1839        }
1840
1841        pm_runtime_get_sync(musb->controller);
1842
1843        musb->softconnect = 0;
1844        musb->gadget_driver = driver;
1845
1846        spin_lock_irqsave(&musb->lock, flags);
1847        musb->is_active = 1;
1848
1849        otg_set_peripheral(otg, &musb->g);
1850        musb->xceiv->otg->state = OTG_STATE_B_IDLE;
1851        spin_unlock_irqrestore(&musb->lock, flags);
1852
1853        musb_start(musb);
1854
1855        /* REVISIT:  funcall to other code, which also
1856         * handles power budgeting ... this way also
1857         * ensures HdrcStart is indirectly called.
1858         */
1859        if (musb->xceiv->last_event == USB_EVENT_ID)
1860                musb_platform_set_vbus(musb, 1);
1861
1862        pm_runtime_mark_last_busy(musb->controller);
1863        pm_runtime_put_autosuspend(musb->controller);
1864
1865        return 0;
1866
1867err:
1868        return retval;
1869}
1870
1871/*
1872 * Unregister the gadget driver. Used by gadget drivers when
1873 * unregistering themselves from the controller.
1874 *
1875 * @param driver the gadget driver to unregister
1876 */
1877static int musb_gadget_stop(struct usb_gadget *g)
1878{
1879        struct musb     *musb = gadget_to_musb(g);
1880        unsigned long   flags;
1881
1882        pm_runtime_get_sync(musb->controller);
1883
1884        /*
1885         * REVISIT always use otg_set_peripheral() here too;
1886         * this needs to shut down the OTG engine.
1887         */
1888
1889        spin_lock_irqsave(&musb->lock, flags);
1890
1891        musb_hnp_stop(musb);
1892
1893        (void) musb_gadget_vbus_draw(&musb->g, 0);
1894
1895        musb->xceiv->otg->state = OTG_STATE_UNDEFINED;
1896        musb_stop(musb);
1897        otg_set_peripheral(musb->xceiv->otg, NULL);
1898
1899        musb->is_active = 0;
1900        musb->gadget_driver = NULL;
1901        musb_platform_try_idle(musb, 0);
1902        spin_unlock_irqrestore(&musb->lock, flags);
1903
1904        /*
1905         * FIXME we need to be able to register another
1906         * gadget driver here and have everything work;
1907         * that currently misbehaves.
1908         */
1909
1910        /* Force check of devctl register for PM runtime */
1911        schedule_delayed_work(&musb->irq_work, 0);
1912
1913        pm_runtime_mark_last_busy(musb->controller);
1914        pm_runtime_put_autosuspend(musb->controller);
1915
1916        return 0;
1917}
1918
1919/* ----------------------------------------------------------------------- */
1920
/* lifecycle operations called through musb_core.c */
1922
1923void musb_g_resume(struct musb *musb)
1924{
1925        musb->is_suspended = 0;
1926        switch (musb->xceiv->otg->state) {
1927        case OTG_STATE_B_IDLE:
1928                break;
1929        case OTG_STATE_B_WAIT_ACON:
1930        case OTG_STATE_B_PERIPHERAL:
1931                musb->is_active = 1;
1932                if (musb->gadget_driver && musb->gadget_driver->resume) {
1933                        spin_unlock(&musb->lock);
1934                        musb->gadget_driver->resume(&musb->g);
1935                        spin_lock(&musb->lock);
1936                }
1937                break;
1938        default:
1939                WARNING("unhandled RESUME transition (%s)\n",
1940                                usb_otg_state_string(musb->xceiv->otg->state));
1941        }
1942}
1943
1944/* called when SOF packets stop for 3+ msec */
1945void musb_g_suspend(struct musb *musb)
1946{
1947        u8      devctl;
1948
1949        devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1950        musb_dbg(musb, "musb_g_suspend: devctl %02x", devctl);
1951
1952        switch (musb->xceiv->otg->state) {
1953        case OTG_STATE_B_IDLE:
1954                if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
1955                        musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
1956                break;
1957        case OTG_STATE_B_PERIPHERAL:
1958                musb->is_suspended = 1;
1959                if (musb->gadget_driver && musb->gadget_driver->suspend) {
1960                        spin_unlock(&musb->lock);
1961                        musb->gadget_driver->suspend(&musb->g);
1962                        spin_lock(&musb->lock);
1963                }
1964                break;
1965        default:
1966                /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
1967                 * A_PERIPHERAL may need care too
1968                 */
1969                WARNING("unhandled SUSPEND transition (%s)",
1970                                usb_otg_state_string(musb->xceiv->otg->state));
1971        }
1972}
1973
1974/* Called during SRP */
1975void musb_g_wakeup(struct musb *musb)
1976{
1977        musb_gadget_wakeup(&musb->g);
1978}
1979
1980/* called when VBUS drops below session threshold, and in other cases */
1981void musb_g_disconnect(struct musb *musb)
1982{
1983        void __iomem    *mregs = musb->mregs;
1984        u8      devctl = musb_readb(mregs, MUSB_DEVCTL);
1985
1986        musb_dbg(musb, "musb_g_disconnect: devctl %02x", devctl);
1987
1988        /* clear HR */
1989        musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
1990
1991        /* don't draw vbus until new b-default session */
1992        (void) musb_gadget_vbus_draw(&musb->g, 0);
1993
1994        musb->g.speed = USB_SPEED_UNKNOWN;
1995        if (musb->gadget_driver && musb->gadget_driver->disconnect) {
1996                spin_unlock(&musb->lock);
1997                musb->gadget_driver->disconnect(&musb->g);
1998                spin_lock(&musb->lock);
1999        }
2000
2001        switch (musb->xceiv->otg->state) {
2002        default:
2003                musb_dbg(musb, "Unhandled disconnect %s, setting a_idle",
2004                        usb_otg_state_string(musb->xceiv->otg->state));
2005                musb->xceiv->otg->state = OTG_STATE_A_IDLE;
2006                MUSB_HST_MODE(musb);
2007                break;
2008        case OTG_STATE_A_PERIPHERAL:
2009                musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
2010                MUSB_HST_MODE(musb);
2011                break;
2012        case OTG_STATE_B_WAIT_ACON:
2013        case OTG_STATE_B_HOST:
2014        case OTG_STATE_B_PERIPHERAL:
2015        case OTG_STATE_B_IDLE:
2016                musb->xceiv->otg->state = OTG_STATE_B_IDLE;
2017                break;
2018        case OTG_STATE_B_SRP_INIT:
2019                break;
2020        }
2021
2022        musb->is_active = 0;
2023}
2024
2025void musb_g_reset(struct musb *musb)
2026__releases(musb->lock)
2027__acquires(musb->lock)
2028{
2029        void __iomem    *mbase = musb->mregs;
2030        u8              devctl = musb_readb(mbase, MUSB_DEVCTL);
2031        u8              power;
2032
2033        musb_dbg(musb, "<== %s driver '%s'",
2034                        (devctl & MUSB_DEVCTL_BDEVICE)
2035                                ? "B-Device" : "A-Device",
2036                        musb->gadget_driver
2037                                ? musb->gadget_driver->driver.name
                                : "(none)"
2039                        );
2040
2041        /* report reset, if we didn't already (flushing EP state) */
2042        if (musb->gadget_driver && musb->g.speed != USB_SPEED_UNKNOWN) {
2043                spin_unlock(&musb->lock);
2044                usb_gadget_udc_reset(&musb->g, musb->gadget_driver);
2045                spin_lock(&musb->lock);
2046        }
2047
2048        /* clear HR */
2049        else if (devctl & MUSB_DEVCTL_HR)
2050                musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
2051
        /* what speed did we negotiate? */
2054        power = musb_readb(mbase, MUSB_POWER);
2055        musb->g.speed = (power & MUSB_POWER_HSMODE)
2056                        ? USB_SPEED_HIGH : USB_SPEED_FULL;
2057
2058        /* start in USB_STATE_DEFAULT */
2059        musb->is_active = 1;
2060        musb->is_suspended = 0;
2061        MUSB_DEV_MODE(musb);
2062        musb->address = 0;
2063        musb->ep0_state = MUSB_EP0_STAGE_SETUP;
2064
2065        musb->may_wakeup = 0;
2066        musb->g.b_hnp_enable = 0;
2067        musb->g.a_alt_hnp_support = 0;
2068        musb->g.a_hnp_support = 0;
2069        musb->g.quirk_zlp_not_supp = 1;
2070
2071        /* Normal reset, as B-Device;
2072         * or else after HNP, as A-Device
2073         */
2074        if (!musb->g.is_otg) {
2075                /* USB device controllers that are not OTG compatible
2076                 * may not have DEVCTL register in silicon.
2077                 * In that case, do not rely on devctl for setting
2078                 * peripheral mode.
2079                 */
2080                musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
2081                musb->g.is_a_peripheral = 0;
2082        } else if (devctl & MUSB_DEVCTL_BDEVICE) {
2083                musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
2084                musb->g.is_a_peripheral = 0;
2085        } else {
2086                musb->xceiv->otg->state = OTG_STATE_A_PERIPHERAL;
2087                musb->g.is_a_peripheral = 1;
2088        }
2089
2090        /* start with default limits on VBUS power draw */
2091        (void) musb_gadget_vbus_draw(&musb->g, 8);
2092}
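
/*
 * Illustrative note (not part of this file): after musb_g_reset() the
 * device is back in the default state: address 0, unconfigured, ep0
 * only. The usb_gadget_udc_reset() call above lets the gadget driver
 * (e.g. the composite framework) observe the reset and tear down its
 * configuration accordingly.
 */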
2093