linux/drivers/usb/musb/musb_gadget.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * MUSB OTG driver peripheral support
   4 *
   5 * Copyright 2005 Mentor Graphics Corporation
   6 * Copyright (C) 2005-2006 by Texas Instruments
   7 * Copyright (C) 2006-2007 Nokia Corporation
   8 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
   9 */
  10
  11#include <linux/kernel.h>
  12#include <linux/list.h>
  13#include <linux/timer.h>
  14#include <linux/module.h>
  15#include <linux/smp.h>
  16#include <linux/spinlock.h>
  17#include <linux/delay.h>
  18#include <linux/dma-mapping.h>
  19#include <linux/slab.h>
  20
  21#include "musb_core.h"
  22#include "musb_trace.h"
  23
  24
  25/* ----------------------------------------------------------------------- */
  26
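     /*
      * A request counts as "mapped" once map_dma_buffer() below has either
      * mapped it itself (MUSB_MAPPED) or found it pre-mapped by the gadget
      * driver (PRE_MAPPED); requests left UN_MAPPED take the PIO path.
      */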
  27#define is_buffer_mapped(req) (is_dma_capable() && \
  28                                        (req->map_state != UN_MAPPED))
  29
   30/* Map the buffer for DMA */
  31
  32static inline void map_dma_buffer(struct musb_request *request,
  33                        struct musb *musb, struct musb_ep *musb_ep)
  34{
  35        int compatible = true;
  36        struct dma_controller *dma = musb->dma_controller;
  37
  38        request->map_state = UN_MAPPED;
  39
  40        if (!is_dma_capable() || !musb_ep->dma)
  41                return;
  42
  43        /* Check if DMA engine can handle this request.
  44         * DMA code must reject the USB request explicitly.
  45         * Default behaviour is to map the request.
  46         */
  47        if (dma->is_compatible)
  48                compatible = dma->is_compatible(musb_ep->dma,
  49                                musb_ep->packet_sz, request->request.buf,
  50                                request->request.length);
  51        if (!compatible)
  52                return;
  53
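             /*
              * No DMA address from the gadget driver means we map the buffer
              * here and own the mapping; otherwise it was pre-mapped by the
              * gadget, so just sync it for the device.
              */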
  54        if (request->request.dma == DMA_ADDR_INVALID) {
  55                dma_addr_t dma_addr;
  56                int ret;
  57
  58                dma_addr = dma_map_single(
  59                                musb->controller,
  60                                request->request.buf,
  61                                request->request.length,
  62                                request->tx
  63                                        ? DMA_TO_DEVICE
  64                                        : DMA_FROM_DEVICE);
  65                ret = dma_mapping_error(musb->controller, dma_addr);
  66                if (ret)
  67                        return;
  68
  69                request->request.dma = dma_addr;
  70                request->map_state = MUSB_MAPPED;
  71        } else {
  72                dma_sync_single_for_device(musb->controller,
  73                        request->request.dma,
  74                        request->request.length,
  75                        request->tx
  76                                ? DMA_TO_DEVICE
  77                                : DMA_FROM_DEVICE);
  78                request->map_state = PRE_MAPPED;
  79        }
  80}
  81
   82/* Unmap the buffer from DMA and hand it back to the CPU */
  83static inline void unmap_dma_buffer(struct musb_request *request,
  84                                struct musb *musb)
  85{
  86        struct musb_ep *musb_ep = request->ep;
  87
  88        if (!is_buffer_mapped(request) || !musb_ep->dma)
  89                return;
  90
  91        if (request->request.dma == DMA_ADDR_INVALID) {
  92                dev_vdbg(musb->controller,
  93                                "not unmapping a never mapped buffer\n");
  94                return;
  95        }
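             /*
              * Only unmap buffers we mapped ourselves; pre-mapped buffers are
              * just synced back so the CPU sees the data written by DMA.
              */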
  96        if (request->map_state == MUSB_MAPPED) {
  97                dma_unmap_single(musb->controller,
  98                        request->request.dma,
  99                        request->request.length,
 100                        request->tx
 101                                ? DMA_TO_DEVICE
 102                                : DMA_FROM_DEVICE);
 103                request->request.dma = DMA_ADDR_INVALID;
 104        } else { /* PRE_MAPPED */
 105                dma_sync_single_for_cpu(musb->controller,
 106                        request->request.dma,
 107                        request->request.length,
 108                        request->tx
 109                                ? DMA_TO_DEVICE
 110                                : DMA_FROM_DEVICE);
 111        }
 112        request->map_state = UN_MAPPED;
 113}
 114
 115/*
 116 * Immediately complete a request.
 117 *
 118 * @param request the request to complete
 119 * @param status the status to complete the request with
 120 * Context: controller locked, IRQs blocked.
 121 */
 122void musb_g_giveback(
 123        struct musb_ep          *ep,
 124        struct usb_request      *request,
 125        int                     status)
 126__releases(ep->musb->lock)
 127__acquires(ep->musb->lock)
 128{
 129        struct musb_request     *req;
 130        struct musb             *musb;
 131        int                     busy = ep->busy;
 132
 133        req = to_musb_request(request);
 134
 135        list_del(&req->list);
 136        if (req->request.status == -EINPROGRESS)
 137                req->request.status = status;
 138        musb = req->musb;
 139
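             /*
              * Mark the endpoint busy so that queue/restart paths triggered by
              * the completion callback don't start new I/O while the lock is
              * dropped; the previous value is restored afterwards.
              */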
 140        ep->busy = 1;
 141        spin_unlock(&musb->lock);
 142
 143        if (!dma_mapping_error(&musb->g.dev, request->dma))
 144                unmap_dma_buffer(req, musb);
 145
 146        trace_musb_req_gb(req);
 147        usb_gadget_giveback_request(&req->ep->end_point, &req->request);
 148        spin_lock(&musb->lock);
 149        ep->busy = busy;
 150}
 151
 152/* ----------------------------------------------------------------------- */
 153
 154/*
  155 * Abort all requests queued to an endpoint, completing them with the given status.
  156 * Synchronous. Caller has locked the controller, blocked IRQs, and selected this ep.
 157 */
 158static void nuke(struct musb_ep *ep, const int status)
 159{
 160        struct musb             *musb = ep->musb;
 161        struct musb_request     *req = NULL;
 162        void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
 163
 164        ep->busy = 1;
 165
 166        if (is_dma_capable() && ep->dma) {
 167                struct dma_controller   *c = ep->musb->dma_controller;
 168                int value;
 169
 170                if (ep->is_in) {
 171                        /*
 172                         * The programming guide says that we must not clear
 173                         * the DMAMODE bit before DMAENAB, so we only
 174                         * clear it in the second write...
 175                         */
 176                        musb_writew(epio, MUSB_TXCSR,
 177                                    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
 178                        musb_writew(epio, MUSB_TXCSR,
 179                                        0 | MUSB_TXCSR_FLUSHFIFO);
 180                } else {
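                             /*
                              * Flush twice so a double-buffered FIFO is fully
                              * drained (the CSRs are likewise written twice
                              * when enabling an endpoint).
                              */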
 181                        musb_writew(epio, MUSB_RXCSR,
 182                                        0 | MUSB_RXCSR_FLUSHFIFO);
 183                        musb_writew(epio, MUSB_RXCSR,
 184                                        0 | MUSB_RXCSR_FLUSHFIFO);
 185                }
 186
 187                value = c->channel_abort(ep->dma);
 188                musb_dbg(musb, "%s: abort DMA --> %d", ep->name, value);
 189                c->channel_release(ep->dma);
 190                ep->dma = NULL;
 191        }
 192
 193        while (!list_empty(&ep->req_list)) {
 194                req = list_first_entry(&ep->req_list, struct musb_request, list);
 195                musb_g_giveback(ep, &req->request, status);
 196        }
 197}
 198
 199/* ----------------------------------------------------------------------- */
 200
 201/* Data transfers - pure PIO, pure DMA, or mixed mode */
 202
 203/*
 204 * This assumes the separate CPPI engine is responding to DMA requests
 205 * from the usb core ... sequenced a bit differently from mentor dma.
 206 */
 207
 208static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
 209{
 210        if (can_bulk_split(musb, ep->type))
 211                return ep->hw_ep->max_packet_sz_tx;
 212        else
 213                return ep->packet_sz;
 214}
 215
 216/*
 217 * An endpoint is transmitting data. This can be called either from
 218 * the IRQ routine or from ep.queue() to kickstart a request on an
 219 * endpoint.
 220 *
 221 * Context: controller locked, IRQs blocked, endpoint selected
 222 */
 223static void txstate(struct musb *musb, struct musb_request *req)
 224{
 225        u8                      epnum = req->epnum;
 226        struct musb_ep          *musb_ep;
 227        void __iomem            *epio = musb->endpoints[epnum].regs;
 228        struct usb_request      *request;
 229        u16                     fifo_count = 0, csr;
 230        int                     use_dma = 0;
 231
 232        musb_ep = req->ep;
 233
 234        /* Check if EP is disabled */
 235        if (!musb_ep->desc) {
 236                musb_dbg(musb, "ep:%s disabled - ignore request",
 237                                                musb_ep->end_point.name);
 238                return;
 239        }
 240
 241        /* we shouldn't get here while DMA is active ... but we do ... */
 242        if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
 243                musb_dbg(musb, "dma pending...");
 244                return;
 245        }
 246
 247        /* read TXCSR before */
 248        csr = musb_readw(epio, MUSB_TXCSR);
 249
 250        request = &req->request;
 251        fifo_count = min(max_ep_writesize(musb, musb_ep),
 252                        (int)(request->length - request->actual));
 253
 254        if (csr & MUSB_TXCSR_TXPKTRDY) {
  255                musb_dbg(musb, "%s old packet still ready, txcsr %03x",
 256                                musb_ep->end_point.name, csr);
 257                return;
 258        }
 259
 260        if (csr & MUSB_TXCSR_P_SENDSTALL) {
 261                musb_dbg(musb, "%s stalling, txcsr %03x",
 262                                musb_ep->end_point.name, csr);
 263                return;
 264        }
 265
 266        musb_dbg(musb, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x",
 267                        epnum, musb_ep->packet_sz, fifo_count,
 268                        csr);
 269
 270#ifndef CONFIG_MUSB_PIO_ONLY
 271        if (is_buffer_mapped(req)) {
 272                struct dma_controller   *c = musb->dma_controller;
 273                size_t request_size;
 274
 275                /* setup DMA, then program endpoint CSR */
 276                request_size = min_t(size_t, request->length - request->actual,
 277                                        musb_ep->dma->max_len);
 278
 279                use_dma = (request->dma != DMA_ADDR_INVALID && request_size);
 280
 281                /* MUSB_TXCSR_P_ISO is still set correctly */
 282
 283                if (musb_dma_inventra(musb) || musb_dma_ux500(musb)) {
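                             /*
                              * Mentor DMA mode 0 moves at most one packet per
                              * channel_program() call, while mode 1 can chain
                              * full-size packets; use mode 0 only when the
                              * transfer is shorter than one packet.
                              */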
 284                        if (request_size < musb_ep->packet_sz)
 285                                musb_ep->dma->desired_mode = 0;
 286                        else
 287                                musb_ep->dma->desired_mode = 1;
 288
 289                        use_dma = use_dma && c->channel_program(
 290                                        musb_ep->dma, musb_ep->packet_sz,
 291                                        musb_ep->dma->desired_mode,
 292                                        request->dma + request->actual, request_size);
 293                        if (use_dma) {
 294                                if (musb_ep->dma->desired_mode == 0) {
 295                                        /*
 296                                         * We must not clear the DMAMODE bit
 297                                         * before the DMAENAB bit -- and the
 298                                         * latter doesn't always get cleared
 299                                         * before we get here...
 300                                         */
 301                                        csr &= ~(MUSB_TXCSR_AUTOSET
 302                                                | MUSB_TXCSR_DMAENAB);
 303                                        musb_writew(epio, MUSB_TXCSR, csr
 304                                                | MUSB_TXCSR_P_WZC_BITS);
 305                                        csr &= ~MUSB_TXCSR_DMAMODE;
 306                                        csr |= (MUSB_TXCSR_DMAENAB |
 307                                                        MUSB_TXCSR_MODE);
 308                                        /* against programming guide */
 309                                } else {
 310                                        csr |= (MUSB_TXCSR_DMAENAB
 311                                                        | MUSB_TXCSR_DMAMODE
 312                                                        | MUSB_TXCSR_MODE);
 313                                        /*
 314                                         * Enable Autoset according to table
 315                                         * below
 316                                         * bulk_split hb_mult   Autoset_Enable
 317                                         *      0       0       Yes(Normal)
 318                                         *      0       >0      No(High BW ISO)
 319                                         *      1       0       Yes(HS bulk)
 320                                         *      1       >0      Yes(FS bulk)
 321                                         */
 322                                        if (!musb_ep->hb_mult ||
 323                                            can_bulk_split(musb,
 324                                                           musb_ep->type))
 325                                                csr |= MUSB_TXCSR_AUTOSET;
 326                                }
 327                                csr &= ~MUSB_TXCSR_P_UNDERRUN;
 328
 329                                musb_writew(epio, MUSB_TXCSR, csr);
 330                        }
 331                }
 332
 333                if (is_cppi_enabled(musb)) {
 334                        /* program endpoint CSR first, then setup DMA */
 335                        csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
 336                        csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
 337                                MUSB_TXCSR_MODE;
 338                        musb_writew(epio, MUSB_TXCSR, (MUSB_TXCSR_P_WZC_BITS &
 339                                                ~MUSB_TXCSR_P_UNDERRUN) | csr);
 340
 341                        /* ensure writebuffer is empty */
 342                        csr = musb_readw(epio, MUSB_TXCSR);
 343
 344                        /*
 345                         * NOTE host side sets DMAENAB later than this; both are
 346                         * OK since the transfer dma glue (between CPPI and
 347                         * Mentor fifos) just tells CPPI it could start. Data
 348                         * only moves to the USB TX fifo when both fifos are
 349                         * ready.
 350                         */
 351                        /*
 352                         * "mode" is irrelevant here; handle terminating ZLPs
 353                         * like PIO does, since the hardware RNDIS mode seems
 354                         * unreliable except for the
 355                         * last-packet-is-already-short case.
 356                         */
 357                        use_dma = use_dma && c->channel_program(
 358                                        musb_ep->dma, musb_ep->packet_sz,
 359                                        0,
 360                                        request->dma + request->actual,
 361                                        request_size);
 362                        if (!use_dma) {
 363                                c->channel_release(musb_ep->dma);
 364                                musb_ep->dma = NULL;
 365                                csr &= ~MUSB_TXCSR_DMAENAB;
 366                                musb_writew(epio, MUSB_TXCSR, csr);
  367                                /* invariant: request->buf is non-null */
 368                        }
 369                } else if (tusb_dma_omap(musb))
 370                        use_dma = use_dma && c->channel_program(
 371                                        musb_ep->dma, musb_ep->packet_sz,
 372                                        request->zero,
 373                                        request->dma + request->actual,
 374                                        request_size);
 375        }
 376#endif
 377
 378        if (!use_dma) {
 379                /*
 380                 * Unmap the dma buffer back to cpu if dma channel
 381                 * programming fails
 382                 */
 383                unmap_dma_buffer(req, musb);
 384
 385                musb_write_fifo(musb_ep->hw_ep, fifo_count,
 386                                (u8 *) (request->buf + request->actual));
 387                request->actual += fifo_count;
 388                csr |= MUSB_TXCSR_TXPKTRDY;
 389                csr &= ~MUSB_TXCSR_P_UNDERRUN;
 390                musb_writew(epio, MUSB_TXCSR, csr);
 391        }
 392
 393        /* host may already have the data when this message shows... */
 394        musb_dbg(musb, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d",
 395                        musb_ep->end_point.name, use_dma ? "dma" : "pio",
 396                        request->actual, request->length,
 397                        musb_readw(epio, MUSB_TXCSR),
 398                        fifo_count,
 399                        musb_readw(epio, MUSB_TXMAXP));
 400}
 401
 402/*
 403 * FIFO state update (e.g. data ready).
 404 * Called from IRQ,  with controller locked.
 405 */
 406void musb_g_tx(struct musb *musb, u8 epnum)
 407{
 408        u16                     csr;
 409        struct musb_request     *req;
 410        struct usb_request      *request;
 411        u8 __iomem              *mbase = musb->mregs;
 412        struct musb_ep          *musb_ep = &musb->endpoints[epnum].ep_in;
 413        void __iomem            *epio = musb->endpoints[epnum].regs;
 414        struct dma_channel      *dma;
 415
 416        musb_ep_select(mbase, epnum);
 417        req = next_request(musb_ep);
 418        request = &req->request;
 419
 420        csr = musb_readw(epio, MUSB_TXCSR);
 421        musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr);
 422
 423        dma = is_dma_capable() ? musb_ep->dma : NULL;
 424
 425        /*
 426         * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
 427         * probably rates reporting as a host error.
 428         */
 429        if (csr & MUSB_TXCSR_P_SENTSTALL) {
 430                csr |=  MUSB_TXCSR_P_WZC_BITS;
 431                csr &= ~MUSB_TXCSR_P_SENTSTALL;
 432                musb_writew(epio, MUSB_TXCSR, csr);
 433                return;
 434        }
 435
 436        if (csr & MUSB_TXCSR_P_UNDERRUN) {
 437                /* We NAKed, no big deal... little reason to care. */
 438                csr |=   MUSB_TXCSR_P_WZC_BITS;
 439                csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
 440                musb_writew(epio, MUSB_TXCSR, csr);
 441                dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
 442                                epnum, request);
 443        }
 444
 445        if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
 446                /*
 447                 * SHOULD NOT HAPPEN... has with CPPI though, after
 448                 * changing SENDSTALL (and other cases); harmless?
 449                 */
 450                musb_dbg(musb, "%s dma still busy?", musb_ep->end_point.name);
 451                return;
 452        }
 453
 454        if (request) {
 455
 456                trace_musb_req_tx(req);
 457
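                     /*
                      * A DMA transfer has completed: turn the DMA bits off,
                      * credit the bytes the channel actually moved, then fall
                      * through to the ZLP / next-request handling below.
                      */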
 458                if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
 459                        csr |= MUSB_TXCSR_P_WZC_BITS;
 460                        csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
 461                                 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
 462                        musb_writew(epio, MUSB_TXCSR, csr);
 463                        /* Ensure writebuffer is empty. */
 464                        csr = musb_readw(epio, MUSB_TXCSR);
 465                        request->actual += musb_ep->dma->actual_len;
 466                        musb_dbg(musb, "TXCSR%d %04x, DMA off, len %zu, req %p",
 467                                epnum, csr, musb_ep->dma->actual_len, request);
 468                }
 469
 470                /*
 471                 * First, maybe a terminating short packet. Some DMA
 472                 * engines might handle this by themselves.
 473                 */
 474                if ((request->zero && request->length)
 475                        && (request->length % musb_ep->packet_sz == 0)
 476                        && (request->actual == request->length)) {
 477
 478                        /*
 479                         * On DMA completion, FIFO may not be
 480                         * available yet...
 481                         */
 482                        if (csr & MUSB_TXCSR_TXPKTRDY)
 483                                return;
 484
 485                        musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
 486                                        | MUSB_TXCSR_TXPKTRDY);
 487                        request->zero = 0;
 488                }
 489
 490                if (request->actual == request->length) {
 491                        musb_g_giveback(musb_ep, request, 0);
 492                        /*
 493                         * In the giveback function the MUSB lock is
  494                         * released and re-acquired after some time. During
  495                         * this time period the INDEX register could get
  496                         * changed by the gadget_queue function, especially
  497                         * on SMP systems. Reselect the INDEX to be sure
  498                         * we are reading/modifying the right registers.
 499                         */
 500                        musb_ep_select(mbase, epnum);
 501                        req = musb_ep->desc ? next_request(musb_ep) : NULL;
 502                        if (!req) {
 503                                musb_dbg(musb, "%s idle now",
 504                                        musb_ep->end_point.name);
 505                                return;
 506                        }
 507                }
 508
 509                txstate(musb, req);
 510        }
 511}
 512
 513/* ------------------------------------------------------------ */
 514
 515/*
 516 * Context: controller locked, IRQs blocked, endpoint selected
 517 */
 518static void rxstate(struct musb *musb, struct musb_request *req)
 519{
 520        const u8                epnum = req->epnum;
 521        struct usb_request      *request = &req->request;
 522        struct musb_ep          *musb_ep;
 523        void __iomem            *epio = musb->endpoints[epnum].regs;
 524        unsigned                len = 0;
 525        u16                     fifo_count;
 526        u16                     csr = musb_readw(epio, MUSB_RXCSR);
 527        struct musb_hw_ep       *hw_ep = &musb->endpoints[epnum];
 528        u8                      use_mode_1;
 529
 530        if (hw_ep->is_shared_fifo)
 531                musb_ep = &hw_ep->ep_in;
 532        else
 533                musb_ep = &hw_ep->ep_out;
 534
 535        fifo_count = musb_ep->packet_sz;
 536
 537        /* Check if EP is disabled */
 538        if (!musb_ep->desc) {
 539                musb_dbg(musb, "ep:%s disabled - ignore request",
 540                                                musb_ep->end_point.name);
 541                return;
 542        }
 543
 544        /* We shouldn't get here while DMA is active, but we do... */
 545        if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
 546                musb_dbg(musb, "DMA pending...");
 547                return;
 548        }
 549
 550        if (csr & MUSB_RXCSR_P_SENDSTALL) {
 551                musb_dbg(musb, "%s stalling, RXCSR %04x",
 552                    musb_ep->end_point.name, csr);
 553                return;
 554        }
 555
 556        if (is_cppi_enabled(musb) && is_buffer_mapped(req)) {
 557                struct dma_controller   *c = musb->dma_controller;
 558                struct dma_channel      *channel = musb_ep->dma;
 559
 560                /* NOTE:  CPPI won't actually stop advancing the DMA
 561                 * queue after short packet transfers, so this is almost
 562                 * always going to run as IRQ-per-packet DMA so that
 563                 * faults will be handled correctly.
 564                 */
 565                if (c->channel_program(channel,
 566                                musb_ep->packet_sz,
 567                                !request->short_not_ok,
 568                                request->dma + request->actual,
 569                                request->length - request->actual)) {
 570
 571                        /* make sure that if an rxpkt arrived after the irq,
 572                         * the cppi engine will be ready to take it as soon
 573                         * as DMA is enabled
 574                         */
 575                        csr &= ~(MUSB_RXCSR_AUTOCLEAR
 576                                        | MUSB_RXCSR_DMAMODE);
 577                        csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
 578                        musb_writew(epio, MUSB_RXCSR, csr);
 579                        return;
 580                }
 581        }
 582
 583        if (csr & MUSB_RXCSR_RXPKTRDY) {
 584                fifo_count = musb_readw(epio, MUSB_RXCOUNT);
 585
 586                /*
 587                 * Enable Mode 1 on RX transfers only when short_not_ok flag
 588                 * is set. Currently short_not_ok flag is set only from
 589                 * file_storage and f_mass_storage drivers
 590                 */
 591
 592                if (request->short_not_ok && fifo_count == musb_ep->packet_sz)
 593                        use_mode_1 = 1;
 594                else
 595                        use_mode_1 = 0;
 596
 597                if (request->actual < request->length) {
 598                        if (!is_buffer_mapped(req))
 599                                goto buffer_aint_mapped;
 600
 601                        if (musb_dma_inventra(musb)) {
 602                                struct dma_controller   *c;
 603                                struct dma_channel      *channel;
 604                                int                     use_dma = 0;
 605                                unsigned int transfer_size;
 606
 607                                c = musb->dma_controller;
 608                                channel = musb_ep->dma;
 609
 610        /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
 611         * mode 0 only. So we do not get endpoint interrupts due to DMA
 612         * completion. We only get interrupts from DMA controller.
 613         *
  614         * We could operate in DMA mode 1 if we knew the size of the transfer
  615         * in advance. For mass storage class, request->length = what the host
  616         * sends, so that'd work.  But for pretty much everything else,
  617         * request->length is routinely more than what the host sends. For
  618         * most of these gadgets, end of transfer is signified either by a short packet,
  619         * or filling the last byte of the buffer.  (Sending extra data in
  620         * that last packet should trigger an overflow fault.)  But in mode 1,
 621         * we don't get DMA completion interrupt for short packets.
 622         *
 623         * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
 624         * to get endpoint interrupt on every DMA req, but that didn't seem
 625         * to work reliably.
 626         *
 627         * REVISIT an updated g_file_storage can set req->short_not_ok, which
 628         * then becomes usable as a runtime "use mode 1" hint...
 629         */
 630
 631                                /* Experimental: Mode1 works with mass storage use cases */
 632                                if (use_mode_1) {
 633                                        csr |= MUSB_RXCSR_AUTOCLEAR;
 634                                        musb_writew(epio, MUSB_RXCSR, csr);
 635                                        csr |= MUSB_RXCSR_DMAENAB;
 636                                        musb_writew(epio, MUSB_RXCSR, csr);
 637
 638                                        /*
 639                                         * this special sequence (enabling and then
 640                                         * disabling MUSB_RXCSR_DMAMODE) is required
 641                                         * to get DMAReq to activate
 642                                         */
 643                                        musb_writew(epio, MUSB_RXCSR,
 644                                                csr | MUSB_RXCSR_DMAMODE);
 645                                        musb_writew(epio, MUSB_RXCSR, csr);
 646
 647                                        transfer_size = min_t(unsigned int,
 648                                                        request->length -
 649                                                        request->actual,
 650                                                        channel->max_len);
 651                                        musb_ep->dma->desired_mode = 1;
 652                                } else {
 653                                        if (!musb_ep->hb_mult &&
 654                                                musb_ep->hw_ep->rx_double_buffered)
 655                                                csr |= MUSB_RXCSR_AUTOCLEAR;
 656                                        csr |= MUSB_RXCSR_DMAENAB;
 657                                        musb_writew(epio, MUSB_RXCSR, csr);
 658
 659                                        transfer_size = min(request->length - request->actual,
 660                                                        (unsigned)fifo_count);
 661                                        musb_ep->dma->desired_mode = 0;
 662                                }
 663
 664                                use_dma = c->channel_program(
 665                                                channel,
 666                                                musb_ep->packet_sz,
 667                                                channel->desired_mode,
 668                                                request->dma
 669                                                + request->actual,
 670                                                transfer_size);
 671
 672                                if (use_dma)
 673                                        return;
 674                        }
 675
 676                        if ((musb_dma_ux500(musb)) &&
 677                                (request->actual < request->length)) {
 678
 679                                struct dma_controller *c;
 680                                struct dma_channel *channel;
 681                                unsigned int transfer_size = 0;
 682
 683                                c = musb->dma_controller;
 684                                channel = musb_ep->dma;
 685
 686                                /* In case first packet is short */
 687                                if (fifo_count < musb_ep->packet_sz)
 688                                        transfer_size = fifo_count;
 689                                else if (request->short_not_ok)
 690                                        transfer_size = min_t(unsigned int,
 691                                                        request->length -
 692                                                        request->actual,
 693                                                        channel->max_len);
 694                                else
 695                                        transfer_size = min_t(unsigned int,
 696                                                        request->length -
 697                                                        request->actual,
 698                                                        (unsigned)fifo_count);
 699
 700                                csr &= ~MUSB_RXCSR_DMAMODE;
 701                                csr |= (MUSB_RXCSR_DMAENAB |
 702                                        MUSB_RXCSR_AUTOCLEAR);
 703
 704                                musb_writew(epio, MUSB_RXCSR, csr);
 705
 706                                if (transfer_size <= musb_ep->packet_sz) {
 707                                        musb_ep->dma->desired_mode = 0;
 708                                } else {
 709                                        musb_ep->dma->desired_mode = 1;
 710                                        /* Mode must be set after DMAENAB */
 711                                        csr |= MUSB_RXCSR_DMAMODE;
 712                                        musb_writew(epio, MUSB_RXCSR, csr);
 713                                }
 714
 715                                if (c->channel_program(channel,
 716                                                        musb_ep->packet_sz,
 717                                                        channel->desired_mode,
 718                                                        request->dma
 719                                                        + request->actual,
 720                                                        transfer_size))
 721
 722                                        return;
 723                        }
 724
 725                        len = request->length - request->actual;
 726                        musb_dbg(musb, "%s OUT/RX pio fifo %d/%d, maxpacket %d",
 727                                        musb_ep->end_point.name,
 728                                        fifo_count, len,
 729                                        musb_ep->packet_sz);
 730
 731                        fifo_count = min_t(unsigned, len, fifo_count);
 732
 733                        if (tusb_dma_omap(musb)) {
 734                                struct dma_controller *c = musb->dma_controller;
 735                                struct dma_channel *channel = musb_ep->dma;
 736                                u32 dma_addr = request->dma + request->actual;
 737                                int ret;
 738
 739                                ret = c->channel_program(channel,
 740                                                musb_ep->packet_sz,
 741                                                channel->desired_mode,
 742                                                dma_addr,
 743                                                fifo_count);
 744                                if (ret)
 745                                        return;
 746                        }
 747
 748                        /*
 749                         * Unmap the dma buffer back to cpu if dma channel
 750                         * programming fails. This buffer is mapped if the
 751                         * channel allocation is successful
 752                         */
 753                        unmap_dma_buffer(req, musb);
 754
 755                        /*
 756                         * Clear DMAENAB and AUTOCLEAR for the
 757                         * PIO mode transfer
 758                         */
 759                        csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
 760                        musb_writew(epio, MUSB_RXCSR, csr);
 761
 762buffer_aint_mapped:
 763                        musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
 764                                        (request->buf + request->actual));
 765                        request->actual += fifo_count;
 766
 767                        /* REVISIT if we left anything in the fifo, flush
 768                         * it and report -EOVERFLOW
 769                         */
 770
 771                        /* ack the read! */
 772                        csr |= MUSB_RXCSR_P_WZC_BITS;
 773                        csr &= ~MUSB_RXCSR_RXPKTRDY;
 774                        musb_writew(epio, MUSB_RXCSR, csr);
 775                }
 776        }
 777
  778        /* reached the end, or a short packet was detected */
 779        if (request->actual == request->length ||
 780            fifo_count < musb_ep->packet_sz)
 781                musb_g_giveback(musb_ep, request, 0);
 782}
 783
 784/*
 785 * Data ready for a request; called from IRQ
 786 */
 787void musb_g_rx(struct musb *musb, u8 epnum)
 788{
 789        u16                     csr;
 790        struct musb_request     *req;
 791        struct usb_request      *request;
 792        void __iomem            *mbase = musb->mregs;
 793        struct musb_ep          *musb_ep;
 794        void __iomem            *epio = musb->endpoints[epnum].regs;
 795        struct dma_channel      *dma;
 796        struct musb_hw_ep       *hw_ep = &musb->endpoints[epnum];
 797
 798        if (hw_ep->is_shared_fifo)
 799                musb_ep = &hw_ep->ep_in;
 800        else
 801                musb_ep = &hw_ep->ep_out;
 802
 803        musb_ep_select(mbase, epnum);
 804
 805        req = next_request(musb_ep);
 806        if (!req)
 807                return;
 808
 809        trace_musb_req_rx(req);
 810        request = &req->request;
 811
 812        csr = musb_readw(epio, MUSB_RXCSR);
 813        dma = is_dma_capable() ? musb_ep->dma : NULL;
 814
 815        musb_dbg(musb, "<== %s, rxcsr %04x%s %p", musb_ep->end_point.name,
 816                        csr, dma ? " (dma)" : "", request);
 817
 818        if (csr & MUSB_RXCSR_P_SENTSTALL) {
 819                csr |= MUSB_RXCSR_P_WZC_BITS;
 820                csr &= ~MUSB_RXCSR_P_SENTSTALL;
 821                musb_writew(epio, MUSB_RXCSR, csr);
 822                return;
 823        }
 824
 825        if (csr & MUSB_RXCSR_P_OVERRUN) {
 826                /* csr |= MUSB_RXCSR_P_WZC_BITS; */
 827                csr &= ~MUSB_RXCSR_P_OVERRUN;
 828                musb_writew(epio, MUSB_RXCSR, csr);
 829
 830                musb_dbg(musb, "%s iso overrun on %p", musb_ep->name, request);
 831                if (request->status == -EINPROGRESS)
 832                        request->status = -EOVERFLOW;
 833        }
 834        if (csr & MUSB_RXCSR_INCOMPRX) {
 835                /* REVISIT not necessarily an error */
 836                musb_dbg(musb, "%s, incomprx", musb_ep->end_point.name);
 837        }
 838
 839        if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
 840                /* "should not happen"; likely RXPKTRDY pending for DMA */
 841                musb_dbg(musb, "%s busy, csr %04x",
 842                        musb_ep->end_point.name, csr);
 843                return;
 844        }
 845
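             /*
              * DMA completion path: disable the DMA bits, credit the bytes the
              * channel moved, then decide whether the transfer is done or more
              * packets are still expected.
              */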
 846        if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
 847                csr &= ~(MUSB_RXCSR_AUTOCLEAR
 848                                | MUSB_RXCSR_DMAENAB
 849                                | MUSB_RXCSR_DMAMODE);
 850                musb_writew(epio, MUSB_RXCSR,
 851                        MUSB_RXCSR_P_WZC_BITS | csr);
 852
 853                request->actual += musb_ep->dma->actual_len;
 854
 855#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
 856        defined(CONFIG_USB_UX500_DMA)
 857                /* Autoclear doesn't clear RxPktRdy for short packets */
 858                if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
 859                                || (dma->actual_len
 860                                        & (musb_ep->packet_sz - 1))) {
 861                        /* ack the read! */
 862                        csr &= ~MUSB_RXCSR_RXPKTRDY;
 863                        musb_writew(epio, MUSB_RXCSR, csr);
 864                }
 865
 866                /* incomplete, and not short? wait for next IN packet */
 867                if ((request->actual < request->length)
 868                                && (musb_ep->dma->actual_len
 869                                        == musb_ep->packet_sz)) {
  870                        /* In the double-buffer case, continue to unload the
  871                         * FIFO if there is an Rx packet in it.
  872                         */
 873                        csr = musb_readw(epio, MUSB_RXCSR);
 874                        if ((csr & MUSB_RXCSR_RXPKTRDY) &&
 875                                hw_ep->rx_double_buffered)
 876                                goto exit;
 877                        return;
 878                }
 879#endif
 880                musb_g_giveback(musb_ep, request, 0);
 881                /*
 882                 * In the giveback function the MUSB lock is
  883                 * released and re-acquired after some time. During
  884                 * this time period the INDEX register could get
  885                 * changed by the gadget_queue function, especially
  886                 * on SMP systems. Reselect the INDEX to be sure
  887                 * we are reading/modifying the right registers.
 888                 */
 889                musb_ep_select(mbase, epnum);
 890
 891                req = next_request(musb_ep);
 892                if (!req)
 893                        return;
 894        }
 895#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
 896        defined(CONFIG_USB_UX500_DMA)
 897exit:
 898#endif
 899        /* Analyze request */
 900        rxstate(musb, req);
 901}
 902
 903/* ------------------------------------------------------------ */
 904
 905static int musb_gadget_enable(struct usb_ep *ep,
 906                        const struct usb_endpoint_descriptor *desc)
 907{
 908        unsigned long           flags;
 909        struct musb_ep          *musb_ep;
 910        struct musb_hw_ep       *hw_ep;
 911        void __iomem            *regs;
 912        struct musb             *musb;
 913        void __iomem    *mbase;
 914        u8              epnum;
 915        u16             csr;
 916        unsigned        tmp;
 917        int             status = -EINVAL;
 918
 919        if (!ep || !desc)
 920                return -EINVAL;
 921
 922        musb_ep = to_musb_ep(ep);
 923        hw_ep = musb_ep->hw_ep;
 924        regs = hw_ep->regs;
 925        musb = musb_ep->musb;
 926        mbase = musb->mregs;
 927        epnum = musb_ep->current_epnum;
 928
 929        spin_lock_irqsave(&musb->lock, flags);
 930
 931        if (musb_ep->desc) {
 932                status = -EBUSY;
 933                goto fail;
 934        }
 935        musb_ep->type = usb_endpoint_type(desc);
 936
 937        /* check direction and (later) maxpacket size against endpoint */
 938        if (usb_endpoint_num(desc) != epnum)
 939                goto fail;
 940
 941        /* REVISIT this rules out high bandwidth periodic transfers */
 942        tmp = usb_endpoint_maxp_mult(desc) - 1;
 943        if (tmp) {
 944                int ok;
 945
 946                if (usb_endpoint_dir_in(desc))
 947                        ok = musb->hb_iso_tx;
 948                else
 949                        ok = musb->hb_iso_rx;
 950
 951                if (!ok) {
 952                        musb_dbg(musb, "no support for high bandwidth ISO");
 953                        goto fail;
 954                }
 955                musb_ep->hb_mult = tmp;
 956        } else {
 957                musb_ep->hb_mult = 0;
 958        }
 959
 960        musb_ep->packet_sz = usb_endpoint_maxp(desc);
 961        tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);
 962
 963        /* enable the interrupts for the endpoint, set the endpoint
 964         * packet size (or fail), set the mode, clear the fifo
 965         */
 966        musb_ep_select(mbase, epnum);
 967        if (usb_endpoint_dir_in(desc)) {
 968
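                     /*
                      * A shared-FIFO endpoint takes its direction from the
                      * descriptor; a dedicated endpoint has a fixed direction
                      * that the descriptor must match.
                      */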
 969                if (hw_ep->is_shared_fifo)
 970                        musb_ep->is_in = 1;
 971                if (!musb_ep->is_in)
 972                        goto fail;
 973
 974                if (tmp > hw_ep->max_packet_sz_tx) {
 975                        musb_dbg(musb, "packet size beyond hardware FIFO size");
 976                        goto fail;
 977                }
 978
 979                musb->intrtxe |= (1 << epnum);
 980                musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
 981
 982                /* REVISIT if can_bulk_split(), use by updating "tmp";
 983                 * likewise high bandwidth periodic tx
 984                 */
 985                /* Set TXMAXP with the FIFO size of the endpoint
 986                 * to disable double buffering mode.
 987                 */
 988                if (can_bulk_split(musb, musb_ep->type))
 989                        musb_ep->hb_mult = (hw_ep->max_packet_sz_tx /
 990                                                musb_ep->packet_sz) - 1;
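                     /*
                      * The low bits of TXMAXP carry the maximum packet size;
                      * the extra-packets-per-transaction count (hb_mult) goes
                      * in the high bits, hence the << 11 below.
                      */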
 991                musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
 992                                | (musb_ep->hb_mult << 11));
 993
 994                csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
 995                if (musb_readw(regs, MUSB_TXCSR)
 996                                & MUSB_TXCSR_FIFONOTEMPTY)
 997                        csr |= MUSB_TXCSR_FLUSHFIFO;
 998                if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
 999                        csr |= MUSB_TXCSR_P_ISO;
1000
1001                /* set twice in case of double buffering */
1002                musb_writew(regs, MUSB_TXCSR, csr);
1003                /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1004                musb_writew(regs, MUSB_TXCSR, csr);
1005
1006        } else {
1007
1008                if (hw_ep->is_shared_fifo)
1009                        musb_ep->is_in = 0;
1010                if (musb_ep->is_in)
1011                        goto fail;
1012
1013                if (tmp > hw_ep->max_packet_sz_rx) {
1014                        musb_dbg(musb, "packet size beyond hardware FIFO size");
1015                        goto fail;
1016                }
1017
1018                musb->intrrxe |= (1 << epnum);
1019                musb_writew(mbase, MUSB_INTRRXE, musb->intrrxe);
1020
 1021                /* REVISIT if can_bulk_combine(), use by updating "tmp";
 1022                 * likewise high bandwidth periodic rx
1023                 */
1024                /* Set RXMAXP with the FIFO size of the endpoint
1025                 * to disable double buffering mode.
1026                 */
1027                musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
1028                                | (musb_ep->hb_mult << 11));
1029
1030                /* force shared fifo to OUT-only mode */
1031                if (hw_ep->is_shared_fifo) {
1032                        csr = musb_readw(regs, MUSB_TXCSR);
1033                        csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
1034                        musb_writew(regs, MUSB_TXCSR, csr);
1035                }
1036
1037                csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
1038                if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
1039                        csr |= MUSB_RXCSR_P_ISO;
1040                else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
1041                        csr |= MUSB_RXCSR_DISNYET;
1042
1043                /* set twice in case of double buffering */
1044                musb_writew(regs, MUSB_RXCSR, csr);
1045                musb_writew(regs, MUSB_RXCSR, csr);
1046        }
1047
1048        /* NOTE:  all the I/O code _should_ work fine without DMA, in case
1049         * for some reason you run out of channels here.
1050         */
1051        if (is_dma_capable() && musb->dma_controller) {
1052                struct dma_controller   *c = musb->dma_controller;
1053
1054                musb_ep->dma = c->channel_alloc(c, hw_ep,
1055                                (desc->bEndpointAddress & USB_DIR_IN));
1056        } else
1057                musb_ep->dma = NULL;
1058
1059        musb_ep->desc = desc;
1060        musb_ep->busy = 0;
1061        musb_ep->wedged = 0;
1062        status = 0;
1063
1064        pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
1065                        musb_driver_name, musb_ep->end_point.name,
1066                        musb_ep_xfertype_string(musb_ep->type),
1067                        musb_ep->is_in ? "IN" : "OUT",
1068                        musb_ep->dma ? "dma, " : "",
1069                        musb_ep->packet_sz);
1070
1071        schedule_delayed_work(&musb->irq_work, 0);
1072
1073fail:
1074        spin_unlock_irqrestore(&musb->lock, flags);
1075        return status;
1076}
1077
1078/*
 1079 * Disable an endpoint, flushing all queued requests.
1080 */
1081static int musb_gadget_disable(struct usb_ep *ep)
1082{
1083        unsigned long   flags;
1084        struct musb     *musb;
1085        u8              epnum;
1086        struct musb_ep  *musb_ep;
1087        void __iomem    *epio;
1088        int             status = 0;
1089
1090        musb_ep = to_musb_ep(ep);
1091        musb = musb_ep->musb;
1092        epnum = musb_ep->current_epnum;
1093        epio = musb->endpoints[epnum].regs;
1094
1095        spin_lock_irqsave(&musb->lock, flags);
1096        musb_ep_select(musb->mregs, epnum);
1097
1098        /* zero the endpoint sizes */
1099        if (musb_ep->is_in) {
1100                musb->intrtxe &= ~(1 << epnum);
1101                musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
1102                musb_writew(epio, MUSB_TXMAXP, 0);
1103        } else {
1104                musb->intrrxe &= ~(1 << epnum);
1105                musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
1106                musb_writew(epio, MUSB_RXMAXP, 0);
1107        }
1108
1109        /* abort all pending DMA and requests */
1110        nuke(musb_ep, -ESHUTDOWN);
1111
1112        musb_ep->desc = NULL;
1113        musb_ep->end_point.desc = NULL;
1114
1115        schedule_delayed_work(&musb->irq_work, 0);
1116
1117        spin_unlock_irqrestore(&(musb->lock), flags);
1118
1119        musb_dbg(musb, "%s", musb_ep->end_point.name);
1120
1121        return status;
1122}
1123
1124/*
1125 * Allocate a request for an endpoint.
1126 * Reused by ep0 code.
1127 */
1128struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1129{
1130        struct musb_ep          *musb_ep = to_musb_ep(ep);
1131        struct musb_request     *request = NULL;
1132
1133        request = kzalloc(sizeof *request, gfp_flags);
1134        if (!request)
1135                return NULL;
1136
1137        request->request.dma = DMA_ADDR_INVALID;
1138        request->epnum = musb_ep->current_epnum;
1139        request->ep = musb_ep;
1140
1141        trace_musb_req_alloc(request);
1142        return &request->request;
1143}
1144
1145/*
1146 * Free a request
1147 * Reused by ep0 code.
1148 */
1149void musb_free_request(struct usb_ep *ep, struct usb_request *req)
1150{
1151        struct musb_request *request = to_musb_request(req);
1152
1153        trace_musb_req_free(request);
1154        kfree(request);
1155}
1156
1157static LIST_HEAD(buffers);
1158
1159struct free_record {
1160        struct list_head        list;
1161        struct device           *dev;
1162        unsigned                bytes;
1163        dma_addr_t              dma;
1164};
1165
1166/*
1167 * Context: controller locked, IRQs blocked.
1168 */
1169void musb_ep_restart(struct musb *musb, struct musb_request *req)
1170{
1171        trace_musb_req_start(req);
1172        musb_ep_select(musb->mregs, req->epnum);
1173        if (req->tx)
1174                txstate(musb, req);
1175        else
1176                rxstate(musb, req);
1177}
1178
1179static int musb_ep_restart_resume_work(struct musb *musb, void *data)
1180{
1181        struct musb_request *req = data;
1182
1183        musb_ep_restart(musb, req);
1184
1185        return 0;
1186}
1187
1188static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1189                        gfp_t gfp_flags)
1190{
1191        struct musb_ep          *musb_ep;
1192        struct musb_request     *request;
1193        struct musb             *musb;
1194        int                     status;
1195        unsigned long           lockflags;
1196
1197        if (!ep || !req)
1198                return -EINVAL;
1199        if (!req->buf)
1200                return -ENODATA;
1201
1202        musb_ep = to_musb_ep(ep);
1203        musb = musb_ep->musb;
1204
1205        request = to_musb_request(req);
1206        request->musb = musb;
1207
1208        if (request->ep != musb_ep)
1209                return -EINVAL;
1210
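             /*
              * pm_runtime_get() may return -EINPROGRESS while a resume is
              * already under way; that is not a failure, so only genuine
              * errors bail out here.
              */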
1211        status = pm_runtime_get(musb->controller);
1212        if ((status != -EINPROGRESS) && status < 0) {
1213                dev_err(musb->controller,
1214                        "pm runtime get failed in %s\n",
1215                        __func__);
1216                pm_runtime_put_noidle(musb->controller);
1217
1218                return status;
1219        }
1220        status = 0;
1221
1222        trace_musb_req_enq(request);
1223
1224        /* request is mine now... */
1225        request->request.actual = 0;
1226        request->request.status = -EINPROGRESS;
1227        request->epnum = musb_ep->current_epnum;
1228        request->tx = musb_ep->is_in;
1229
1230        map_dma_buffer(request, musb, musb_ep);
1231
1232        spin_lock_irqsave(&musb->lock, lockflags);
1233
1234        /* don't queue if the ep is down */
1235        if (!musb_ep->desc) {
1236                musb_dbg(musb, "req %p queued to %s while ep %s",
1237                                req, ep->name, "disabled");
1238                status = -ESHUTDOWN;
1239                unmap_dma_buffer(request, musb);
1240                goto unlock;
1241        }
1242
1243        /* add request to the list */
1244        list_add_tail(&request->list, &musb_ep->req_list);
1245
 1246        /* if this is the head of the queue, start i/o ... */
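             /*
              * The restart goes through musb_queue_resume_work() rather than a
              * direct call, presumably so it only runs once the (possibly
              * runtime-suspended) controller is active again.
              */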
1247        if (!musb_ep->busy && &request->list == musb_ep->req_list.next) {
1248                status = musb_queue_resume_work(musb,
1249                                                musb_ep_restart_resume_work,
1250                                                request);
1251                if (status < 0)
1252                        dev_err(musb->controller, "%s resume work: %i\n",
1253                                __func__, status);
1254        }
1255
1256unlock:
1257        spin_unlock_irqrestore(&musb->lock, lockflags);
1258        pm_runtime_mark_last_busy(musb->controller);
1259        pm_runtime_put_autosuspend(musb->controller);
1260
1261        return status;
1262}
1263
1264static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
1265{
1266        struct musb_ep          *musb_ep = to_musb_ep(ep);
1267        struct musb_request     *req = to_musb_request(request);
1268        struct musb_request     *r;
1269        unsigned long           flags;
1270        int                     status = 0;
1271        struct musb             *musb = musb_ep->musb;
1272
1273        if (!ep || !request || req->ep != musb_ep)
1274                return -EINVAL;
1275
1276        trace_musb_req_deq(req);
1277
1278        spin_lock_irqsave(&musb->lock, flags);
1279
1280        list_for_each_entry(r, &musb_ep->req_list, list) {
1281                if (r == req)
1282                        break;
1283        }
1284        if (r != req) {
1285                dev_err(musb->controller, "request %p not queued to %s\n",
1286                                request, ep->name);
1287                status = -EINVAL;
1288                goto done;
1289        }
1290
1291        /* if the hardware doesn't have the request, easy ... */
1292        if (musb_ep->req_list.next != &req->list || musb_ep->busy)
1293                musb_g_giveback(musb_ep, request, -ECONNRESET);
1294
1295        /* ... else abort the dma transfer ... */
1296        else if (is_dma_capable() && musb_ep->dma) {
1297                struct dma_controller   *c = musb->dma_controller;
1298
1299                musb_ep_select(musb->mregs, musb_ep->current_epnum);
1300                if (c->channel_abort)
1301                        status = c->channel_abort(musb_ep->dma);
1302                else
1303                        status = -EBUSY;
1304                if (status == 0)
1305                        musb_g_giveback(musb_ep, request, -ECONNRESET);
1306        } else {
1307                /* NOTE: by sticking to easily tested hardware/driver states,
1308                 * we leave counting of in-flight packets imprecise.
1309                 */
1310                musb_g_giveback(musb_ep, request, -ECONNRESET);
1311        }
1312
1313done:
1314        spin_unlock_irqrestore(&musb->lock, flags);
1315        return status;
1316}
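/*
 * Note: gadget drivers cancel a queued request with usb_ep_dequeue(ep, req);
 * on success the request's completion runs with req->status == -ECONNRESET,
 * matching the musb_g_giveback() calls above.
 */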
1317
1318/*
1319 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx
1320 * any data, but requests may still be queued to it.
1321 *
1322 * exported to ep0 code
1323 */
1324static int musb_gadget_set_halt(struct usb_ep *ep, int value)
1325{
1326        struct musb_ep          *musb_ep = to_musb_ep(ep);
1327        u8                      epnum = musb_ep->current_epnum;
1328        struct musb             *musb = musb_ep->musb;
1329        void __iomem            *epio = musb->endpoints[epnum].regs;
1330        void __iomem            *mbase;
1331        unsigned long           flags;
1332        u16                     csr;
1333        struct musb_request     *request;
1334        int                     status = 0;
1335
1336        if (!ep)
1337                return -EINVAL;
1338        mbase = musb->mregs;
1339
1340        spin_lock_irqsave(&musb->lock, flags);
1341
1342        if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) {
1343                status = -EINVAL;
1344                goto done;
1345        }
1346
1347        musb_ep_select(mbase, epnum);
1348
1349        request = next_request(musb_ep);
1350        if (value) {
1351                if (request) {
1352                        musb_dbg(musb, "request in progress, cannot halt %s",
1353                            ep->name);
1354                        status = -EAGAIN;
1355                        goto done;
1356                }
1357                /* Cannot portably stall with non-empty FIFO */
1358                if (musb_ep->is_in) {
1359                        csr = musb_readw(epio, MUSB_TXCSR);
1360                        if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1361                                musb_dbg(musb, "FIFO busy, cannot halt %s",
1362                                                ep->name);
1363                                status = -EAGAIN;
1364                                goto done;
1365                        }
1366                }
1367        } else
1368                musb_ep->wedged = 0;
1369
1370        /* set/clear the stall and toggle bits */
1371        musb_dbg(musb, "%s: %s stall", ep->name, value ? "set" : "clear");
1372        if (musb_ep->is_in) {
1373                csr = musb_readw(epio, MUSB_TXCSR);
1374                csr |= MUSB_TXCSR_P_WZC_BITS
1375                        | MUSB_TXCSR_CLRDATATOG;
1376                if (value)
1377                        csr |= MUSB_TXCSR_P_SENDSTALL;
1378                else
1379                        csr &= ~(MUSB_TXCSR_P_SENDSTALL
1380                                | MUSB_TXCSR_P_SENTSTALL);
1381                csr &= ~MUSB_TXCSR_TXPKTRDY;
1382                musb_writew(epio, MUSB_TXCSR, csr);
1383        } else {
1384                csr = musb_readw(epio, MUSB_RXCSR);
1385                csr |= MUSB_RXCSR_P_WZC_BITS
1386                        | MUSB_RXCSR_FLUSHFIFO
1387                        | MUSB_RXCSR_CLRDATATOG;
1388                if (value)
1389                        csr |= MUSB_RXCSR_P_SENDSTALL;
1390                else
1391                        csr &= ~(MUSB_RXCSR_P_SENDSTALL
1392                                | MUSB_RXCSR_P_SENTSTALL);
1393                musb_writew(epio, MUSB_RXCSR, csr);
1394        }
1395
1396        /* maybe start the first request in the queue */
1397        if (!musb_ep->busy && !value && request) {
1398                musb_dbg(musb, "restarting the request");
1399                musb_ep_restart(musb, request);
1400        }
1401
1402done:
1403        spin_unlock_irqrestore(&musb->lock, flags);
1404        return status;
1405}
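/*
 * Illustrative sketch (not part of this driver): a function driver signals
 * a protocol stall through the usb_ep_set_halt() wrapper, which lands in
 * musb_gadget_set_halt() above; the endpoint pointer is an assumption.
 */
#if 0
static int example_protocol_stall(struct usb_ep *bulk_in)
{
        /* fails with -EAGAIN while a request is queued or the FIFO is busy */
        return usb_ep_set_halt(bulk_in);
}
#endif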
1406
1407/*
1408 * Sets the halt feature with the clear requests ignored
1409 */
1410static int musb_gadget_set_wedge(struct usb_ep *ep)
1411{
1412        struct musb_ep          *musb_ep = to_musb_ep(ep);
1413
1414        if (!ep)
1415                return -EINVAL;
1416
1417        musb_ep->wedged = 1;
1418
1419        return usb_ep_set_halt(ep);
1420}
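/*
 * A wedged endpoint stays halted even when the host sends
 * CLEAR_FEATURE(ENDPOINT_HALT); only usb_ep_clear_halt() from the function
 * driver (which clears musb_ep->wedged via musb_gadget_set_halt() above)
 * removes the stall.  The mass-storage function uses this, for example.
 */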
1421
1422static int musb_gadget_fifo_status(struct usb_ep *ep)
1423{
1424        struct musb_ep          *musb_ep = to_musb_ep(ep);
1425        void __iomem            *epio = musb_ep->hw_ep->regs;
1426        int                     retval = -EINVAL;
1427
1428        if (musb_ep->desc && !musb_ep->is_in) {
1429                struct musb             *musb = musb_ep->musb;
1430                int                     epnum = musb_ep->current_epnum;
1431                void __iomem            *mbase = musb->mregs;
1432                unsigned long           flags;
1433
1434                spin_lock_irqsave(&musb->lock, flags);
1435
1436                musb_ep_select(mbase, epnum);
1437                /* FIXME return zero unless RXPKTRDY is set */
1438                retval = musb_readw(epio, MUSB_RXCOUNT);
1439
1440                spin_unlock_irqrestore(&musb->lock, flags);
1441        }
1442        return retval;
1443}
1444
1445static void musb_gadget_fifo_flush(struct usb_ep *ep)
1446{
1447        struct musb_ep  *musb_ep = to_musb_ep(ep);
1448        struct musb     *musb = musb_ep->musb;
1449        u8              epnum = musb_ep->current_epnum;
1450        void __iomem    *epio = musb->endpoints[epnum].regs;
1451        void __iomem    *mbase;
1452        unsigned long   flags;
1453        u16             csr;
1454
1455        mbase = musb->mregs;
1456
1457        spin_lock_irqsave(&musb->lock, flags);
1458        musb_ep_select(mbase, (u8) epnum);
1459
1460        /* mask this endpoint's TX interrupt while flushing */
1461        musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe & ~(1 << epnum));
1462
1463        if (musb_ep->is_in) {
1464                csr = musb_readw(epio, MUSB_TXCSR);
1465                if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1466                        csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
1467                        /*
1468                         * Setting both TXPKTRDY and FLUSHFIFO would make the
1469                         * controller interrupt the FIFO load in progress but
1470                         * not flush already-loaded packets, so clear TXPKTRDY.
1471                         */
1472                        csr &= ~MUSB_TXCSR_TXPKTRDY;
1473                        musb_writew(epio, MUSB_TXCSR, csr);
1474                        /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1475                        musb_writew(epio, MUSB_TXCSR, csr);
1476                }
1477        } else {
1478                csr = musb_readw(epio, MUSB_RXCSR);
1479                csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
1480                musb_writew(epio, MUSB_RXCSR, csr);
1481                musb_writew(epio, MUSB_RXCSR, csr);
1482        }
1483
1484        /* re-enable interrupt */
1485        /* restore the TX interrupt mask */
1486        spin_unlock_irqrestore(&musb->lock, flags);
1487}
1488
1489static const struct usb_ep_ops musb_ep_ops = {
1490        .enable         = musb_gadget_enable,
1491        .disable        = musb_gadget_disable,
1492        .alloc_request  = musb_alloc_request,
1493        .free_request   = musb_free_request,
1494        .queue          = musb_gadget_queue,
1495        .dequeue        = musb_gadget_dequeue,
1496        .set_halt       = musb_gadget_set_halt,
1497        .set_wedge      = musb_gadget_set_wedge,
1498        .fifo_status    = musb_gadget_fifo_status,
1499        .fifo_flush     = musb_gadget_fifo_flush
1500};
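/*
 * These ops back the usb_ep_*() wrappers (usb_ep_enable(), usb_ep_queue(),
 * usb_ep_set_halt(), ...) that gadget function drivers call; endpoint zero
 * uses its own ops table, musb_g_ep0_ops, installed by init_peripheral_ep()
 * below.
 */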
1501
1502/* ----------------------------------------------------------------------- */
1503
1504static int musb_gadget_get_frame(struct usb_gadget *gadget)
1505{
1506        struct musb     *musb = gadget_to_musb(gadget);
1507
1508        return (int)musb_readw(musb->mregs, MUSB_FRAME);
1509}
1510
1511static int musb_gadget_wakeup(struct usb_gadget *gadget)
1512{
1513        struct musb     *musb = gadget_to_musb(gadget);
1514        void __iomem    *mregs = musb->mregs;
1515        unsigned long   flags;
1516        int             status = -EINVAL;
1517        u8              power, devctl;
1518        int             retries;
1519
1520        spin_lock_irqsave(&musb->lock, flags);
1521
1522        switch (musb->xceiv->otg->state) {
1523        case OTG_STATE_B_PERIPHERAL:
1524                /* NOTE:  OTG state machine doesn't include B_SUSPENDED;
1525                 * that's part of the standard usb 1.1 state machine, and
1526                 * doesn't affect OTG transitions.
1527                 */
1528                if (musb->may_wakeup && musb->is_suspended)
1529                        break;
1530                goto done;
1531        case OTG_STATE_B_IDLE:
1532                /* Start SRP ... OTG not required. */
1533                devctl = musb_readb(mregs, MUSB_DEVCTL);
1534                musb_dbg(musb, "Sending SRP: devctl: %02x", devctl);
1535                devctl |= MUSB_DEVCTL_SESSION;
1536                musb_writeb(mregs, MUSB_DEVCTL, devctl);
1537                devctl = musb_readb(mregs, MUSB_DEVCTL);
1538                retries = 100;
1539                while (!(devctl & MUSB_DEVCTL_SESSION)) {
1540                        devctl = musb_readb(mregs, MUSB_DEVCTL);
1541                        if (retries-- < 1)
1542                                break;
1543                }
1544                retries = 10000;
1545                while (devctl & MUSB_DEVCTL_SESSION) {
1546                        devctl = musb_readb(mregs, MUSB_DEVCTL);
1547                        if (retries-- < 1)
1548                                break;
1549                }
1550
1551                spin_unlock_irqrestore(&musb->lock, flags);
1552                otg_start_srp(musb->xceiv->otg);
1553                spin_lock_irqsave(&musb->lock, flags);
1554
1555                /* Block idling for at least 1s */
1556                musb_platform_try_idle(musb,
1557                        jiffies + msecs_to_jiffies(1000));
1558
1559                status = 0;
1560                goto done;
1561        default:
1562                musb_dbg(musb, "Unhandled wake: %s",
1563                        usb_otg_state_string(musb->xceiv->otg->state));
1564                goto done;
1565        }
1566
1567        status = 0;
1568
1569        power = musb_readb(mregs, MUSB_POWER);
1570        power |= MUSB_POWER_RESUME;
1571        musb_writeb(mregs, MUSB_POWER, power);
1572        musb_dbg(musb, "issue wakeup");
1573
1574        /* FIXME do this next chunk in a timer callback, without the mdelay */
1575        mdelay(2);
1576
1577        power = musb_readb(mregs, MUSB_POWER);
1578        power &= ~MUSB_POWER_RESUME;
1579        musb_writeb(mregs, MUSB_POWER, power);
1580done:
1581        spin_unlock_irqrestore(&musb->lock, flags);
1582        return status;
1583}
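/*
 * Illustrative sketch (not part of this driver): a function driver asks for
 * resume signalling through the usb_gadget_wakeup() wrapper, which calls
 * musb_gadget_wakeup() above.  In B_PERIPHERAL state this only succeeds when
 * the host has armed remote wakeup (musb->may_wakeup) and the bus is
 * suspended.
 */
#if 0
static int example_remote_wakeup(struct usb_gadget *g)
{
        return usb_gadget_wakeup(g);
}
#endif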
1584
1585static int
1586musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
1587{
1588        gadget->is_selfpowered = !!is_selfpowered;
1589        return 0;
1590}
1591
1592static void musb_pullup(struct musb *musb, int is_on)
1593{
1594        u8 power;
1595
1596        power = musb_readb(musb->mregs, MUSB_POWER);
1597        if (is_on)
1598                power |= MUSB_POWER_SOFTCONN;
1599        else
1600                power &= ~MUSB_POWER_SOFTCONN;
1601
1602        /* FIXME if on, HdrcStart; if off, HdrcStop */
1603
1604        musb_dbg(musb, "gadget D+ pullup %s",
1605                is_on ? "on" : "off");
1606        musb_writeb(musb->mregs, MUSB_POWER, power);
1607}
1608
1609#if 0
1610static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
1611{
1612        struct musb     *musb = gadget_to_musb(gadget);

        musb_dbg(musb, "<= %s =>", __func__);
1613
1614        /*
1615         * FIXME iff driver's softconnect flag is set (as it is during probe,
1616         * though that can clear it), just musb_pullup().
1617         */
1618
1619        return -EINVAL;
1620}
1621#endif
1622
1623static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1624{
1625        struct musb     *musb = gadget_to_musb(gadget);
1626
1627        if (!musb->xceiv->set_power)
1628                return -EOPNOTSUPP;
1629        return usb_phy_set_power(musb->xceiv, mA);
1630}
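/*
 * The composite core calls usb_gadget_vbus_draw() with a configuration's
 * bMaxPower budget when configurations are selected or dropped; here the
 * value is simply forwarded to the transceiver via usb_phy_set_power().
 */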
1631
1632static void musb_gadget_work(struct work_struct *work)
1633{
1634        struct musb *musb;
1635        unsigned long flags;
1636
1637        musb = container_of(work, struct musb, gadget_work.work);
1638        pm_runtime_get_sync(musb->controller);
1639        spin_lock_irqsave(&musb->lock, flags);
1640        musb_pullup(musb, musb->softconnect);
1641        spin_unlock_irqrestore(&musb->lock, flags);
1642        pm_runtime_mark_last_busy(musb->controller);
1643        pm_runtime_put_autosuspend(musb->controller);
1644}
1645
1646static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1647{
1648        struct musb     *musb = gadget_to_musb(gadget);
1649        unsigned long   flags;
1650
1651        is_on = !!is_on;
1652
1653        /* NOTE: this assumes we are sensing vbus; we'd rather
1654         * not pullup unless the B-session is active.
1655         */
1656        spin_lock_irqsave(&musb->lock, flags);
1657        if (is_on != musb->softconnect) {
1658                musb->softconnect = is_on;
1659                schedule_delayed_work(&musb->gadget_work, 0);
1660        }
1661        spin_unlock_irqrestore(&musb->lock, flags);
1662
1663        return 0;
1664}
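/*
 * The UDC core reaches this op through usb_gadget_connect() and
 * usb_gadget_disconnect().  The actual MUSB_POWER_SOFTCONN update is
 * deferred to musb_gadget_work() so it runs with the controller
 * runtime-resumed.
 */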
1665
1666static int musb_gadget_start(struct usb_gadget *g,
1667                struct usb_gadget_driver *driver);
1668static int musb_gadget_stop(struct usb_gadget *g);
1669
1670static const struct usb_gadget_ops musb_gadget_operations = {
1671        .get_frame              = musb_gadget_get_frame,
1672        .wakeup                 = musb_gadget_wakeup,
1673        .set_selfpowered        = musb_gadget_set_self_powered,
1674        /* .vbus_session                = musb_gadget_vbus_session, */
1675        .vbus_draw              = musb_gadget_vbus_draw,
1676        .pullup                 = musb_gadget_pullup,
1677        .udc_start              = musb_gadget_start,
1678        .udc_stop               = musb_gadget_stop,
1679};
1680
1681/* ----------------------------------------------------------------------- */
1682
1683/* Registration */
1684
1685/* Only this registration code "knows" the rule (from USB standards)
1686 * about there being only one external upstream port.  It assumes
1687 * all peripheral ports are external...
1688 */
1689
1690static void
1691init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
1692{
1693        struct musb_hw_ep       *hw_ep = musb->endpoints + epnum;
1694
1695        memset(ep, 0, sizeof *ep);
1696
1697        ep->current_epnum = epnum;
1698        ep->musb = musb;
1699        ep->hw_ep = hw_ep;
1700        ep->is_in = is_in;
1701
1702        INIT_LIST_HEAD(&ep->req_list);
1703
1704        sprintf(ep->name, "ep%d%s", epnum,
1705                        (!epnum || hw_ep->is_shared_fifo) ? "" : (
1706                                is_in ? "in" : "out"));
1707        ep->end_point.name = ep->name;
1708        INIT_LIST_HEAD(&ep->end_point.ep_list);
1709        if (!epnum) {
1710                usb_ep_set_maxpacket_limit(&ep->end_point, 64);
1711                ep->end_point.caps.type_control = true;
1712                ep->end_point.ops = &musb_g_ep0_ops;
1713                musb->g.ep0 = &ep->end_point;
1714        } else {
1715                if (is_in)
1716                        usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_tx);
1717                else
1718                        usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_rx);
1719                ep->end_point.caps.type_iso = true;
1720                ep->end_point.caps.type_bulk = true;
1721                ep->end_point.caps.type_int = true;
1722                ep->end_point.ops = &musb_ep_ops;
1723                list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
1724        }
1725
1726        if (!epnum || hw_ep->is_shared_fifo) {
1727                ep->end_point.caps.dir_in = true;
1728                ep->end_point.caps.dir_out = true;
1729        } else if (is_in)
1730                ep->end_point.caps.dir_in = true;
1731        else
1732                ep->end_point.caps.dir_out = true;
1733}
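/*
 * Resulting names, derived from the sprintf() above: ep0 and shared-FIFO
 * endpoints get direction-less names such as "ep1", while split TX/RX FIFOs
 * yield "ep1in" and "ep1out" for the same hardware endpoint number.
 */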
1734
1735/*
1736 * Initialize the endpoints exposed to peripheral drivers, with backlinks
1737 * to the rest of the driver state.
1738 */
1739static inline void musb_g_init_endpoints(struct musb *musb)
1740{
1741        u8                      epnum;
1742        struct musb_hw_ep       *hw_ep;
1743        unsigned                count = 0;
1744
1745        /* initialize endpoint list just once */
1746        INIT_LIST_HEAD(&(musb->g.ep_list));
1747
1748        for (epnum = 0, hw_ep = musb->endpoints;
1749                        epnum < musb->nr_endpoints;
1750                        epnum++, hw_ep++) {
1751                if (hw_ep->is_shared_fifo /* || !epnum */) {
1752                        init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
1753                        count++;
1754                } else {
1755                        if (hw_ep->max_packet_sz_tx) {
1756                                init_peripheral_ep(musb, &hw_ep->ep_in,
1757                                                        epnum, 1);
1758                                count++;
1759                        }
1760                        if (hw_ep->max_packet_sz_rx) {
1761                                init_peripheral_ep(musb, &hw_ep->ep_out,
1762                                                        epnum, 0);
1763                                count++;
1764                        }
1765                }
1766        }
1767}
1768
1769/* called once during driver setup to initialize and link into
1770 * the driver model; memory is zeroed.
1771 */
1772int musb_gadget_setup(struct musb *musb)
1773{
1774        int status;
1775
1776        /* REVISIT minor race:  if (erroneously) setting up two
1777         * musb peripherals at the same time, only the bus lock
1778         * is probably held.
1779         */
1780
1781        musb->g.ops = &musb_gadget_operations;
1782        musb->g.max_speed = USB_SPEED_HIGH;
1783        musb->g.speed = USB_SPEED_UNKNOWN;
1784
1785        MUSB_DEV_MODE(musb);
1786        musb->xceiv->otg->state = OTG_STATE_B_IDLE;
1787
1788        /* this "gadget" abstracts/virtualizes the controller */
1789        musb->g.name = musb_driver_name;
1790        /* don't support otg protocols */
1791        musb->g.is_otg = 0;
1792        INIT_DELAYED_WORK(&musb->gadget_work, musb_gadget_work);
1793        musb_g_init_endpoints(musb);
1794
1795        musb->is_active = 0;
1796        musb_platform_try_idle(musb, 0);
1797
1798        status = usb_add_gadget_udc(musb->controller, &musb->g);
1799        if (status)
1800                goto err;
1801
1802        return 0;
1803err:
1804        musb->g.dev.parent = NULL;
1805        device_unregister(&musb->g.dev);
1806        return status;
1807}
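/*
 * usb_add_gadget_udc() registers this controller with the UDC class, so a
 * gadget driver bound later (for example via configfs or
 * usb_composite_probe()) ends up in musb_gadget_start() below.
 */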
1808
1809void musb_gadget_cleanup(struct musb *musb)
1810{
1811        if (musb->port_mode == MUSB_HOST)
1812                return;
1813
1814        cancel_delayed_work_sync(&musb->gadget_work);
1815        usb_del_gadget_udc(&musb->g);
1816}
1817
1818/*
1819 * Register the gadget driver. Used by gadget drivers when
1820 * registering themselves with the controller.
1821 *
1822 * -EINVAL something went wrong (not driver)
1823 * -EBUSY another gadget is already using the controller
1824 * -ENOMEM no memory to perform the operation
1825 *
1826 * @param driver the gadget driver
1827 * @return <0 if error, 0 if everything is fine
1828 */
1829static int musb_gadget_start(struct usb_gadget *g,
1830                struct usb_gadget_driver *driver)
1831{
1832        struct musb             *musb = gadget_to_musb(g);
1833        struct usb_otg          *otg = musb->xceiv->otg;
1834        unsigned long           flags;
1835        int                     retval = 0;
1836
1837        if (driver->max_speed < USB_SPEED_HIGH) {
1838                retval = -EINVAL;
1839                goto err;
1840        }
1841
1842        pm_runtime_get_sync(musb->controller);
1843
1844        musb->softconnect = 0;
1845        musb->gadget_driver = driver;
1846
1847        spin_lock_irqsave(&musb->lock, flags);
1848        musb->is_active = 1;
1849
1850        otg_set_peripheral(otg, &musb->g);
1851        musb->xceiv->otg->state = OTG_STATE_B_IDLE;
1852        spin_unlock_irqrestore(&musb->lock, flags);
1853
1854        musb_start(musb);
1855
1856        /* REVISIT:  funcall to other code, which also
1857         * handles power budgeting ... this way also
1858         * ensures HdrcStart is indirectly called.
1859         */
1860        if (musb->xceiv->last_event == USB_EVENT_ID)
1861                musb_platform_set_vbus(musb, 1);
1862
1863        pm_runtime_mark_last_busy(musb->controller);
1864        pm_runtime_put_autosuspend(musb->controller);
1865
1866        return 0;
1867
1868err:
1869        return retval;
1870}
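/*
 * Note: the UDC core invokes this op while binding a gadget driver to the
 * controller; the max_speed check above means only high-speed capable
 * gadget drivers can bind to MUSB.
 */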
1871
1872/*
1873 * Unregister the gadget driver. Used by gadget drivers when
1874 * unregistering themselves from the controller.
1875 *
1876 * @param g the gadget whose driver is being unregistered
1877 */
1878static int musb_gadget_stop(struct usb_gadget *g)
1879{
1880        struct musb     *musb = gadget_to_musb(g);
1881        unsigned long   flags;
1882
1883        pm_runtime_get_sync(musb->controller);
1884
1885        /*
1886         * REVISIT always use otg_set_peripheral() here too;
1887         * this needs to shut down the OTG engine.
1888         */
1889
1890        spin_lock_irqsave(&musb->lock, flags);
1891
1892        musb_hnp_stop(musb);
1893
1894        (void) musb_gadget_vbus_draw(&musb->g, 0);
1895
1896        musb->xceiv->otg->state = OTG_STATE_UNDEFINED;
1897        musb_stop(musb);
1898        otg_set_peripheral(musb->xceiv->otg, NULL);
1899
1900        musb->is_active = 0;
1901        musb->gadget_driver = NULL;
1902        musb_platform_try_idle(musb, 0);
1903        spin_unlock_irqrestore(&musb->lock, flags);
1904
1905        /*
1906         * FIXME we need to be able to register another
1907         * gadget driver here and have everything work;
1908         * that currently misbehaves.
1909         */
1910
1911        /* Force check of devctl register for PM runtime */
1912        schedule_delayed_work(&musb->irq_work, 0);
1913
1914        pm_runtime_mark_last_busy(musb->controller);
1915        pm_runtime_put_autosuspend(musb->controller);
1916
1917        return 0;
1918}
1919
1920/* ----------------------------------------------------------------------- */
1921
1922/* lifecycle operations called through plat_uds.c */
1923
1924void musb_g_resume(struct musb *musb)
1925{
1926        musb->is_suspended = 0;
1927        switch (musb->xceiv->otg->state) {
1928        case OTG_STATE_B_IDLE:
1929                break;
1930        case OTG_STATE_B_WAIT_ACON:
1931        case OTG_STATE_B_PERIPHERAL:
1932                musb->is_active = 1;
1933                if (musb->gadget_driver && musb->gadget_driver->resume) {
1934                        spin_unlock(&musb->lock);
1935                        musb->gadget_driver->resume(&musb->g);
1936                        spin_lock(&musb->lock);
1937                }
1938                break;
1939        default:
1940                WARNING("unhandled RESUME transition (%s)\n",
1941                                usb_otg_state_string(musb->xceiv->otg->state));
1942        }
1943}
1944
1945/* called when SOF packets stop for 3+ msec */
1946void musb_g_suspend(struct musb *musb)
1947{
1948        u8      devctl;
1949
1950        devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1951        musb_dbg(musb, "musb_g_suspend: devctl %02x", devctl);
1952
1953        switch (musb->xceiv->otg->state) {
1954        case OTG_STATE_B_IDLE:
1955                if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
1956                        musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
1957                break;
1958        case OTG_STATE_B_PERIPHERAL:
1959                musb->is_suspended = 1;
1960                if (musb->gadget_driver && musb->gadget_driver->suspend) {
1961                        spin_unlock(&musb->lock);
1962                        musb->gadget_driver->suspend(&musb->g);
1963                        spin_lock(&musb->lock);
1964                }
1965                break;
1966        default:
1967                /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
1968                 * A_PERIPHERAL may need care too
1969                 */
1970                WARNING("unhandled SUSPEND transition (%s)",
1971                                usb_otg_state_string(musb->xceiv->otg->state));
1972        }
1973}
1974
1975/* Called during SRP */
1976void musb_g_wakeup(struct musb *musb)
1977{
1978        musb_gadget_wakeup(&musb->g);
1979}
1980
1981/* called when VBUS drops below session threshold, and in other cases */
1982void musb_g_disconnect(struct musb *musb)
1983{
1984        void __iomem    *mregs = musb->mregs;
1985        u8      devctl = musb_readb(mregs, MUSB_DEVCTL);
1986
1987        musb_dbg(musb, "musb_g_disconnect: devctl %02x", devctl);
1988
1989        /* clear HR */
1990        musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
1991
1992        /* don't draw vbus until new b-default session */
1993        (void) musb_gadget_vbus_draw(&musb->g, 0);
1994
1995        musb->g.speed = USB_SPEED_UNKNOWN;
1996        if (musb->gadget_driver && musb->gadget_driver->disconnect) {
1997                spin_unlock(&musb->lock);
1998                musb->gadget_driver->disconnect(&musb->g);
1999                spin_lock(&musb->lock);
2000        }
2001
2002        switch (musb->xceiv->otg->state) {
2003        default:
2004                musb_dbg(musb, "Unhandled disconnect %s, setting a_idle",
2005                        usb_otg_state_string(musb->xceiv->otg->state));
2006                musb->xceiv->otg->state = OTG_STATE_A_IDLE;
2007                MUSB_HST_MODE(musb);
2008                break;
2009        case OTG_STATE_A_PERIPHERAL:
2010                musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
2011                MUSB_HST_MODE(musb);
2012                break;
2013        case OTG_STATE_B_WAIT_ACON:
2014        case OTG_STATE_B_HOST:
2015        case OTG_STATE_B_PERIPHERAL:
2016        case OTG_STATE_B_IDLE:
2017                musb->xceiv->otg->state = OTG_STATE_B_IDLE;
2018                break;
2019        case OTG_STATE_B_SRP_INIT:
2020                break;
2021        }
2022
2023        musb->is_active = 0;
2024}
2025
2026void musb_g_reset(struct musb *musb)
2027__releases(musb->lock)
2028__acquires(musb->lock)
2029{
2030        void __iomem    *mbase = musb->mregs;
2031        u8              devctl = musb_readb(mbase, MUSB_DEVCTL);
2032        u8              power;
2033
2034        musb_dbg(musb, "<== %s driver '%s'",
2035                        (devctl & MUSB_DEVCTL_BDEVICE)
2036                                ? "B-Device" : "A-Device",
2037                        musb->gadget_driver
2038                                ? musb->gadget_driver->driver.name
2039                                : NULL
2040                        );
2041
2042        /* report reset, if we didn't already (flushing EP state) */
2043        if (musb->gadget_driver && musb->g.speed != USB_SPEED_UNKNOWN) {
2044                spin_unlock(&musb->lock);
2045                usb_gadget_udc_reset(&musb->g, musb->gadget_driver);
2046                spin_lock(&musb->lock);
2047        }
2048
2049        /* clear HR */
2050        else if (devctl & MUSB_DEVCTL_HR)
2051                musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
2052
2053
2054        /* what speed did we negotiate? */
2055        power = musb_readb(mbase, MUSB_POWER);
2056        musb->g.speed = (power & MUSB_POWER_HSMODE)
2057                        ? USB_SPEED_HIGH : USB_SPEED_FULL;
2058
2059        /* start in USB_STATE_DEFAULT */
2060        musb->is_active = 1;
2061        musb->is_suspended = 0;
2062        MUSB_DEV_MODE(musb);
2063        musb->address = 0;
2064        musb->ep0_state = MUSB_EP0_STAGE_SETUP;
2065
2066        musb->may_wakeup = 0;
2067        musb->g.b_hnp_enable = 0;
2068        musb->g.a_alt_hnp_support = 0;
2069        musb->g.a_hnp_support = 0;
2070        musb->g.quirk_zlp_not_supp = 1;
2071
2072        /* Normal reset, as B-Device;
2073         * or else after HNP, as A-Device
2074         */
2075        if (!musb->g.is_otg) {
2076                /* USB device controllers that are not OTG compatible
2077                 * may not have DEVCTL register in silicon.
2078                 * In that case, do not rely on devctl for setting
2079                 * peripheral mode.
2080                 */
2081                musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
2082                musb->g.is_a_peripheral = 0;
2083        } else if (devctl & MUSB_DEVCTL_BDEVICE) {
2084                musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
2085                musb->g.is_a_peripheral = 0;
2086        } else {
2087                musb->xceiv->otg->state = OTG_STATE_A_PERIPHERAL;
2088                musb->g.is_a_peripheral = 1;
2089        }
2090
2091        /* start with default limits on VBUS power draw */
2092        (void) musb_gadget_vbus_draw(&musb->g, 8);
2093}
2094