linux/drivers/usb/musb/musb_gadget.c
/*
 * MUSB OTG driver peripheral support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "musb_core.h"


/* ----------------------------------------------------------------------- */

#define is_buffer_mapped(req) (is_dma_capable() && \
                                        (req->map_state != UN_MAPPED))

/* Map the request buffer for DMA */

static inline void map_dma_buffer(struct musb_request *request,
                        struct musb *musb, struct musb_ep *musb_ep)
{
        int compatible = true;
        struct dma_controller *dma = musb->dma_controller;

        request->map_state = UN_MAPPED;

        if (!is_dma_capable() || !musb_ep->dma)
                return;

        /* Check if DMA engine can handle this request.
         * DMA code must reject the USB request explicitly.
         * Default behaviour is to map the request.
         */
        if (dma->is_compatible)
                compatible = dma->is_compatible(musb_ep->dma,
                                musb_ep->packet_sz, request->request.buf,
                                request->request.length);
        if (!compatible)
                return;

        if (request->request.dma == DMA_ADDR_INVALID) {
                dma_addr_t dma_addr;
                int ret;

                dma_addr = dma_map_single(
                                musb->controller,
                                request->request.buf,
                                request->request.length,
                                request->tx
                                        ? DMA_TO_DEVICE
                                        : DMA_FROM_DEVICE);
                ret = dma_mapping_error(musb->controller, dma_addr);
                if (ret)
                        return;

                request->request.dma = dma_addr;
                request->map_state = MUSB_MAPPED;
        } else {
                dma_sync_single_for_device(musb->controller,
                        request->request.dma,
                        request->request.length,
                        request->tx
                                ? DMA_TO_DEVICE
                                : DMA_FROM_DEVICE);
                request->map_state = PRE_MAPPED;
        }
}
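
/*
 * Editorial note: map_state moves through a small lifecycle, sketched
 * here from the two functions that touch it:
 *
 *	UN_MAPPED --map_dma_buffer()--> MUSB_MAPPED  (this driver called
 *	                                              dma_map_single())
 *	UN_MAPPED --map_dma_buffer()--> PRE_MAPPED   (gadget supplied its
 *	                                              own DMA address)
 *	MUSB_MAPPED or PRE_MAPPED --unmap_dma_buffer()--> UN_MAPPED
 *
 * Only MUSB_MAPPED buffers are actually unmapped; PRE_MAPPED buffers are
 * merely synced back to the CPU, since their mapping belongs to the caller.
 */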

/* Unmap the buffer from DMA and map it back to the CPU */
static inline void unmap_dma_buffer(struct musb_request *request,
                                struct musb *musb)
{
        struct musb_ep *musb_ep = request->ep;

        if (!is_buffer_mapped(request) || !musb_ep->dma)
                return;

        if (request->request.dma == DMA_ADDR_INVALID) {
                dev_vdbg(musb->controller,
                                "not unmapping a never mapped buffer\n");
                return;
        }
        if (request->map_state == MUSB_MAPPED) {
                dma_unmap_single(musb->controller,
                        request->request.dma,
                        request->request.length,
                        request->tx
                                ? DMA_TO_DEVICE
                                : DMA_FROM_DEVICE);
                request->request.dma = DMA_ADDR_INVALID;
        } else { /* PRE_MAPPED */
                dma_sync_single_for_cpu(musb->controller,
                        request->request.dma,
                        request->request.length,
                        request->tx
                                ? DMA_TO_DEVICE
                                : DMA_FROM_DEVICE);
        }
        request->map_state = UN_MAPPED;
}

/*
 * Immediately complete a request.
 *
 * @param ep the endpoint the request was queued to
 * @param request the request to complete
 * @param status the status to complete the request with
 * Context: controller locked, IRQs blocked.
 */
void musb_g_giveback(
        struct musb_ep          *ep,
        struct usb_request      *request,
        int                     status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
        struct musb_request     *req;
        struct musb             *musb;
        int                     busy = ep->busy;

        req = to_musb_request(request);

        list_del(&req->list);
        if (req->request.status == -EINPROGRESS)
                req->request.status = status;
        musb = req->musb;

        ep->busy = 1;
        spin_unlock(&musb->lock);

        if (!dma_mapping_error(&musb->g.dev, request->dma))
                unmap_dma_buffer(req, musb);

        if (request->status == 0)
                dev_dbg(musb->controller, "%s done request %p, %d/%d\n",
                                ep->end_point.name, request,
                                req->request.actual, req->request.length);
        else
                dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n",
                                ep->end_point.name, request,
                                req->request.actual, req->request.length,
                                request->status);
        usb_gadget_giveback_request(&req->ep->end_point, &req->request);
        spin_lock(&musb->lock);
        ep->busy = busy;
}
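
/*
 * Editorial note: musb_g_giveback() drops musb->lock around the
 * usb_gadget_giveback_request() callback, so the INDEX register may be
 * changed by a concurrent ep.queue() before it returns.  Callers that
 * keep touching indexed endpoint registers afterwards re-select the
 * endpoint, roughly:
 *
 *	musb_g_giveback(musb_ep, request, 0);
 *	musb_ep_select(mbase, epnum);	(re-select before further access)
 */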

/* ----------------------------------------------------------------------- */

/*
 * Abort requests queued to an endpoint using the status. Synchronous.
 * Caller has locked the controller and blocked IRQs, and has selected
 * this endpoint.
 */
static void nuke(struct musb_ep *ep, const int status)
{
        struct musb             *musb = ep->musb;
        struct musb_request     *req = NULL;
        void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;

        ep->busy = 1;

        if (is_dma_capable() && ep->dma) {
                struct dma_controller   *c = ep->musb->dma_controller;
                int value;

                if (ep->is_in) {
                        /*
                         * The programming guide says that we must not clear
                         * the DMAMODE bit before DMAENAB, so we only
                         * clear it in the second write...
                         */
                        musb_writew(epio, MUSB_TXCSR,
                                    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
                        musb_writew(epio, MUSB_TXCSR,
                                        0 | MUSB_TXCSR_FLUSHFIFO);
                } else {
                        musb_writew(epio, MUSB_RXCSR,
                                        0 | MUSB_RXCSR_FLUSHFIFO);
                        musb_writew(epio, MUSB_RXCSR,
                                        0 | MUSB_RXCSR_FLUSHFIFO);
                }

                value = c->channel_abort(ep->dma);
                dev_dbg(musb->controller, "%s: abort DMA --> %d\n",
                                ep->name, value);
                c->channel_release(ep->dma);
                ep->dma = NULL;
        }

        while (!list_empty(&ep->req_list)) {
                req = list_first_entry(&ep->req_list, struct musb_request, list);
                musb_g_giveback(ep, &req->request, status);
        }
}

/* ----------------------------------------------------------------------- */

/* Data transfers - pure PIO, pure DMA, or mixed mode */

/*
 * This assumes the separate CPPI engine is responding to DMA requests
 * from the usb core ... sequenced a bit differently from mentor dma.
 */

static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
        if (can_bulk_split(musb, ep->type))
                return ep->hw_ep->max_packet_sz_tx;
        else
                return ep->packet_sz;
}

/*
 * An endpoint is transmitting data. This can be called either from
 * the IRQ routine or from ep.queue() to kickstart a request on an
 * endpoint.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void txstate(struct musb *musb, struct musb_request *req)
{
        u8                      epnum = req->epnum;
        struct musb_ep          *musb_ep;
        void __iomem            *epio = musb->endpoints[epnum].regs;
        struct usb_request      *request;
        u16                     fifo_count = 0, csr;
        int                     use_dma = 0;

        musb_ep = req->ep;

        /* Check if EP is disabled */
        if (!musb_ep->desc) {
                dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
                                                musb_ep->end_point.name);
                return;
        }

        /* we shouldn't get here while DMA is active ... but we do ... */
        if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
                dev_dbg(musb->controller, "dma pending...\n");
                return;
        }

        /* read TXCSR before */
        csr = musb_readw(epio, MUSB_TXCSR);

        request = &req->request;
        fifo_count = min(max_ep_writesize(musb, musb_ep),
                        (int)(request->length - request->actual));

        if (csr & MUSB_TXCSR_TXPKTRDY) {
                dev_dbg(musb->controller, "%s old packet still ready, txcsr %03x\n",
                                musb_ep->end_point.name, csr);
                return;
        }

        if (csr & MUSB_TXCSR_P_SENDSTALL) {
                dev_dbg(musb->controller, "%s stalling, txcsr %03x\n",
                                musb_ep->end_point.name, csr);
                return;
        }

        dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
                        epnum, musb_ep->packet_sz, fifo_count,
                        csr);

#ifndef CONFIG_MUSB_PIO_ONLY
        if (is_buffer_mapped(req)) {
                struct dma_controller   *c = musb->dma_controller;
                size_t request_size;

                /* setup DMA, then program endpoint CSR */
                request_size = min_t(size_t, request->length - request->actual,
                                        musb_ep->dma->max_len);

                use_dma = (request->dma != DMA_ADDR_INVALID && request_size);

                /* MUSB_TXCSR_P_ISO is still set correctly */

                if (musb_dma_inventra(musb) || musb_dma_ux500(musb)) {
                        if (request_size < musb_ep->packet_sz)
                                musb_ep->dma->desired_mode = 0;
                        else
                                musb_ep->dma->desired_mode = 1;

                        use_dma = use_dma && c->channel_program(
                                        musb_ep->dma, musb_ep->packet_sz,
                                        musb_ep->dma->desired_mode,
                                        request->dma + request->actual, request_size);
                        if (use_dma) {
                                if (musb_ep->dma->desired_mode == 0) {
                                        /*
                                         * We must not clear the DMAMODE bit
                                         * before the DMAENAB bit -- and the
                                         * latter doesn't always get cleared
                                         * before we get here...
                                         */
                                        csr &= ~(MUSB_TXCSR_AUTOSET
                                                | MUSB_TXCSR_DMAENAB);
                                        musb_writew(epio, MUSB_TXCSR, csr
                                                | MUSB_TXCSR_P_WZC_BITS);
                                        csr &= ~MUSB_TXCSR_DMAMODE;
                                        csr |= (MUSB_TXCSR_DMAENAB |
                                                        MUSB_TXCSR_MODE);
                                        /* against programming guide */
                                } else {
                                        csr |= (MUSB_TXCSR_DMAENAB
                                                        | MUSB_TXCSR_DMAMODE
                                                        | MUSB_TXCSR_MODE);
                                        /*
                                         * Enable Autoset according to table
                                         * below
                                         * bulk_split hb_mult   Autoset_Enable
                                         *      0       0       Yes(Normal)
                                         *      0       >0      No(High BW ISO)
                                         *      1       0       Yes(HS bulk)
                                         *      1       >0      Yes(FS bulk)
                                         */
                                        if (!musb_ep->hb_mult ||
                                            can_bulk_split(musb,
                                                           musb_ep->type))
                                                csr |= MUSB_TXCSR_AUTOSET;
                                }
                                csr &= ~MUSB_TXCSR_P_UNDERRUN;

                                musb_writew(epio, MUSB_TXCSR, csr);
                        }
                }

                if (is_cppi_enabled(musb)) {
                        /* program endpoint CSR first, then setup DMA */
                        csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
                        csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
                                MUSB_TXCSR_MODE;
                        musb_writew(epio, MUSB_TXCSR, (MUSB_TXCSR_P_WZC_BITS &
                                                ~MUSB_TXCSR_P_UNDERRUN) | csr);

                        /* ensure writebuffer is empty */
                        csr = musb_readw(epio, MUSB_TXCSR);

                        /*
                         * NOTE host side sets DMAENAB later than this; both are
                         * OK since the transfer dma glue (between CPPI and
                         * Mentor fifos) just tells CPPI it could start. Data
                         * only moves to the USB TX fifo when both fifos are
                         * ready.
                         */
                        /*
                         * "mode" is irrelevant here; handle terminating ZLPs
                         * like PIO does, since the hardware RNDIS mode seems
                         * unreliable except for the
                         * last-packet-is-already-short case.
                         */
                        use_dma = use_dma && c->channel_program(
                                        musb_ep->dma, musb_ep->packet_sz,
                                        0,
                                        request->dma + request->actual,
                                        request_size);
                        if (!use_dma) {
                                c->channel_release(musb_ep->dma);
                                musb_ep->dma = NULL;
                                csr &= ~MUSB_TXCSR_DMAENAB;
                                musb_writew(epio, MUSB_TXCSR, csr);
                                /* invariant: request->buf is non-null */
                        }
                } else if (tusb_dma_omap(musb))
                        use_dma = use_dma && c->channel_program(
                                        musb_ep->dma, musb_ep->packet_sz,
                                        request->zero,
                                        request->dma + request->actual,
                                        request_size);
        }
#endif

        if (!use_dma) {
                /*
                 * Unmap the dma buffer back to cpu if dma channel
                 * programming fails
                 */
                unmap_dma_buffer(req, musb);

                musb_write_fifo(musb_ep->hw_ep, fifo_count,
                                (u8 *) (request->buf + request->actual));
                request->actual += fifo_count;
                csr |= MUSB_TXCSR_TXPKTRDY;
                csr &= ~MUSB_TXCSR_P_UNDERRUN;
                musb_writew(epio, MUSB_TXCSR, csr);
        }

        /* host may already have the data when this message shows... */
        dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
                        musb_ep->end_point.name, use_dma ? "dma" : "pio",
                        request->actual, request->length,
                        musb_readw(epio, MUSB_TXCSR),
                        fifo_count,
                        musb_readw(epio, MUSB_TXMAXP));
}
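
/*
 * Editorial note: a summary of the TX DMA mode choice made above for the
 * Inventra/ux500 engines, since the logic is spread across branches.
 * Requests shorter than one packet use desired_mode 0 (packet-at-a-time,
 * DMAMODE cleared in the prescribed two-write order); anything longer
 * uses desired_mode 1 with DMAENAB | DMAMODE | MODE, plus AUTOSET when
 * the hb_mult / can_bulk_split() table in the code allows it.  If
 * channel_program() fails, the buffer is unmapped again and the
 * transfer falls back to PIO.
 */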

/*
 * FIFO state update (e.g. data ready).
 * Called from IRQ, with controller locked.
 */
void musb_g_tx(struct musb *musb, u8 epnum)
{
        u16                     csr;
        struct musb_request     *req;
        struct usb_request      *request;
        u8 __iomem              *mbase = musb->mregs;
        struct musb_ep          *musb_ep = &musb->endpoints[epnum].ep_in;
        void __iomem            *epio = musb->endpoints[epnum].regs;
        struct dma_channel      *dma;

        musb_ep_select(mbase, epnum);
        req = next_request(musb_ep);
        request = &req->request;

        csr = musb_readw(epio, MUSB_TXCSR);
        dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);

        dma = is_dma_capable() ? musb_ep->dma : NULL;

        /*
         * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
         * probably rates reporting as a host error.
         */
        if (csr & MUSB_TXCSR_P_SENTSTALL) {
                csr |=  MUSB_TXCSR_P_WZC_BITS;
                csr &= ~MUSB_TXCSR_P_SENTSTALL;
                musb_writew(epio, MUSB_TXCSR, csr);
                return;
        }

        if (csr & MUSB_TXCSR_P_UNDERRUN) {
                /* We NAKed, no big deal... little reason to care. */
                csr |=   MUSB_TXCSR_P_WZC_BITS;
                csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
                musb_writew(epio, MUSB_TXCSR, csr);
                dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
                                epnum, request);
        }

        if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
                /*
                 * SHOULD NOT HAPPEN... has with CPPI though, after
                 * changing SENDSTALL (and other cases); harmless?
                 */
                dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
                return;
        }

        if (request) {
                u8      is_dma = 0;
                bool    short_packet = false;

                if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
                        is_dma = 1;
                        csr |= MUSB_TXCSR_P_WZC_BITS;
                        csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
                                 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
                        musb_writew(epio, MUSB_TXCSR, csr);
                        /* Ensure writebuffer is empty. */
                        csr = musb_readw(epio, MUSB_TXCSR);
                        request->actual += musb_ep->dma->actual_len;
                        dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
                                epnum, csr, musb_ep->dma->actual_len, request);
                }

                /*
                 * First, maybe a terminating short packet. Some DMA
                 * engines might handle this by themselves.
                 */
                if ((request->zero && request->length)
                        && (request->length % musb_ep->packet_sz == 0)
                        && (request->actual == request->length))
                                short_packet = true;

                if ((musb_dma_inventra(musb) || musb_dma_ux500(musb)) &&
                        (is_dma && (!dma->desired_mode ||
                                (request->actual &
                                        (musb_ep->packet_sz - 1)))))
                                short_packet = true;

                if (short_packet) {
                        /*
                         * On DMA completion, FIFO may not be
                         * available yet...
                         */
                        if (csr & MUSB_TXCSR_TXPKTRDY)
                                return;

                        dev_dbg(musb->controller, "sending zero pkt\n");
                        musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
                                        | MUSB_TXCSR_TXPKTRDY);
                        request->zero = 0;
                }

                if (request->actual == request->length) {
                        musb_g_giveback(musb_ep, request, 0);
                        /*
                         * In the giveback function the MUSB lock is
                         * released and reacquired after some time.  During
                         * this period the INDEX register could be changed
                         * by the gadget_queue function, especially on SMP
                         * systems.  Reselect the INDEX to be sure we are
                         * reading/modifying the right registers.
                         */
                        musb_ep_select(mbase, epnum);
                        req = musb_ep->desc ? next_request(musb_ep) : NULL;
                        if (!req) {
                                dev_dbg(musb->controller, "%s idle now\n",
                                        musb_ep->end_point.name);
                                return;
                        }
                }

                txstate(musb, req);
        }
}

/* ------------------------------------------------------------ */

/*
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
        const u8                epnum = req->epnum;
        struct usb_request      *request = &req->request;
        struct musb_ep          *musb_ep;
        void __iomem            *epio = musb->endpoints[epnum].regs;
        unsigned                len = 0;
        u16                     fifo_count;
        u16                     csr = musb_readw(epio, MUSB_RXCSR);
        struct musb_hw_ep       *hw_ep = &musb->endpoints[epnum];
        u8                      use_mode_1;

        if (hw_ep->is_shared_fifo)
                musb_ep = &hw_ep->ep_in;
        else
                musb_ep = &hw_ep->ep_out;

        fifo_count = musb_ep->packet_sz;

        /* Check if EP is disabled */
        if (!musb_ep->desc) {
                dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
                                                musb_ep->end_point.name);
                return;
        }

        /* We shouldn't get here while DMA is active, but we do... */
        if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
                dev_dbg(musb->controller, "DMA pending...\n");
                return;
        }

        if (csr & MUSB_RXCSR_P_SENDSTALL) {
                dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n",
                    musb_ep->end_point.name, csr);
                return;
        }

        if (is_cppi_enabled(musb) && is_buffer_mapped(req)) {
                struct dma_controller   *c = musb->dma_controller;
                struct dma_channel      *channel = musb_ep->dma;

                /* NOTE:  CPPI won't actually stop advancing the DMA
                 * queue after short packet transfers, so this is almost
                 * always going to run as IRQ-per-packet DMA so that
                 * faults will be handled correctly.
                 */
                if (c->channel_program(channel,
                                musb_ep->packet_sz,
                                !request->short_not_ok,
                                request->dma + request->actual,
                                request->length - request->actual)) {

                        /* make sure that if an rxpkt arrived after the irq,
                         * the cppi engine will be ready to take it as soon
                         * as DMA is enabled
                         */
                        csr &= ~(MUSB_RXCSR_AUTOCLEAR
                                        | MUSB_RXCSR_DMAMODE);
                        csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
                        musb_writew(epio, MUSB_RXCSR, csr);
                        return;
                }
        }

        if (csr & MUSB_RXCSR_RXPKTRDY) {
                fifo_count = musb_readw(epio, MUSB_RXCOUNT);

                /*
                 * Enable Mode 1 on RX transfers only when short_not_ok flag
                 * is set. Currently short_not_ok flag is set only from
                 * file_storage and f_mass_storage drivers
                 */

                if (request->short_not_ok && fifo_count == musb_ep->packet_sz)
                        use_mode_1 = 1;
                else
                        use_mode_1 = 0;

                if (request->actual < request->length) {
                        if (!is_buffer_mapped(req))
                                goto buffer_aint_mapped;

                        if (musb_dma_inventra(musb)) {
                                struct dma_controller   *c;
                                struct dma_channel      *channel;
                                int                     use_dma = 0;
                                unsigned int transfer_size;

                                c = musb->dma_controller;
                                channel = musb_ep->dma;
        /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
         * mode 0 only. So we do not get endpoint interrupts due to DMA
         * completion. We only get interrupts from DMA controller.
         *
         * We could operate in DMA mode 1 if we knew the size of the transfer
         * in advance. For mass storage class, request->length = what the host
         * sends, so that'd work.  But for pretty much everything else,
         * request->length is routinely more than what the host sends. For
         * most of these gadgets, the end of the transfer is signified either
         * by a short packet, or filling the last byte of the buffer.  (Sending
         * extra data in that last packet should trigger an overflow fault.)
         * But in mode 1, we don't get a DMA completion interrupt for short
         * packets.
         *
         * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
         * to get endpoint interrupt on every DMA req, but that didn't seem
         * to work reliably.
         *
         * REVISIT an updated g_file_storage can set req->short_not_ok, which
         * then becomes usable as a runtime "use mode 1" hint...
         */

                                /* Experimental: Mode1 works with mass storage use cases */
                                if (use_mode_1) {
                                        csr |= MUSB_RXCSR_AUTOCLEAR;
                                        musb_writew(epio, MUSB_RXCSR, csr);
                                        csr |= MUSB_RXCSR_DMAENAB;
                                        musb_writew(epio, MUSB_RXCSR, csr);

                                        /*
                                         * this special sequence (enabling and then
                                         * disabling MUSB_RXCSR_DMAMODE) is required
                                         * to get DMAReq to activate
                                         */
                                        musb_writew(epio, MUSB_RXCSR,
                                                csr | MUSB_RXCSR_DMAMODE);
                                        musb_writew(epio, MUSB_RXCSR, csr);

                                        transfer_size = min_t(unsigned int,
                                                        request->length -
                                                        request->actual,
                                                        channel->max_len);
                                        musb_ep->dma->desired_mode = 1;
                                } else {
                                        if (!musb_ep->hb_mult &&
                                                musb_ep->hw_ep->rx_double_buffered)
                                                csr |= MUSB_RXCSR_AUTOCLEAR;
                                        csr |= MUSB_RXCSR_DMAENAB;
                                        musb_writew(epio, MUSB_RXCSR, csr);

                                        transfer_size = min(request->length - request->actual,
                                                        (unsigned)fifo_count);
                                        musb_ep->dma->desired_mode = 0;
                                }

                                use_dma = c->channel_program(
                                                channel,
                                                musb_ep->packet_sz,
                                                channel->desired_mode,
                                                request->dma
                                                + request->actual,
                                                transfer_size);

                                if (use_dma)
                                        return;
                        }

                        if ((musb_dma_ux500(musb)) &&
                                (request->actual < request->length)) {

                                struct dma_controller *c;
                                struct dma_channel *channel;
                                unsigned int transfer_size = 0;

                                c = musb->dma_controller;
                                channel = musb_ep->dma;

                                /* In case first packet is short */
                                if (fifo_count < musb_ep->packet_sz)
                                        transfer_size = fifo_count;
                                else if (request->short_not_ok)
                                        transfer_size = min_t(unsigned int,
                                                        request->length -
                                                        request->actual,
                                                        channel->max_len);
                                else
                                        transfer_size = min_t(unsigned int,
                                                        request->length -
                                                        request->actual,
                                                        (unsigned)fifo_count);

                                csr &= ~MUSB_RXCSR_DMAMODE;
                                csr |= (MUSB_RXCSR_DMAENAB |
                                        MUSB_RXCSR_AUTOCLEAR);

                                musb_writew(epio, MUSB_RXCSR, csr);

                                if (transfer_size <= musb_ep->packet_sz) {
                                        musb_ep->dma->desired_mode = 0;
                                } else {
                                        musb_ep->dma->desired_mode = 1;
                                        /* Mode must be set after DMAENAB */
                                        csr |= MUSB_RXCSR_DMAMODE;
                                        musb_writew(epio, MUSB_RXCSR, csr);
                                }

                                if (c->channel_program(channel,
                                                        musb_ep->packet_sz,
                                                        channel->desired_mode,
                                                        request->dma
                                                        + request->actual,
                                                        transfer_size))

                                        return;
                        }

                        len = request->length - request->actual;
                        dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
                                        musb_ep->end_point.name,
                                        fifo_count, len,
                                        musb_ep->packet_sz);

                        fifo_count = min_t(unsigned, len, fifo_count);

                        if (tusb_dma_omap(musb)) {
                                struct dma_controller *c = musb->dma_controller;
                                struct dma_channel *channel = musb_ep->dma;
                                u32 dma_addr = request->dma + request->actual;
                                int ret;

                                ret = c->channel_program(channel,
                                                musb_ep->packet_sz,
                                                channel->desired_mode,
                                                dma_addr,
                                                fifo_count);
                                if (ret)
                                        return;
                        }

                        /*
                         * Unmap the dma buffer back to cpu if dma channel
                         * programming fails. This buffer is mapped if the
                         * channel allocation is successful
                         */
                        unmap_dma_buffer(req, musb);

                        /*
                         * Clear DMAENAB and AUTOCLEAR for the
                         * PIO mode transfer
                         */
                        csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
                        musb_writew(epio, MUSB_RXCSR, csr);

buffer_aint_mapped:
                        musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
                                        (request->buf + request->actual));
                        request->actual += fifo_count;

                        /* REVISIT if we left anything in the fifo, flush
                         * it and report -EOVERFLOW
                         */

                        /* ack the read! */
                        csr |= MUSB_RXCSR_P_WZC_BITS;
                        csr &= ~MUSB_RXCSR_RXPKTRDY;
                        musb_writew(epio, MUSB_RXCSR, csr);
                }
        }

        /* reached the end, or short packet detected */
        if (request->actual == request->length ||
            fifo_count < musb_ep->packet_sz)
                musb_g_giveback(musb_ep, request, 0);
}
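
/*
 * Editorial note: in summary, rxstate() tries the DMA paths in order
 * (CPPI, Inventra, ux500, TUSB OMAP) and falls back to PIO when none
 * claims the transfer.  Mode 1 is only used when the gadget set
 * short_not_ok and a full packet is already in the FIFO, because mode 1
 * raises no DMA completion interrupt for short packets.  The request is
 * given back once the buffer is full or a short packet marks the end of
 * the transfer.
 */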

/*
 * Data ready for a request; called from IRQ
 */
void musb_g_rx(struct musb *musb, u8 epnum)
{
        u16                     csr;
        struct musb_request     *req;
        struct usb_request      *request;
        void __iomem            *mbase = musb->mregs;
        struct musb_ep          *musb_ep;
        void __iomem            *epio = musb->endpoints[epnum].regs;
        struct dma_channel      *dma;
        struct musb_hw_ep       *hw_ep = &musb->endpoints[epnum];

        if (hw_ep->is_shared_fifo)
                musb_ep = &hw_ep->ep_in;
        else
                musb_ep = &hw_ep->ep_out;

        musb_ep_select(mbase, epnum);

        req = next_request(musb_ep);
        if (!req)
                return;

        request = &req->request;

        csr = musb_readw(epio, MUSB_RXCSR);
        dma = is_dma_capable() ? musb_ep->dma : NULL;

        dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
                        csr, dma ? " (dma)" : "", request);

        if (csr & MUSB_RXCSR_P_SENTSTALL) {
                csr |= MUSB_RXCSR_P_WZC_BITS;
                csr &= ~MUSB_RXCSR_P_SENTSTALL;
                musb_writew(epio, MUSB_RXCSR, csr);
                return;
        }

        if (csr & MUSB_RXCSR_P_OVERRUN) {
                /* csr |= MUSB_RXCSR_P_WZC_BITS; */
                csr &= ~MUSB_RXCSR_P_OVERRUN;
                musb_writew(epio, MUSB_RXCSR, csr);

                dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request);
                if (request->status == -EINPROGRESS)
                        request->status = -EOVERFLOW;
        }
        if (csr & MUSB_RXCSR_INCOMPRX) {
                /* REVISIT not necessarily an error */
                dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name);
        }

        if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
                /* "should not happen"; likely RXPKTRDY pending for DMA */
                dev_dbg(musb->controller, "%s busy, csr %04x\n",
                        musb_ep->end_point.name, csr);
                return;
        }

        if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
                csr &= ~(MUSB_RXCSR_AUTOCLEAR
                                | MUSB_RXCSR_DMAENAB
                                | MUSB_RXCSR_DMAMODE);
                musb_writew(epio, MUSB_RXCSR,
                        MUSB_RXCSR_P_WZC_BITS | csr);

                request->actual += musb_ep->dma->actual_len;

                dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
                        epnum, csr,
                        musb_readw(epio, MUSB_RXCSR),
                        musb_ep->dma->actual_len, request);

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
        defined(CONFIG_USB_UX500_DMA)
                /* Autoclear doesn't clear RxPktRdy for short packets */
                if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
                                || (dma->actual_len
                                        & (musb_ep->packet_sz - 1))) {
                        /* ack the read! */
                        csr &= ~MUSB_RXCSR_RXPKTRDY;
                        musb_writew(epio, MUSB_RXCSR, csr);
                }

                /* incomplete, and not short? wait for next IN packet */
                if ((request->actual < request->length)
                                && (musb_ep->dma->actual_len
                                        == musb_ep->packet_sz)) {
                        /* In double buffer case, continue to unload fifo if
                         * there is Rx packet in FIFO.
                         */
                        csr = musb_readw(epio, MUSB_RXCSR);
                        if ((csr & MUSB_RXCSR_RXPKTRDY) &&
                                hw_ep->rx_double_buffered)
                                goto exit;
                        return;
                }
#endif
                musb_g_giveback(musb_ep, request, 0);
                /*
                 * In the giveback function the MUSB lock is
                 * released and reacquired after some time.  During
                 * this period the INDEX register could be changed
                 * by the gadget_queue function, especially on SMP
                 * systems.  Reselect the INDEX to be sure we are
                 * reading/modifying the right registers.
                 */
                musb_ep_select(mbase, epnum);

                req = next_request(musb_ep);
                if (!req)
                        return;
        }
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
        defined(CONFIG_USB_UX500_DMA)
exit:
#endif
        /* Analyze request */
        rxstate(musb, req);
}

/* ------------------------------------------------------------ */

static int musb_gadget_enable(struct usb_ep *ep,
                        const struct usb_endpoint_descriptor *desc)
{
        unsigned long           flags;
        struct musb_ep          *musb_ep;
        struct musb_hw_ep       *hw_ep;
        void __iomem            *regs;
        struct musb             *musb;
        void __iomem    *mbase;
        u8              epnum;
        u16             csr;
        unsigned        tmp;
        int             status = -EINVAL;

        if (!ep || !desc)
                return -EINVAL;

        musb_ep = to_musb_ep(ep);
        hw_ep = musb_ep->hw_ep;
        regs = hw_ep->regs;
        musb = musb_ep->musb;
        mbase = musb->mregs;
        epnum = musb_ep->current_epnum;

        spin_lock_irqsave(&musb->lock, flags);

        if (musb_ep->desc) {
                status = -EBUSY;
                goto fail;
        }
        musb_ep->type = usb_endpoint_type(desc);

        /* check direction and (later) maxpacket size against endpoint */
        if (usb_endpoint_num(desc) != epnum)
                goto fail;

        /* REVISIT this rules out high bandwidth periodic transfers */
        tmp = usb_endpoint_maxp(desc);
        if (tmp & ~0x07ff) {
                int ok;

                if (usb_endpoint_dir_in(desc))
                        ok = musb->hb_iso_tx;
                else
                        ok = musb->hb_iso_rx;

                if (!ok) {
                        dev_dbg(musb->controller, "no support for high bandwidth ISO\n");
                        goto fail;
                }
                musb_ep->hb_mult = (tmp >> 11) & 3;
        } else {
                musb_ep->hb_mult = 0;
        }

        musb_ep->packet_sz = tmp & 0x7ff;
        tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);

        /* enable the interrupts for the endpoint, set the endpoint
         * packet size (or fail), set the mode, clear the fifo
         */
        musb_ep_select(mbase, epnum);
        if (usb_endpoint_dir_in(desc)) {

                if (hw_ep->is_shared_fifo)
                        musb_ep->is_in = 1;
                if (!musb_ep->is_in)
                        goto fail;

                if (tmp > hw_ep->max_packet_sz_tx) {
                        dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
                        goto fail;
                }

                musb->intrtxe |= (1 << epnum);
                musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);

                /* REVISIT if can_bulk_split(), use by updating "tmp";
                 * likewise high bandwidth periodic tx
                 */
                /* Set TXMAXP with the FIFO size of the endpoint
                 * to disable double buffering mode.
                 */
                if (musb->double_buffer_not_ok) {
                        musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
                } else {
                        if (can_bulk_split(musb, musb_ep->type))
                                musb_ep->hb_mult = (hw_ep->max_packet_sz_tx /
                                                        musb_ep->packet_sz) - 1;
                        musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
                                        | (musb_ep->hb_mult << 11));
                }

                csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
                if (musb_readw(regs, MUSB_TXCSR)
                                & MUSB_TXCSR_FIFONOTEMPTY)
                        csr |= MUSB_TXCSR_FLUSHFIFO;
                if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
                        csr |= MUSB_TXCSR_P_ISO;

                /* set twice in case of double buffering */
                musb_writew(regs, MUSB_TXCSR, csr);
                /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
                musb_writew(regs, MUSB_TXCSR, csr);

        } else {

                if (hw_ep->is_shared_fifo)
                        musb_ep->is_in = 0;
                if (musb_ep->is_in)
                        goto fail;

                if (tmp > hw_ep->max_packet_sz_rx) {
                        dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
                        goto fail;
                }

                musb->intrrxe |= (1 << epnum);
                musb_writew(mbase, MUSB_INTRRXE, musb->intrrxe);

                /* REVISIT if can_bulk_combine() use by updating "tmp"
                 * likewise high bandwidth periodic rx
                 */
                /* Set RXMAXP with the FIFO size of the endpoint
                 * to disable double buffering mode.
                 */
                if (musb->double_buffer_not_ok)
                        musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
                else
                        musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
                                        | (musb_ep->hb_mult << 11));

                /* force shared fifo to OUT-only mode */
                if (hw_ep->is_shared_fifo) {
                        csr = musb_readw(regs, MUSB_TXCSR);
                        csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
                        musb_writew(regs, MUSB_TXCSR, csr);
                }

                csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
                if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
                        csr |= MUSB_RXCSR_P_ISO;
                else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
                        csr |= MUSB_RXCSR_DISNYET;

                /* set twice in case of double buffering */
                musb_writew(regs, MUSB_RXCSR, csr);
                musb_writew(regs, MUSB_RXCSR, csr);
        }

        /* NOTE:  all the I/O code _should_ work fine without DMA, in case
         * for some reason you run out of channels here.
         */
        if (is_dma_capable() && musb->dma_controller) {
                struct dma_controller   *c = musb->dma_controller;

                musb_ep->dma = c->channel_alloc(c, hw_ep,
                                (desc->bEndpointAddress & USB_DIR_IN));
        } else
                musb_ep->dma = NULL;

        musb_ep->desc = desc;
        musb_ep->busy = 0;
        musb_ep->wedged = 0;
        status = 0;

        pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
                        musb_driver_name, musb_ep->end_point.name,
                        ({ char *s; switch (musb_ep->type) {
                        case USB_ENDPOINT_XFER_BULK:    s = "bulk"; break;
                        case USB_ENDPOINT_XFER_INT:     s = "int"; break;
                        default:                        s = "iso"; break;
                        } s; }),
                        musb_ep->is_in ? "IN" : "OUT",
                        musb_ep->dma ? "dma, " : "",
                        musb_ep->packet_sz);

        schedule_work(&musb->irq_work);

fail:
        spin_unlock_irqrestore(&musb->lock, flags);
        return status;
}
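
/*
 * Editorial sketch (not part of this driver): gadget function drivers
 * normally reach musb_gadget_enable() through the gadget core's
 * usb_ep_enable() wrapper rather than calling it directly; the
 * descriptor name below is illustrative only.
 *
 *	ep->desc = &fs_bulk_in_desc;	(hypothetical descriptor)
 *	status = usb_ep_enable(ep);	(dispatches to ep->ops->enable)
 */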

/*
 * Disable an endpoint, flushing all queued requests.
 */
static int musb_gadget_disable(struct usb_ep *ep)
{
        unsigned long   flags;
        struct musb     *musb;
        u8              epnum;
        struct musb_ep  *musb_ep;
        void __iomem    *epio;
        int             status = 0;

        musb_ep = to_musb_ep(ep);
        musb = musb_ep->musb;
        epnum = musb_ep->current_epnum;
        epio = musb->endpoints[epnum].regs;

        spin_lock_irqsave(&musb->lock, flags);
        musb_ep_select(musb->mregs, epnum);

        /* zero the endpoint sizes */
        if (musb_ep->is_in) {
                musb->intrtxe &= ~(1 << epnum);
                musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
                musb_writew(epio, MUSB_TXMAXP, 0);
        } else {
                musb->intrrxe &= ~(1 << epnum);
                musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
                musb_writew(epio, MUSB_RXMAXP, 0);
        }

        /* abort all pending DMA and requests */
        nuke(musb_ep, -ESHUTDOWN);

        musb_ep->desc = NULL;
        musb_ep->end_point.desc = NULL;

        schedule_work(&musb->irq_work);

        spin_unlock_irqrestore(&musb->lock, flags);

        dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name);

        return status;
}

/*
 * Allocate a request for an endpoint.
 * Reused by ep0 code.
 */
struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
        struct musb_ep          *musb_ep = to_musb_ep(ep);
        struct musb             *musb = musb_ep->musb;
        struct musb_request     *request = NULL;

        request = kzalloc(sizeof *request, gfp_flags);
        if (!request) {
                dev_dbg(musb->controller, "not enough memory\n");
                return NULL;
        }

        request->request.dma = DMA_ADDR_INVALID;
        request->epnum = musb_ep->current_epnum;
        request->ep = musb_ep;

        return &request->request;
}

/*
 * Free a request
 * Reused by ep0 code.
 */
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
        kfree(to_musb_request(req));
}
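
/*
 * Editorial sketch (not part of this driver): function drivers pair
 * these through the gadget core wrappers, roughly:
 *
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);	(reaches musb_alloc_request)
 *	...
 *	usb_ep_free_request(ep, req);			(reaches musb_free_request)
 *
 * musb_alloc_request() presets request.dma to DMA_ADDR_INVALID, which is
 * how map_dma_buffer() later tells driver-mapped buffers apart from
 * pre-mapped ones.
 */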

static LIST_HEAD(buffers);

struct free_record {
        struct list_head        list;
        struct device           *dev;
        unsigned                bytes;
        dma_addr_t              dma;
};

/*
 * Context: controller locked, IRQs blocked.
 */
void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
        dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n",
                req->tx ? "TX/IN" : "RX/OUT",
                &req->request, req->request.length, req->epnum);

        musb_ep_select(musb->mregs, req->epnum);
        if (req->tx)
                txstate(musb, req);
        else
                rxstate(musb, req);
}

static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
                        gfp_t gfp_flags)
{
        struct musb_ep          *musb_ep;
        struct musb_request     *request;
        struct musb             *musb;
        int                     status = 0;
        unsigned long           lockflags;

        if (!ep || !req)
                return -EINVAL;
        if (!req->buf)
                return -ENODATA;

        musb_ep = to_musb_ep(ep);
        musb = musb_ep->musb;

        request = to_musb_request(req);
        request->musb = musb;

        if (request->ep != musb_ep)
                return -EINVAL;

        dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req);

        /* request is mine now... */
        request->request.actual = 0;
        request->request.status = -EINPROGRESS;
        request->epnum = musb_ep->current_epnum;
        request->tx = musb_ep->is_in;

        map_dma_buffer(request, musb, musb_ep);

        spin_lock_irqsave(&musb->lock, lockflags);

        /* don't queue if the ep is down */
        if (!musb_ep->desc) {
                dev_dbg(musb->controller, "req %p queued to %s while ep %s\n",
                                req, ep->name, "disabled");
                status = -ESHUTDOWN;
                unmap_dma_buffer(request, musb);
                goto unlock;
        }

        /* add request to the list */
        list_add_tail(&request->list, &musb_ep->req_list);

1286        /* if this is the head of the queue, start i/o ... */
1287        if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
1288                musb_ep_restart(musb, request);
1289
1290unlock:
1291        spin_unlock_irqrestore(&musb->lock, lockflags);
1292        return status;
1293}
1294
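    /*
     * Example (illustration only): a function driver queues I/O
     * through usb_ep_queue(), which lands in musb_gadget_queue()
     * above.  The completion callback may run in interrupt context;
     * my_ep and my_complete are hypothetical names.
     *
     *	static void my_complete(struct usb_ep *ep,
     *			struct usb_request *req)
     *	{
     *		if (req->status == 0)
     *			pr_debug("did %u of %u bytes\n",
     *					req->actual, req->length);
     *	}
     *
     *	req->complete = my_complete;
     *	status = usb_ep_queue(my_ep, req, GFP_ATOMIC);
     */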
1295static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
1296{
1297        struct musb_ep          *musb_ep;
1298        struct musb_request     *req;
1299        struct musb_request     *r;
1300        unsigned long           flags;
1301        int                     status = 0;
1302        struct musb             *musb;
1303
1304        if (!ep || !request)
1305                return -EINVAL;

            musb_ep = to_musb_ep(ep);
            req = to_musb_request(request);
            musb = musb_ep->musb;

            if (req->ep != musb_ep)
                    return -EINVAL;
1306
1307        spin_lock_irqsave(&musb->lock, flags);
1308
1309        list_for_each_entry(r, &musb_ep->req_list, list) {
1310                if (r == req)
1311                        break;
1312        }
1313        if (r != req) {
1314                dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name);
1315                status = -EINVAL;
1316                goto done;
1317        }
1318
1319        /* if the hardware doesn't have the request, easy ... */
1320        if (musb_ep->req_list.next != &req->list || musb_ep->busy)
1321                musb_g_giveback(musb_ep, request, -ECONNRESET);
1322
1323        /* ... else abort the dma transfer ... */
1324        else if (is_dma_capable() && musb_ep->dma) {
1325                struct dma_controller   *c = musb->dma_controller;
1326
1327                musb_ep_select(musb->mregs, musb_ep->current_epnum);
1328                if (c->channel_abort)
1329                        status = c->channel_abort(musb_ep->dma);
1330                else
1331                        status = -EBUSY;
1332                if (status == 0)
1333                        musb_g_giveback(musb_ep, request, -ECONNRESET);
1334        } else {
1335                /* NOTE: by sticking to easily tested hardware/driver states,
1336                 * we leave counting of in-flight packets imprecise.
1337                 */
1338                musb_g_giveback(musb_ep, request, -ECONNRESET);
1339        }
1340
1341done:
1342        spin_unlock_irqrestore(&musb->lock, flags);
1343        return status;
1344}
1345
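    /*
     * Example (illustration only): cancelling a queued request.  In
     * the paths above the request is normally given back with status
     * -ECONNRESET before usb_ep_dequeue() returns.
     *
     *	if (usb_ep_dequeue(my_ep, req) < 0)
     *		pr_debug("request not queued, or abort failed\n");
     */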
1346/*
1347 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx
1348 * any data, but requests may still be queued to it.
1349 *
1350 * exported to ep0 code
1351 */
1352static int musb_gadget_set_halt(struct usb_ep *ep, int value)
1353{
1354        struct musb_ep          *musb_ep;
1355        u8                      epnum;
1356        struct musb             *musb;
1357        void __iomem            *epio;
1358        void __iomem            *mbase;
1359        unsigned long           flags;
1360        u16                     csr;
1361        struct musb_request     *request;
1362        int                     status = 0;
1363
1364        if (!ep)
1365                return -EINVAL;
1366
            musb_ep = to_musb_ep(ep);
            epnum = musb_ep->current_epnum;
            musb = musb_ep->musb;
            epio = musb->endpoints[epnum].regs;
            mbase = musb->mregs;
1367
1368        spin_lock_irqsave(&musb->lock, flags);
1369
1370        if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) {
1371                status = -EINVAL;
1372                goto done;
1373        }
1374
1375        musb_ep_select(mbase, epnum);
1376
1377        request = next_request(musb_ep);
1378        if (value) {
1379                if (request) {
1380                        dev_dbg(musb->controller, "request in progress, cannot halt %s\n",
1381                            ep->name);
1382                        status = -EAGAIN;
1383                        goto done;
1384                }
1385                /* Cannot portably stall with non-empty FIFO */
1386                if (musb_ep->is_in) {
1387                        csr = musb_readw(epio, MUSB_TXCSR);
1388                        if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1389                                dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name);
1390                                status = -EAGAIN;
1391                                goto done;
1392                        }
1393                }
1394        } else
1395                musb_ep->wedged = 0;
1396
1397        /* set/clear the stall and toggle bits */
1398        dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear");
1399        if (musb_ep->is_in) {
1400                csr = musb_readw(epio, MUSB_TXCSR);
1401                csr |= MUSB_TXCSR_P_WZC_BITS
1402                        | MUSB_TXCSR_CLRDATATOG;
1403                if (value)
1404                        csr |= MUSB_TXCSR_P_SENDSTALL;
1405                else
1406                        csr &= ~(MUSB_TXCSR_P_SENDSTALL
1407                                | MUSB_TXCSR_P_SENTSTALL);
1408                csr &= ~MUSB_TXCSR_TXPKTRDY;
1409                musb_writew(epio, MUSB_TXCSR, csr);
1410        } else {
1411                csr = musb_readw(epio, MUSB_RXCSR);
1412                csr |= MUSB_RXCSR_P_WZC_BITS
1413                        | MUSB_RXCSR_FLUSHFIFO
1414                        | MUSB_RXCSR_CLRDATATOG;
1415                if (value)
1416                        csr |= MUSB_RXCSR_P_SENDSTALL;
1417                else
1418                        csr &= ~(MUSB_RXCSR_P_SENDSTALL
1419                                | MUSB_RXCSR_P_SENTSTALL);
1420                musb_writew(epio, MUSB_RXCSR, csr);
1421        }
1422
1423        /* maybe start the first request in the queue */
1424        if (!musb_ep->busy && !value && request) {
1425                dev_dbg(musb->controller, "restarting the request\n");
1426                musb_ep_restart(musb, request);
1427        }
1428
1429done:
1430        spin_unlock_irqrestore(&musb->lock, flags);
1431        return status;
1432}
1433
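    /*
     * Example (illustration only): a protocol stall from a function
     * driver, e.g. to reject an unsupported command.  As enforced
     * above, this fails with -EAGAIN while a request is in progress
     * or the TX FIFO still holds data.
     *
     *	status = usb_ep_set_halt(my_ep);
     *	...
     *	usb_ep_clear_halt(my_ep);	(also restarts a queued request)
     */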
1434/*
1435 * Set the halt feature, with host clear-halt requests ignored until
1436 * the gadget driver itself clears the halt.
1436 */
1437static int musb_gadget_set_wedge(struct usb_ep *ep)
1438{
1439        struct musb_ep          *musb_ep = to_musb_ep(ep);
1440
1441        if (!ep)
1442                return -EINVAL;
1443
1444        musb_ep->wedged = 1;
1445
1446        return usb_ep_set_halt(ep);
1447}
1448
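    /*
     * Example (illustration only): mass-storage style error handling
     * wedges an endpoint so that a host ClearFeature(ENDPOINT_HALT)
     * is ignored and only the function driver can unstall it:
     *
     *	usb_ep_set_wedge(my_ep);
     */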
1449static int musb_gadget_fifo_status(struct usb_ep *ep)
1450{
1451        struct musb_ep          *musb_ep = to_musb_ep(ep);
1452        void __iomem            *epio = musb_ep->hw_ep->regs;
1453        int                     retval = -EINVAL;
1454
1455        if (musb_ep->desc && !musb_ep->is_in) {
1456                struct musb             *musb = musb_ep->musb;
1457                int                     epnum = musb_ep->current_epnum;
1458                void __iomem            *mbase = musb->mregs;
1459                unsigned long           flags;
1460
1461                spin_lock_irqsave(&musb->lock, flags);
1462
1463                musb_ep_select(mbase, epnum);
1464                /* FIXME return zero unless RXPKTRDY is set */
1465                retval = musb_readw(epio, MUSB_RXCOUNT);
1466
1467                spin_unlock_irqrestore(&musb->lock, flags);
1468        }
1469        return retval;
1470}
1471
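    /*
     * Example (illustration only): checking for residual OUT data,
     * e.g. before tearing an endpoint down.  The return value is a
     * byte count, or a negative errno for IN or disabled endpoints.
     *
     *	int len = usb_ep_fifo_status(my_ep);
     *
     *	if (len > 0)
     *		usb_ep_fifo_flush(my_ep);
     */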
1472static void musb_gadget_fifo_flush(struct usb_ep *ep)
1473{
1474        struct musb_ep  *musb_ep = to_musb_ep(ep);
1475        struct musb     *musb = musb_ep->musb;
1476        u8              epnum = musb_ep->current_epnum;
1477        void __iomem    *epio = musb->endpoints[epnum].regs;
1478        void __iomem    *mbase;
1479        unsigned long   flags;
1480        u16             csr;
1481
1482        mbase = musb->mregs;
1483
1484        spin_lock_irqsave(&musb->lock, flags);
1485        musb_ep_select(mbase, (u8) epnum);
1486
1487        /* mask this endpoint's TX interrupt while flushing */
1488        musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe & ~(1 << epnum));
1489
1490        if (musb_ep->is_in) {
1491                csr = musb_readw(epio, MUSB_TXCSR);
1492                if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1493                        csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
1494                        /*
1495                         * Setting both TXPKTRDY and FLUSHFIFO makes the
1496                         * controller interrupt the FIFO load in progress
1497                         * without flushing already loaded packets, so
1498                         * TXPKTRDY is cleared first.
1499                         */
1499                        csr &= ~MUSB_TXCSR_TXPKTRDY;
1500                        musb_writew(epio, MUSB_TXCSR, csr);
1501                        /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1502                        musb_writew(epio, MUSB_TXCSR, csr);
1503                }
1504        } else {
1505                csr = musb_readw(epio, MUSB_RXCSR);
1506                csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
1507                musb_writew(epio, MUSB_RXCSR, csr);
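                    /*
                     * Write FLUSHFIFO twice: with double-buffered
                     * FIFOs, one flush appears to empty only a single
                     * packet buffer (the TX path above also writes
                     * CSR twice).
                     */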
1508                musb_writew(epio, MUSB_RXCSR, csr);
1509        }
1510
1511        /* re-enable interrupt */
1512        musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
1513        spin_unlock_irqrestore(&musb->lock, flags);
1514}
1515
1516static const struct usb_ep_ops musb_ep_ops = {
1517        .enable         = musb_gadget_enable,
1518        .disable        = musb_gadget_disable,
1519        .alloc_request  = musb_alloc_request,
1520        .free_request   = musb_free_request,
1521        .queue          = musb_gadget_queue,
1522        .dequeue        = musb_gadget_dequeue,
1523        .set_halt       = musb_gadget_set_halt,
1524        .set_wedge      = musb_gadget_set_wedge,
1525        .fifo_status    = musb_gadget_fifo_status,
1526        .fifo_flush     = musb_gadget_fifo_flush
1527};
1528
1529/* ----------------------------------------------------------------------- */
1530
1531static int musb_gadget_get_frame(struct usb_gadget *gadget)
1532{
1533        struct musb     *musb = gadget_to_musb(gadget);
1534
1535        return (int)musb_readw(musb->mregs, MUSB_FRAME);
1536}
1537
1538static int musb_gadget_wakeup(struct usb_gadget *gadget)
1539{
1540        struct musb     *musb = gadget_to_musb(gadget);
1541        void __iomem    *mregs = musb->mregs;
1542        unsigned long   flags;
1543        int             status = -EINVAL;
1544        u8              power, devctl;
1545        int             retries;
1546
1547        spin_lock_irqsave(&musb->lock, flags);
1548
1549        switch (musb->xceiv->otg->state) {
1550        case OTG_STATE_B_PERIPHERAL:
1551                /* NOTE:  OTG state machine doesn't include B_SUSPENDED;
1552                 * that's part of the standard usb 1.1 state machine, and
1553                 * doesn't affect OTG transitions.
1554                 */
1555                if (musb->may_wakeup && musb->is_suspended)
1556                        break;
1557                goto done;
1558        case OTG_STATE_B_IDLE:
1559                /* Start SRP ... OTG not required. */
1560                devctl = musb_readb(mregs, MUSB_DEVCTL);
1561                dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl);
1562                devctl |= MUSB_DEVCTL_SESSION;
1563                musb_writeb(mregs, MUSB_DEVCTL, devctl);
1564                devctl = musb_readb(mregs, MUSB_DEVCTL);
1565                retries = 100;
1566                while (!(devctl & MUSB_DEVCTL_SESSION)) {
1567                        devctl = musb_readb(mregs, MUSB_DEVCTL);
1568                        if (retries-- < 1)
1569                                break;
1570                }
1571                retries = 10000;
1572                while (devctl & MUSB_DEVCTL_SESSION) {
1573                        devctl = musb_readb(mregs, MUSB_DEVCTL);
1574                        if (retries-- < 1)
1575                                break;
1576                }
1577
1578                spin_unlock_irqrestore(&musb->lock, flags);
1579                otg_start_srp(musb->xceiv->otg);
1580                spin_lock_irqsave(&musb->lock, flags);
1581
1582                /* Block idling for at least 1s */
1583                musb_platform_try_idle(musb,
1584                        jiffies + msecs_to_jiffies(1000));
1585
1586                status = 0;
1587                goto done;
1588        default:
1589                dev_dbg(musb->controller, "Unhandled wake: %s\n",
1590                        usb_otg_state_string(musb->xceiv->otg->state));
1591                goto done;
1592        }
1593
1594        status = 0;
1595
1596        power = musb_readb(mregs, MUSB_POWER);
1597        power |= MUSB_POWER_RESUME;
1598        musb_writeb(mregs, MUSB_POWER, power);
1599        dev_dbg(musb->controller, "issue wakeup\n");
1600
1601        /* FIXME do this next chunk in a timer callback, no udelay */
1602        mdelay(2);
1603
1604        power = musb_readb(mregs, MUSB_POWER);
1605        power &= ~MUSB_POWER_RESUME;
1606        musb_writeb(mregs, MUSB_POWER, power);
1607done:
1608        spin_unlock_irqrestore(&musb->lock, flags);
1609        return status;
1610}
1611
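    /*
     * Example (illustration only): a function driver triggers remote
     * wakeup via usb_gadget_wakeup(), which ends up here.  Per the
     * checks above it only succeeds once the host has suspended the
     * bus with the remote-wakeup feature armed.
     *
     *	status = usb_gadget_wakeup(my_gadget);
     */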
1612static int
1613musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
1614{
1615        gadget->is_selfpowered = !!is_selfpowered;
1616        return 0;
1617}
1618
1619static void musb_pullup(struct musb *musb, int is_on)
1620{
1621        u8 power;
1622
1623        power = musb_readb(musb->mregs, MUSB_POWER);
1624        if (is_on)
1625                power |= MUSB_POWER_SOFTCONN;
1626        else
1627                power &= ~MUSB_POWER_SOFTCONN;
1628
1629        /* FIXME if on, HdrcStart; if off, HdrcStop */
1630
1631        dev_dbg(musb->controller, "gadget D+ pullup %s\n",
1632                is_on ? "on" : "off");
1633        musb_writeb(musb->mregs, MUSB_POWER, power);
1634}
1635
1636#if 0
1637static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
1638{
1639        struct musb     *musb = gadget_to_musb(gadget);

            dev_dbg(musb->controller, "<= %s =>\n", __func__);
1640
1641        /*
1642         * FIXME iff driver's softconnect flag is set (as it is during probe,
1643         * though that can clear it), just musb_pullup().
1644         */
1645
1646        return -EINVAL;
1647}
1648#endif
1649
1650static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1651{
1652        struct musb     *musb = gadget_to_musb(gadget);
1653
1654        if (!musb->xceiv->set_power)
1655                return -EOPNOTSUPP;
1656        return usb_phy_set_power(musb->xceiv, mA);
1657}
1658
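    /*
     * Note: the composite core calls vbus_draw itself during
     * SET_CONFIGURATION with the configuration's power budget; a
     * driver can also request a budget directly, e.g.:
     *
     *	usb_gadget_vbus_draw(my_gadget, 100);	(units are mA)
     */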
1659static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1660{
1661        struct musb     *musb = gadget_to_musb(gadget);
1662        unsigned long   flags;
1663
1664        is_on = !!is_on;
1665
1666        pm_runtime_get_sync(musb->controller);
1667
1668        /* NOTE: this assumes we are sensing vbus; we'd rather
1669         * not pullup unless the B-session is active.
1670         */
1671        spin_lock_irqsave(&musb->lock, flags);
1672        if (is_on != musb->softconnect) {
1673                musb->softconnect = is_on;
1674                musb_pullup(musb, is_on);
1675        }
1676        spin_unlock_irqrestore(&musb->lock, flags);
1677
1678        pm_runtime_put(musb->controller);
1679
1680        return 0;
1681}
1682
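    /*
     * Example (illustration only): gadget drivers toggle this D+
     * pullup through usb_gadget_connect()/usb_gadget_disconnect()
     * rather than touching MUSB registers:
     *
     *	usb_gadget_disconnect(my_gadget);	(soft-disconnect)
     *	...
     *	usb_gadget_connect(my_gadget);
     */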
1683#ifdef CONFIG_BLACKFIN
1684static struct usb_ep *musb_match_ep(struct usb_gadget *g,
1685                struct usb_endpoint_descriptor *desc,
1686                struct usb_ss_ep_comp_descriptor *ep_comp)
1687{
1688        struct usb_ep *ep = NULL;
1689
1690        switch (usb_endpoint_type(desc)) {
1691        case USB_ENDPOINT_XFER_ISOC:
1692        case USB_ENDPOINT_XFER_BULK:
1693                if (usb_endpoint_dir_in(desc))
1694                        ep = gadget_find_ep_by_name(g, "ep5in");
1695                else
1696                        ep = gadget_find_ep_by_name(g, "ep6out");
1697                break;
1698        case USB_ENDPOINT_XFER_INT:
1699                if (usb_endpoint_dir_in(desc))
1700                        ep = gadget_find_ep_by_name(g, "ep1in");
1701                else
1702                        ep = gadget_find_ep_by_name(g, "ep2out");
1703                break;
1704        default:
1705                break;
1706        }
1707
1708        if (ep && usb_gadget_ep_match_desc(g, ep, desc, ep_comp))
1709                return ep;
1710
1711        return NULL;
1712}
1713#else
1714#define musb_match_ep NULL
1715#endif
1716
1717static int musb_gadget_start(struct usb_gadget *g,
1718                struct usb_gadget_driver *driver);
1719static int musb_gadget_stop(struct usb_gadget *g);
1720
1721static const struct usb_gadget_ops musb_gadget_operations = {
1722        .get_frame              = musb_gadget_get_frame,
1723        .wakeup                 = musb_gadget_wakeup,
1724        .set_selfpowered        = musb_gadget_set_self_powered,
1725        /* .vbus_session                = musb_gadget_vbus_session, */
1726        .vbus_draw              = musb_gadget_vbus_draw,
1727        .pullup                 = musb_gadget_pullup,
1728        .udc_start              = musb_gadget_start,
1729        .udc_stop               = musb_gadget_stop,
1730        .match_ep               = musb_match_ep,
1731};
1732
1733/* ----------------------------------------------------------------------- */
1734
1735/* Registration */
1736
1737/* Only this registration code "knows" the rule (from USB standards)
1738 * about there being only one external upstream port.  It assumes
1739 * all peripheral ports are external...
1740 */
1741
1742static void
1743init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
1744{
1745        struct musb_hw_ep       *hw_ep = musb->endpoints + epnum;
1746
1747        memset(ep, 0, sizeof *ep);
1748
1749        ep->current_epnum = epnum;
1750        ep->musb = musb;
1751        ep->hw_ep = hw_ep;
1752        ep->is_in = is_in;
1753
1754        INIT_LIST_HEAD(&ep->req_list);
1755
1756        sprintf(ep->name, "ep%d%s", epnum,
1757                        (!epnum || hw_ep->is_shared_fifo) ? "" : (
1758                                is_in ? "in" : "out"));
1759        ep->end_point.name = ep->name;
1760        INIT_LIST_HEAD(&ep->end_point.ep_list);
1761        if (!epnum) {
1762                usb_ep_set_maxpacket_limit(&ep->end_point, 64);
1763                ep->end_point.caps.type_control = true;
1764                ep->end_point.ops = &musb_g_ep0_ops;
1765                musb->g.ep0 = &ep->end_point;
1766        } else {
1767                if (is_in)
1768                        usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_tx);
1769                else
1770                        usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_rx);
1771                ep->end_point.caps.type_iso = true;
1772                ep->end_point.caps.type_bulk = true;
1773                ep->end_point.caps.type_int = true;
1774                ep->end_point.ops = &musb_ep_ops;
1775                list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
1776        }
1777
1778        if (!epnum || hw_ep->is_shared_fifo) {
1779                ep->end_point.caps.dir_in = true;
1780                ep->end_point.caps.dir_out = true;
1781        } else if (is_in)
1782                ep->end_point.caps.dir_in = true;
1783        else
1784                ep->end_point.caps.dir_out = true;
1785}
1786
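    /*
     * Worked example of the naming above: a hw_ep 1 with separate
     * TX and RX FIFOs yields "ep1in" and "ep1out"; a shared-FIFO
     * hw_ep 2 yields a single "ep2" usable in one direction at a
     * time; endpoint 0 is always plain "ep0".
     */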
1787/*
1788 * Initialize the endpoints exposed to peripheral drivers, with backlinks
1789 * to the rest of the driver state.
1790 */
1791static inline void musb_g_init_endpoints(struct musb *musb)
1792{
1793        u8                      epnum;
1794        struct musb_hw_ep       *hw_ep;
1795        unsigned                count = 0;
1796
1797        /* initialize endpoint list just once */
1798        INIT_LIST_HEAD(&(musb->g.ep_list));
1799
1800        for (epnum = 0, hw_ep = musb->endpoints;
1801                        epnum < musb->nr_endpoints;
1802                        epnum++, hw_ep++) {
1803                if (hw_ep->is_shared_fifo /* || !epnum */) {
1804                        init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
1805                        count++;
1806                } else {
1807                        if (hw_ep->max_packet_sz_tx) {
1808                                init_peripheral_ep(musb, &hw_ep->ep_in,
1809                                                        epnum, 1);
1810                                count++;
1811                        }
1812                        if (hw_ep->max_packet_sz_rx) {
1813                                init_peripheral_ep(musb, &hw_ep->ep_out,
1814                                                        epnum, 0);
1815                                count++;
1816                        }
1817                }
1818        }
1819}
1820
1821/* called once during driver setup to initialize and link into
1822 * the driver model; memory is zeroed.
1823 */
1824int musb_gadget_setup(struct musb *musb)
1825{
1826        int status;
1827
1828        /* REVISIT minor race:  if (erroneously) setting up two
1829         * musb peripherals at the same time, only the bus lock
1830         * is probably held.
1831         */
1832
1833        musb->g.ops = &musb_gadget_operations;
1834        musb->g.max_speed = USB_SPEED_HIGH;
1835        musb->g.speed = USB_SPEED_UNKNOWN;
1836
1837        MUSB_DEV_MODE(musb);
1838        musb->xceiv->otg->default_a = 0;
1839        musb->xceiv->otg->state = OTG_STATE_B_IDLE;
1840
1841        /* this "gadget" abstracts/virtualizes the controller */
1842        musb->g.name = musb_driver_name;
1843#if IS_ENABLED(CONFIG_USB_MUSB_DUAL_ROLE)
1844        musb->g.is_otg = 1;
1845#elif IS_ENABLED(CONFIG_USB_MUSB_GADGET)
1846        musb->g.is_otg = 0;
1847#endif
1848
1849        musb_g_init_endpoints(musb);
1850
1851        musb->is_active = 0;
1852        musb_platform_try_idle(musb, 0);
1853
1854        status = usb_add_gadget_udc(musb->controller, &musb->g);
1855        if (status)
1856                goto err;
1857
1858        return 0;
1859err:
1860        musb->g.dev.parent = NULL;
1861        device_unregister(&musb->g.dev);
1862        return status;
1863}
1864
1865void musb_gadget_cleanup(struct musb *musb)
1866{
1867        if (musb->port_mode == MUSB_PORT_MODE_HOST)
1868                return;
1869        usb_del_gadget_udc(&musb->g);
1870}
1871
1872/*
1873 * Register the gadget driver. Called by the UDC core when a gadget
1874 * driver binds itself to this controller.
1875 *
1876 * -EINVAL the driver is invalid (e.g. it does not support high speed)
1877 * -EBUSY another gadget is already using the controller
1878 * -ENOMEM no memory to perform the operation
1879 *
1880 * @param driver the gadget driver
1881 * @return <0 if error, 0 if everything is fine
1882 */
1883static int musb_gadget_start(struct usb_gadget *g,
1884                struct usb_gadget_driver *driver)
1885{
1886        struct musb             *musb = gadget_to_musb(g);
1887        struct usb_otg          *otg = musb->xceiv->otg;
1888        unsigned long           flags;
1889        int                     retval = 0;
1890
1891        if (driver->max_speed < USB_SPEED_HIGH) {
1892                retval = -EINVAL;
1893                goto err;
1894        }
1895
1896        pm_runtime_get_sync(musb->controller);
1897
1898        musb->softconnect = 0;
1899        musb->gadget_driver = driver;
1900
1901        spin_lock_irqsave(&musb->lock, flags);
1902        musb->is_active = 1;
1903
1904        otg_set_peripheral(otg, &musb->g);
1905        musb->xceiv->otg->state = OTG_STATE_B_IDLE;
1906        spin_unlock_irqrestore(&musb->lock, flags);
1907
1908        musb_start(musb);
1909
1910        /* REVISIT:  funcall to other code, which also
1911         * handles power budgeting ... this way also
1912         * ensures HdrcStart is indirectly called.
1913         */
1914        if (musb->xceiv->last_event == USB_EVENT_ID)
1915                musb_platform_set_vbus(musb, 1);
1916
1917        if (musb->xceiv->last_event == USB_EVENT_NONE)
1918                pm_runtime_put(musb->controller);
1919
1920        return 0;
1921
1922err:
1923        return retval;
1924}
1925
1926/*
1927 * Unregister the gadget driver. Called by the UDC core when a gadget
1928 * driver unbinds from this controller.
1929 *
1930 * @param driver the gadget driver to unregister
1931 */
1932static int musb_gadget_stop(struct usb_gadget *g)
1933{
1934        struct musb     *musb = gadget_to_musb(g);
1935        unsigned long   flags;
1936
1937        if (musb->xceiv->last_event == USB_EVENT_NONE)
1938                pm_runtime_get_sync(musb->controller);
1939
1940        /*
1941         * REVISIT always use otg_set_peripheral() here too;
1942         * this needs to shut down the OTG engine.
1943         */
1944
1945        spin_lock_irqsave(&musb->lock, flags);
1946
1947        musb_hnp_stop(musb);
1948
1949        (void) musb_gadget_vbus_draw(&musb->g, 0);
1950
1951        musb->xceiv->otg->state = OTG_STATE_UNDEFINED;
1952        musb_stop(musb);
1953        otg_set_peripheral(musb->xceiv->otg, NULL);
1954
1955        musb->is_active = 0;
1956        musb->gadget_driver = NULL;
1957        musb_platform_try_idle(musb, 0);
1958        spin_unlock_irqrestore(&musb->lock, flags);
1959
1960        /*
1961         * FIXME we need to be able to register another
1962         * gadget driver here and have everything work;
1963         * that currently misbehaves.
1964         */
1965
1966        pm_runtime_put(musb->controller);
1967
1968        return 0;
1969}
1970
1971/* ----------------------------------------------------------------------- */
1972
1973/* lifecycle operations called through plat_uds.c */
1974
1975void musb_g_resume(struct musb *musb)
1976{
1977        musb->is_suspended = 0;
1978        switch (musb->xceiv->otg->state) {
1979        case OTG_STATE_B_IDLE:
1980                break;
1981        case OTG_STATE_B_WAIT_ACON:
1982        case OTG_STATE_B_PERIPHERAL:
1983                musb->is_active = 1;
1984                if (musb->gadget_driver && musb->gadget_driver->resume) {
1985                        spin_unlock(&musb->lock);
1986                        musb->gadget_driver->resume(&musb->g);
1987                        spin_lock(&musb->lock);
1988                }
1989                break;
1990        default:
1991                WARNING("unhandled RESUME transition (%s)\n",
1992                                usb_otg_state_string(musb->xceiv->otg->state));
1993        }
1994}
1995
1996/* called when SOF packets stop for 3+ msec */
1997void musb_g_suspend(struct musb *musb)
1998{
1999        u8      devctl;
2000
2001        devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2002        dev_dbg(musb->controller, "devctl %02x\n", devctl);
2003
2004        switch (musb->xceiv->otg->state) {
2005        case OTG_STATE_B_IDLE:
2006                if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
2007                        musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
2008                break;
2009        case OTG_STATE_B_PERIPHERAL:
2010                musb->is_suspended = 1;
2011                if (musb->gadget_driver && musb->gadget_driver->suspend) {
2012                        spin_unlock(&musb->lock);
2013                        musb->gadget_driver->suspend(&musb->g);
2014                        spin_lock(&musb->lock);
2015                }
2016                break;
2017        default:
2018                /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
2019                 * A_PERIPHERAL may need care too
2020                 */
2021                WARNING("unhandled SUSPEND transition (%s)\n",
2022                                usb_otg_state_string(musb->xceiv->otg->state));
2023        }
2024}
2025
2026/* Called during SRP */
2027void musb_g_wakeup(struct musb *musb)
2028{
2029        musb_gadget_wakeup(&musb->g);
2030}
2031
2032/* called when VBUS drops below session threshold, and in other cases */
2033void musb_g_disconnect(struct musb *musb)
2034{
2035        void __iomem    *mregs = musb->mregs;
2036        u8      devctl = musb_readb(mregs, MUSB_DEVCTL);
2037
2038        dev_dbg(musb->controller, "devctl %02x\n", devctl);
2039
2040        /* clear HR */
2041        musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
2042
2043        /* don't draw vbus until new b-default session */
2044        (void) musb_gadget_vbus_draw(&musb->g, 0);
2045
2046        musb->g.speed = USB_SPEED_UNKNOWN;
2047        if (musb->gadget_driver && musb->gadget_driver->disconnect) {
2048                spin_unlock(&musb->lock);
2049                musb->gadget_driver->disconnect(&musb->g);
2050                spin_lock(&musb->lock);
2051        }
2052
2053        switch (musb->xceiv->otg->state) {
2054        default:
2055                dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
2056                        usb_otg_state_string(musb->xceiv->otg->state));
2057                musb->xceiv->otg->state = OTG_STATE_A_IDLE;
2058                MUSB_HST_MODE(musb);
2059                break;
2060        case OTG_STATE_A_PERIPHERAL:
2061                musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
2062                MUSB_HST_MODE(musb);
2063                break;
2064        case OTG_STATE_B_WAIT_ACON:
2065        case OTG_STATE_B_HOST:
2066        case OTG_STATE_B_PERIPHERAL:
2067        case OTG_STATE_B_IDLE:
2068                musb->xceiv->otg->state = OTG_STATE_B_IDLE;
2069                break;
2070        case OTG_STATE_B_SRP_INIT:
2071                break;
2072        }
2073
2074        musb->is_active = 0;
2075}
2076
2077void musb_g_reset(struct musb *musb)
2078__releases(musb->lock)
2079__acquires(musb->lock)
2080{
2081        void __iomem    *mbase = musb->mregs;
2082        u8              devctl = musb_readb(mbase, MUSB_DEVCTL);
2083        u8              power;
2084
2085        dev_dbg(musb->controller, "<== %s driver '%s'\n",
2086                        (devctl & MUSB_DEVCTL_BDEVICE)
2087                                ? "B-Device" : "A-Device",
2088                        musb->gadget_driver
2089                                ? musb->gadget_driver->driver.name
2090                                : NULL
2091                        );
2092
2093        /* report reset, if we didn't already (flushing EP state) */
2094        if (musb->gadget_driver && musb->g.speed != USB_SPEED_UNKNOWN) {
2095                spin_unlock(&musb->lock);
2096                usb_gadget_udc_reset(&musb->g, musb->gadget_driver);
2097                spin_lock(&musb->lock);
2098        }
2099
2100        /* clear HR */
2101        else if (devctl & MUSB_DEVCTL_HR)
2102                musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
2103
2104
2105        /* what speed did we negotiate? */
2106        power = musb_readb(mbase, MUSB_POWER);
2107        musb->g.speed = (power & MUSB_POWER_HSMODE)
2108                        ? USB_SPEED_HIGH : USB_SPEED_FULL;
2109
2110        /* start in USB_STATE_DEFAULT */
2111        musb->is_active = 1;
2112        musb->is_suspended = 0;
2113        MUSB_DEV_MODE(musb);
2114        musb->address = 0;
2115        musb->ep0_state = MUSB_EP0_STAGE_SETUP;
2116
2117        musb->may_wakeup = 0;
2118        musb->g.b_hnp_enable = 0;
2119        musb->g.a_alt_hnp_support = 0;
2120        musb->g.a_hnp_support = 0;
2121        musb->g.quirk_zlp_not_supp = 1;
2122
2123        /* Normal reset, as B-Device;
2124         * or else after HNP, as A-Device
2125         */
2126        if (!musb->g.is_otg) {
2127                /* USB device controllers that are not OTG compatible
2128                 * may not have DEVCTL register in silicon.
2129                 * In that case, do not rely on devctl for setting
2130                 * peripheral mode.
2131                 */
2132                musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
2133                musb->g.is_a_peripheral = 0;
2134        } else if (devctl & MUSB_DEVCTL_BDEVICE) {
2135                musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
2136                musb->g.is_a_peripheral = 0;
2137        } else {
2138                musb->xceiv->otg->state = OTG_STATE_A_PERIPHERAL;
2139                musb->g.is_a_peripheral = 1;
2140        }
2141
2142        /* start with default limits on VBUS power draw */
2143        (void) musb_gadget_vbus_draw(&musb->g, 8);
2144}
2145