/* linux/drivers/usb/gadget/udc/mv_udc_core.c */
/*
 * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
 * Author: Chao Xie <chao.xie@marvell.com>
 *         Neil Zhang <zhangwm@marvell.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */
  11
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/pm.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/platform_data/mv_usb.h>
#include <asm/unaligned.h>

#include "mv_udc.h"
  40
#define DRIVER_DESC		"Marvell PXA USB Device Controller driver"
#define DRIVER_VERSION		"8 Nov 2010"

/*
 * Direction of an endpoint: ep0 follows the direction of the current
 * control transfer (tracked in udc->ep0_dir); every other endpoint has
 * a fixed direction.
 */
#define ep_dir(ep)	(((ep)->ep_num == 0) ? \
				((ep)->udc->ep0_dir) : ((ep)->direction))

/* timeout value -- usec */
#define RESET_TIMEOUT		10000
#define FLUSH_TIMEOUT		10000
#define EPSTATUS_TIMEOUT	10000
#define PRIME_TIMEOUT		10000
#define READSAFE_TIMEOUT	1000

/* each polling iteration of a LOOPS() loop waits LOOPS_USEC usec */
#define LOOPS_USEC_SHIFT	1
#define LOOPS_USEC		(1 << LOOPS_USEC_SHIFT)
/* convert a usec timeout into the matching number of poll iterations */
#define LOOPS(timeout)		((timeout) >> LOOPS_USEC_SHIFT)

/* completed when the gadget device's release() callback has run */
static DECLARE_COMPLETION(release_done);

static const char driver_name[] = "mv_udc";
static const char driver_desc[] = DRIVER_DESC;

/* forward declarations for helpers defined later in this file */
static void nuke(struct mv_ep *ep, int status);
static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);
  65
/* for endpoint 0 operations */
static const struct usb_endpoint_descriptor mv_ep0_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	/* address/direction for ep0 is decided per transfer, not here */
	.bEndpointAddress =	0,
	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize =	EP0_MAX_PKT_SIZE,
};
  74
  75static void ep0_reset(struct mv_udc *udc)
  76{
  77        struct mv_ep *ep;
  78        u32 epctrlx;
  79        int i = 0;
  80
  81        /* ep0 in and out */
  82        for (i = 0; i < 2; i++) {
  83                ep = &udc->eps[i];
  84                ep->udc = udc;
  85
  86                /* ep0 dQH */
  87                ep->dqh = &udc->ep_dqh[i];
  88
  89                /* configure ep0 endpoint capabilities in dQH */
  90                ep->dqh->max_packet_length =
  91                        (EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
  92                        | EP_QUEUE_HEAD_IOS;
  93
  94                ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;
  95
  96                epctrlx = readl(&udc->op_regs->epctrlx[0]);
  97                if (i) {        /* TX */
  98                        epctrlx |= EPCTRL_TX_ENABLE
  99                                | (USB_ENDPOINT_XFER_CONTROL
 100                                        << EPCTRL_TX_EP_TYPE_SHIFT);
 101
 102                } else {        /* RX */
 103                        epctrlx |= EPCTRL_RX_ENABLE
 104                                | (USB_ENDPOINT_XFER_CONTROL
 105                                        << EPCTRL_RX_EP_TYPE_SHIFT);
 106                }
 107
 108                writel(epctrlx, &udc->op_regs->epctrlx[0]);
 109        }
 110}
 111
 112/* protocol ep0 stall, will automatically be cleared on new transaction */
 113static void ep0_stall(struct mv_udc *udc)
 114{
 115        u32     epctrlx;
 116
 117        /* set TX and RX to stall */
 118        epctrlx = readl(&udc->op_regs->epctrlx[0]);
 119        epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
 120        writel(epctrlx, &udc->op_regs->epctrlx[0]);
 121
 122        /* update ep0 state */
 123        udc->ep0_state = WAIT_FOR_SETUP;
 124        udc->ep0_dir = EP_DIR_OUT;
 125}
 126
/*
 * process_ep_req() - reap a completed (or failed) request from an endpoint
 * @udc:	the controller
 * @index:	queue-head index (ep_num * 2 + direction; odd = TX/IN)
 * @curr_req:	request whose dTD chain is examined
 *
 * Walks the request's dTD list checking hardware completion status.
 * Returns 1 if the transfer is still in progress, 0 on success (with
 * curr_req->req.actual updated to the byte count transferred), or a
 * negative errno describing the hardware error.
 */
static int process_ep_req(struct mv_udc *udc, int index,
	struct mv_req *curr_req)
{
	struct mv_dtd	*curr_dtd;
	struct mv_dqh	*curr_dqh;
	int actual, remaining_length;
	int i, direction;
	int retval = 0;
	u32 errors;
	u32 bit_pos;

	curr_dqh = &udc->ep_dqh[index];
	direction = index % 2;	/* odd indexes are the TX (IN) halves */

	curr_dtd = curr_req->head;
	/* start from the requested length, subtract what was not sent */
	actual = curr_req->req.length;

	for (i = 0; i < curr_req->dtd_count; i++) {
		/* hardware still owns this dTD: nothing to reap yet */
		if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
			dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
				udc->eps[index].name);
			return 1;
		}

		errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
		if (!errors) {
			remaining_length =
				(curr_dtd->size_ioc_sts	& DTD_PACKET_SIZE)
					>> DTD_LENGTH_BIT_POS;
			actual -= remaining_length;

			if (remaining_length) {
				/*
				 * A short TX is a protocol error; a short
				 * RX simply ends the transfer early.
				 */
				if (direction) {
					dev_dbg(&udc->dev->dev,
						"TX dTD remains data\n");
					retval = -EPROTO;
					break;
				} else
					break;
			}
		} else {
			dev_info(&udc->dev->dev,
				"complete_tr error: ep=%d %s: error = 0x%x\n",
				index >> 1, direction ? "SEND" : "RECV",
				errors);
			if (errors & DTD_STATUS_HALTED) {
				/* Clear the errors and Halt condition */
				curr_dqh->size_ioc_int_sts &= ~errors;
				retval = -EPIPE;
			} else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
				retval = -EPROTO;
			} else if (errors & DTD_STATUS_TRANSACTION_ERR) {
				retval = -EILSEQ;
			}
		}
		/* the last dTD's next_dtd_virt is not valid; don't follow it */
		if (i != curr_req->dtd_count - 1)
			curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
	}
	if (retval)
		return retval;

	if (direction == EP_DIR_OUT)
		bit_pos = 1 << curr_req->ep->ep_num;
	else
		bit_pos = 1 << (16 + curr_req->ep->ep_num);

	/*
	 * Wait until the queue head has moved past the dTD just reaped.
	 * NOTE(review): these polls have no timeout and could spin
	 * indefinitely if the hardware wedges -- confirm acceptable.
	 */
	while ((curr_dqh->curr_dtd_ptr == curr_dtd->td_dma)) {
		if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
			while (readl(&udc->op_regs->epstatus) & bit_pos)
				udelay(1);
			break;
		}
		udelay(1);
	}

	curr_req->req.actual = actual;

	return 0;
}
 206
/*
 * done() - retire a request; caller blocked irqs
 * @status : request status to be set, only works when
 * request is still in progress.
 *
 * Unlinks the request, frees its dTD chain back to the dma_pool,
 * unmaps the DMA buffer and gives the request back to the gadget
 * driver.  The udc lock is dropped around the completion callback,
 * so the callback may legally requeue.
 */
static void done(struct mv_ep *ep, struct mv_req *req, int status)
	__releases(&ep->udc->lock)
	__acquires(&ep->udc->lock)
{
	struct mv_udc *udc = NULL;
	unsigned char stopped = ep->stopped;
	struct mv_dtd *curr_td, *next_td;
	int j;

	udc = (struct mv_udc *)ep->udc;
	/* Removed the req from fsl_ep->queue */
	list_del_init(&req->queue);

	/* req.status should be set as -EINPROGRESS in ep_queue() */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	/* Free dtd for the request */
	next_td = req->head;
	for (j = 0; j < req->dtd_count; j++) {
		curr_td = next_td;
		/* the last dTD's next_dtd_virt is not valid; don't read it */
		if (j != req->dtd_count - 1)
			next_td = curr_td->next_dtd_virt;
		dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
	}

	usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));

	/* only log abnormal completions (and not expected shutdowns) */
	if (status && (status != -ESHUTDOWN))
		dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* mark the ep stopped while the completion callback runs */
	ep->stopped = 1;

	spin_unlock(&ep->udc->lock);

	usb_gadget_giveback_request(&ep->ep, &req->req);

	spin_lock(&ep->udc->lock);
	/* restore whatever stopped state we entered with */
	ep->stopped = stopped;
}
 256
/*
 * queue_dtd() - hand a request's dTD chain to the hardware
 * @ep:		target endpoint
 * @req:	request whose dTD chain (req->head..req->tail) is ready
 *
 * If other requests are already queued the new chain is appended to
 * the previous tail, and the endpoint is only re-primed when the
 * ATDTW tripwire protocol shows the hardware has actually stopped.
 * Otherwise the queue head is pointed at the new chain and primed
 * directly.  Caller must hold the udc lock.
 */
static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 bit_pos, direction;
	u32 usbcmd, epstatus;
	unsigned int loops;
	int retval = 0;

	udc = ep->udc;
	direction = ep_dir(ep);
	/* queue heads are laid out as ep_num * 2 + direction */
	dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
	/* prime/status registers: RX bits 0..15, TX bits 16..31 */
	bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);

	/* check if the pipe is empty */
	if (!(list_empty(&ep->queue))) {
		struct mv_req *lastreq;
		/* append the new chain behind the current tail dTD */
		lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
		lastreq->tail->dtd_next =
			req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;

		/* link must be visible before the hardware looks again */
		wmb();

		/* already primed: hardware will follow the new link */
		if (readl(&udc->op_regs->epprime) & bit_pos)
			goto done;

		loops = LOOPS(READSAFE_TIMEOUT);
		while (1) {
			/* start with setting the semaphores */
			usbcmd = readl(&udc->op_regs->usbcmd);
			usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
			writel(usbcmd, &udc->op_regs->usbcmd);

			/* read the endpoint status */
			epstatus = readl(&udc->op_regs->epstatus) & bit_pos;

			/*
			 * Reread the ATDTW semaphore bit to check if it is
			 * cleared. When hardware see a hazard, it will clear
			 * the bit or else we remain set to 1 and we can
			 * proceed with priming of endpoint if not already
			 * primed.
			 */
			if (readl(&udc->op_regs->usbcmd)
				& USBCMD_ATDTW_TRIPWIRE_SET)
				break;

			loops--;
			if (loops == 0) {
				dev_err(&udc->dev->dev,
					"Timeout for ATDTW_TRIPWIRE...\n");
				retval = -ETIME;
				goto done;
			}
			udelay(LOOPS_USEC);
		}

		/* Clear the semaphore */
		usbcmd = readl(&udc->op_regs->usbcmd);
		usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
		writel(usbcmd, &udc->op_regs->usbcmd);

		/* endpoint still running the old chain: no re-prime needed */
		if (epstatus)
			goto done;
	}

	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
				& EP_QUEUE_HEAD_NEXT_POINTER_MASK;

	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);

	/* Ensure that updates to the QH will occur before priming. */
	wmb();

	/* Prime the Endpoint */
	writel(bit_pos, &udc->op_regs->epprime);

done:
	return retval;
}
 339
/*
 * build_dtd() - allocate and fill one dTD covering part of a request
 * @req:	request being segmented
 * @length:	out: number of bytes this dTD covers
 * @dma:	out: DMA address of the new dTD
 * @is_last:	out: nonzero if this dTD completes the request
 *
 * Advances req->req.actual by the chunk size.  Returns the new dTD,
 * or NULL on dma_pool allocation failure.
 */
static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
		dma_addr_t *dma, int *is_last)
{
	struct mv_dtd *dtd;
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 temp, mult = 0;

	/* how big will this transfer be? */
	if (usb_endpoint_xfer_isoc(req->ep->ep.desc)) {
		/* iso: at most 'mult' max-size packets per dTD */
		dqh = req->ep->dqh;
		mult = (dqh->max_packet_length >> EP_QUEUE_HEAD_MULT_POS)
				& 0x3;
		*length = min(req->req.length - req->req.actual,
				(unsigned)(mult * req->ep->ep.maxpacket));
	} else
		*length = min(req->req.length - req->req.actual,
				(unsigned)EP_MAX_LENGTH_TRANSFER);

	udc = req->ep->udc;

	/*
	 * Be careful that no _GFP_HIGHMEM is set,
	 * or we can not use dma_to_virt
	 */
	dtd = dma_pool_alloc(udc->dtd_pool, GFP_ATOMIC, dma);
	if (dtd == NULL)
		return dtd;

	dtd->td_dma = *dma;
	/* initialize buffer page pointers */
	temp = (u32)(req->req.dma + req->req.actual);
	dtd->buff_ptr0 = cpu_to_le32(temp);
	/* remaining pointers cover the next four 4KB pages */
	temp &= ~0xFFF;
	dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
	dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
	dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
	dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);

	req->req.actual += *length;

	/* zlp is needed if req->req.zero is set */
	if (req->req.zero) {
		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
			*is_last = 1;
		else
			*is_last = 0;
	} else if (req->req.length == req->req.actual)
		*is_last = 1;
	else
		*is_last = 0;

	/* Fill in the transfer size; set active bit */
	temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);

	/* Enable interrupt for the last dtd of a request */
	if (*is_last && !req->req.no_interrupt)
		temp |= DTD_IOC;

	/* multiplier field for high-bandwidth iso (bits 11:10) */
	temp |= mult << 10;

	dtd->size_ioc_sts = temp;

	/* descriptor must be fully written before hardware may see it */
	mb();

	return dtd;
}
 407
 408/* generate dTD linked list for a request */
 409static int req_to_dtd(struct mv_req *req)
 410{
 411        unsigned count;
 412        int is_last, is_first = 1;
 413        struct mv_dtd *dtd, *last_dtd = NULL;
 414        dma_addr_t dma;
 415
 416        do {
 417                dtd = build_dtd(req, &count, &dma, &is_last);
 418                if (dtd == NULL)
 419                        return -ENOMEM;
 420
 421                if (is_first) {
 422                        is_first = 0;
 423                        req->head = dtd;
 424                } else {
 425                        last_dtd->dtd_next = dma;
 426                        last_dtd->next_dtd_virt = dtd;
 427                }
 428                last_dtd = dtd;
 429                req->dtd_count++;
 430        } while (!is_last);
 431
 432        /* set terminate bit to 1 for the last dTD */
 433        dtd->dtd_next = DTD_NEXT_TERMINATE;
 434
 435        req->tail = dtd;
 436
 437        return 0;
 438}
 439
 440static int mv_ep_enable(struct usb_ep *_ep,
 441                const struct usb_endpoint_descriptor *desc)
 442{
 443        struct mv_udc *udc;
 444        struct mv_ep *ep;
 445        struct mv_dqh *dqh;
 446        u16 max = 0;
 447        u32 bit_pos, epctrlx, direction;
 448        unsigned char zlt = 0, ios = 0, mult = 0;
 449        unsigned long flags;
 450
 451        ep = container_of(_ep, struct mv_ep, ep);
 452        udc = ep->udc;
 453
 454        if (!_ep || !desc
 455                        || desc->bDescriptorType != USB_DT_ENDPOINT)
 456                return -EINVAL;
 457
 458        if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
 459                return -ESHUTDOWN;
 460
 461        direction = ep_dir(ep);
 462        max = usb_endpoint_maxp(desc);
 463
 464        /*
 465         * disable HW zero length termination select
 466         * driver handles zero length packet through req->req.zero
 467         */
 468        zlt = 1;
 469
 470        bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
 471
 472        /* Check if the Endpoint is Primed */
 473        if ((readl(&udc->op_regs->epprime) & bit_pos)
 474                || (readl(&udc->op_regs->epstatus) & bit_pos)) {
 475                dev_info(&udc->dev->dev,
 476                        "ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
 477                        " ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
 478                        (unsigned)ep->ep_num, direction ? "SEND" : "RECV",
 479                        (unsigned)readl(&udc->op_regs->epprime),
 480                        (unsigned)readl(&udc->op_regs->epstatus),
 481                        (unsigned)bit_pos);
 482                goto en_done;
 483        }
 484        /* Set the max packet length, interrupt on Setup and Mult fields */
 485        switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
 486        case USB_ENDPOINT_XFER_BULK:
 487                zlt = 1;
 488                mult = 0;
 489                break;
 490        case USB_ENDPOINT_XFER_CONTROL:
 491                ios = 1;
 492        case USB_ENDPOINT_XFER_INT:
 493                mult = 0;
 494                break;
 495        case USB_ENDPOINT_XFER_ISOC:
 496                /* Calculate transactions needed for high bandwidth iso */
 497                mult = (unsigned char)(1 + ((max >> 11) & 0x03));
 498                max = max & 0x7ff;      /* bit 0~10 */
 499                /* 3 transactions at most */
 500                if (mult > 3)
 501                        goto en_done;
 502                break;
 503        default:
 504                goto en_done;
 505        }
 506
 507        spin_lock_irqsave(&udc->lock, flags);
 508        /* Get the endpoint queue head address */
 509        dqh = ep->dqh;
 510        dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
 511                | (mult << EP_QUEUE_HEAD_MULT_POS)
 512                | (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
 513                | (ios ? EP_QUEUE_HEAD_IOS : 0);
 514        dqh->next_dtd_ptr = 1;
 515        dqh->size_ioc_int_sts = 0;
 516
 517        ep->ep.maxpacket = max;
 518        ep->ep.desc = desc;
 519        ep->stopped = 0;
 520
 521        /* Enable the endpoint for Rx or Tx and set the endpoint type */
 522        epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
 523        if (direction == EP_DIR_IN) {
 524                epctrlx &= ~EPCTRL_TX_ALL_MASK;
 525                epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
 526                        | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
 527                                << EPCTRL_TX_EP_TYPE_SHIFT);
 528        } else {
 529                epctrlx &= ~EPCTRL_RX_ALL_MASK;
 530                epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
 531                        | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
 532                                << EPCTRL_RX_EP_TYPE_SHIFT);
 533        }
 534        writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
 535
 536        /*
 537         * Implement Guideline (GL# USB-7) The unused endpoint type must
 538         * be programmed to bulk.
 539         */
 540        epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
 541        if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
 542                epctrlx |= (USB_ENDPOINT_XFER_BULK
 543                                << EPCTRL_RX_EP_TYPE_SHIFT);
 544                writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
 545        }
 546
 547        epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
 548        if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
 549                epctrlx |= (USB_ENDPOINT_XFER_BULK
 550                                << EPCTRL_TX_EP_TYPE_SHIFT);
 551                writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
 552        }
 553
 554        spin_unlock_irqrestore(&udc->lock, flags);
 555
 556        return 0;
 557en_done:
 558        return -EINVAL;
 559}
 560
 561static int  mv_ep_disable(struct usb_ep *_ep)
 562{
 563        struct mv_udc *udc;
 564        struct mv_ep *ep;
 565        struct mv_dqh *dqh;
 566        u32 epctrlx, direction;
 567        unsigned long flags;
 568
 569        ep = container_of(_ep, struct mv_ep, ep);
 570        if ((_ep == NULL) || !ep->ep.desc)
 571                return -EINVAL;
 572
 573        udc = ep->udc;
 574
 575        /* Get the endpoint queue head address */
 576        dqh = ep->dqh;
 577
 578        spin_lock_irqsave(&udc->lock, flags);
 579
 580        direction = ep_dir(ep);
 581
 582        /* Reset the max packet length and the interrupt on Setup */
 583        dqh->max_packet_length = 0;
 584
 585        /* Disable the endpoint for Rx or Tx and reset the endpoint type */
 586        epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
 587        epctrlx &= ~((direction == EP_DIR_IN)
 588                        ? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
 589                        : (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
 590        writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
 591
 592        /* nuke all pending requests (does flush) */
 593        nuke(ep, -ESHUTDOWN);
 594
 595        ep->ep.desc = NULL;
 596        ep->stopped = 1;
 597
 598        spin_unlock_irqrestore(&udc->lock, flags);
 599
 600        return 0;
 601}
 602
 603static struct usb_request *
 604mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
 605{
 606        struct mv_req *req = NULL;
 607
 608        req = kzalloc(sizeof *req, gfp_flags);
 609        if (!req)
 610                return NULL;
 611
 612        req->req.dma = DMA_ADDR_INVALID;
 613        INIT_LIST_HEAD(&req->queue);
 614
 615        return &req->req;
 616}
 617
 618static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
 619{
 620        struct mv_req *req = NULL;
 621
 622        req = container_of(_req, struct mv_req, req);
 623
 624        if (_req)
 625                kfree(req);
 626}
 627
/*
 * mv_ep_fifo_flush() - flush an endpoint's primed/queued transfers
 * @_ep:	endpoint to flush (ep0 flushes both directions)
 *
 * Writes the endpoint's bit(s) to ENDPTFLUSH and waits, with bounded
 * polling, until the flush completes and the endpoint status bit
 * clears.  Logs and gives up on timeout.
 */
static void mv_ep_fifo_flush(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	u32 bit_pos, direction;
	struct mv_ep *ep;
	unsigned int loops;

	if (!_ep)
		return;

	ep = container_of(_ep, struct mv_ep, ep);
	if (!ep->ep.desc)
		return;

	udc = ep->udc;
	direction = ep_dir(ep);

	/* ep0 flushes both RX (bit 0) and TX (bit 16) */
	if (ep->ep_num == 0)
		bit_pos = (1 << 16) | 1;
	else if (direction == EP_DIR_OUT)
		bit_pos = 1 << ep->ep_num;
	else
		bit_pos = 1 << (16 + ep->ep_num);

	loops = LOOPS(EPSTATUS_TIMEOUT);
	do {
		unsigned int inter_loops;

		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
				(unsigned)readl(&udc->op_regs->epstatus),
				(unsigned)bit_pos);
			return;
		}
		/* Write 1 to the Flush register */
		writel(bit_pos, &udc->op_regs->epflush);

		/* Wait until flushing completed */
		inter_loops = LOOPS(FLUSH_TIMEOUT);
		while (readl(&udc->op_regs->epflush)) {
			/*
			 * ENDPTFLUSH bit should be cleared to indicate this
			 * operation is complete
			 */
			if (inter_loops == 0) {
				dev_err(&udc->dev->dev,
					"TIMEOUT for ENDPTFLUSH=0x%x,"
					"bit_pos=0x%x\n",
					(unsigned)readl(&udc->op_regs->epflush),
					(unsigned)bit_pos);
				return;
			}
			inter_loops--;
			udelay(LOOPS_USEC);
		}
		loops--;
		/* endpoint may have re-primed meanwhile: flush again */
	} while (readl(&udc->op_regs->epstatus) & bit_pos);
}
 687
 688/* queues (submits) an I/O request to an endpoint */
 689static int
 690mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
 691{
 692        struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
 693        struct mv_req *req = container_of(_req, struct mv_req, req);
 694        struct mv_udc *udc = ep->udc;
 695        unsigned long flags;
 696        int retval;
 697
 698        /* catch various bogus parameters */
 699        if (!_req || !req->req.complete || !req->req.buf
 700                        || !list_empty(&req->queue)) {
 701                dev_err(&udc->dev->dev, "%s, bad params", __func__);
 702                return -EINVAL;
 703        }
 704        if (unlikely(!_ep || !ep->ep.desc)) {
 705                dev_err(&udc->dev->dev, "%s, bad ep", __func__);
 706                return -EINVAL;
 707        }
 708
 709        udc = ep->udc;
 710        if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
 711                return -ESHUTDOWN;
 712
 713        req->ep = ep;
 714
 715        /* map virtual address to hardware */
 716        retval = usb_gadget_map_request(&udc->gadget, _req, ep_dir(ep));
 717        if (retval)
 718                return retval;
 719
 720        req->req.status = -EINPROGRESS;
 721        req->req.actual = 0;
 722        req->dtd_count = 0;
 723
 724        spin_lock_irqsave(&udc->lock, flags);
 725
 726        /* build dtds and push them to device queue */
 727        if (!req_to_dtd(req)) {
 728                retval = queue_dtd(ep, req);
 729                if (retval) {
 730                        spin_unlock_irqrestore(&udc->lock, flags);
 731                        dev_err(&udc->dev->dev, "Failed to queue dtd\n");
 732                        goto err_unmap_dma;
 733                }
 734        } else {
 735                spin_unlock_irqrestore(&udc->lock, flags);
 736                dev_err(&udc->dev->dev, "Failed to dma_pool_alloc\n");
 737                retval = -ENOMEM;
 738                goto err_unmap_dma;
 739        }
 740
 741        /* Update ep0 state */
 742        if (ep->ep_num == 0)
 743                udc->ep0_state = DATA_STATE_XMIT;
 744
 745        /* irq handler advances the queue */
 746        list_add_tail(&req->queue, &ep->queue);
 747        spin_unlock_irqrestore(&udc->lock, flags);
 748
 749        return 0;
 750
 751err_unmap_dma:
 752        usb_gadget_unmap_request(&udc->gadget, _req, ep_dir(ep));
 753
 754        return retval;
 755}
 756
 757static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
 758{
 759        struct mv_dqh *dqh = ep->dqh;
 760        u32 bit_pos;
 761
 762        /* Write dQH next pointer and terminate bit to 0 */
 763        dqh->next_dtd_ptr = req->head->td_dma
 764                & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
 765
 766        /* clear active and halt bit, in case set from a previous error */
 767        dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
 768
 769        /* Ensure that updates to the QH will occure before priming. */
 770        wmb();
 771
 772        bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
 773
 774        /* Prime the Endpoint */
 775        writel(bit_pos, &ep->udc->op_regs->epprime);
 776}
 777
/*
 * mv_ep_dequeue() - cancel (unlink) an I/O request from an endpoint
 * @_ep:	endpoint the request was queued on
 * @_req:	request to cancel
 *
 * Temporarily disables the endpoint, unlinks the request from the
 * hardware dTD chain (re-priming or parking the queue head as
 * needed), retires it with -ECONNRESET, then re-enables the endpoint.
 *
 * NOTE(review): ep/udc are derived from _ep before the !_ep check
 * below -- same deref-before-validate pattern as elsewhere; confirm
 * callers never pass NULL.
 */
static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
	struct mv_req *req;
	struct mv_udc *udc = ep->udc;
	unsigned long flags;
	int stopped, ret = 0;
	u32 epctrlx;

	if (!_ep || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->udc->lock, flags);
	stopped = ep->stopped;

	/* Stop the ep before we deal with the queue */
	ep->stopped = 1;
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx &= ~EPCTRL_TX_ENABLE;
	else
		epctrlx &= ~EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}

	/* The request is in progress, or completed but not dequeued */
	if (ep->queue.next == &req->queue) {
		_req->status = -ECONNRESET;
		mv_ep_fifo_flush(_ep);	/* flush current transfer */

		/* The request isn't the last request in this ep queue */
		if (req->queue.next != &ep->queue) {
			struct mv_req *next_req;

			next_req = list_entry(req->queue.next,
				struct mv_req, queue);

			/* Point the QH to the first TD of next request */
			mv_prime_ep(ep, next_req);
		} else {
			/* queue now empty: park the QH (terminate bit) */
			struct mv_dqh *qh;

			qh = ep->dqh;
			qh->next_dtd_ptr = 1;
			qh->size_ioc_int_sts = 0;
		}

		/* The request hasn't been processed, patch up the TD chain */
	} else {
		struct mv_req *prev_req;

		/*
		 * Splice this request out of the chain by copying its
		 * tail link into the previous request's tail dTD.
		 */
		prev_req = list_entry(req->queue.prev, struct mv_req, queue);
		writel(readl(&req->tail->dtd_next),
				&prev_req->tail->dtd_next);

	}

	/* retire the request; done() drops/reacquires the lock internally */
	done(ep, req, -ECONNRESET);

	/* Enable EP */
out:
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx |= EPCTRL_TX_ENABLE;
	else
		epctrlx |= EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	ep->stopped = stopped;

	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return ret;
}
 860
 861static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
 862{
 863        u32 epctrlx;
 864
 865        epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
 866
 867        if (stall) {
 868                if (direction == EP_DIR_IN)
 869                        epctrlx |= EPCTRL_TX_EP_STALL;
 870                else
 871                        epctrlx |= EPCTRL_RX_EP_STALL;
 872        } else {
 873                if (direction == EP_DIR_IN) {
 874                        epctrlx &= ~EPCTRL_TX_EP_STALL;
 875                        epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
 876                } else {
 877                        epctrlx &= ~EPCTRL_RX_EP_STALL;
 878                        epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
 879                }
 880        }
 881        writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
 882}
 883
 884static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
 885{
 886        u32 epctrlx;
 887
 888        epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
 889
 890        if (direction == EP_DIR_OUT)
 891                return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
 892        else
 893                return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
 894}
 895
 896static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
 897{
 898        struct mv_ep *ep;
 899        unsigned long flags = 0;
 900        int status = 0;
 901        struct mv_udc *udc;
 902
 903        ep = container_of(_ep, struct mv_ep, ep);
 904        udc = ep->udc;
 905        if (!_ep || !ep->ep.desc) {
 906                status = -EINVAL;
 907                goto out;
 908        }
 909
 910        if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
 911                status = -EOPNOTSUPP;
 912                goto out;
 913        }
 914
 915        /*
 916         * Attempt to halt IN ep will fail if any transfer requests
 917         * are still queue
 918         */
 919        if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
 920                status = -EAGAIN;
 921                goto out;
 922        }
 923
 924        spin_lock_irqsave(&ep->udc->lock, flags);
 925        ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
 926        if (halt && wedge)
 927                ep->wedge = 1;
 928        else if (!halt)
 929                ep->wedge = 0;
 930        spin_unlock_irqrestore(&ep->udc->lock, flags);
 931
 932        if (ep->ep_num == 0) {
 933                udc->ep0_state = WAIT_FOR_SETUP;
 934                udc->ep0_dir = EP_DIR_OUT;
 935        }
 936out:
 937        return status;
 938}
 939
 940static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
 941{
 942        return mv_ep_set_halt_wedge(_ep, halt, 0);
 943}
 944
 945static int mv_ep_set_wedge(struct usb_ep *_ep)
 946{
 947        return mv_ep_set_halt_wedge(_ep, 1, 1);
 948}
 949
 950static struct usb_ep_ops mv_ep_ops = {
 951        .enable         = mv_ep_enable,
 952        .disable        = mv_ep_disable,
 953
 954        .alloc_request  = mv_alloc_request,
 955        .free_request   = mv_free_request,
 956
 957        .queue          = mv_ep_queue,
 958        .dequeue        = mv_ep_dequeue,
 959
 960        .set_wedge      = mv_ep_set_wedge,
 961        .set_halt       = mv_ep_set_halt,
 962        .fifo_flush     = mv_ep_fifo_flush,     /* flush fifo */
 963};
 964
 965static void udc_clock_enable(struct mv_udc *udc)
 966{
 967        clk_prepare_enable(udc->clk);
 968}
 969
 970static void udc_clock_disable(struct mv_udc *udc)
 971{
 972        clk_disable_unprepare(udc->clk);
 973}
 974
 975static void udc_stop(struct mv_udc *udc)
 976{
 977        u32 tmp;
 978
 979        /* Disable interrupts */
 980        tmp = readl(&udc->op_regs->usbintr);
 981        tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
 982                USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
 983        writel(tmp, &udc->op_regs->usbintr);
 984
 985        udc->stopped = 1;
 986
 987        /* Reset the Run the bit in the command register to stop VUSB */
 988        tmp = readl(&udc->op_regs->usbcmd);
 989        tmp &= ~USBCMD_RUN_STOP;
 990        writel(tmp, &udc->op_regs->usbcmd);
 991}
 992
 993static void udc_start(struct mv_udc *udc)
 994{
 995        u32 usbintr;
 996
 997        usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
 998                | USBINTR_PORT_CHANGE_DETECT_EN
 999                | USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
1000        /* Enable interrupts */
1001        writel(usbintr, &udc->op_regs->usbintr);
1002
1003        udc->stopped = 0;
1004
1005        /* Set the Run bit in the command register */
1006        writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
1007}
1008
1009static int udc_reset(struct mv_udc *udc)
1010{
1011        unsigned int loops;
1012        u32 tmp, portsc;
1013
1014        /* Stop the controller */
1015        tmp = readl(&udc->op_regs->usbcmd);
1016        tmp &= ~USBCMD_RUN_STOP;
1017        writel(tmp, &udc->op_regs->usbcmd);
1018
1019        /* Reset the controller to get default values */
1020        writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);
1021
1022        /* wait for reset to complete */
1023        loops = LOOPS(RESET_TIMEOUT);
1024        while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
1025                if (loops == 0) {
1026                        dev_err(&udc->dev->dev,
1027                                "Wait for RESET completed TIMEOUT\n");
1028                        return -ETIMEDOUT;
1029                }
1030                loops--;
1031                udelay(LOOPS_USEC);
1032        }
1033
1034        /* set controller to device mode */
1035        tmp = readl(&udc->op_regs->usbmode);
1036        tmp |= USBMODE_CTRL_MODE_DEVICE;
1037
1038        /* turn setup lockout off, require setup tripwire in usbcmd */
1039        tmp |= USBMODE_SETUP_LOCK_OFF;
1040
1041        writel(tmp, &udc->op_regs->usbmode);
1042
1043        writel(0x0, &udc->op_regs->epsetupstat);
1044
1045        /* Configure the Endpoint List Address */
1046        writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
1047                &udc->op_regs->eplistaddr);
1048
1049        portsc = readl(&udc->op_regs->portsc[0]);
1050        if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
1051                portsc &= (~PORTSCX_W1C_BITS | ~PORTSCX_PORT_POWER);
1052
1053        if (udc->force_fs)
1054                portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
1055        else
1056                portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);
1057
1058        writel(portsc, &udc->op_regs->portsc[0]);
1059
1060        tmp = readl(&udc->op_regs->epctrlx[0]);
1061        tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
1062        writel(tmp, &udc->op_regs->epctrlx[0]);
1063
1064        return 0;
1065}
1066
1067static int mv_udc_enable_internal(struct mv_udc *udc)
1068{
1069        int retval;
1070
1071        if (udc->active)
1072                return 0;
1073
1074        dev_dbg(&udc->dev->dev, "enable udc\n");
1075        udc_clock_enable(udc);
1076        if (udc->pdata->phy_init) {
1077                retval = udc->pdata->phy_init(udc->phy_regs);
1078                if (retval) {
1079                        dev_err(&udc->dev->dev,
1080                                "init phy error %d\n", retval);
1081                        udc_clock_disable(udc);
1082                        return retval;
1083                }
1084        }
1085        udc->active = 1;
1086
1087        return 0;
1088}
1089
1090static int mv_udc_enable(struct mv_udc *udc)
1091{
1092        if (udc->clock_gating)
1093                return mv_udc_enable_internal(udc);
1094
1095        return 0;
1096}
1097
1098static void mv_udc_disable_internal(struct mv_udc *udc)
1099{
1100        if (udc->active) {
1101                dev_dbg(&udc->dev->dev, "disable udc\n");
1102                if (udc->pdata->phy_deinit)
1103                        udc->pdata->phy_deinit(udc->phy_regs);
1104                udc_clock_disable(udc);
1105                udc->active = 0;
1106        }
1107}
1108
1109static void mv_udc_disable(struct mv_udc *udc)
1110{
1111        if (udc->clock_gating)
1112                mv_udc_disable_internal(udc);
1113}
1114
1115static int mv_udc_get_frame(struct usb_gadget *gadget)
1116{
1117        struct mv_udc *udc;
1118        u16     retval;
1119
1120        if (!gadget)
1121                return -ENODEV;
1122
1123        udc = container_of(gadget, struct mv_udc, gadget);
1124
1125        retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;
1126
1127        return retval;
1128}
1129
1130/* Tries to wake up the host connected to this gadget */
1131static int mv_udc_wakeup(struct usb_gadget *gadget)
1132{
1133        struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
1134        u32 portsc;
1135
1136        /* Remote wakeup feature not enabled by host */
1137        if (!udc->remote_wakeup)
1138                return -ENOTSUPP;
1139
1140        portsc = readl(&udc->op_regs->portsc);
1141        /* not suspended? */
1142        if (!(portsc & PORTSCX_PORT_SUSPEND))
1143                return 0;
1144        /* trigger force resume */
1145        portsc |= PORTSCX_PORT_FORCE_RESUME;
1146        writel(portsc, &udc->op_regs->portsc[0]);
1147        return 0;
1148}
1149
1150static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
1151{
1152        struct mv_udc *udc;
1153        unsigned long flags;
1154        int retval = 0;
1155
1156        udc = container_of(gadget, struct mv_udc, gadget);
1157        spin_lock_irqsave(&udc->lock, flags);
1158
1159        udc->vbus_active = (is_active != 0);
1160
1161        dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
1162                __func__, udc->softconnect, udc->vbus_active);
1163
1164        if (udc->driver && udc->softconnect && udc->vbus_active) {
1165                retval = mv_udc_enable(udc);
1166                if (retval == 0) {
1167                        /* Clock is disabled, need re-init registers */
1168                        udc_reset(udc);
1169                        ep0_reset(udc);
1170                        udc_start(udc);
1171                }
1172        } else if (udc->driver && udc->softconnect) {
1173                if (!udc->active)
1174                        goto out;
1175
1176                /* stop all the transfer in queue*/
1177                stop_activity(udc, udc->driver);
1178                udc_stop(udc);
1179                mv_udc_disable(udc);
1180        }
1181
1182out:
1183        spin_unlock_irqrestore(&udc->lock, flags);
1184        return retval;
1185}
1186
1187static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
1188{
1189        struct mv_udc *udc;
1190        unsigned long flags;
1191        int retval = 0;
1192
1193        udc = container_of(gadget, struct mv_udc, gadget);
1194        spin_lock_irqsave(&udc->lock, flags);
1195
1196        udc->softconnect = (is_on != 0);
1197
1198        dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
1199                        __func__, udc->softconnect, udc->vbus_active);
1200
1201        if (udc->driver && udc->softconnect && udc->vbus_active) {
1202                retval = mv_udc_enable(udc);
1203                if (retval == 0) {
1204                        /* Clock is disabled, need re-init registers */
1205                        udc_reset(udc);
1206                        ep0_reset(udc);
1207                        udc_start(udc);
1208                }
1209        } else if (udc->driver && udc->vbus_active) {
1210                /* stop all the transfer in queue*/
1211                stop_activity(udc, udc->driver);
1212                udc_stop(udc);
1213                mv_udc_disable(udc);
1214        }
1215
1216        spin_unlock_irqrestore(&udc->lock, flags);
1217        return retval;
1218}
1219
/* forward declarations for the udc_start/udc_stop ops below */
static int mv_udc_start(struct usb_gadget *, struct usb_gadget_driver *);
static int mv_udc_stop(struct usb_gadget *);
/* device controller usb_gadget_ops structure */
static const struct usb_gadget_ops mv_ops = {

	/* returns the current frame number */
	.get_frame	= mv_udc_get_frame,

	/* tries to wake up the host connected to this gadget */
	.wakeup		= mv_udc_wakeup,

	/* notify controller that VBUS is powered or not */
	.vbus_session	= mv_udc_vbus_session,

	/* D+ pullup, software-controlled connect/disconnect to USB host */
	.pullup		= mv_udc_pullup,
	.udc_start	= mv_udc_start,
	.udc_stop	= mv_udc_stop,
};
1239
1240static int eps_init(struct mv_udc *udc)
1241{
1242        struct mv_ep    *ep;
1243        char name[14];
1244        int i;
1245
1246        /* initialize ep0 */
1247        ep = &udc->eps[0];
1248        ep->udc = udc;
1249        strncpy(ep->name, "ep0", sizeof(ep->name));
1250        ep->ep.name = ep->name;
1251        ep->ep.ops = &mv_ep_ops;
1252        ep->wedge = 0;
1253        ep->stopped = 0;
1254        usb_ep_set_maxpacket_limit(&ep->ep, EP0_MAX_PKT_SIZE);
1255        ep->ep.caps.type_control = true;
1256        ep->ep.caps.dir_in = true;
1257        ep->ep.caps.dir_out = true;
1258        ep->ep_num = 0;
1259        ep->ep.desc = &mv_ep0_desc;
1260        INIT_LIST_HEAD(&ep->queue);
1261
1262        ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
1263
1264        /* initialize other endpoints */
1265        for (i = 2; i < udc->max_eps * 2; i++) {
1266                ep = &udc->eps[i];
1267                if (i % 2) {
1268                        snprintf(name, sizeof(name), "ep%din", i / 2);
1269                        ep->direction = EP_DIR_IN;
1270                        ep->ep.caps.dir_in = true;
1271                } else {
1272                        snprintf(name, sizeof(name), "ep%dout", i / 2);
1273                        ep->direction = EP_DIR_OUT;
1274                        ep->ep.caps.dir_out = true;
1275                }
1276                ep->udc = udc;
1277                strncpy(ep->name, name, sizeof(ep->name));
1278                ep->ep.name = ep->name;
1279
1280                ep->ep.caps.type_iso = true;
1281                ep->ep.caps.type_bulk = true;
1282                ep->ep.caps.type_int = true;
1283
1284                ep->ep.ops = &mv_ep_ops;
1285                ep->stopped = 0;
1286                usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
1287                ep->ep_num = i / 2;
1288
1289                INIT_LIST_HEAD(&ep->queue);
1290                list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
1291
1292                ep->dqh = &udc->ep_dqh[i];
1293        }
1294
1295        return 0;
1296}
1297
1298/* delete all endpoint requests, called with spinlock held */
1299static void nuke(struct mv_ep *ep, int status)
1300{
1301        /* called with spinlock held */
1302        ep->stopped = 1;
1303
1304        /* endpoint fifo flush */
1305        mv_ep_fifo_flush(&ep->ep);
1306
1307        while (!list_empty(&ep->queue)) {
1308                struct mv_req *req = NULL;
1309                req = list_entry(ep->queue.next, struct mv_req, queue);
1310                done(ep, req, status);
1311        }
1312}
1313
1314static void gadget_reset(struct mv_udc *udc, struct usb_gadget_driver *driver)
1315{
1316        struct mv_ep    *ep;
1317
1318        nuke(&udc->eps[0], -ESHUTDOWN);
1319
1320        list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
1321                nuke(ep, -ESHUTDOWN);
1322        }
1323
1324        /* report reset; the driver is already quiesced */
1325        if (driver) {
1326                spin_unlock(&udc->lock);
1327                usb_gadget_udc_reset(&udc->gadget, driver);
1328                spin_lock(&udc->lock);
1329        }
1330}
1331/* stop all USB activities */
1332static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
1333{
1334        struct mv_ep    *ep;
1335
1336        nuke(&udc->eps[0], -ESHUTDOWN);
1337
1338        list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
1339                nuke(ep, -ESHUTDOWN);
1340        }
1341
1342        /* report disconnect; the driver is already quiesced */
1343        if (driver) {
1344                spin_unlock(&udc->lock);
1345                driver->disconnect(&udc->gadget);
1346                spin_lock(&udc->lock);
1347        }
1348}
1349
1350static int mv_udc_start(struct usb_gadget *gadget,
1351                struct usb_gadget_driver *driver)
1352{
1353        struct mv_udc *udc;
1354        int retval = 0;
1355        unsigned long flags;
1356
1357        udc = container_of(gadget, struct mv_udc, gadget);
1358
1359        if (udc->driver)
1360                return -EBUSY;
1361
1362        spin_lock_irqsave(&udc->lock, flags);
1363
1364        /* hook up the driver ... */
1365        driver->driver.bus = NULL;
1366        udc->driver = driver;
1367
1368        udc->usb_state = USB_STATE_ATTACHED;
1369        udc->ep0_state = WAIT_FOR_SETUP;
1370        udc->ep0_dir = EP_DIR_OUT;
1371
1372        spin_unlock_irqrestore(&udc->lock, flags);
1373
1374        if (udc->transceiver) {
1375                retval = otg_set_peripheral(udc->transceiver->otg,
1376                                        &udc->gadget);
1377                if (retval) {
1378                        dev_err(&udc->dev->dev,
1379                                "unable to register peripheral to otg\n");
1380                        udc->driver = NULL;
1381                        return retval;
1382                }
1383        }
1384
1385        /* When boot with cable attached, there will be no vbus irq occurred */
1386        if (udc->qwork)
1387                queue_work(udc->qwork, &udc->vbus_work);
1388
1389        return 0;
1390}
1391
1392static int mv_udc_stop(struct usb_gadget *gadget)
1393{
1394        struct mv_udc *udc;
1395        unsigned long flags;
1396
1397        udc = container_of(gadget, struct mv_udc, gadget);
1398
1399        spin_lock_irqsave(&udc->lock, flags);
1400
1401        mv_udc_enable(udc);
1402        udc_stop(udc);
1403
1404        /* stop all usb activities */
1405        udc->gadget.speed = USB_SPEED_UNKNOWN;
1406        stop_activity(udc, NULL);
1407        mv_udc_disable(udc);
1408
1409        spin_unlock_irqrestore(&udc->lock, flags);
1410
1411        /* unbind gadget driver */
1412        udc->driver = NULL;
1413
1414        return 0;
1415}
1416
1417static void mv_set_ptc(struct mv_udc *udc, u32 mode)
1418{
1419        u32 portsc;
1420
1421        portsc = readl(&udc->op_regs->portsc[0]);
1422        portsc |= mode << 16;
1423        writel(portsc, &udc->op_regs->portsc[0]);
1424}
1425
1426static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
1427{
1428        struct mv_ep *mvep = container_of(ep, struct mv_ep, ep);
1429        struct mv_req *req = container_of(_req, struct mv_req, req);
1430        struct mv_udc *udc;
1431        unsigned long flags;
1432
1433        udc = mvep->udc;
1434
1435        dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);
1436
1437        spin_lock_irqsave(&udc->lock, flags);
1438        if (req->test_mode) {
1439                mv_set_ptc(udc, req->test_mode);
1440                req->test_mode = 0;
1441        }
1442        spin_unlock_irqrestore(&udc->lock, flags);
1443}
1444
/*
 * Queue a zero- or two-byte control transfer on ep0 using the shared,
 * preallocated udc->status_req.
 *
 * @direction: EP_DIR_IN or EP_DIR_OUT for this stage.
 * @status:    16-bit value to send when @empty is false (GET_STATUS).
 * @empty:     true for a zero-length handshake packet.
 *
 * Returns 0 on success or a negative errno; callers typically stall
 * ep0 on failure.
 */
static int
udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
{
	int retval = 0;
	struct mv_req *req;
	struct mv_ep *ep;

	ep = &udc->eps[0];
	udc->ep0_dir = direction;
	udc->ep0_state = WAIT_FOR_OUT_STATUS;

	req = udc->status_req;

	/* fill in the reqest structure */
	if (empty == false) {
		*((u16 *) req->req.buf) = cpu_to_le16(status);
		req->req.length = 2;
	} else
		req->req.length = 0;

	req->ep = ep;
	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	if (udc->test_mode) {
		/* defer entering the test mode until this stage completes */
		req->req.complete = prime_status_complete;
		req->test_mode = udc->test_mode;
		udc->test_mode = 0;
	} else
		req->req.complete = NULL;
	req->dtd_count = 0;

	/* map the buffer for DMA unless it is already mapped */
	if (req->req.dma == DMA_ADDR_INVALID) {
		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
				req->req.buf, req->req.length,
				ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = 1;
	}

	/* prime the data phase */
	if (!req_to_dtd(req)) {
		retval = queue_dtd(ep, req);
		if (retval) {
			dev_err(&udc->dev->dev,
				"Failed to queue dtd when prime status\n");
			goto out;
		}
	} else{	/* no mem */
		retval = -ENOMEM;
		dev_err(&udc->dev->dev,
			"Failed to dma_pool_alloc when prime status\n");
		goto out;
	}

	list_add_tail(&req->queue, &ep->queue);

	return 0;
out:
	/*
	 * NOTE(review): the buffer above may have been mapped via
	 * dma_map_single() (req->mapped = 1), but this error path unmaps
	 * with usb_gadget_unmap_request() -- confirm the two pair up.
	 */
	usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));

	return retval;
}
1506
1507static void mv_udc_testmode(struct mv_udc *udc, u16 index)
1508{
1509        if (index <= TEST_FORCE_EN) {
1510                udc->test_mode = index;
1511                if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1512                        ep0_stall(udc);
1513        } else
1514                dev_err(&udc->dev->dev,
1515                        "This test mode(%d) is not supported\n", index);
1516}
1517
1518static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1519{
1520        udc->dev_addr = (u8)setup->wValue;
1521
1522        /* update usb state */
1523        udc->usb_state = USB_STATE_ADDRESS;
1524
1525        if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1526                ep0_stall(udc);
1527}
1528
1529static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
1530        struct usb_ctrlrequest *setup)
1531{
1532        u16 status = 0;
1533        int retval;
1534
1535        if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
1536                != (USB_DIR_IN | USB_TYPE_STANDARD))
1537                return;
1538
1539        if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
1540                status = 1 << USB_DEVICE_SELF_POWERED;
1541                status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
1542        } else if ((setup->bRequestType & USB_RECIP_MASK)
1543                        == USB_RECIP_INTERFACE) {
1544                /* get interface status */
1545                status = 0;
1546        } else if ((setup->bRequestType & USB_RECIP_MASK)
1547                        == USB_RECIP_ENDPOINT) {
1548                u8 ep_num, direction;
1549
1550                ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1551                direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1552                                ? EP_DIR_IN : EP_DIR_OUT;
1553                status = ep_is_stall(udc, ep_num, direction)
1554                                << USB_ENDPOINT_HALT;
1555        }
1556
1557        retval = udc_prime_status(udc, EP_DIR_IN, status, false);
1558        if (retval)
1559                ep0_stall(udc);
1560        else
1561                udc->ep0_state = DATA_STATE_XMIT;
1562}
1563
1564static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1565{
1566        u8 ep_num;
1567        u8 direction;
1568        struct mv_ep *ep;
1569
1570        if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1571                == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
1572                switch (setup->wValue) {
1573                case USB_DEVICE_REMOTE_WAKEUP:
1574                        udc->remote_wakeup = 0;
1575                        break;
1576                default:
1577                        goto out;
1578                }
1579        } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1580                == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
1581                switch (setup->wValue) {
1582                case USB_ENDPOINT_HALT:
1583                        ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1584                        direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1585                                ? EP_DIR_IN : EP_DIR_OUT;
1586                        if (setup->wValue != 0 || setup->wLength != 0
1587                                || ep_num > udc->max_eps)
1588                                goto out;
1589                        ep = &udc->eps[ep_num * 2 + direction];
1590                        if (ep->wedge == 1)
1591                                break;
1592                        spin_unlock(&udc->lock);
1593                        ep_set_stall(udc, ep_num, direction, 0);
1594                        spin_lock(&udc->lock);
1595                        break;
1596                default:
1597                        goto out;
1598                }
1599        } else
1600                goto out;
1601
1602        if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1603                ep0_stall(udc);
1604out:
1605        return;
1606}
1607
1608static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1609{
1610        u8 ep_num;
1611        u8 direction;
1612
1613        if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1614                == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
1615                switch (setup->wValue) {
1616                case USB_DEVICE_REMOTE_WAKEUP:
1617                        udc->remote_wakeup = 1;
1618                        break;
1619                case USB_DEVICE_TEST_MODE:
1620                        if (setup->wIndex & 0xFF
1621                                ||  udc->gadget.speed != USB_SPEED_HIGH)
1622                                ep0_stall(udc);
1623
1624                        if (udc->usb_state != USB_STATE_CONFIGURED
1625                                && udc->usb_state != USB_STATE_ADDRESS
1626                                && udc->usb_state != USB_STATE_DEFAULT)
1627                                ep0_stall(udc);
1628
1629                        mv_udc_testmode(udc, (setup->wIndex >> 8));
1630                        goto out;
1631                default:
1632                        goto out;
1633                }
1634        } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1635                == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
1636                switch (setup->wValue) {
1637                case USB_ENDPOINT_HALT:
1638                        ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1639                        direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1640                                ? EP_DIR_IN : EP_DIR_OUT;
1641                        if (setup->wValue != 0 || setup->wLength != 0
1642                                || ep_num > udc->max_eps)
1643                                goto out;
1644                        spin_unlock(&udc->lock);
1645                        ep_set_stall(udc, ep_num, direction, 1);
1646                        spin_lock(&udc->lock);
1647                        break;
1648                default:
1649                        goto out;
1650                }
1651        } else
1652                goto out;
1653
1654        if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1655                ep0_stall(udc);
1656out:
1657        return;
1658}
1659
/*
 * Decode a SETUP packet received on ep0.  GET_STATUS, SET_ADDRESS,
 * CLEAR_FEATURE and SET_FEATURE are handled inside the driver;
 * everything else is delegated to the gadget driver's ->setup()
 * callback.  Called with udc->lock held; the lock is dropped around
 * the gadget callback (see the sparse annotations).
 */
static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
	__releases(&ep->udc->lock)
	__acquires(&ep->udc->lock)
{
	bool delegate = false;

	/* a new SETUP cancels whatever was pending on this control ep */
	nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);

	dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
			setup->bRequestType, setup->bRequest,
			setup->wValue, setup->wIndex, setup->wLength);
	/* We process some standard setup requests here */
	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup->bRequest) {
		case USB_REQ_GET_STATUS:
			ch9getstatus(udc, ep_num, setup);
			break;

		case USB_REQ_SET_ADDRESS:
			ch9setaddress(udc, setup);
			break;

		case USB_REQ_CLEAR_FEATURE:
			ch9clearfeature(udc, setup);
			break;

		case USB_REQ_SET_FEATURE:
			ch9setfeature(udc, setup);
			break;

		default:
			delegate = true;
		}
	} else
		delegate = true;

	/* delegate USB standard requests to the gadget driver */
	if (delegate == true) {
		/* USB requests handled by gadget */
		if (setup->wLength) {
			/* DATA phase from gadget, STATUS phase from udc */
			udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
					?  EP_DIR_IN : EP_DIR_OUT;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
					?  DATA_STATE_XMIT : DATA_STATE_RECV;
		} else {
			/* no DATA phase, IN STATUS phase from gadget */
			udc->ep0_dir = EP_DIR_IN;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = WAIT_FOR_OUT_STATUS;
		}
	}
}
1723
/*
 * Complete the DATA or STATUS phase of an ep0 transfer and advance the
 * ep0 state machine, priming the opposite-direction STATUS phase when a
 * DATA phase has just finished.
 */
static void ep0_req_complete(struct mv_udc *udc,
        struct mv_ep *ep0, struct mv_req *req)
{
        u32 new_addr;

        if (udc->usb_state == USB_STATE_ADDRESS) {
                /*
                 * Set the new address.  The address latched during
                 * SET_ADDRESS is programmed into the controller only now,
                 * after the request's completion — presumably so it takes
                 * effect once the status stage is done (NOTE(review):
                 * confirm against SET_ADDRESS handling elsewhere in file).
                 */
                new_addr = (u32)udc->dev_addr;
                writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
                        &udc->op_regs->deviceaddr);
        }

        /* retire the request that just completed on ep0 */
        done(ep0, req, 0);

        switch (udc->ep0_state) {
        case DATA_STATE_XMIT:
                /* IN data sent: receive status phase (prime OUT) */
                if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
                        ep0_stall(udc);
                break;
        case DATA_STATE_RECV:
                /* OUT data received: send status phase (prime IN) */
                if (udc_prime_status(udc, EP_DIR_IN, 0 , true))
                        ep0_stall(udc);
                break;
        case WAIT_FOR_OUT_STATUS:
                /* status stage finished: ready for the next SETUP */
                udc->ep0_state = WAIT_FOR_SETUP;
                break;
        case WAIT_FOR_SETUP:
                /* a completion while waiting for SETUP is a protocol error */
                dev_err(&udc->dev->dev, "unexpect ep0 packets\n");
                break;
        default:
                /* unknown state: stall ep0 to force the host to recover */
                ep0_stall(udc);
                break;
        }
}
1761
/*
 * Copy the 8-byte SETUP packet for @ep_num out of its OUT queue head into
 * @buffer_ptr, using the controller's "setup tripwire" protocol to detect
 * (and retry after) a new SETUP packet arriving mid-copy.
 */
static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
{
        u32 temp;
        struct mv_dqh *dqh;

        /* queue heads are laid out [ep][dir]; SETUP data lives in the OUT dQH */
        dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];

        /* Clear bit in ENDPTSETUPSTAT */
        writel((1 << ep_num), &udc->op_regs->epsetupstat);

        /* while a hazard exists when setup package arrives */
        do {
                /* Set Setup Tripwire */
                temp = readl(&udc->op_regs->usbcmd);
                writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);

                /* Copy the setup packet to local buffer */
                memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
                /*
                 * Hardware clears the tripwire bit if a new SETUP landed
                 * while we copied; in that case the copy may be torn, so
                 * loop until a copy completes with the bit still set.
                 */
        } while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));

        /* Clear Setup Tripwire */
        temp = readl(&udc->op_regs->usbcmd);
        writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
}
1786
/*
 * Handle the USBSTS_INT transfer-complete interrupt: first drain any
 * pending SETUP packets, then walk every endpoint flagged in
 * ENDPTCOMPLETE and retire its finished requests.  Called with
 * udc->lock held from mv_udc_irq().
 */
static void irq_process_tr_complete(struct mv_udc *udc)
{
        u32 tmp, bit_pos;
        int i, ep_num = 0, direction = 0;
        struct mv_ep    *curr_ep;
        struct mv_req *curr_req, *temp_req;
        int status;

        /*
         * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
         * because the setup packets are to be read ASAP
         */

        /* Process all Setup packet received interrupts */
        tmp = readl(&udc->op_regs->epsetupstat);

        if (tmp) {
                for (i = 0; i < udc->max_eps; i++) {
                        if (tmp & (1 << i)) {
                                get_setup_data(udc, i,
                                        (u8 *)(&udc->local_setup_buff));
                                handle_setup_packet(udc, i,
                                        &udc->local_setup_buff);
                        }
                }
        }

        /* Don't clear the endpoint setup status register here.
         * It is cleared as a setup packet is read out of the buffer
         */

        /* Process non-setup transaction complete interrupts */
        tmp = readl(&udc->op_regs->epcomplete);

        if (!tmp)
                return;

        /* ack the completions we are about to service (write-1-to-clear) */
        writel(tmp, &udc->op_regs->epcomplete);

        for (i = 0; i < udc->max_eps * 2; i++) {
                /* even i = OUT half of an endpoint, odd i = IN half */
                ep_num = i >> 1;
                direction = i % 2;

                /* ENDPTCOMPLETE: OUT flags in bits 0-15, IN flags in 16-31 */
                bit_pos = 1 << (ep_num + 16 * direction);

                if (!(bit_pos & tmp))
                        continue;

                /* both directions of ep0 share the single eps[0] struct */
                if (i == 1)
                        curr_ep = &udc->eps[0];
                else
                        curr_ep = &udc->eps[i];
                /* process the req queue until an uncomplete request */
                list_for_each_entry_safe(curr_req, temp_req,
                        &curr_ep->queue, queue) {
                        status = process_ep_req(udc, i, curr_req);
                        if (status)
                                break;

                        /* write back status to req */
                        curr_req->req.status = status;

                        /* ep0 request completion */
                        if (ep_num == 0) {
                                ep0_req_complete(udc, curr_ep, curr_req);
                                break;
                        } else {
                                done(curr_ep, curr_req, status);
                        }
                }
        }
}
1859
1860static void irq_process_reset(struct mv_udc *udc)
1861{
1862        u32 tmp;
1863        unsigned int loops;
1864
1865        udc->ep0_dir = EP_DIR_OUT;
1866        udc->ep0_state = WAIT_FOR_SETUP;
1867        udc->remote_wakeup = 0;         /* default to 0 on reset */
1868
1869        /* The address bits are past bit 25-31. Set the address */
1870        tmp = readl(&udc->op_regs->deviceaddr);
1871        tmp &= ~(USB_DEVICE_ADDRESS_MASK);
1872        writel(tmp, &udc->op_regs->deviceaddr);
1873
1874        /* Clear all the setup token semaphores */
1875        tmp = readl(&udc->op_regs->epsetupstat);
1876        writel(tmp, &udc->op_regs->epsetupstat);
1877
1878        /* Clear all the endpoint complete status bits */
1879        tmp = readl(&udc->op_regs->epcomplete);
1880        writel(tmp, &udc->op_regs->epcomplete);
1881
1882        /* wait until all endptprime bits cleared */
1883        loops = LOOPS(PRIME_TIMEOUT);
1884        while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
1885                if (loops == 0) {
1886                        dev_err(&udc->dev->dev,
1887                                "Timeout for ENDPTPRIME = 0x%x\n",
1888                                readl(&udc->op_regs->epprime));
1889                        break;
1890                }
1891                loops--;
1892                udelay(LOOPS_USEC);
1893        }
1894
1895        /* Write 1s to the Flush register */
1896        writel((u32)~0, &udc->op_regs->epflush);
1897
1898        if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
1899                dev_info(&udc->dev->dev, "usb bus reset\n");
1900                udc->usb_state = USB_STATE_DEFAULT;
1901                /* reset all the queues, stop all USB activities */
1902                gadget_reset(udc, udc->driver);
1903        } else {
1904                dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
1905                        readl(&udc->op_regs->portsc));
1906
1907                /*
1908                 * re-initialize
1909                 * controller reset
1910                 */
1911                udc_reset(udc);
1912
1913                /* reset all the queues, stop all USB activities */
1914                stop_activity(udc, udc->driver);
1915
1916                /* reset ep0 dQH and endptctrl */
1917                ep0_reset(udc);
1918
1919                /* enable interrupt and set controller to run state */
1920                udc_start(udc);
1921
1922                udc->usb_state = USB_STATE_ATTACHED;
1923        }
1924}
1925
1926static void handle_bus_resume(struct mv_udc *udc)
1927{
1928        udc->usb_state = udc->resume_state;
1929        udc->resume_state = 0;
1930
1931        /* report resume to the driver */
1932        if (udc->driver) {
1933                if (udc->driver->resume) {
1934                        spin_unlock(&udc->lock);
1935                        udc->driver->resume(&udc->gadget);
1936                        spin_lock(&udc->lock);
1937                }
1938        }
1939}
1940
1941static void irq_process_suspend(struct mv_udc *udc)
1942{
1943        udc->resume_state = udc->usb_state;
1944        udc->usb_state = USB_STATE_SUSPENDED;
1945
1946        if (udc->driver->suspend) {
1947                spin_unlock(&udc->lock);
1948                udc->driver->suspend(&udc->gadget);
1949                spin_lock(&udc->lock);
1950        }
1951}
1952
/*
 * Handle the USBSTS_PORT_CHANGE interrupt: latch the negotiated bus
 * speed once reset has finished, and track suspend/resume transitions
 * signalled through PORTSC.  Called with udc->lock held.
 */
static void irq_process_port_change(struct mv_udc *udc)
{
        u32 portsc;

        portsc = readl(&udc->op_regs->portsc[0]);
        if (!(portsc & PORTSCX_PORT_RESET)) {
                /* Get the speed */
                u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
                switch (speed) {
                case PORTSCX_PORT_SPEED_HIGH:
                        udc->gadget.speed = USB_SPEED_HIGH;
                        break;
                case PORTSCX_PORT_SPEED_FULL:
                        udc->gadget.speed = USB_SPEED_FULL;
                        break;
                case PORTSCX_PORT_SPEED_LOW:
                        udc->gadget.speed = USB_SPEED_LOW;
                        break;
                default:
                        udc->gadget.speed = USB_SPEED_UNKNOWN;
                        break;
                }
        }

        /* port entered suspend: save state and notify the gadget driver */
        if (portsc & PORTSCX_PORT_SUSPEND) {
                udc->resume_state = udc->usb_state;
                udc->usb_state = USB_STATE_SUSPENDED;
                if (udc->driver->suspend) {
                        spin_unlock(&udc->lock);
                        udc->driver->suspend(&udc->gadget);
                        spin_lock(&udc->lock);
                }
        }

        /* port left suspend while we believed we were suspended: resume */
        if (!(portsc & PORTSCX_PORT_SUSPEND)
                && udc->usb_state == USB_STATE_SUSPENDED) {
                handle_bus_resume(udc);
        }

        /* no saved resume state means this change was not a suspend event */
        if (!udc->resume_state)
                udc->usb_state = USB_STATE_DEFAULT;
}
1995
1996static void irq_process_error(struct mv_udc *udc)
1997{
1998        /* Increment the error count */
1999        udc->errors++;
2000}
2001
/*
 * Top-level interrupt handler: acknowledge the enabled-and-pending
 * USBSTS bits, then dispatch to the per-cause handlers under udc->lock.
 * Returns IRQ_NONE when the controller is stopped or the interrupt was
 * not ours (the line is shared).
 */
static irqreturn_t mv_udc_irq(int irq, void *dev)
{
        struct mv_udc *udc = (struct mv_udc *)dev;
        u32 status, intr;

        /* Disable ISR when stopped bit is set */
        if (udc->stopped)
                return IRQ_NONE;

        spin_lock(&udc->lock);

        /* only consider status bits whose interrupt is actually enabled */
        status = readl(&udc->op_regs->usbsts);
        intr = readl(&udc->op_regs->usbintr);
        status &= intr;

        if (status == 0) {
                spin_unlock(&udc->lock);
                return IRQ_NONE;
        }

        /* Clear all the interrupts occurred */
        writel(status, &udc->op_regs->usbsts);

        if (status & USBSTS_ERR)
                irq_process_error(udc);

        if (status & USBSTS_RESET)
                irq_process_reset(udc);

        if (status & USBSTS_PORT_CHANGE)
                irq_process_port_change(udc);

        if (status & USBSTS_INT)
                irq_process_tr_complete(udc);

        if (status & USBSTS_SUSPEND)
                irq_process_suspend(udc);

        spin_unlock(&udc->lock);

        return IRQ_HANDLED;
}
2044
2045static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)
2046{
2047        struct mv_udc *udc = (struct mv_udc *)dev;
2048
2049        /* polling VBUS and init phy may cause too much time*/
2050        if (udc->qwork)
2051                queue_work(udc->qwork, &udc->vbus_work);
2052
2053        return IRQ_HANDLED;
2054}
2055
2056static void mv_udc_vbus_work(struct work_struct *work)
2057{
2058        struct mv_udc *udc;
2059        unsigned int vbus;
2060
2061        udc = container_of(work, struct mv_udc, vbus_work);
2062        if (!udc->pdata->vbus)
2063                return;
2064
2065        vbus = udc->pdata->vbus->poll();
2066        dev_info(&udc->dev->dev, "vbus is %d\n", vbus);
2067
2068        if (vbus == VBUS_HIGH)
2069                mv_udc_vbus_session(&udc->gadget, 1);
2070        else if (vbus == VBUS_LOW)
2071                mv_udc_vbus_session(&udc->gadget, 0);
2072}
2073
2074/* release device structure */
2075static void gadget_release(struct device *_dev)
2076{
2077        struct mv_udc *udc;
2078
2079        udc = dev_get_drvdata(_dev);
2080
2081        complete(udc->done);
2082}
2083
2084static int mv_udc_remove(struct platform_device *pdev)
2085{
2086        struct mv_udc *udc;
2087
2088        udc = platform_get_drvdata(pdev);
2089
2090        usb_del_gadget_udc(&udc->gadget);
2091
2092        if (udc->qwork) {
2093                flush_workqueue(udc->qwork);
2094                destroy_workqueue(udc->qwork);
2095        }
2096
2097        /* free memory allocated in probe */
2098        dma_pool_destroy(udc->dtd_pool);
2099
2100        if (udc->ep_dqh)
2101                dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
2102                        udc->ep_dqh, udc->ep_dqh_dma);
2103
2104        mv_udc_disable(udc);
2105
2106        /* free dev, wait for the release() finished */
2107        wait_for_completion(udc->done);
2108
2109        return 0;
2110}
2111
2112static int mv_udc_probe(struct platform_device *pdev)
2113{
2114        struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
2115        struct mv_udc *udc;
2116        int retval = 0;
2117        struct resource *r;
2118        size_t size;
2119
2120        if (pdata == NULL) {
2121                dev_err(&pdev->dev, "missing platform_data\n");
2122                return -ENODEV;
2123        }
2124
2125        udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
2126        if (udc == NULL)
2127                return -ENOMEM;
2128
2129        udc->done = &release_done;
2130        udc->pdata = dev_get_platdata(&pdev->dev);
2131        spin_lock_init(&udc->lock);
2132
2133        udc->dev = pdev;
2134
2135        if (pdata->mode == MV_USB_MODE_OTG) {
2136                udc->transceiver = devm_usb_get_phy(&pdev->dev,
2137                                        USB_PHY_TYPE_USB2);
2138                if (IS_ERR(udc->transceiver)) {
2139                        retval = PTR_ERR(udc->transceiver);
2140
2141                        if (retval == -ENXIO)
2142                                return retval;
2143
2144                        udc->transceiver = NULL;
2145                        return -EPROBE_DEFER;
2146                }
2147        }
2148
2149        /* udc only have one sysclk. */
2150        udc->clk = devm_clk_get(&pdev->dev, NULL);
2151        if (IS_ERR(udc->clk))
2152                return PTR_ERR(udc->clk);
2153
2154        r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
2155        if (r == NULL) {
2156                dev_err(&pdev->dev, "no I/O memory resource defined\n");
2157                return -ENODEV;
2158        }
2159
2160        udc->cap_regs = (struct mv_cap_regs __iomem *)
2161                devm_ioremap(&pdev->dev, r->start, resource_size(r));
2162        if (udc->cap_regs == NULL) {
2163                dev_err(&pdev->dev, "failed to map I/O memory\n");
2164                return -EBUSY;
2165        }
2166
2167        r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
2168        if (r == NULL) {
2169                dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
2170                return -ENODEV;
2171        }
2172
2173        udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
2174        if (udc->phy_regs == NULL) {
2175                dev_err(&pdev->dev, "failed to map phy I/O memory\n");
2176                return -EBUSY;
2177        }
2178
2179        /* we will acces controller register, so enable the clk */
2180        retval = mv_udc_enable_internal(udc);
2181        if (retval)
2182                return retval;
2183
2184        udc->op_regs =
2185                (struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
2186                + (readl(&udc->cap_regs->caplength_hciversion)
2187                        & CAPLENGTH_MASK));
2188        udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
2189
2190        /*
2191         * some platform will use usb to download image, it may not disconnect
2192         * usb gadget before loading kernel. So first stop udc here.
2193         */
2194        udc_stop(udc);
2195        writel(0xFFFFFFFF, &udc->op_regs->usbsts);
2196
2197        size = udc->max_eps * sizeof(struct mv_dqh) *2;
2198        size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
2199        udc->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
2200                                        &udc->ep_dqh_dma, GFP_KERNEL);
2201
2202        if (udc->ep_dqh == NULL) {
2203                dev_err(&pdev->dev, "allocate dQH memory failed\n");
2204                retval = -ENOMEM;
2205                goto err_disable_clock;
2206        }
2207        udc->ep_dqh_size = size;
2208
2209        /* create dTD dma_pool resource */
2210        udc->dtd_pool = dma_pool_create("mv_dtd",
2211                        &pdev->dev,
2212                        sizeof(struct mv_dtd),
2213                        DTD_ALIGNMENT,
2214                        DMA_BOUNDARY);
2215
2216        if (!udc->dtd_pool) {
2217                retval = -ENOMEM;
2218                goto err_free_dma;
2219        }
2220
2221        size = udc->max_eps * sizeof(struct mv_ep) *2;
2222        udc->eps = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
2223        if (udc->eps == NULL) {
2224                retval = -ENOMEM;
2225                goto err_destroy_dma;
2226        }
2227
2228        /* initialize ep0 status request structure */
2229        udc->status_req = devm_kzalloc(&pdev->dev, sizeof(struct mv_req),
2230                                        GFP_KERNEL);
2231        if (!udc->status_req) {
2232                retval = -ENOMEM;
2233                goto err_destroy_dma;
2234        }
2235        INIT_LIST_HEAD(&udc->status_req->queue);
2236
2237        /* allocate a small amount of memory to get valid address */
2238        udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
2239        udc->status_req->req.dma = DMA_ADDR_INVALID;
2240
2241        udc->resume_state = USB_STATE_NOTATTACHED;
2242        udc->usb_state = USB_STATE_POWERED;
2243        udc->ep0_dir = EP_DIR_OUT;
2244        udc->remote_wakeup = 0;
2245
2246        r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
2247        if (r == NULL) {
2248                dev_err(&pdev->dev, "no IRQ resource defined\n");
2249                retval = -ENODEV;
2250                goto err_destroy_dma;
2251        }
2252        udc->irq = r->start;
2253        if (devm_request_irq(&pdev->dev, udc->irq, mv_udc_irq,
2254                IRQF_SHARED, driver_name, udc)) {
2255                dev_err(&pdev->dev, "Request irq %d for UDC failed\n",
2256                        udc->irq);
2257                retval = -ENODEV;
2258                goto err_destroy_dma;
2259        }
2260
2261        /* initialize gadget structure */
2262        udc->gadget.ops = &mv_ops;      /* usb_gadget_ops */
2263        udc->gadget.ep0 = &udc->eps[0].ep;      /* gadget ep0 */
2264        INIT_LIST_HEAD(&udc->gadget.ep_list);   /* ep_list */
2265        udc->gadget.speed = USB_SPEED_UNKNOWN;  /* speed */
2266        udc->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */
2267
2268        /* the "gadget" abstracts/virtualizes the controller */
2269        udc->gadget.name = driver_name;         /* gadget name */
2270
2271        eps_init(udc);
2272
2273        /* VBUS detect: we can disable/enable clock on demand.*/
2274        if (udc->transceiver)
2275                udc->clock_gating = 1;
2276        else if (pdata->vbus) {
2277                udc->clock_gating = 1;
2278                retval = devm_request_threaded_irq(&pdev->dev,
2279                                pdata->vbus->irq, NULL,
2280                                mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
2281                if (retval) {
2282                        dev_info(&pdev->dev,
2283                                "Can not request irq for VBUS, "
2284                                "disable clock gating\n");
2285                        udc->clock_gating = 0;
2286                }
2287
2288                udc->qwork = create_singlethread_workqueue("mv_udc_queue");
2289                if (!udc->qwork) {
2290                        dev_err(&pdev->dev, "cannot create workqueue\n");
2291                        retval = -ENOMEM;
2292                        goto err_destroy_dma;
2293                }
2294
2295                INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
2296        }
2297
2298        /*
2299         * When clock gating is supported, we can disable clk and phy.
2300         * If not, it means that VBUS detection is not supported, we
2301         * have to enable vbus active all the time to let controller work.
2302         */
2303        if (udc->clock_gating)
2304                mv_udc_disable_internal(udc);
2305        else
2306                udc->vbus_active = 1;
2307
2308        retval = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
2309                        gadget_release);
2310        if (retval)
2311                goto err_create_workqueue;
2312
2313        platform_set_drvdata(pdev, udc);
2314        dev_info(&pdev->dev, "successful probe UDC device %s clock gating.\n",
2315                udc->clock_gating ? "with" : "without");
2316
2317        return 0;
2318
2319err_create_workqueue:
2320        destroy_workqueue(udc->qwork);
2321err_destroy_dma:
2322        dma_pool_destroy(udc->dtd_pool);
2323err_free_dma:
2324        dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
2325                        udc->ep_dqh, udc->ep_dqh_dma);
2326err_disable_clock:
2327        mv_udc_disable_internal(udc);
2328
2329        return retval;
2330}
2331
2332#ifdef CONFIG_PM
2333static int mv_udc_suspend(struct device *dev)
2334{
2335        struct mv_udc *udc;
2336
2337        udc = dev_get_drvdata(dev);
2338
2339        /* if OTG is enabled, the following will be done in OTG driver*/
2340        if (udc->transceiver)
2341                return 0;
2342
2343        if (udc->pdata->vbus && udc->pdata->vbus->poll)
2344                if (udc->pdata->vbus->poll() == VBUS_HIGH) {
2345                        dev_info(&udc->dev->dev, "USB cable is connected!\n");
2346                        return -EAGAIN;
2347                }
2348
2349        /*
2350         * only cable is unplugged, udc can suspend.
2351         * So do not care about clock_gating == 1.
2352         */
2353        if (!udc->clock_gating) {
2354                udc_stop(udc);
2355
2356                spin_lock_irq(&udc->lock);
2357                /* stop all usb activities */
2358                stop_activity(udc, udc->driver);
2359                spin_unlock_irq(&udc->lock);
2360
2361                mv_udc_disable_internal(udc);
2362        }
2363
2364        return 0;
2365}
2366
2367static int mv_udc_resume(struct device *dev)
2368{
2369        struct mv_udc *udc;
2370        int retval;
2371
2372        udc = dev_get_drvdata(dev);
2373
2374        /* if OTG is enabled, the following will be done in OTG driver*/
2375        if (udc->transceiver)
2376                return 0;
2377
2378        if (!udc->clock_gating) {
2379                retval = mv_udc_enable_internal(udc);
2380                if (retval)
2381                        return retval;
2382
2383                if (udc->driver && udc->softconnect) {
2384                        udc_reset(udc);
2385                        ep0_reset(udc);
2386                        udc_start(udc);
2387                }
2388        }
2389
2390        return 0;
2391}
2392
/* System sleep callbacks; compiled in only under CONFIG_PM. */
static const struct dev_pm_ops mv_udc_pm_ops = {
        .suspend        = mv_udc_suspend,
        .resume         = mv_udc_resume,
};
2397#endif
2398
2399static void mv_udc_shutdown(struct platform_device *pdev)
2400{
2401        struct mv_udc *udc;
2402        u32 mode;
2403
2404        udc = platform_get_drvdata(pdev);
2405        /* reset controller mode to IDLE */
2406        mv_udc_enable(udc);
2407        mode = readl(&udc->op_regs->usbmode);
2408        mode &= ~3;
2409        writel(mode, &udc->op_regs->usbmode);
2410        mv_udc_disable(udc);
2411}
2412
/* Platform-driver glue; matched by the "mv-udc" platform device name. */
static struct platform_driver udc_driver = {
        .probe          = mv_udc_probe,
        .remove         = mv_udc_remove,
        .shutdown       = mv_udc_shutdown,
        .driver         = {
                .name   = "mv-udc",
#ifdef CONFIG_PM
                .pm     = &mv_udc_pm_ops,
#endif
        },
};
2424
/* Standard module registration and metadata. */
module_platform_driver(udc_driver);
MODULE_ALIAS("platform:mv-udc");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
2431