/* linux/drivers/usb/gadget/udc/mv_udc_core.c */
/*
 * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
 * Author: Chao Xie <chao.xie@marvell.com>
 *         Neil Zhang <zhangwm@marvell.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */
  11
  12#include <linux/module.h>
  13#include <linux/pci.h>
  14#include <linux/dma-mapping.h>
  15#include <linux/dmapool.h>
  16#include <linux/kernel.h>
  17#include <linux/delay.h>
  18#include <linux/ioport.h>
  19#include <linux/sched.h>
  20#include <linux/slab.h>
  21#include <linux/errno.h>
  22#include <linux/err.h>
  23#include <linux/timer.h>
  24#include <linux/list.h>
  25#include <linux/interrupt.h>
  26#include <linux/moduleparam.h>
  27#include <linux/device.h>
  28#include <linux/usb/ch9.h>
  29#include <linux/usb/gadget.h>
  30#include <linux/usb/otg.h>
  31#include <linux/pm.h>
  32#include <linux/io.h>
  33#include <linux/irq.h>
  34#include <linux/platform_device.h>
  35#include <linux/clk.h>
  36#include <linux/platform_data/mv_usb.h>
  37#include <asm/unaligned.h>
  38
  39#include "mv_udc.h"
  40
  41#define DRIVER_DESC             "Marvell PXA USB Device Controller driver"
  42#define DRIVER_VERSION          "8 Nov 2010"
  43
  44#define ep_dir(ep)      (((ep)->ep_num == 0) ? \
  45                                ((ep)->udc->ep0_dir) : ((ep)->direction))
  46
  47/* timeout value -- usec */
  48#define RESET_TIMEOUT           10000
  49#define FLUSH_TIMEOUT           10000
  50#define EPSTATUS_TIMEOUT        10000
  51#define PRIME_TIMEOUT           10000
  52#define READSAFE_TIMEOUT        1000
  53
  54#define LOOPS_USEC_SHIFT        1
  55#define LOOPS_USEC              (1 << LOOPS_USEC_SHIFT)
  56#define LOOPS(timeout)          ((timeout) >> LOOPS_USEC_SHIFT)
  57
  58static DECLARE_COMPLETION(release_done);
  59
  60static const char driver_name[] = "mv_udc";
  61static const char driver_desc[] = DRIVER_DESC;
  62
  63static void nuke(struct mv_ep *ep, int status);
  64static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);
  65
/*
 * Template descriptor for endpoint 0 operations: a control endpoint
 * with the fixed ep0 maximum packet size.  bEndpointAddress is 0
 * because ep0 is bidirectional and has no address bits set.
 */
static const struct usb_endpoint_descriptor mv_ep0_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	0,
	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize =	EP0_MAX_PKT_SIZE,
};
  74
/*
 * ep0_reset() - reinitialize both halves of endpoint 0.
 *
 * Sets up the OUT (index 0) and IN (index 1) queue heads for ep0 and
 * enables the control endpoint in both directions via EPCTRL0.
 * Caller is expected to hold off other endpoint activity (called from
 * reset/start paths).
 */
static void ep0_reset(struct mv_udc *udc)
{
	struct mv_ep *ep;
	u32 epctrlx;
	int i = 0;

	/* ep0 out is eps[0], ep0 in is eps[1] */
	for (i = 0; i < 2; i++) {
		ep = &udc->eps[i];
		ep->udc = udc;

		/* ep0 dQH: OUT and IN share the first two queue heads */
		ep->dqh = &udc->ep_dqh[i];

		/*
		 * Configure ep0 endpoint capabilities in the dQH:
		 * max packet length plus IOS (interrupt on setup).
		 */
		ep->dqh->max_packet_length =
			(EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
			| EP_QUEUE_HEAD_IOS;

		/* no dTD queued yet: mark the next pointer as terminate */
		ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;

		/* enable the matching direction as a control endpoint */
		epctrlx = readl(&udc->op_regs->epctrlx[0]);
		if (i) {	/* TX */
			epctrlx |= EPCTRL_TX_ENABLE
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_TX_EP_TYPE_SHIFT);

		} else {	/* RX */
			epctrlx |= EPCTRL_RX_ENABLE
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_RX_EP_TYPE_SHIFT);
		}

		writel(epctrlx, &udc->op_regs->epctrlx[0]);
	}
}
 111
 112/* protocol ep0 stall, will automatically be cleared on new transaction */
/*
 * ep0_stall() - protocol stall on endpoint 0.
 *
 * Sets the stall bit for both ep0 directions; the hardware clears the
 * stall automatically on the next SETUP transaction, so no explicit
 * un-stall is required here.  Also rewinds the ep0 software state
 * machine to wait for the next SETUP.
 */
static void ep0_stall(struct mv_udc *udc)
{
	u32	epctrlx;

	/* set TX and RX to stall */
	epctrlx = readl(&udc->op_regs->epctrlx[0]);
	epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
	writel(epctrlx, &udc->op_regs->epctrlx[0]);

	/* update ep0 state */
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = EP_DIR_OUT;
}
 126
/*
 * process_ep_req() - collect completion status for one request.
 *
 * Walks the dTD chain of @curr_req on queue head @index and folds the
 * per-dTD hardware status into a single result.
 *
 * Returns:
 *   1          - a dTD is still active; the request is not complete
 *   -EPIPE / -EPROTO / -EILSEQ - transfer error reported by hardware
 *   0          - success; curr_req->req.actual is updated with the
 *                number of bytes actually transferred
 *
 * Caller holds the udc lock (called from the completion path).
 */
static int process_ep_req(struct mv_udc *udc, int index,
	struct mv_req *curr_req)
{
	struct mv_dtd	*curr_dtd;
	struct mv_dqh	*curr_dqh;
	int actual, remaining_length;
	int i, direction;
	int retval = 0;
	u32 errors;
	u32 bit_pos;

	curr_dqh = &udc->ep_dqh[index];
	/* even dQH indexes are OUT (RX), odd indexes are IN (TX) */
	direction = index % 2;

	curr_dtd = curr_req->head;
	/* start from the full request length, subtract what remains */
	actual = curr_req->req.length;

	for (i = 0; i < curr_req->dtd_count; i++) {
		/* hardware still owns this dTD: not finished yet */
		if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
			dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
				udc->eps[index].name);
			return 1;
		}

		errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
		if (!errors) {
			/* bytes the controller did NOT transfer */
			remaining_length =
				(curr_dtd->size_ioc_sts	& DTD_PACKET_SIZE)
					>> DTD_LENGTH_BIT_POS;
			actual -= remaining_length;

			if (remaining_length) {
				if (direction) {
					/* leftover data on TX is a protocol error */
					dev_dbg(&udc->dev->dev,
						"TX dTD remains data\n");
					retval = -EPROTO;
					break;
				} else
					/* short RX packet ends the transfer normally */
					break;
			}
		} else {
			dev_info(&udc->dev->dev,
				"complete_tr error: ep=%d %s: error = 0x%x\n",
				index >> 1, direction ? "SEND" : "RECV",
				errors);
			if (errors & DTD_STATUS_HALTED) {
				/* Clear the errors and Halt condition */
				curr_dqh->size_ioc_int_sts &= ~errors;
				retval = -EPIPE;
			} else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
				retval = -EPROTO;
			} else if (errors & DTD_STATUS_TRANSACTION_ERR) {
				retval = -EILSEQ;
			}
		}
		if (i != curr_req->dtd_count - 1)
			curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
	}
	if (retval)
		return retval;

	if (direction == EP_DIR_OUT)
		bit_pos = 1 << curr_req->ep->ep_num;
	else
		bit_pos = 1 << (16 + curr_req->ep->ep_num);

	/*
	 * If the controller's current-dTD pointer still references the
	 * last dTD we looked at, wait for it to advance; for the final
	 * dTD of a chain, wait for the endpoint status bit to clear.
	 * NOTE(review): both waits poll without an upper bound --
	 * presumably the hardware always makes progress; confirm
	 * against the controller documentation.
	 */
	while ((curr_dqh->curr_dtd_ptr == curr_dtd->td_dma)) {
		if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
			while (readl(&udc->op_regs->epstatus) & bit_pos)
				udelay(1);
			break;
		}
		udelay(1);
	}

	curr_req->req.actual = actual;

	return 0;
}
 206
 207/*
 208 * done() - retire a request; caller blocked irqs
 209 * @status : request status to be set, only works when
 210 * request is still in progress.
 211 */
/*
 * done() - retire a request; caller blocked irqs
 * @status : request status to be set, only works when
 * request is still in progress.
 *
 * Removes the request from the endpoint queue, frees its dTD chain
 * back to the dma pool, unmaps the DMA buffer, and hands the request
 * back to the gadget driver.  The udc lock is dropped around the
 * giveback callback (see __releases/__acquires) because the gadget
 * driver may re-enter the UDC from its completion handler.
 */
static void done(struct mv_ep *ep, struct mv_req *req, int status)
	__releases(&ep->udc->lock)
	__acquires(&ep->udc->lock)
{
	struct mv_udc *udc = NULL;
	unsigned char stopped = ep->stopped;
	struct mv_dtd *curr_td, *next_td;
	int j;

	udc = (struct mv_udc *)ep->udc;
	/* Removed the req from fsl_ep->queue */
	list_del_init(&req->queue);

	/* req.status should be set as -EINPROGRESS in ep_queue() */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	/* Free dtd for the request */
	next_td = req->head;
	for (j = 0; j < req->dtd_count; j++) {
		curr_td = next_td;
		/* the last dTD has no valid next_dtd_virt; don't follow it */
		if (j != req->dtd_count - 1)
			next_td = curr_td->next_dtd_virt;
		dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
	}

	usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));

	if (status && (status != -ESHUTDOWN))
		dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* stop the endpoint while the callback runs, then restore */
	ep->stopped = 1;

	spin_unlock(&ep->udc->lock);

	usb_gadget_giveback_request(&ep->ep, &req->req);

	spin_lock(&ep->udc->lock);
	ep->stopped = stopped;
}
 256
/*
 * queue_dtd() - link a request's dTD chain into the hardware queue.
 *
 * If the endpoint queue is empty, the queue head is pointed directly
 * at the new chain and the endpoint is primed.  Otherwise the chain is
 * appended to the tail of the last queued request, and the ATDTW
 * (add-dTD-tripwire) semaphore protocol is used to safely check
 * whether the hardware already picked it up or a re-prime is needed.
 *
 * Returns 0 on success or -ETIME if the tripwire never held.
 * Caller holds the udc lock.
 */
static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 bit_pos, direction;
	u32 usbcmd, epstatus;
	unsigned int loops;
	int retval = 0;

	udc = ep->udc;
	direction = ep_dir(ep);
	/* dQH layout: two entries per endpoint, OUT first then IN */
	dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
	/* epprime/epstatus: OUT bits 0..15, IN bits 16..31 */
	bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);

	/* check if the pipe is empty */
	if (!(list_empty(&ep->queue))) {
		struct mv_req *lastreq;
		lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
		/* append new chain to the tail of the pending chain */
		lastreq->tail->dtd_next =
			req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;

		/* link must be visible before checking hardware state */
		wmb();

		/* endpoint already primed: hardware will pick it up */
		if (readl(&udc->op_regs->epprime) & bit_pos)
			goto done;

		loops = LOOPS(READSAFE_TIMEOUT);
		while (1) {
			/* start with setting the semaphores */
			usbcmd = readl(&udc->op_regs->usbcmd);
			usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
			writel(usbcmd, &udc->op_regs->usbcmd);

			/* read the endpoint status */
			epstatus = readl(&udc->op_regs->epstatus) & bit_pos;

			/*
			 * Reread the ATDTW semaphore bit to check if it is
			 * cleared. When hardware see a hazard, it will clear
			 * the bit or else we remain set to 1 and we can
			 * proceed with priming of endpoint if not already
			 * primed.
			 */
			if (readl(&udc->op_regs->usbcmd)
				& USBCMD_ATDTW_TRIPWIRE_SET)
				break;

			loops--;
			if (loops == 0) {
				dev_err(&udc->dev->dev,
					"Timeout for ATDTW_TRIPWIRE...\n");
				retval = -ETIME;
				goto done;
			}
			udelay(LOOPS_USEC);
		}

		/* Clear the semaphore */
		usbcmd = readl(&udc->op_regs->usbcmd);
		usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
		writel(usbcmd, &udc->op_regs->usbcmd);

		/* endpoint was active while we linked: no re-prime needed */
		if (epstatus)
			goto done;
	}

	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
				& EP_QUEUE_HEAD_NEXT_POINTER_MASK;

	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);

	/* Ensure that updates to the QH will occur before priming. */
	wmb();

	/* Prime the Endpoint */
	writel(bit_pos, &udc->op_regs->epprime);

done:
	return retval;
}
 339
/*
 * build_dtd() - allocate and fill one dTD for a slice of a request.
 *
 * @req:     the request being chunked into dTDs
 * @length:  out - number of bytes this dTD covers
 * @dma:     out - DMA address of the new dTD
 * @is_last: out - nonzero when this dTD completes the request
 *
 * Advances req->req.actual by *length as a side effect.  Returns the
 * new dTD, or NULL if the dma pool allocation failed.
 */
static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
		dma_addr_t *dma, int *is_last)
{
	struct mv_dtd *dtd;
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 temp, mult = 0;

	/* how big will this transfer be? */
	if (usb_endpoint_xfer_isoc(req->ep->ep.desc)) {
		/* isoc: cap each dTD at mult * maxpacket (one microframe) */
		dqh = req->ep->dqh;
		mult = (dqh->max_packet_length >> EP_QUEUE_HEAD_MULT_POS)
				& 0x3;
		*length = min(req->req.length - req->req.actual,
				(unsigned)(mult * req->ep->ep.maxpacket));
	} else
		*length = min(req->req.length - req->req.actual,
				(unsigned)EP_MAX_LENGTH_TRANSFER);

	udc = req->ep->udc;

	/*
	 * Be careful that no _GFP_HIGHMEM is set,
	 * or we can not use dma_to_virt
	 */
	dtd = dma_pool_alloc(udc->dtd_pool, GFP_ATOMIC, dma);
	if (dtd == NULL)
		return dtd;

	dtd->td_dma = *dma;
	/*
	 * Initialize buffer page pointers: ptr0 points into the current
	 * page; ptr1..4 cover the following four 4 KiB pages.
	 */
	temp = (u32)(req->req.dma + req->req.actual);
	dtd->buff_ptr0 = cpu_to_le32(temp);
	temp &= ~0xFFF;
	dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
	dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
	dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
	dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);

	req->req.actual += *length;

	/* zlp is needed if req->req.zero is set */
	if (req->req.zero) {
		/* a short (or zero) final packet already terminates */
		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
			*is_last = 1;
		else
			*is_last = 0;
	} else if (req->req.length == req->req.actual)
		*is_last = 1;
	else
		*is_last = 0;

	/* Fill in the transfer size; set active bit */
	temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);

	/* Enable interrupt for the last dtd of a request */
	if (*is_last && !req->req.no_interrupt)
		temp |= DTD_IOC;

	/* isoc multiplier field in the dTD (bits 11:10) */
	temp |= mult << 10;

	dtd->size_ioc_sts = temp;

	/* dTD must be fully written before it can be linked/primed */
	mb();

	return dtd;
}
 407
 408/* generate dTD linked list for a request */
/*
 * req_to_dtd() - generate the dTD linked list for a request.
 *
 * Repeatedly calls build_dtd() until the whole request is covered,
 * chaining each new dTD to the previous one (both the DMA-visible
 * dtd_next and the CPU-side next_dtd_virt links).  Sets req->head,
 * req->tail and req->dtd_count.
 *
 * Returns 0 on success or -ENOMEM if a dTD allocation failed.
 * NOTE(review): on failure, dTDs already allocated in this call are
 * left on req->head for the caller's cleanup path -- confirm callers
 * free them (done() walks dtd_count entries).
 */
static int req_to_dtd(struct mv_req *req)
{
	unsigned count;
	int is_last, is_first = 1;
	struct mv_dtd *dtd, *last_dtd = NULL;
	dma_addr_t dma;

	do {
		dtd = build_dtd(req, &count, &dma, &is_last);
		if (dtd == NULL)
			return -ENOMEM;

		if (is_first) {
			is_first = 0;
			req->head = dtd;
		} else {
			last_dtd->dtd_next = dma;
			last_dtd->next_dtd_virt = dtd;
		}
		last_dtd = dtd;
		req->dtd_count++;
	} while (!is_last);

	/* set terminate bit to 1 for the last dTD */
	dtd->dtd_next = DTD_NEXT_TERMINATE;

	req->tail = dtd;

	return 0;
}
 439
 440static int mv_ep_enable(struct usb_ep *_ep,
 441                const struct usb_endpoint_descriptor *desc)
 442{
 443        struct mv_udc *udc;
 444        struct mv_ep *ep;
 445        struct mv_dqh *dqh;
 446        u16 max = 0;
 447        u32 bit_pos, epctrlx, direction;
 448        const unsigned char zlt = 1;
 449        unsigned char ios, mult;
 450        unsigned long flags;
 451
 452        ep = container_of(_ep, struct mv_ep, ep);
 453        udc = ep->udc;
 454
 455        if (!_ep || !desc
 456                        || desc->bDescriptorType != USB_DT_ENDPOINT)
 457                return -EINVAL;
 458
 459        if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
 460                return -ESHUTDOWN;
 461
 462        direction = ep_dir(ep);
 463        max = usb_endpoint_maxp(desc);
 464
 465        /*
 466         * disable HW zero length termination select
 467         * driver handles zero length packet through req->req.zero
 468         */
 469        bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
 470
 471        /* Check if the Endpoint is Primed */
 472        if ((readl(&udc->op_regs->epprime) & bit_pos)
 473                || (readl(&udc->op_regs->epstatus) & bit_pos)) {
 474                dev_info(&udc->dev->dev,
 475                        "ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
 476                        " ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
 477                        (unsigned)ep->ep_num, direction ? "SEND" : "RECV",
 478                        (unsigned)readl(&udc->op_regs->epprime),
 479                        (unsigned)readl(&udc->op_regs->epstatus),
 480                        (unsigned)bit_pos);
 481                goto en_done;
 482        }
 483
 484        /* Set the max packet length, interrupt on Setup and Mult fields */
 485        ios = 0;
 486        mult = 0;
 487        switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
 488        case USB_ENDPOINT_XFER_BULK:
 489        case USB_ENDPOINT_XFER_INT:
 490                break;
 491        case USB_ENDPOINT_XFER_CONTROL:
 492                ios = 1;
 493                break;
 494        case USB_ENDPOINT_XFER_ISOC:
 495                /* Calculate transactions needed for high bandwidth iso */
 496                mult = usb_endpoint_maxp_mult(desc);
 497                /* 3 transactions at most */
 498                if (mult > 3)
 499                        goto en_done;
 500                break;
 501        default:
 502                goto en_done;
 503        }
 504
 505        spin_lock_irqsave(&udc->lock, flags);
 506        /* Get the endpoint queue head address */
 507        dqh = ep->dqh;
 508        dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
 509                | (mult << EP_QUEUE_HEAD_MULT_POS)
 510                | (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
 511                | (ios ? EP_QUEUE_HEAD_IOS : 0);
 512        dqh->next_dtd_ptr = 1;
 513        dqh->size_ioc_int_sts = 0;
 514
 515        ep->ep.maxpacket = max;
 516        ep->ep.desc = desc;
 517        ep->stopped = 0;
 518
 519        /* Enable the endpoint for Rx or Tx and set the endpoint type */
 520        epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
 521        if (direction == EP_DIR_IN) {
 522                epctrlx &= ~EPCTRL_TX_ALL_MASK;
 523                epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
 524                        | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
 525                                << EPCTRL_TX_EP_TYPE_SHIFT);
 526        } else {
 527                epctrlx &= ~EPCTRL_RX_ALL_MASK;
 528                epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
 529                        | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
 530                                << EPCTRL_RX_EP_TYPE_SHIFT);
 531        }
 532        writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
 533
 534        /*
 535         * Implement Guideline (GL# USB-7) The unused endpoint type must
 536         * be programmed to bulk.
 537         */
 538        epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
 539        if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
 540                epctrlx |= (USB_ENDPOINT_XFER_BULK
 541                                << EPCTRL_RX_EP_TYPE_SHIFT);
 542                writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
 543        }
 544
 545        epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
 546        if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
 547                epctrlx |= (USB_ENDPOINT_XFER_BULK
 548                                << EPCTRL_TX_EP_TYPE_SHIFT);
 549                writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
 550        }
 551
 552        spin_unlock_irqrestore(&udc->lock, flags);
 553
 554        return 0;
 555en_done:
 556        return -EINVAL;
 557}
 558
/*
 * mv_ep_disable() - disable an endpoint.
 *
 * Clears the queue head, disables the endpoint's direction in EPCTRL,
 * and retires all pending requests with -ESHUTDOWN (nuke() flushes
 * the hardware FIFO).  Returns 0 on success, -EINVAL if the endpoint
 * is NULL or was never enabled.
 */
static int  mv_ep_disable(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	struct mv_ep *ep;
	struct mv_dqh *dqh;
	u32 epctrlx, direction;
	unsigned long flags;

	ep = container_of(_ep, struct mv_ep, ep);
	if ((_ep == NULL) || !ep->ep.desc)
		return -EINVAL;

	udc = ep->udc;

	/* Get the endpoint queue head address */
	dqh = ep->dqh;

	spin_lock_irqsave(&udc->lock, flags);

	direction = ep_dir(ep);

	/* Reset the max packet length and the interrupt on Setup */
	dqh->max_packet_length = 0;

	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	epctrlx &= ~((direction == EP_DIR_IN)
			? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
			: (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* nuke all pending requests (does flush) */
	nuke(ep, -ESHUTDOWN);

	ep->ep.desc = NULL;
	ep->stopped = 1;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
 600
 601static struct usb_request *
 602mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
 603{
 604        struct mv_req *req = NULL;
 605
 606        req = kzalloc(sizeof *req, gfp_flags);
 607        if (!req)
 608                return NULL;
 609
 610        req->req.dma = DMA_ADDR_INVALID;
 611        INIT_LIST_HEAD(&req->queue);
 612
 613        return &req->req;
 614}
 615
 616static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
 617{
 618        struct mv_req *req = NULL;
 619
 620        req = container_of(_req, struct mv_req, req);
 621
 622        if (_req)
 623                kfree(req);
 624}
 625
/*
 * mv_ep_fifo_flush() - flush an endpoint's FIFO in hardware.
 *
 * Writes the endpoint's bit to ENDPTFLUSH and waits for the flush to
 * complete; retries while the endpoint status bit remains set, since
 * the controller can re-arm the endpoint between flush and check.
 * Both the outer retry and inner completion waits are bounded by
 * timeouts and log an error on expiry.  For ep0 both directions are
 * flushed at once.
 */
static void mv_ep_fifo_flush(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	u32 bit_pos, direction;
	struct mv_ep *ep;
	unsigned int loops;

	if (!_ep)
		return;

	ep = container_of(_ep, struct mv_ep, ep);
	if (!ep->ep.desc)
		return;

	udc = ep->udc;
	direction = ep_dir(ep);

	/* ep0 flushes RX (bit 0) and TX (bit 16) together */
	if (ep->ep_num == 0)
		bit_pos = (1 << 16) | 1;
	else if (direction == EP_DIR_OUT)
		bit_pos = 1 << ep->ep_num;
	else
		bit_pos = 1 << (16 + ep->ep_num);

	loops = LOOPS(EPSTATUS_TIMEOUT);
	do {
		unsigned int inter_loops;

		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
				(unsigned)readl(&udc->op_regs->epstatus),
				(unsigned)bit_pos);
			return;
		}
		/* Write 1 to the Flush register */
		writel(bit_pos, &udc->op_regs->epflush);

		/* Wait until flushing completed */
		inter_loops = LOOPS(FLUSH_TIMEOUT);
		while (readl(&udc->op_regs->epflush)) {
			/*
			 * ENDPTFLUSH bit should be cleared to indicate this
			 * operation is complete
			 */
			if (inter_loops == 0) {
				dev_err(&udc->dev->dev,
					"TIMEOUT for ENDPTFLUSH=0x%x,"
					"bit_pos=0x%x\n",
					(unsigned)readl(&udc->op_regs->epflush),
					(unsigned)bit_pos);
				return;
			}
			inter_loops--;
			udelay(LOOPS_USEC);
		}
		loops--;
	} while (readl(&udc->op_regs->epstatus) & bit_pos);
}
 685
 686/* queues (submits) an I/O request to an endpoint */
 687static int
 688mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
 689{
 690        struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
 691        struct mv_req *req = container_of(_req, struct mv_req, req);
 692        struct mv_udc *udc = ep->udc;
 693        unsigned long flags;
 694        int retval;
 695
 696        /* catch various bogus parameters */
 697        if (!_req || !req->req.complete || !req->req.buf
 698                        || !list_empty(&req->queue)) {
 699                dev_err(&udc->dev->dev, "%s, bad params", __func__);
 700                return -EINVAL;
 701        }
 702        if (unlikely(!_ep || !ep->ep.desc)) {
 703                dev_err(&udc->dev->dev, "%s, bad ep", __func__);
 704                return -EINVAL;
 705        }
 706
 707        udc = ep->udc;
 708        if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
 709                return -ESHUTDOWN;
 710
 711        req->ep = ep;
 712
 713        /* map virtual address to hardware */
 714        retval = usb_gadget_map_request(&udc->gadget, _req, ep_dir(ep));
 715        if (retval)
 716                return retval;
 717
 718        req->req.status = -EINPROGRESS;
 719        req->req.actual = 0;
 720        req->dtd_count = 0;
 721
 722        spin_lock_irqsave(&udc->lock, flags);
 723
 724        /* build dtds and push them to device queue */
 725        if (!req_to_dtd(req)) {
 726                retval = queue_dtd(ep, req);
 727                if (retval) {
 728                        spin_unlock_irqrestore(&udc->lock, flags);
 729                        dev_err(&udc->dev->dev, "Failed to queue dtd\n");
 730                        goto err_unmap_dma;
 731                }
 732        } else {
 733                spin_unlock_irqrestore(&udc->lock, flags);
 734                dev_err(&udc->dev->dev, "Failed to dma_pool_alloc\n");
 735                retval = -ENOMEM;
 736                goto err_unmap_dma;
 737        }
 738
 739        /* Update ep0 state */
 740        if (ep->ep_num == 0)
 741                udc->ep0_state = DATA_STATE_XMIT;
 742
 743        /* irq handler advances the queue */
 744        list_add_tail(&req->queue, &ep->queue);
 745        spin_unlock_irqrestore(&udc->lock, flags);
 746
 747        return 0;
 748
 749err_unmap_dma:
 750        usb_gadget_unmap_request(&udc->gadget, _req, ep_dir(ep));
 751
 752        return retval;
 753}
 754
/*
 * mv_prime_ep() - point the queue head at a request and prime it.
 *
 * Used when the endpoint needs to be (re)started on a specific
 * request, e.g. after dequeuing the request that was in flight.
 * Caller holds the udc lock.
 */
static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_dqh *dqh = ep->dqh;
	u32 bit_pos;

	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
		& EP_QUEUE_HEAD_NEXT_POINTER_MASK;

	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);

	/* Ensure that updates to the QH will occur before priming. */
	wmb();

	/* OUT endpoints use bits 0..15, IN endpoints bits 16..31 */
	bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);

	/* Prime the Endpoint */
	writel(bit_pos, &ep->udc->op_regs->epprime);
}
 775
 776/* dequeues (cancels, unlinks) an I/O request from an endpoint */
/*
 * mv_ep_dequeue() - dequeue (cancel, unlink) an I/O request.
 *
 * Temporarily disables the endpoint, removes @_req from the queue --
 * patching either the queue head (if the request was at the front) or
 * the previous request's dTD link (if it was further back) -- retires
 * it with -ECONNRESET, then re-enables the endpoint.
 *
 * Returns 0 on success or -EINVAL if the request is not queued on
 * this endpoint.
 */
static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
	struct mv_req *req;
	struct mv_udc *udc = ep->udc;
	unsigned long flags;
	int stopped, ret = 0;
	u32 epctrlx;

	if (!_ep || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->udc->lock, flags);
	stopped = ep->stopped;

	/* Stop the ep before we deal with the queue */
	ep->stopped = 1;
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx &= ~EPCTRL_TX_ENABLE;
	else
		epctrlx &= ~EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}

	/* The request is in progress, or completed but not dequeued */
	if (ep->queue.next == &req->queue) {
		_req->status = -ECONNRESET;
		mv_ep_fifo_flush(_ep);	/* flush current transfer */

		/* The request isn't the last request in this ep queue */
		if (req->queue.next != &ep->queue) {
			struct mv_req *next_req;

			next_req = list_entry(req->queue.next,
				struct mv_req, queue);

			/* Point the QH to the first TD of next request */
			mv_prime_ep(ep, next_req);
		} else {
			struct mv_dqh *qh;

			/* queue now empty: set terminate bit, clear status */
			qh = ep->dqh;
			qh->next_dtd_ptr = 1;
			qh->size_ioc_int_sts = 0;
		}

		/* The request hasn't been processed, patch up the TD chain */
	} else {
		struct mv_req *prev_req;

		/*
		 * Splice this request out of the dTD chain by copying its
		 * tail's next-pointer into the previous request's tail.
		 * NOTE(review): readl/writel are used here on dTDs in
		 * coherent memory rather than MMIO -- presumably only for
		 * their ordering/access-size guarantees; confirm.
		 */
		prev_req = list_entry(req->queue.prev, struct mv_req, queue);
		writel(readl(&req->tail->dtd_next),
				&prev_req->tail->dtd_next);

	}

	done(ep, req, -ECONNRESET);

	/* Enable EP */
out:
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx |= EPCTRL_TX_ENABLE;
	else
		epctrlx |= EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	ep->stopped = stopped;

	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return ret;
}
 858
 859static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
 860{
 861        u32 epctrlx;
 862
 863        epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
 864
 865        if (stall) {
 866                if (direction == EP_DIR_IN)
 867                        epctrlx |= EPCTRL_TX_EP_STALL;
 868                else
 869                        epctrlx |= EPCTRL_RX_EP_STALL;
 870        } else {
 871                if (direction == EP_DIR_IN) {
 872                        epctrlx &= ~EPCTRL_TX_EP_STALL;
 873                        epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
 874                } else {
 875                        epctrlx &= ~EPCTRL_RX_EP_STALL;
 876                        epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
 877                }
 878        }
 879        writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
 880}
 881
 882static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
 883{
 884        u32 epctrlx;
 885
 886        epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
 887
 888        if (direction == EP_DIR_OUT)
 889                return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
 890        else
 891                return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
 892}
 893
 894static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
 895{
 896        struct mv_ep *ep;
 897        unsigned long flags = 0;
 898        int status = 0;
 899        struct mv_udc *udc;
 900
 901        ep = container_of(_ep, struct mv_ep, ep);
 902        udc = ep->udc;
 903        if (!_ep || !ep->ep.desc) {
 904                status = -EINVAL;
 905                goto out;
 906        }
 907
 908        if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
 909                status = -EOPNOTSUPP;
 910                goto out;
 911        }
 912
 913        /*
 914         * Attempt to halt IN ep will fail if any transfer requests
 915         * are still queue
 916         */
 917        if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
 918                status = -EAGAIN;
 919                goto out;
 920        }
 921
 922        spin_lock_irqsave(&ep->udc->lock, flags);
 923        ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
 924        if (halt && wedge)
 925                ep->wedge = 1;
 926        else if (!halt)
 927                ep->wedge = 0;
 928        spin_unlock_irqrestore(&ep->udc->lock, flags);
 929
 930        if (ep->ep_num == 0) {
 931                udc->ep0_state = WAIT_FOR_SETUP;
 932                udc->ep0_dir = EP_DIR_OUT;
 933        }
 934out:
 935        return status;
 936}
 937
/* usb_ep_ops.set_halt: halt or resume an endpoint without wedging it. */
static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
{
	return mv_ep_set_halt_wedge(_ep, halt, 0);
}
 942
/* usb_ep_ops.set_wedge: halt an endpoint until the gadget clears it. */
static int mv_ep_set_wedge(struct usb_ep *_ep)
{
	return mv_ep_set_halt_wedge(_ep, 1, 1);
}
 947
/* endpoint operations handed to the gadget core for every mv_ep */
static const struct usb_ep_ops mv_ep_ops = {
	.enable		= mv_ep_enable,
	.disable	= mv_ep_disable,

	.alloc_request	= mv_alloc_request,
	.free_request	= mv_free_request,

	.queue		= mv_ep_queue,
	.dequeue	= mv_ep_dequeue,

	.set_wedge	= mv_ep_set_wedge,
	.set_halt	= mv_ep_set_halt,
	.fifo_flush	= mv_ep_fifo_flush,	/* flush fifo */
};
 962
/* Prepare and enable the controller clock; returns 0 or a clk error. */
static int udc_clock_enable(struct mv_udc *udc)
{
	return clk_prepare_enable(udc->clk);
}
 967
/* Disable and unprepare the controller clock (pairs with udc_clock_enable). */
static void udc_clock_disable(struct mv_udc *udc)
{
	clk_disable_unprepare(udc->clk);
}
 972
 973static void udc_stop(struct mv_udc *udc)
 974{
 975        u32 tmp;
 976
 977        /* Disable interrupts */
 978        tmp = readl(&udc->op_regs->usbintr);
 979        tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
 980                USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
 981        writel(tmp, &udc->op_regs->usbintr);
 982
 983        udc->stopped = 1;
 984
 985        /* Reset the Run the bit in the command register to stop VUSB */
 986        tmp = readl(&udc->op_regs->usbcmd);
 987        tmp &= ~USBCMD_RUN_STOP;
 988        writel(tmp, &udc->op_regs->usbcmd);
 989}
 990
 991static void udc_start(struct mv_udc *udc)
 992{
 993        u32 usbintr;
 994
 995        usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
 996                | USBINTR_PORT_CHANGE_DETECT_EN
 997                | USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
 998        /* Enable interrupts */
 999        writel(usbintr, &udc->op_regs->usbintr);
1000
1001        udc->stopped = 0;
1002
1003        /* Set the Run bit in the command register */
1004        writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
1005}
1006
/*
 * Hard-reset the controller and re-program it for device mode:
 * stop, reset, wait for completion, select device mode with setup
 * lockout off, restore the endpoint-list address, and re-apply the
 * port and ep0 configuration. Returns 0 or -ETIMEDOUT.
 */
static int udc_reset(struct mv_udc *udc)
{
	unsigned int loops;
	u32 tmp, portsc;

	/* Stop the controller */
	tmp = readl(&udc->op_regs->usbcmd);
	tmp &= ~USBCMD_RUN_STOP;
	writel(tmp, &udc->op_regs->usbcmd);

	/* Reset the controller to get default values */
	writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);

	/* wait for reset to complete; hardware clears CTRL_RESET when done */
	loops = LOOPS(RESET_TIMEOUT);
	while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"Wait for RESET completed TIMEOUT\n");
			return -ETIMEDOUT;
		}
		loops--;
		udelay(LOOPS_USEC);
	}

	/* set controller to device mode */
	tmp = readl(&udc->op_regs->usbmode);
	tmp |= USBMODE_CTRL_MODE_DEVICE;

	/* turn setup lockout off, require setup tripwire in usbcmd */
	tmp |= USBMODE_SETUP_LOCK_OFF;

	writel(tmp, &udc->op_regs->usbmode);

	writel(0x0, &udc->op_regs->epsetupstat);

	/* Configure the Endpoint List Address */
	writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
		&udc->op_regs->eplistaddr);

	portsc = readl(&udc->op_regs->portsc[0]);
	/*
	 * NOTE(review): (~A | ~B) == ~(A & B), so unless PORTSCX_W1C_BITS
	 * and PORTSCX_PORT_POWER overlap completely this mask is all-ones
	 * and the &= is a no-op. The intent was presumably
	 * ~(PORTSCX_W1C_BITS | PORTSCX_PORT_POWER) — confirm against the
	 * register definitions before changing hardware behavior.
	 */
	if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
		portsc &= (~PORTSCX_W1C_BITS | ~PORTSCX_PORT_POWER);

	if (udc->force_fs)
		portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
	else
		portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);

	writel(portsc, &udc->op_regs->portsc[0]);

	/* make sure ep0 starts un-stalled in both directions */
	tmp = readl(&udc->op_regs->epctrlx[0]);
	tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
	writel(tmp, &udc->op_regs->epctrlx[0]);

	return 0;
}
1064
1065static int mv_udc_enable_internal(struct mv_udc *udc)
1066{
1067        int retval;
1068
1069        if (udc->active)
1070                return 0;
1071
1072        dev_dbg(&udc->dev->dev, "enable udc\n");
1073        retval = udc_clock_enable(udc);
1074        if (retval)
1075                return retval;
1076
1077        if (udc->pdata->phy_init) {
1078                retval = udc->pdata->phy_init(udc->phy_regs);
1079                if (retval) {
1080                        dev_err(&udc->dev->dev,
1081                                "init phy error %d\n", retval);
1082                        udc_clock_disable(udc);
1083                        return retval;
1084                }
1085        }
1086        udc->active = 1;
1087
1088        return 0;
1089}
1090
1091static int mv_udc_enable(struct mv_udc *udc)
1092{
1093        if (udc->clock_gating)
1094                return mv_udc_enable_internal(udc);
1095
1096        return 0;
1097}
1098
1099static void mv_udc_disable_internal(struct mv_udc *udc)
1100{
1101        if (udc->active) {
1102                dev_dbg(&udc->dev->dev, "disable udc\n");
1103                if (udc->pdata->phy_deinit)
1104                        udc->pdata->phy_deinit(udc->phy_regs);
1105                udc_clock_disable(udc);
1106                udc->active = 0;
1107        }
1108}
1109
1110static void mv_udc_disable(struct mv_udc *udc)
1111{
1112        if (udc->clock_gating)
1113                mv_udc_disable_internal(udc);
1114}
1115
1116static int mv_udc_get_frame(struct usb_gadget *gadget)
1117{
1118        struct mv_udc *udc;
1119        u16     retval;
1120
1121        if (!gadget)
1122                return -ENODEV;
1123
1124        udc = container_of(gadget, struct mv_udc, gadget);
1125
1126        retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;
1127
1128        return retval;
1129}
1130
1131/* Tries to wake up the host connected to this gadget */
1132static int mv_udc_wakeup(struct usb_gadget *gadget)
1133{
1134        struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
1135        u32 portsc;
1136
1137        /* Remote wakeup feature not enabled by host */
1138        if (!udc->remote_wakeup)
1139                return -ENOTSUPP;
1140
1141        portsc = readl(&udc->op_regs->portsc);
1142        /* not suspended? */
1143        if (!(portsc & PORTSCX_PORT_SUSPEND))
1144                return 0;
1145        /* trigger force resume */
1146        portsc |= PORTSCX_PORT_FORCE_RESUME;
1147        writel(portsc, &udc->op_regs->portsc[0]);
1148        return 0;
1149}
1150
/*
 * usb_gadget_ops.vbus_session: VBUS state changed. With a bound driver
 * and soft-connect asserted, VBUS-on re-initialises and starts the
 * controller; VBUS-off tears down all activity and gates the clocks.
 * Called with the lock not held; stop_activity() drops/retakes it.
 */
static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct mv_udc *udc;
	unsigned long flags;
	int retval = 0;

	udc = container_of(gadget, struct mv_udc, gadget);
	spin_lock_irqsave(&udc->lock, flags);

	udc->vbus_active = (is_active != 0);

	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
		__func__, udc->softconnect, udc->vbus_active);

	if (udc->driver && udc->softconnect && udc->vbus_active) {
		retval = mv_udc_enable(udc);
		if (retval == 0) {
			/* Clock is disabled, need re-init registers */
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	} else if (udc->driver && udc->softconnect) {
		/* VBUS dropped: nothing to do if we never powered up */
		if (!udc->active)
			goto out;

		/* stop all the transfer in queue*/
		stop_activity(udc, udc->driver);
		udc_stop(udc);
		mv_udc_disable(udc);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);
	return retval;
}
1187
/*
 * usb_gadget_ops.pullup: software connect/disconnect (D+ pull-up).
 * Mirrors mv_udc_vbus_session(): connect with VBUS present starts the
 * controller; disconnect with VBUS present stops all activity.
 * stop_activity() drops and retakes udc->lock for driver callbacks.
 */
static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct mv_udc *udc;
	unsigned long flags;
	int retval = 0;

	udc = container_of(gadget, struct mv_udc, gadget);
	spin_lock_irqsave(&udc->lock, flags);

	udc->softconnect = (is_on != 0);

	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
			__func__, udc->softconnect, udc->vbus_active);

	if (udc->driver && udc->softconnect && udc->vbus_active) {
		retval = mv_udc_enable(udc);
		if (retval == 0) {
			/* Clock is disabled, need re-init registers */
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	} else if (udc->driver && udc->vbus_active) {
		/* stop all the transfer in queue*/
		stop_activity(udc, udc->driver);
		udc_stop(udc);
		mv_udc_disable(udc);
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return retval;
}
1220
1221static int mv_udc_start(struct usb_gadget *, struct usb_gadget_driver *);
1222static int mv_udc_stop(struct usb_gadget *);
/* device controller usb_gadget_ops structure (callbacks for the UDC core) */
static const struct usb_gadget_ops mv_ops = {

	/* returns the current frame number */
	.get_frame	= mv_udc_get_frame,

	/* tries to wake up the host connected to this gadget */
	.wakeup		= mv_udc_wakeup,

	/* notify controller that VBUS is powered or not */
	.vbus_session	= mv_udc_vbus_session,

	/* D+ pullup, software-controlled connect/disconnect to USB host */
	.pullup		= mv_udc_pullup,
	/* bind/unbind a gadget function driver to this controller */
	.udc_start	= mv_udc_start,
	.udc_stop	= mv_udc_stop,
};
1240
1241static int eps_init(struct mv_udc *udc)
1242{
1243        struct mv_ep    *ep;
1244        char name[14];
1245        int i;
1246
1247        /* initialize ep0 */
1248        ep = &udc->eps[0];
1249        ep->udc = udc;
1250        strncpy(ep->name, "ep0", sizeof(ep->name));
1251        ep->ep.name = ep->name;
1252        ep->ep.ops = &mv_ep_ops;
1253        ep->wedge = 0;
1254        ep->stopped = 0;
1255        usb_ep_set_maxpacket_limit(&ep->ep, EP0_MAX_PKT_SIZE);
1256        ep->ep.caps.type_control = true;
1257        ep->ep.caps.dir_in = true;
1258        ep->ep.caps.dir_out = true;
1259        ep->ep_num = 0;
1260        ep->ep.desc = &mv_ep0_desc;
1261        INIT_LIST_HEAD(&ep->queue);
1262
1263        ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
1264
1265        /* initialize other endpoints */
1266        for (i = 2; i < udc->max_eps * 2; i++) {
1267                ep = &udc->eps[i];
1268                if (i % 2) {
1269                        snprintf(name, sizeof(name), "ep%din", i / 2);
1270                        ep->direction = EP_DIR_IN;
1271                        ep->ep.caps.dir_in = true;
1272                } else {
1273                        snprintf(name, sizeof(name), "ep%dout", i / 2);
1274                        ep->direction = EP_DIR_OUT;
1275                        ep->ep.caps.dir_out = true;
1276                }
1277                ep->udc = udc;
1278                strncpy(ep->name, name, sizeof(ep->name));
1279                ep->ep.name = ep->name;
1280
1281                ep->ep.caps.type_iso = true;
1282                ep->ep.caps.type_bulk = true;
1283                ep->ep.caps.type_int = true;
1284
1285                ep->ep.ops = &mv_ep_ops;
1286                ep->stopped = 0;
1287                usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
1288                ep->ep_num = i / 2;
1289
1290                INIT_LIST_HEAD(&ep->queue);
1291                list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
1292
1293                ep->dqh = &udc->ep_dqh[i];
1294        }
1295
1296        return 0;
1297}
1298
1299/* delete all endpoint requests, called with spinlock held */
1300static void nuke(struct mv_ep *ep, int status)
1301{
1302        /* called with spinlock held */
1303        ep->stopped = 1;
1304
1305        /* endpoint fifo flush */
1306        mv_ep_fifo_flush(&ep->ep);
1307
1308        while (!list_empty(&ep->queue)) {
1309                struct mv_req *req = NULL;
1310                req = list_entry(ep->queue.next, struct mv_req, queue);
1311                done(ep, req, status);
1312        }
1313}
1314
/*
 * Abort all endpoint traffic and report a bus reset to the gadget
 * driver. Called with udc->lock held; the lock is dropped around the
 * usb_gadget_udc_reset() callback.
 */
static void gadget_reset(struct mv_udc *udc, struct usb_gadget_driver *driver)
{
	struct mv_ep	*ep;

	nuke(&udc->eps[0], -ESHUTDOWN);

	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		nuke(ep, -ESHUTDOWN);
	}

	/* report reset; the driver is already quiesced */
	if (driver) {
		spin_unlock(&udc->lock);
		usb_gadget_udc_reset(&udc->gadget, driver);
		spin_lock(&udc->lock);
	}
}
/*
 * stop all USB activities: abort every queued request on every
 * endpoint and notify the gadget driver of the disconnect. Called
 * with udc->lock held; the lock is dropped around the disconnect
 * callback.
 */
static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
{
	struct mv_ep	*ep;

	nuke(&udc->eps[0], -ESHUTDOWN);

	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		nuke(ep, -ESHUTDOWN);
	}

	/* report disconnect; the driver is already quiesced */
	if (driver) {
		spin_unlock(&udc->lock);
		driver->disconnect(&udc->gadget);
		spin_lock(&udc->lock);
	}
}
1350
/*
 * usb_gadget_ops.udc_start: bind a gadget function driver. Resets the
 * ep0 state machine, registers with the OTG transceiver when present,
 * and kicks the vbus worker so a cable already attached at boot is
 * noticed. Returns -EBUSY if a driver is already bound.
 */
static int mv_udc_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct mv_udc *udc;
	int retval = 0;
	unsigned long flags;

	udc = container_of(gadget, struct mv_udc, gadget);

	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

	/* hook up the driver ... */
	driver->driver.bus = NULL;
	udc->driver = driver;

	udc->usb_state = USB_STATE_ATTACHED;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = EP_DIR_OUT;

	spin_unlock_irqrestore(&udc->lock, flags);

	if (udc->transceiver) {
		retval = otg_set_peripheral(udc->transceiver->otg,
					&udc->gadget);
		if (retval) {
			dev_err(&udc->dev->dev,
				"unable to register peripheral to otg\n");
			/* roll back the binding on OTG failure */
			udc->driver = NULL;
			return retval;
		}
	}

	/* When boot with cable attached, there will be no vbus irq occurred */
	if (udc->qwork)
		queue_work(udc->qwork, &udc->vbus_work);

	return 0;
}
1392
/*
 * usb_gadget_ops.udc_stop: unbind the gadget function driver. Powers
 * the controller up long enough to stop it cleanly, aborts all
 * traffic, then gates the clocks and clears the driver binding.
 */
static int mv_udc_stop(struct usb_gadget *gadget)
{
	struct mv_udc *udc;
	unsigned long flags;

	udc = container_of(gadget, struct mv_udc, gadget);

	spin_lock_irqsave(&udc->lock, flags);

	/* ensure clocks are on so the register writes below take effect */
	mv_udc_enable(udc);
	udc_stop(udc);

	/* stop all usb activities */
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	stop_activity(udc, NULL);
	mv_udc_disable(udc);

	spin_unlock_irqrestore(&udc->lock, flags);

	/* unbind gadget driver */
	udc->driver = NULL;

	return 0;
}
1417
1418static void mv_set_ptc(struct mv_udc *udc, u32 mode)
1419{
1420        u32 portsc;
1421
1422        portsc = readl(&udc->op_regs->portsc[0]);
1423        portsc |= mode << 16;
1424        writel(portsc, &udc->op_regs->portsc[0]);
1425}
1426
1427static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
1428{
1429        struct mv_ep *mvep = container_of(ep, struct mv_ep, ep);
1430        struct mv_req *req = container_of(_req, struct mv_req, req);
1431        struct mv_udc *udc;
1432        unsigned long flags;
1433
1434        udc = mvep->udc;
1435
1436        dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);
1437
1438        spin_lock_irqsave(&udc->lock, flags);
1439        if (req->test_mode) {
1440                mv_set_ptc(udc, req->test_mode);
1441                req->test_mode = 0;
1442        }
1443        spin_unlock_irqrestore(&udc->lock, flags);
1444}
1445
/*
 * Queue a status-stage transfer on ep0 using the pre-allocated
 * udc->status_req: a 2-byte status word when @empty is false, a
 * zero-length packet when true. Sets ep0_dir/ep0_state for the
 * status phase. Returns 0 or a negative errno.
 */
static int
udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
{
	int retval = 0;
	struct mv_req *req;
	struct mv_ep *ep;

	ep = &udc->eps[0];
	udc->ep0_dir = direction;
	udc->ep0_state = WAIT_FOR_OUT_STATUS;

	req = udc->status_req;

	/* fill in the request structure */
	if (empty == false) {
		*((u16 *) req->req.buf) = cpu_to_le16(status);
		req->req.length = 2;
	} else
		req->req.length = 0;

	req->ep = ep;
	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	if (udc->test_mode) {
		/* defer the test-mode switch until the status stage is done */
		req->req.complete = prime_status_complete;
		req->test_mode = udc->test_mode;
		udc->test_mode = 0;
	} else
		req->req.complete = NULL;
	req->dtd_count = 0;

	if (req->req.dma == DMA_ADDR_INVALID) {
		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
				req->req.buf, req->req.length,
				ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = 1;
	}

	/* prime the data phase */
	if (!req_to_dtd(req)) {
		retval = queue_dtd(ep, req);
		if (retval) {
			dev_err(&udc->dev->dev,
				"Failed to queue dtd when prime status\n");
			goto out;
		}
	} else{	/* no mem */
		retval = -ENOMEM;
		dev_err(&udc->dev->dev,
			"Failed to dma_pool_alloc when prime status\n");
		goto out;
	}

	list_add_tail(&req->queue, &ep->queue);

	return 0;
out:
	/*
	 * NOTE(review): the buffer was mapped with dma_map_single() above
	 * but is released with usb_gadget_unmap_request() here — confirm
	 * the two are compatible for this request (req->mapped is never
	 * cleared on this path).
	 */
	usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));

	return retval;
}
1507
1508static void mv_udc_testmode(struct mv_udc *udc, u16 index)
1509{
1510        if (index <= TEST_FORCE_EN) {
1511                udc->test_mode = index;
1512                if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1513                        ep0_stall(udc);
1514        } else
1515                dev_err(&udc->dev->dev,
1516                        "This test mode(%d) is not supported\n", index);
1517}
1518
1519static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1520{
1521        udc->dev_addr = (u8)setup->wValue;
1522
1523        /* update usb state */
1524        udc->usb_state = USB_STATE_ADDRESS;
1525
1526        if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1527                ep0_stall(udc);
1528}
1529
1530static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
1531        struct usb_ctrlrequest *setup)
1532{
1533        u16 status = 0;
1534        int retval;
1535
1536        if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
1537                != (USB_DIR_IN | USB_TYPE_STANDARD))
1538                return;
1539
1540        if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
1541                status = 1 << USB_DEVICE_SELF_POWERED;
1542                status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
1543        } else if ((setup->bRequestType & USB_RECIP_MASK)
1544                        == USB_RECIP_INTERFACE) {
1545                /* get interface status */
1546                status = 0;
1547        } else if ((setup->bRequestType & USB_RECIP_MASK)
1548                        == USB_RECIP_ENDPOINT) {
1549                u8 ep_num, direction;
1550
1551                ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1552                direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1553                                ? EP_DIR_IN : EP_DIR_OUT;
1554                status = ep_is_stall(udc, ep_num, direction)
1555                                << USB_ENDPOINT_HALT;
1556        }
1557
1558        retval = udc_prime_status(udc, EP_DIR_IN, status, false);
1559        if (retval)
1560                ep0_stall(udc);
1561        else
1562                udc->ep0_state = DATA_STATE_XMIT;
1563}
1564
1565static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1566{
1567        u8 ep_num;
1568        u8 direction;
1569        struct mv_ep *ep;
1570
1571        if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1572                == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
1573                switch (setup->wValue) {
1574                case USB_DEVICE_REMOTE_WAKEUP:
1575                        udc->remote_wakeup = 0;
1576                        break;
1577                default:
1578                        goto out;
1579                }
1580        } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1581                == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
1582                switch (setup->wValue) {
1583                case USB_ENDPOINT_HALT:
1584                        ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1585                        direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1586                                ? EP_DIR_IN : EP_DIR_OUT;
1587                        if (setup->wValue != 0 || setup->wLength != 0
1588                                || ep_num > udc->max_eps)
1589                                goto out;
1590                        ep = &udc->eps[ep_num * 2 + direction];
1591                        if (ep->wedge == 1)
1592                                break;
1593                        spin_unlock(&udc->lock);
1594                        ep_set_stall(udc, ep_num, direction, 0);
1595                        spin_lock(&udc->lock);
1596                        break;
1597                default:
1598                        goto out;
1599                }
1600        } else
1601                goto out;
1602
1603        if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1604                ep0_stall(udc);
1605out:
1606        return;
1607}
1608
1609static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1610{
1611        u8 ep_num;
1612        u8 direction;
1613
1614        if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1615                == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
1616                switch (setup->wValue) {
1617                case USB_DEVICE_REMOTE_WAKEUP:
1618                        udc->remote_wakeup = 1;
1619                        break;
1620                case USB_DEVICE_TEST_MODE:
1621                        if (setup->wIndex & 0xFF
1622                                ||  udc->gadget.speed != USB_SPEED_HIGH)
1623                                ep0_stall(udc);
1624
1625                        if (udc->usb_state != USB_STATE_CONFIGURED
1626                                && udc->usb_state != USB_STATE_ADDRESS
1627                                && udc->usb_state != USB_STATE_DEFAULT)
1628                                ep0_stall(udc);
1629
1630                        mv_udc_testmode(udc, (setup->wIndex >> 8));
1631                        goto out;
1632                default:
1633                        goto out;
1634                }
1635        } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1636                == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
1637                switch (setup->wValue) {
1638                case USB_ENDPOINT_HALT:
1639                        ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1640                        direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1641                                ? EP_DIR_IN : EP_DIR_OUT;
1642                        if (setup->wValue != 0 || setup->wLength != 0
1643                                || ep_num > udc->max_eps)
1644                                goto out;
1645                        spin_unlock(&udc->lock);
1646                        ep_set_stall(udc, ep_num, direction, 1);
1647                        spin_lock(&udc->lock);
1648                        break;
1649                default:
1650                        goto out;
1651                }
1652        } else
1653                goto out;
1654
1655        if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1656                ep0_stall(udc);
1657out:
1658        return;
1659}
1660
/*
 * Dispatch a SETUP packet: a few standard requests are handled in the
 * driver (GET_STATUS, SET_ADDRESS, CLEAR/SET_FEATURE); everything
 * else is delegated to the gadget driver's setup() callback with
 * udc->lock temporarily released. Any pending OUT transfer on this
 * control endpoint is aborted first.
 */
static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
	__releases(&ep->udc->lock)
	__acquires(&ep->udc->lock)
{
	bool delegate = false;

	/* a new SETUP cancels whatever was pending on the OUT side */
	nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);

	dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
			setup->bRequestType, setup->bRequest,
			setup->wValue, setup->wIndex, setup->wLength);
	/* We process some standard setup requests here */
	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup->bRequest) {
		case USB_REQ_GET_STATUS:
			ch9getstatus(udc, ep_num, setup);
			break;

		case USB_REQ_SET_ADDRESS:
			ch9setaddress(udc, setup);
			break;

		case USB_REQ_CLEAR_FEATURE:
			ch9clearfeature(udc, setup);
			break;

		case USB_REQ_SET_FEATURE:
			ch9setfeature(udc, setup);
			break;

		default:
			delegate = true;
		}
	} else
		delegate = true;

	/* delegate USB standard requests to the gadget driver */
	if (delegate == true) {
		/* USB requests handled by gadget */
		if (setup->wLength) {
			/* DATA phase from gadget, STATUS phase from udc */
			udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
					?  EP_DIR_IN : EP_DIR_OUT;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
					?  DATA_STATE_XMIT : DATA_STATE_RECV;
		} else {
			/* no DATA phase, IN STATUS phase from gadget */
			udc->ep0_dir = EP_DIR_IN;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = WAIT_FOR_OUT_STATUS;
		}
	}
}
1724
1725/* complete DATA or STATUS phase of ep0 prime status phase if needed */
1726static void ep0_req_complete(struct mv_udc *udc,
1727        struct mv_ep *ep0, struct mv_req *req)
1728{
1729        u32 new_addr;
1730
1731        if (udc->usb_state == USB_STATE_ADDRESS) {
1732                /* set the new address */
1733                new_addr = (u32)udc->dev_addr;
1734                writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
1735                        &udc->op_regs->deviceaddr);
1736        }
1737
1738        done(ep0, req, 0);
1739
1740        switch (udc->ep0_state) {
1741        case DATA_STATE_XMIT:
1742                /* receive status phase */
1743                if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
1744                        ep0_stall(udc);
1745                break;
1746        case DATA_STATE_RECV:
1747                /* send status phase */
1748                if (udc_prime_status(udc, EP_DIR_IN, 0 , true))
1749                        ep0_stall(udc);
1750                break;
1751        case WAIT_FOR_OUT_STATUS:
1752                udc->ep0_state = WAIT_FOR_SETUP;
1753                break;
1754        case WAIT_FOR_SETUP:
1755                dev_err(&udc->dev->dev, "unexpect ep0 packets\n");
1756                break;
1757        default:
1758                ep0_stall(udc);
1759                break;
1760        }
1761}
1762
/*
 * Copy an incoming SETUP packet (8 bytes) out of the endpoint's OUT
 * queue head into @buffer_ptr, using the hardware "setup tripwire"
 * semaphore to guard against the packet being overwritten mid-copy.
 */
static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
{
        u32 temp;
        struct mv_dqh *dqh;

        /* queue heads are laid out in OUT/IN pairs per endpoint */
        dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];

        /* Clear bit in ENDPTSETUPSTAT */
        writel((1 << ep_num), &udc->op_regs->epsetupstat);

        /* while a hazard exists when setup package arrives */
        do {
                /* Set Setup Tripwire */
                temp = readl(&udc->op_regs->usbcmd);
                writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);

                /* Copy the setup packet to local buffer */
                memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);

                /*
                 * If the tripwire bit was cleared by hardware, a new SETUP
                 * arrived during the copy — retry until the copy was atomic.
                 */
        } while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));

        /* Clear Setup Tripwire */
        temp = readl(&udc->op_regs->usbcmd);
        writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
}
1787
/*
 * Handle transaction-complete interrupts: read out any pending SETUP
 * packets first, then walk each endpoint's request queue and retire
 * the transfers that have finished.
 */
static void irq_process_tr_complete(struct mv_udc *udc)
{
        u32 tmp, bit_pos;
        int i, ep_num = 0, direction = 0;
        struct mv_ep    *curr_ep;
        struct mv_req *curr_req, *temp_req;
        int status;

        /*
         * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
         * because the setup packets are to be read ASAP
         */

        /* Process all Setup packet received interrupts */
        tmp = readl(&udc->op_regs->epsetupstat);

        if (tmp) {
                for (i = 0; i < udc->max_eps; i++) {
                        if (tmp & (1 << i)) {
                                get_setup_data(udc, i,
                                        (u8 *)(&udc->local_setup_buff));
                                handle_setup_packet(udc, i,
                                        &udc->local_setup_buff);
                        }
                }
        }

        /* Don't clear the endpoint setup status register here.
         * It is cleared as a setup packet is read out of the buffer
         */

        /* Process non-setup transaction complete interrupts */
        tmp = readl(&udc->op_regs->epcomplete);

        if (!tmp)
                return;

        /* ack the completion bits we are about to process */
        writel(tmp, &udc->op_regs->epcomplete);

        /* ENDPTCOMPLETE uses bits 0..15 for OUT and 16..31 for IN */
        for (i = 0; i < udc->max_eps * 2; i++) {
                ep_num = i >> 1;
                direction = i % 2;

                bit_pos = 1 << (ep_num + 16 * direction);

                if (!(bit_pos & tmp))
                        continue;

                /* ep0 IN (i == 1) shares the eps[0] structure with ep0 OUT */
                if (i == 1)
                        curr_ep = &udc->eps[0];
                else
                        curr_ep = &udc->eps[i];
                /* process the req queue until an uncomplete request */
                list_for_each_entry_safe(curr_req, temp_req,
                        &curr_ep->queue, queue) {
                        status = process_ep_req(udc, i, curr_req);
                        if (status)
                                break;

                        /* write back status to req */
                        curr_req->req.status = status;

                        /* ep0 request completion */
                        if (ep_num == 0) {
                                ep0_req_complete(udc, curr_ep, curr_req);
                                break;
                        } else {
                                done(curr_ep, curr_req, status);
                        }
                }
        }
}
1860
/*
 * Handle a USB bus reset interrupt: return ep0 to its default state,
 * clear the device address and all pending endpoint status, then either
 * notify the gadget stack (port still in reset — the normal case) or
 * fully re-initialize the controller (reset already completed).
 */
static void irq_process_reset(struct mv_udc *udc)
{
        u32 tmp;
        unsigned int loops;

        udc->ep0_dir = EP_DIR_OUT;
        udc->ep0_state = WAIT_FOR_SETUP;
        udc->remote_wakeup = 0;         /* default to 0 on reset */

        /* The address bits are past bit 25-31. Set the address */
        tmp = readl(&udc->op_regs->deviceaddr);
        tmp &= ~(USB_DEVICE_ADDRESS_MASK);
        writel(tmp, &udc->op_regs->deviceaddr);

        /* Clear all the setup token semaphores */
        tmp = readl(&udc->op_regs->epsetupstat);
        writel(tmp, &udc->op_regs->epsetupstat);

        /* Clear all the endpoint complete status bits */
        tmp = readl(&udc->op_regs->epcomplete);
        writel(tmp, &udc->op_regs->epcomplete);

        /* wait until all endptprime bits cleared */
        loops = LOOPS(PRIME_TIMEOUT);
        while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
                if (loops == 0) {
                        dev_err(&udc->dev->dev,
                                "Timeout for ENDPTPRIME = 0x%x\n",
                                readl(&udc->op_regs->epprime));
                        break;
                }
                loops--;
                udelay(LOOPS_USEC);
        }

        /* Write 1s to the Flush register */
        writel((u32)~0, &udc->op_regs->epflush);

        if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
                /* port still held in reset: ordinary bus reset */
                dev_info(&udc->dev->dev, "usb bus reset\n");
                udc->usb_state = USB_STATE_DEFAULT;
                /* reset all the queues, stop all USB activities */
                gadget_reset(udc, udc->driver);
        } else {
                /* reset window already over: rebuild controller state */
                dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
                        readl(&udc->op_regs->portsc));

                /*
                 * re-initialize
                 * controller reset
                 */
                udc_reset(udc);

                /* reset all the queues, stop all USB activities */
                stop_activity(udc, udc->driver);

                /* reset ep0 dQH and endptctrl */
                ep0_reset(udc);

                /* enable interrupt and set controller to run state */
                udc_start(udc);

                udc->usb_state = USB_STATE_ATTACHED;
        }
}
1926
1927static void handle_bus_resume(struct mv_udc *udc)
1928{
1929        udc->usb_state = udc->resume_state;
1930        udc->resume_state = 0;
1931
1932        /* report resume to the driver */
1933        if (udc->driver) {
1934                if (udc->driver->resume) {
1935                        spin_unlock(&udc->lock);
1936                        udc->driver->resume(&udc->gadget);
1937                        spin_lock(&udc->lock);
1938                }
1939        }
1940}
1941
1942static void irq_process_suspend(struct mv_udc *udc)
1943{
1944        udc->resume_state = udc->usb_state;
1945        udc->usb_state = USB_STATE_SUSPENDED;
1946
1947        if (udc->driver->suspend) {
1948                spin_unlock(&udc->lock);
1949                udc->driver->suspend(&udc->gadget);
1950                spin_lock(&udc->lock);
1951        }
1952}
1953
1954static void irq_process_port_change(struct mv_udc *udc)
1955{
1956        u32 portsc;
1957
1958        portsc = readl(&udc->op_regs->portsc[0]);
1959        if (!(portsc & PORTSCX_PORT_RESET)) {
1960                /* Get the speed */
1961                u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
1962                switch (speed) {
1963                case PORTSCX_PORT_SPEED_HIGH:
1964                        udc->gadget.speed = USB_SPEED_HIGH;
1965                        break;
1966                case PORTSCX_PORT_SPEED_FULL:
1967                        udc->gadget.speed = USB_SPEED_FULL;
1968                        break;
1969                case PORTSCX_PORT_SPEED_LOW:
1970                        udc->gadget.speed = USB_SPEED_LOW;
1971                        break;
1972                default:
1973                        udc->gadget.speed = USB_SPEED_UNKNOWN;
1974                        break;
1975                }
1976        }
1977
1978        if (portsc & PORTSCX_PORT_SUSPEND) {
1979                udc->resume_state = udc->usb_state;
1980                udc->usb_state = USB_STATE_SUSPENDED;
1981                if (udc->driver->suspend) {
1982                        spin_unlock(&udc->lock);
1983                        udc->driver->suspend(&udc->gadget);
1984                        spin_lock(&udc->lock);
1985                }
1986        }
1987
1988        if (!(portsc & PORTSCX_PORT_SUSPEND)
1989                && udc->usb_state == USB_STATE_SUSPENDED) {
1990                handle_bus_resume(udc);
1991        }
1992
1993        if (!udc->resume_state)
1994                udc->usb_state = USB_STATE_DEFAULT;
1995}
1996
1997static void irq_process_error(struct mv_udc *udc)
1998{
1999        /* Increment the error count */
2000        udc->errors++;
2001}
2002
2003static irqreturn_t mv_udc_irq(int irq, void *dev)
2004{
2005        struct mv_udc *udc = (struct mv_udc *)dev;
2006        u32 status, intr;
2007
2008        /* Disable ISR when stopped bit is set */
2009        if (udc->stopped)
2010                return IRQ_NONE;
2011
2012        spin_lock(&udc->lock);
2013
2014        status = readl(&udc->op_regs->usbsts);
2015        intr = readl(&udc->op_regs->usbintr);
2016        status &= intr;
2017
2018        if (status == 0) {
2019                spin_unlock(&udc->lock);
2020                return IRQ_NONE;
2021        }
2022
2023        /* Clear all the interrupts occurred */
2024        writel(status, &udc->op_regs->usbsts);
2025
2026        if (status & USBSTS_ERR)
2027                irq_process_error(udc);
2028
2029        if (status & USBSTS_RESET)
2030                irq_process_reset(udc);
2031
2032        if (status & USBSTS_PORT_CHANGE)
2033                irq_process_port_change(udc);
2034
2035        if (status & USBSTS_INT)
2036                irq_process_tr_complete(udc);
2037
2038        if (status & USBSTS_SUSPEND)
2039                irq_process_suspend(udc);
2040
2041        spin_unlock(&udc->lock);
2042
2043        return IRQ_HANDLED;
2044}
2045
2046static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)
2047{
2048        struct mv_udc *udc = (struct mv_udc *)dev;
2049
2050        /* polling VBUS and init phy may cause too much time*/
2051        if (udc->qwork)
2052                queue_work(udc->qwork, &udc->vbus_work);
2053
2054        return IRQ_HANDLED;
2055}
2056
2057static void mv_udc_vbus_work(struct work_struct *work)
2058{
2059        struct mv_udc *udc;
2060        unsigned int vbus;
2061
2062        udc = container_of(work, struct mv_udc, vbus_work);
2063        if (!udc->pdata->vbus)
2064                return;
2065
2066        vbus = udc->pdata->vbus->poll();
2067        dev_info(&udc->dev->dev, "vbus is %d\n", vbus);
2068
2069        if (vbus == VBUS_HIGH)
2070                mv_udc_vbus_session(&udc->gadget, 1);
2071        else if (vbus == VBUS_LOW)
2072                mv_udc_vbus_session(&udc->gadget, 0);
2073}
2074
2075/* release device structure */
2076static void gadget_release(struct device *_dev)
2077{
2078        struct mv_udc *udc;
2079
2080        udc = dev_get_drvdata(_dev);
2081
2082        complete(udc->done);
2083}
2084
/*
 * Tear down the UDC: unregister the gadget, drain and destroy the VBUS
 * workqueue, release DMA resources, power down the controller, and wait
 * for the gadget device's release() callback before returning.
 */
static int mv_udc_remove(struct platform_device *pdev)
{
        struct mv_udc *udc;

        udc = platform_get_drvdata(pdev);

        usb_del_gadget_udc(&udc->gadget);

        if (udc->qwork) {
                /* NOTE(review): destroy_workqueue() flushes on its own;
                 * the explicit flush is presumably historical — confirm. */
                flush_workqueue(udc->qwork);
                destroy_workqueue(udc->qwork);
        }

        /* free memory allocated in probe */
        dma_pool_destroy(udc->dtd_pool);

        if (udc->ep_dqh)
                dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
                        udc->ep_dqh, udc->ep_dqh_dma);

        mv_udc_disable(udc);

        /* free dev, wait for the release() finished */
        wait_for_completion(udc->done);

        return 0;
}
2112
2113static int mv_udc_probe(struct platform_device *pdev)
2114{
2115        struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
2116        struct mv_udc *udc;
2117        int retval = 0;
2118        struct resource *r;
2119        size_t size;
2120
2121        if (pdata == NULL) {
2122                dev_err(&pdev->dev, "missing platform_data\n");
2123                return -ENODEV;
2124        }
2125
2126        udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
2127        if (udc == NULL)
2128                return -ENOMEM;
2129
2130        udc->done = &release_done;
2131        udc->pdata = dev_get_platdata(&pdev->dev);
2132        spin_lock_init(&udc->lock);
2133
2134        udc->dev = pdev;
2135
2136        if (pdata->mode == MV_USB_MODE_OTG) {
2137                udc->transceiver = devm_usb_get_phy(&pdev->dev,
2138                                        USB_PHY_TYPE_USB2);
2139                if (IS_ERR(udc->transceiver)) {
2140                        retval = PTR_ERR(udc->transceiver);
2141
2142                        if (retval == -ENXIO)
2143                                return retval;
2144
2145                        udc->transceiver = NULL;
2146                        return -EPROBE_DEFER;
2147                }
2148        }
2149
2150        /* udc only have one sysclk. */
2151        udc->clk = devm_clk_get(&pdev->dev, NULL);
2152        if (IS_ERR(udc->clk))
2153                return PTR_ERR(udc->clk);
2154
2155        r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
2156        if (r == NULL) {
2157                dev_err(&pdev->dev, "no I/O memory resource defined\n");
2158                return -ENODEV;
2159        }
2160
2161        udc->cap_regs = (struct mv_cap_regs __iomem *)
2162                devm_ioremap(&pdev->dev, r->start, resource_size(r));
2163        if (udc->cap_regs == NULL) {
2164                dev_err(&pdev->dev, "failed to map I/O memory\n");
2165                return -EBUSY;
2166        }
2167
2168        r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
2169        if (r == NULL) {
2170                dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
2171                return -ENODEV;
2172        }
2173
2174        udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
2175        if (udc->phy_regs == NULL) {
2176                dev_err(&pdev->dev, "failed to map phy I/O memory\n");
2177                return -EBUSY;
2178        }
2179
2180        /* we will acces controller register, so enable the clk */
2181        retval = mv_udc_enable_internal(udc);
2182        if (retval)
2183                return retval;
2184
2185        udc->op_regs =
2186                (struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
2187                + (readl(&udc->cap_regs->caplength_hciversion)
2188                        & CAPLENGTH_MASK));
2189        udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
2190
2191        /*
2192         * some platform will use usb to download image, it may not disconnect
2193         * usb gadget before loading kernel. So first stop udc here.
2194         */
2195        udc_stop(udc);
2196        writel(0xFFFFFFFF, &udc->op_regs->usbsts);
2197
2198        size = udc->max_eps * sizeof(struct mv_dqh) *2;
2199        size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
2200        udc->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
2201                                        &udc->ep_dqh_dma, GFP_KERNEL);
2202
2203        if (udc->ep_dqh == NULL) {
2204                dev_err(&pdev->dev, "allocate dQH memory failed\n");
2205                retval = -ENOMEM;
2206                goto err_disable_clock;
2207        }
2208        udc->ep_dqh_size = size;
2209
2210        /* create dTD dma_pool resource */
2211        udc->dtd_pool = dma_pool_create("mv_dtd",
2212                        &pdev->dev,
2213                        sizeof(struct mv_dtd),
2214                        DTD_ALIGNMENT,
2215                        DMA_BOUNDARY);
2216
2217        if (!udc->dtd_pool) {
2218                retval = -ENOMEM;
2219                goto err_free_dma;
2220        }
2221
2222        size = udc->max_eps * sizeof(struct mv_ep) *2;
2223        udc->eps = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
2224        if (udc->eps == NULL) {
2225                retval = -ENOMEM;
2226                goto err_destroy_dma;
2227        }
2228
2229        /* initialize ep0 status request structure */
2230        udc->status_req = devm_kzalloc(&pdev->dev, sizeof(struct mv_req),
2231                                        GFP_KERNEL);
2232        if (!udc->status_req) {
2233                retval = -ENOMEM;
2234                goto err_destroy_dma;
2235        }
2236        INIT_LIST_HEAD(&udc->status_req->queue);
2237
2238        /* allocate a small amount of memory to get valid address */
2239        udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
2240        udc->status_req->req.dma = DMA_ADDR_INVALID;
2241
2242        udc->resume_state = USB_STATE_NOTATTACHED;
2243        udc->usb_state = USB_STATE_POWERED;
2244        udc->ep0_dir = EP_DIR_OUT;
2245        udc->remote_wakeup = 0;
2246
2247        r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
2248        if (r == NULL) {
2249                dev_err(&pdev->dev, "no IRQ resource defined\n");
2250                retval = -ENODEV;
2251                goto err_destroy_dma;
2252        }
2253        udc->irq = r->start;
2254        if (devm_request_irq(&pdev->dev, udc->irq, mv_udc_irq,
2255                IRQF_SHARED, driver_name, udc)) {
2256                dev_err(&pdev->dev, "Request irq %d for UDC failed\n",
2257                        udc->irq);
2258                retval = -ENODEV;
2259                goto err_destroy_dma;
2260        }
2261
2262        /* initialize gadget structure */
2263        udc->gadget.ops = &mv_ops;      /* usb_gadget_ops */
2264        udc->gadget.ep0 = &udc->eps[0].ep;      /* gadget ep0 */
2265        INIT_LIST_HEAD(&udc->gadget.ep_list);   /* ep_list */
2266        udc->gadget.speed = USB_SPEED_UNKNOWN;  /* speed */
2267        udc->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */
2268
2269        /* the "gadget" abstracts/virtualizes the controller */
2270        udc->gadget.name = driver_name;         /* gadget name */
2271
2272        eps_init(udc);
2273
2274        /* VBUS detect: we can disable/enable clock on demand.*/
2275        if (udc->transceiver)
2276                udc->clock_gating = 1;
2277        else if (pdata->vbus) {
2278                udc->clock_gating = 1;
2279                retval = devm_request_threaded_irq(&pdev->dev,
2280                                pdata->vbus->irq, NULL,
2281                                mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
2282                if (retval) {
2283                        dev_info(&pdev->dev,
2284                                "Can not request irq for VBUS, "
2285                                "disable clock gating\n");
2286                        udc->clock_gating = 0;
2287                }
2288
2289                udc->qwork = create_singlethread_workqueue("mv_udc_queue");
2290                if (!udc->qwork) {
2291                        dev_err(&pdev->dev, "cannot create workqueue\n");
2292                        retval = -ENOMEM;
2293                        goto err_destroy_dma;
2294                }
2295
2296                INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
2297        }
2298
2299        /*
2300         * When clock gating is supported, we can disable clk and phy.
2301         * If not, it means that VBUS detection is not supported, we
2302         * have to enable vbus active all the time to let controller work.
2303         */
2304        if (udc->clock_gating)
2305                mv_udc_disable_internal(udc);
2306        else
2307                udc->vbus_active = 1;
2308
2309        retval = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
2310                        gadget_release);
2311        if (retval)
2312                goto err_create_workqueue;
2313
2314        platform_set_drvdata(pdev, udc);
2315        dev_info(&pdev->dev, "successful probe UDC device %s clock gating.\n",
2316                udc->clock_gating ? "with" : "without");
2317
2318        return 0;
2319
2320err_create_workqueue:
2321        destroy_workqueue(udc->qwork);
2322err_destroy_dma:
2323        dma_pool_destroy(udc->dtd_pool);
2324err_free_dma:
2325        dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
2326                        udc->ep_dqh, udc->ep_dqh_dma);
2327err_disable_clock:
2328        mv_udc_disable_internal(udc);
2329
2330        return retval;
2331}
2332
2333#ifdef CONFIG_PM
/*
 * System suspend hook.  Refuses to suspend while a USB cable is
 * attached (VBUS polled high); otherwise stops the controller, tears
 * down activity under the lock, and gates the clock/phy.
 * Returns 0, or -EAGAIN when the cable is still connected.
 */
static int mv_udc_suspend(struct device *dev)
{
        struct mv_udc *udc;

        udc = dev_get_drvdata(dev);

        /* if OTG is enabled, the following will be done in OTG driver*/
        if (udc->transceiver)
                return 0;

        if (udc->pdata->vbus && udc->pdata->vbus->poll)
                if (udc->pdata->vbus->poll() == VBUS_HIGH) {
                        dev_info(&udc->dev->dev, "USB cable is connected!\n");
                        return -EAGAIN;
                }

        /*
         * only cable is unplugged, udc can suspend.
         * So do not care about clock_gating == 1.
         */
        if (!udc->clock_gating) {
                udc_stop(udc);

                spin_lock_irq(&udc->lock);
                /* stop all usb activities */
                stop_activity(udc, udc->driver);
                spin_unlock_irq(&udc->lock);

                mv_udc_disable_internal(udc);
        }

        return 0;
}
2367
2368static int mv_udc_resume(struct device *dev)
2369{
2370        struct mv_udc *udc;
2371        int retval;
2372
2373        udc = dev_get_drvdata(dev);
2374
2375        /* if OTG is enabled, the following will be done in OTG driver*/
2376        if (udc->transceiver)
2377                return 0;
2378
2379        if (!udc->clock_gating) {
2380                retval = mv_udc_enable_internal(udc);
2381                if (retval)
2382                        return retval;
2383
2384                if (udc->driver && udc->softconnect) {
2385                        udc_reset(udc);
2386                        ep0_reset(udc);
2387                        udc_start(udc);
2388                }
2389        }
2390
2391        return 0;
2392}
2393
/* system sleep callbacks; compiled only under CONFIG_PM */
static const struct dev_pm_ops mv_udc_pm_ops = {
        .suspend        = mv_udc_suspend,
        .resume         = mv_udc_resume,
};
2398#endif
2399
2400static void mv_udc_shutdown(struct platform_device *pdev)
2401{
2402        struct mv_udc *udc;
2403        u32 mode;
2404
2405        udc = platform_get_drvdata(pdev);
2406        /* reset controller mode to IDLE */
2407        mv_udc_enable(udc);
2408        mode = readl(&udc->op_regs->usbmode);
2409        mode &= ~3;
2410        writel(mode, &udc->op_regs->usbmode);
2411        mv_udc_disable(udc);
2412}
2413
/* platform driver glue; binds to devices named "mv-udc" */
static struct platform_driver udc_driver = {
        .probe          = mv_udc_probe,
        .remove         = mv_udc_remove,
        .shutdown       = mv_udc_shutdown,
        .driver         = {
                .name   = "mv-udc",
#ifdef CONFIG_PM
                .pm     = &mv_udc_pm_ops,
#endif
        },
};
2425
/* module registration and metadata */
module_platform_driver(udc_driver);
MODULE_ALIAS("platform:mv-udc");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
2432