linux/drivers/usb/dwc2/gadget.c
   1/**
   2 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
   3 *              http://www.samsung.com
   4 *
   5 * Copyright 2008 Openmoko, Inc.
   6 * Copyright 2008 Simtec Electronics
   7 *      Ben Dooks <ben@simtec.co.uk>
   8 *      http://armlinux.simtec.co.uk/
   9 *
   10 * S3C USB2.0 High-speed / OTG driver
  11 *
  12 * This program is free software; you can redistribute it and/or modify
  13 * it under the terms of the GNU General Public License version 2 as
  14 * published by the Free Software Foundation.
  15 */
  16
  17#include <linux/kernel.h>
  18#include <linux/module.h>
  19#include <linux/spinlock.h>
  20#include <linux/interrupt.h>
  21#include <linux/platform_device.h>
  22#include <linux/dma-mapping.h>
  23#include <linux/debugfs.h>
  24#include <linux/mutex.h>
  25#include <linux/seq_file.h>
  26#include <linux/delay.h>
  27#include <linux/io.h>
  28#include <linux/slab.h>
  29#include <linux/clk.h>
  30#include <linux/regulator/consumer.h>
  31#include <linux/of_platform.h>
  32#include <linux/phy/phy.h>
  33
  34#include <linux/usb/ch9.h>
  35#include <linux/usb/gadget.h>
  36#include <linux/usb/phy.h>
  37#include <linux/platform_data/s3c-hsotg.h>
  38#include <linux/uaccess.h>
  39
  40#include "core.h"
  41#include "hw.h"
  42
  43/* conversion functions */
  44static inline struct s3c_hsotg_req *our_req(struct usb_request *req)
  45{
  46        return container_of(req, struct s3c_hsotg_req, req);
  47}
  48
  49static inline struct s3c_hsotg_ep *our_ep(struct usb_ep *ep)
  50{
  51        return container_of(ep, struct s3c_hsotg_ep, ep);
  52}
  53
  54static inline struct dwc2_hsotg *to_hsotg(struct usb_gadget *gadget)
  55{
  56        return container_of(gadget, struct dwc2_hsotg, gadget);
  57}
  58
  59static inline void __orr32(void __iomem *ptr, u32 val)
  60{
  61        writel(readl(ptr) | val, ptr);
  62}
  63
  64static inline void __bic32(void __iomem *ptr, u32 val)
  65{
  66        writel(readl(ptr) & ~val, ptr);
  67}
  68
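     /**
      * index_to_ep - return the endpoint for a given index and direction
      * @hsotg: The device state.
      * @ep_index: The endpoint index.
      * @dir_in: True to look up the IN endpoint, false for the OUT endpoint.
      */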
  69static inline struct s3c_hsotg_ep *index_to_ep(struct dwc2_hsotg *hsotg,
  70                                                u32 ep_index, u32 dir_in)
  71{
  72        if (dir_in)
  73                return hsotg->eps_in[ep_index];
  74        else
  75                return hsotg->eps_out[ep_index];
  76}
  77
  78/* forward declaration of functions */
  79static void s3c_hsotg_dump(struct dwc2_hsotg *hsotg);
  80
  81/**
  82 * using_dma - return the DMA status of the driver.
  83 * @hsotg: The driver state.
  84 *
  85 * Return true if we're using DMA.
  86 *
  87 * Currently, we have the DMA support code worked into everywhere
  88 * that needs it, but the AMBA DMA implementation in the hardware can
  89 * only DMA from 32bit aligned addresses. This means that gadgets such
   90 * as CDC Ethernet cannot work, as they often pass packets which are
  91 * not 32bit aligned.
  92 *
  93 * Unfortunately the choice to use DMA or not is global to the controller
  94 * and seems to be only settable when the controller is being put through
  95 * a core reset. This means we either need to fix the gadgets to take
  96 * account of DMA alignment, or add bounce buffers (yuerk).
  97 *
   98 * g_using_dma is set depending on the DTS flag.
  99 */
 100static inline bool using_dma(struct dwc2_hsotg *hsotg)
 101{
 102        return hsotg->g_using_dma;
 103}
 104
 105/**
  106 * s3c_hsotg_en_gsint - enable one or more of the general interrupts
 107 * @hsotg: The device state
 108 * @ints: A bitmask of the interrupts to enable
 109 */
 110static void s3c_hsotg_en_gsint(struct dwc2_hsotg *hsotg, u32 ints)
 111{
 112        u32 gsintmsk = readl(hsotg->regs + GINTMSK);
 113        u32 new_gsintmsk;
 114
 115        new_gsintmsk = gsintmsk | ints;
 116
 117        if (new_gsintmsk != gsintmsk) {
 118                dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
 119                writel(new_gsintmsk, hsotg->regs + GINTMSK);
 120        }
 121}
 122
 123/**
  124 * s3c_hsotg_disable_gsint - disable one or more of the general interrupts
  125 * @hsotg: The device state
  126 * @ints: A bitmask of the interrupts to disable
 127 */
 128static void s3c_hsotg_disable_gsint(struct dwc2_hsotg *hsotg, u32 ints)
 129{
 130        u32 gsintmsk = readl(hsotg->regs + GINTMSK);
 131        u32 new_gsintmsk;
 132
 133        new_gsintmsk = gsintmsk & ~ints;
 134
 135        if (new_gsintmsk != gsintmsk)
 136                writel(new_gsintmsk, hsotg->regs + GINTMSK);
 137}
 138
 139/**
 140 * s3c_hsotg_ctrl_epint - enable/disable an endpoint irq
 141 * @hsotg: The device state
 142 * @ep: The endpoint index
 143 * @dir_in: True if direction is in.
 144 * @en: The enable value, true to enable
 145 *
 146 * Set or clear the mask for an individual endpoint's interrupt
 147 * request.
 148 */
 149static void s3c_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg,
 150                                 unsigned int ep, unsigned int dir_in,
 151                                 unsigned int en)
 152{
 153        unsigned long flags;
 154        u32 bit = 1 << ep;
 155        u32 daint;
 156
 157        if (!dir_in)
 158                bit <<= 16;
 159
 160        local_irq_save(flags);
 161        daint = readl(hsotg->regs + DAINTMSK);
 162        if (en)
 163                daint |= bit;
 164        else
 165                daint &= ~bit;
 166        writel(daint, hsotg->regs + DAINTMSK);
 167        local_irq_restore(flags);
 168}
 169
 170/**
 171 * s3c_hsotg_init_fifo - initialise non-periodic FIFOs
 172 * @hsotg: The device instance.
 173 */
 174static void s3c_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
 175{
 176        unsigned int ep;
 177        unsigned int addr;
 178        int timeout;
 179        u32 val;
 180
 181        /* Reset fifo map if not correctly cleared during previous session */
 182        WARN_ON(hsotg->fifo_map);
 183        hsotg->fifo_map = 0;
 184
 185        /* set RX/NPTX FIFO sizes */
 186        writel(hsotg->g_rx_fifo_sz, hsotg->regs + GRXFSIZ);
 187        writel((hsotg->g_rx_fifo_sz << FIFOSIZE_STARTADDR_SHIFT) |
 188                (hsotg->g_np_g_tx_fifo_sz << FIFOSIZE_DEPTH_SHIFT),
 189                hsotg->regs + GNPTXFSIZ);
 190
 191        /*
  192         * arrange all the rest of the TX FIFOs, as some versions of this
 193         * block have overlapping default addresses. This also ensures
 194         * that if the settings have been changed, then they are set to
 195         * known values.
 196         */
 197
 198        /* start at the end of the GNPTXFSIZ, rounded up */
 199        addr = hsotg->g_rx_fifo_sz + hsotg->g_np_g_tx_fifo_sz;
 200
 201        /*
  202         * Configure FIFO sizes from the provided configuration and assign
  203         * them to endpoints dynamically according to the maxpacket size of
  204         * each endpoint.
 205         */
 206        for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {
 207                if (!hsotg->g_tx_fifo_sz[ep])
 208                        continue;
 209                val = addr;
 210                val |= hsotg->g_tx_fifo_sz[ep] << FIFOSIZE_DEPTH_SHIFT;
 211                WARN_ONCE(addr + hsotg->g_tx_fifo_sz[ep] > hsotg->fifo_mem,
 212                          "insufficient fifo memory");
 213                addr += hsotg->g_tx_fifo_sz[ep];
 214
 215                writel(val, hsotg->regs + DPTXFSIZN(ep));
 216        }
 217
 218        /*
 219         * according to p428 of the design guide, we need to ensure that
 220         * all fifos are flushed before continuing
 221         */
 222
 223        writel(GRSTCTL_TXFNUM(0x10) | GRSTCTL_TXFFLSH |
 224               GRSTCTL_RXFFLSH, hsotg->regs + GRSTCTL);
 225
 226        /* wait until the fifos are both flushed */
 227        timeout = 100;
 228        while (1) {
 229                val = readl(hsotg->regs + GRSTCTL);
 230
 231                if ((val & (GRSTCTL_TXFFLSH | GRSTCTL_RXFFLSH)) == 0)
 232                        break;
 233
 234                if (--timeout == 0) {
 235                        dev_err(hsotg->dev,
 236                                "%s: timeout flushing fifos (GRSTCTL=%08x)\n",
 237                                __func__, val);
 238                        break;
 239                }
 240
 241                udelay(1);
 242        }
 243
 244        dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout);
 245}
 246
 247/**
      * s3c_hsotg_ep_alloc_request - allocate a request for an endpoint
  248 * @ep: USB endpoint to allocate request for.
 249 * @flags: Allocation flags
 250 *
 251 * Allocate a new USB request structure appropriate for the specified endpoint
 252 */
 253static struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep,
 254                                                      gfp_t flags)
 255{
 256        struct s3c_hsotg_req *req;
 257
 258        req = kzalloc(sizeof(struct s3c_hsotg_req), flags);
 259        if (!req)
 260                return NULL;
 261
 262        INIT_LIST_HEAD(&req->queue);
 263
 264        return &req->req;
 265}
 266
 267/**
 268 * is_ep_periodic - return true if the endpoint is in periodic mode.
 269 * @hs_ep: The endpoint to query.
 270 *
 271 * Returns true if the endpoint is in periodic mode, meaning it is being
 272 * used for an Interrupt or ISO transfer.
 273 */
 274static inline int is_ep_periodic(struct s3c_hsotg_ep *hs_ep)
 275{
 276        return hs_ep->periodic;
 277}
 278
 279/**
 280 * s3c_hsotg_unmap_dma - unmap the DMA memory being used for the request
 281 * @hsotg: The device state.
 282 * @hs_ep: The endpoint for the request
 283 * @hs_req: The request being processed.
 284 *
 285 * This is the reverse of s3c_hsotg_map_dma(), called for the completion
 286 * of a request to ensure the buffer is ready for access by the caller.
 287 */
 288static void s3c_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
 289                                struct s3c_hsotg_ep *hs_ep,
 290                                struct s3c_hsotg_req *hs_req)
 291{
 292        struct usb_request *req = &hs_req->req;
 293
 294        /* ignore this if we're not moving any data */
 295        if (hs_req->req.length == 0)
 296                return;
 297
 298        usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
 299}
 300
 301/**
  302 * s3c_hsotg_write_fifo - write packet data to the TxFIFO
 303 * @hsotg: The controller state.
 304 * @hs_ep: The endpoint we're going to write for.
 305 * @hs_req: The request to write data for.
 306 *
 307 * This is called when the TxFIFO has some space in it to hold a new
 308 * transmission and we have something to give it. The actual setup of
 309 * the data size is done elsewhere, so all we have to do is to actually
 310 * write the data.
 311 *
  312 * The return value is zero if there is more space (or nothing was done),
  313 * otherwise -ENOSPC is returned if the FIFO space was used up.
 314 *
 315 * This routine is only needed for PIO
 316 */
 317static int s3c_hsotg_write_fifo(struct dwc2_hsotg *hsotg,
 318                                struct s3c_hsotg_ep *hs_ep,
 319                                struct s3c_hsotg_req *hs_req)
 320{
 321        bool periodic = is_ep_periodic(hs_ep);
 322        u32 gnptxsts = readl(hsotg->regs + GNPTXSTS);
 323        int buf_pos = hs_req->req.actual;
 324        int to_write = hs_ep->size_loaded;
 325        void *data;
 326        int can_write;
 327        int pkt_round;
 328        int max_transfer;
 329
 330        to_write -= (buf_pos - hs_ep->last_load);
 331
 332        /* if there's nothing to write, get out early */
 333        if (to_write == 0)
 334                return 0;
 335
 336        if (periodic && !hsotg->dedicated_fifos) {
 337                u32 epsize = readl(hsotg->regs + DIEPTSIZ(hs_ep->index));
 338                int size_left;
 339                int size_done;
 340
 341                /*
 342                 * work out how much data was loaded so we can calculate
 343                 * how much data is left in the fifo.
 344                 */
 345
 346                size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
 347
 348                /*
 349                 * if shared fifo, we cannot write anything until the
 350                 * previous data has been completely sent.
 351                 */
 352                if (hs_ep->fifo_load != 0) {
 353                        s3c_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
 354                        return -ENOSPC;
 355                }
 356
 357                dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
 358                        __func__, size_left,
 359                        hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);
 360
 361                /* how much of the data has moved */
 362                size_done = hs_ep->size_loaded - size_left;
 363
 364                /* how much data is left in the fifo */
 365                can_write = hs_ep->fifo_load - size_done;
 366                dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",
 367                        __func__, can_write);
 368
 369                can_write = hs_ep->fifo_size - can_write;
 370                dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",
 371                        __func__, can_write);
 372
 373                if (can_write <= 0) {
 374                        s3c_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
 375                        return -ENOSPC;
 376                }
 377        } else if (hsotg->dedicated_fifos && hs_ep->index != 0) {
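                     /*
                      * With dedicated FIFOs, DTXFSTS reports the space free in
                      * this endpoint's TxFIFO in 32-bit words, so mask out the
                      * 16-bit field and convert it to bytes.
                      */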
 378                can_write = readl(hsotg->regs + DTXFSTS(hs_ep->index));
 379
 380                can_write &= 0xffff;
 381                can_write *= 4;
 382        } else {
 383                if (GNPTXSTS_NP_TXQ_SPC_AVAIL_GET(gnptxsts) == 0) {
 384                        dev_dbg(hsotg->dev,
 385                                "%s: no queue slots available (0x%08x)\n",
 386                                __func__, gnptxsts);
 387
 388                        s3c_hsotg_en_gsint(hsotg, GINTSTS_NPTXFEMP);
 389                        return -ENOSPC;
 390                }
 391
 392                can_write = GNPTXSTS_NP_TXF_SPC_AVAIL_GET(gnptxsts);
 393                can_write *= 4; /* fifo size is in 32bit quantities. */
 394        }
 395
 396        max_transfer = hs_ep->ep.maxpacket * hs_ep->mc;
 397
 398        dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, max_transfer %d\n",
 399                 __func__, gnptxsts, can_write, to_write, max_transfer);
 400
 401        /*
  402         * limit to 512 bytes of data; it seems that, at least on the
  403         * non-periodic FIFO, requests of >512 bytes cause the endpoint to
  404         * get stuck with a fragment of the end of the transfer in it.
 405         */
 406        if (can_write > 512 && !periodic)
 407                can_write = 512;
 408
 409        /*
  410         * limit the write to one max-packet size worth of data, but allow
  411         * the transfer to report that it did not run out of fifo space
  412         * while doing it.
 413         */
 414        if (to_write > max_transfer) {
 415                to_write = max_transfer;
 416
 417                /* it's needed only when we do not use dedicated fifos */
 418                if (!hsotg->dedicated_fifos)
 419                        s3c_hsotg_en_gsint(hsotg,
 420                                           periodic ? GINTSTS_PTXFEMP :
 421                                           GINTSTS_NPTXFEMP);
 422        }
 423
 424        /* see if we can write data */
 425
 426        if (to_write > can_write) {
 427                to_write = can_write;
 428                pkt_round = to_write % max_transfer;
 429
 430                /*
 431                 * Round the write down to an
 432                 * exact number of packets.
 433                 *
 434                 * Note, we do not currently check to see if we can ever
 435                 * write a full packet or not to the FIFO.
 436                 */
 437
 438                if (pkt_round)
 439                        to_write -= pkt_round;
 440
 441                /*
 442                 * enable correct FIFO interrupt to alert us when there
 443                 * is more room left.
 444                 */
 445
 446                /* it's needed only when we do not use dedicated fifos */
 447                if (!hsotg->dedicated_fifos)
 448                        s3c_hsotg_en_gsint(hsotg,
 449                                           periodic ? GINTSTS_PTXFEMP :
 450                                           GINTSTS_NPTXFEMP);
 451        }
 452
 453        dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
 454                 to_write, hs_req->req.length, can_write, buf_pos);
 455
 456        if (to_write <= 0)
 457                return -ENOSPC;
 458
 459        hs_req->req.actual = buf_pos + to_write;
 460        hs_ep->total_data += to_write;
 461
 462        if (periodic)
 463                hs_ep->fifo_load += to_write;
 464
 465        to_write = DIV_ROUND_UP(to_write, 4);
 466        data = hs_req->req.buf + buf_pos;
 467
 468        iowrite32_rep(hsotg->regs + EPFIFO(hs_ep->index), data, to_write);
 469
 470        return (to_write >= can_write) ? -ENOSPC : 0;
 471}
 472
 473/**
  474 * get_ep_limit - get the maximum data length for this endpoint
 475 * @hs_ep: The endpoint
 476 *
 477 * Return the maximum data that can be queued in one go on a given endpoint
 478 * so that transfers that are too long can be split.
 479 */
 480static unsigned get_ep_limit(struct s3c_hsotg_ep *hs_ep)
 481{
 482        int index = hs_ep->index;
 483        unsigned maxsize;
 484        unsigned maxpkt;
 485
 486        if (index != 0) {
 487                maxsize = DXEPTSIZ_XFERSIZE_LIMIT + 1;
 488                maxpkt = DXEPTSIZ_PKTCNT_LIMIT + 1;
 489        } else {
 490                maxsize = 64+64;
 491                if (hs_ep->dir_in)
 492                        maxpkt = DIEPTSIZ0_PKTCNT_LIMIT + 1;
 493                else
 494                        maxpkt = 2;
 495        }
 496
 497        /* we made the constant loading easier above by using +1 */
 498        maxpkt--;
 499        maxsize--;
 500
 501        /*
 502         * constrain by packet count if maxpkts*pktsize is greater
 503         * than the length register size.
 504         */
 505
 506        if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)
 507                maxsize = maxpkt * hs_ep->ep.maxpacket;
 508
 509        return maxsize;
 510}
 511
 512/**
 513 * s3c_hsotg_start_req - start a USB request from an endpoint's queue
 514 * @hsotg: The controller state.
 515 * @hs_ep: The endpoint to process a request for
 516 * @hs_req: The request to start.
 517 * @continuing: True if we are doing more for the current request.
 518 *
 519 * Start the given request running by setting the endpoint registers
 520 * appropriately, and writing any data to the FIFOs.
 521 */
 522static void s3c_hsotg_start_req(struct dwc2_hsotg *hsotg,
 523                                struct s3c_hsotg_ep *hs_ep,
 524                                struct s3c_hsotg_req *hs_req,
 525                                bool continuing)
 526{
 527        struct usb_request *ureq = &hs_req->req;
 528        int index = hs_ep->index;
 529        int dir_in = hs_ep->dir_in;
 530        u32 epctrl_reg;
 531        u32 epsize_reg;
 532        u32 epsize;
 533        u32 ctrl;
 534        unsigned length;
 535        unsigned packets;
 536        unsigned maxreq;
 537
 538        if (index != 0) {
 539                if (hs_ep->req && !continuing) {
 540                        dev_err(hsotg->dev, "%s: active request\n", __func__);
 541                        WARN_ON(1);
 542                        return;
 543                } else if (hs_ep->req != hs_req && continuing) {
 544                        dev_err(hsotg->dev,
 545                                "%s: continue different req\n", __func__);
 546                        WARN_ON(1);
 547                        return;
 548                }
 549        }
 550
 551        epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
 552        epsize_reg = dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
 553
 554        dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
 555                __func__, readl(hsotg->regs + epctrl_reg), index,
 556                hs_ep->dir_in ? "in" : "out");
 557
 558        /* If endpoint is stalled, we will restart request later */
 559        ctrl = readl(hsotg->regs + epctrl_reg);
 560
 561        if (ctrl & DXEPCTL_STALL) {
 562                dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);
 563                return;
 564        }
 565
 566        length = ureq->length - ureq->actual;
 567        dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n",
 568                ureq->length, ureq->actual);
 569
 570        maxreq = get_ep_limit(hs_ep);
 571        if (length > maxreq) {
 572                int round = maxreq % hs_ep->ep.maxpacket;
 573
 574                dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n",
 575                        __func__, length, maxreq, round);
 576
 577                /* round down to multiple of packets */
 578                if (round)
 579                        maxreq -= round;
 580
 581                length = maxreq;
 582        }
 583
 584        if (length)
 585                packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);
 586        else
 587                packets = 1;    /* send one packet if length is zero. */
 588
 589        if (hs_ep->isochronous && length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
 590                dev_err(hsotg->dev, "req length > maxpacket*mc\n");
 591                return;
 592        }
 593
  594        if (dir_in && index != 0) {
  595                if (hs_ep->isochronous)
  596                        epsize = DXEPTSIZ_MC(packets);
  597                else
  598                        epsize = DXEPTSIZ_MC(1);
  599        } else {
  600                epsize = 0;
             }
 601
 602        /*
 603         * zero length packet should be programmed on its own and should not
 604         * be counted in DIEPTSIZ.PktCnt with other packets.
 605         */
 606        if (dir_in && ureq->zero && !continuing) {
 607                /* Test if zlp is actually required. */
 608                if ((ureq->length >= hs_ep->ep.maxpacket) &&
 609                                        !(ureq->length % hs_ep->ep.maxpacket))
 610                        hs_ep->send_zlp = 1;
 611        }
 612
 613        epsize |= DXEPTSIZ_PKTCNT(packets);
 614        epsize |= DXEPTSIZ_XFERSIZE(length);
 615
 616        dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
 617                __func__, packets, length, ureq->length, epsize, epsize_reg);
 618
 619        /* store the request as the current one we're doing */
 620        hs_ep->req = hs_req;
 621
 622        /* write size / packets */
 623        writel(epsize, hsotg->regs + epsize_reg);
 624
 625        if (using_dma(hsotg) && !continuing) {
 626                unsigned int dma_reg;
 627
 628                /*
 629                 * write DMA address to control register, buffer already
 630                 * synced by s3c_hsotg_ep_queue().
 631                 */
 632
 633                dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
 634                writel(ureq->dma, hsotg->regs + dma_reg);
 635
 636                dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n",
 637                        __func__, &ureq->dma, dma_reg);
 638        }
 639
 640        ctrl |= DXEPCTL_EPENA;  /* ensure ep enabled */
 641        ctrl |= DXEPCTL_USBACTEP;
 642
 643        dev_dbg(hsotg->dev, "ep0 state:%d\n", hsotg->ep0_state);
 644
 645        /* For Setup request do not clear NAK */
 646        if (!(index == 0 && hsotg->ep0_state == DWC2_EP0_SETUP))
 647                ctrl |= DXEPCTL_CNAK;   /* clear NAK set by core */
 648
 649        dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
 650        writel(ctrl, hsotg->regs + epctrl_reg);
 651
 652        /*
  653         * set these; it seems that DMA support increments past the end
  654         * of the packet buffer, so we need to calculate the length from
 655         * this information.
 656         */
 657        hs_ep->size_loaded = length;
 658        hs_ep->last_load = ureq->actual;
 659
 660        if (dir_in && !using_dma(hsotg)) {
 661                /* set these anyway, we may need them for non-periodic in */
 662                hs_ep->fifo_load = 0;
 663
 664                s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
 665        }
 666
 667        /*
  668         * clear the INTknTXFEmpMsk when we start a request, more as an aid
  669         * to debugging to see what is going on.
 670         */
 671        if (dir_in)
 672                writel(DIEPMSK_INTKNTXFEMPMSK,
 673                       hsotg->regs + DIEPINT(index));
 674
 675        /*
 676         * Note, trying to clear the NAK here causes problems with transmit
 677         * on the S3C6400 ending up with the TXFIFO becoming full.
 678         */
 679
 680        /* check ep is enabled */
 681        if (!(readl(hsotg->regs + epctrl_reg) & DXEPCTL_EPENA))
 682                dev_dbg(hsotg->dev,
 683                         "ep%d: failed to become enabled (DXEPCTL=0x%08x)?\n",
 684                         index, readl(hsotg->regs + epctrl_reg));
 685
 686        dev_dbg(hsotg->dev, "%s: DXEPCTL=0x%08x\n",
 687                __func__, readl(hsotg->regs + epctrl_reg));
 688
 689        /* enable ep interrupts */
 690        s3c_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 1);
 691}
 692
 693/**
 694 * s3c_hsotg_map_dma - map the DMA memory being used for the request
 695 * @hsotg: The device state.
 696 * @hs_ep: The endpoint the request is on.
 697 * @req: The request being processed.
 698 *
 699 * We've been asked to queue a request, so ensure that the memory buffer
  700 * is correctly set up for DMA. If we've been passed an extant DMA address
  701 * then ensure the buffer has been synced to memory. If our buffer has no
  702 * DMA memory, then we map the memory and mark our request to allow us to
  703 * clean up on completion.
 704 */
 705static int s3c_hsotg_map_dma(struct dwc2_hsotg *hsotg,
 706                             struct s3c_hsotg_ep *hs_ep,
 707                             struct usb_request *req)
 708{
 709        struct s3c_hsotg_req *hs_req = our_req(req);
 710        int ret;
 711
 712        /* if the length is zero, ignore the DMA data */
 713        if (hs_req->req.length == 0)
 714                return 0;
 715
 716        ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
 717        if (ret)
 718                goto dma_error;
 719
 720        return 0;
 721
 722dma_error:
 723        dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
 724                __func__, req->buf, req->length);
 725
 726        return -EIO;
 727}
 728
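     /**
      * s3c_hsotg_handle_unaligned_buf_start - bounce an unaligned buffer
      * @hsotg: The device state.
      * @hs_ep: The endpoint the request is on.
      * @hs_req: The request being started.
      *
      * The DMA engine can only work with 32-bit aligned buffers, so if the
      * request's buffer is not aligned, allocate a bounce buffer, copy the
      * data into it for IN transfers and save the original buffer pointer so
      * it can be restored when the request completes.
      */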
 729static int s3c_hsotg_handle_unaligned_buf_start(struct dwc2_hsotg *hsotg,
 730        struct s3c_hsotg_ep *hs_ep, struct s3c_hsotg_req *hs_req)
 731{
 732        void *req_buf = hs_req->req.buf;
 733
 734        /* If dma is not being used or buffer is aligned */
 735        if (!using_dma(hsotg) || !((long)req_buf & 3))
 736                return 0;
 737
 738        WARN_ON(hs_req->saved_req_buf);
 739
 740        dev_dbg(hsotg->dev, "%s: %s: buf=%p length=%d\n", __func__,
 741                        hs_ep->ep.name, req_buf, hs_req->req.length);
 742
 743        hs_req->req.buf = kmalloc(hs_req->req.length, GFP_ATOMIC);
 744        if (!hs_req->req.buf) {
 745                hs_req->req.buf = req_buf;
 746                dev_err(hsotg->dev,
 747                        "%s: unable to allocate memory for bounce buffer\n",
 748                        __func__);
 749                return -ENOMEM;
 750        }
 751
 752        /* Save actual buffer */
 753        hs_req->saved_req_buf = req_buf;
 754
 755        if (hs_ep->dir_in)
 756                memcpy(hs_req->req.buf, req_buf, hs_req->req.length);
 757        return 0;
 758}
 759
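     /**
      * s3c_hsotg_handle_unaligned_buf_complete - release a bounce buffer
      * @hsotg: The device state.
      * @hs_ep: The endpoint the request was on.
      * @hs_req: The request being completed.
      *
      * If a bounce buffer was used for this request, copy the received data
      * back to the original buffer on a successful OUT transfer, free the
      * bounce buffer and restore the original request buffer pointer.
      */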
 760static void s3c_hsotg_handle_unaligned_buf_complete(struct dwc2_hsotg *hsotg,
 761        struct s3c_hsotg_ep *hs_ep, struct s3c_hsotg_req *hs_req)
 762{
 763        /* If dma is not being used or buffer was aligned */
 764        if (!using_dma(hsotg) || !hs_req->saved_req_buf)
 765                return;
 766
 767        dev_dbg(hsotg->dev, "%s: %s: status=%d actual-length=%d\n", __func__,
 768                hs_ep->ep.name, hs_req->req.status, hs_req->req.actual);
 769
 770        /* Copy data from bounce buffer on successful out transfer */
 771        if (!hs_ep->dir_in && !hs_req->req.status)
 772                memcpy(hs_req->saved_req_buf, hs_req->req.buf,
 773                                                        hs_req->req.actual);
 774
 775        /* Free bounce buffer */
 776        kfree(hs_req->req.buf);
 777
 778        hs_req->req.buf = hs_req->saved_req_buf;
 779        hs_req->saved_req_buf = NULL;
 780}
 781
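     /**
      * s3c_hsotg_ep_queue - queue a request on an endpoint
      * @ep: The endpoint to queue the request on.
      * @req: The request to queue.
      * @gfp_flags: Allocation flags for anything needed to queue the request.
      *
      * Initialise the request's status, prepare the buffer for DMA or bounce
      * it if unaligned, and add the request to the endpoint's queue. If the
      * queue was empty, start the transfer immediately. Callers are expected
      * to hold the driver's lock (see s3c_hsotg_ep_queue_lock()).
      */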
 782static int s3c_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
 783                              gfp_t gfp_flags)
 784{
 785        struct s3c_hsotg_req *hs_req = our_req(req);
 786        struct s3c_hsotg_ep *hs_ep = our_ep(ep);
 787        struct dwc2_hsotg *hs = hs_ep->parent;
 788        bool first;
 789        int ret;
 790
 791        dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
 792                ep->name, req, req->length, req->buf, req->no_interrupt,
 793                req->zero, req->short_not_ok);
 794
 795        /* initialise status of the request */
 796        INIT_LIST_HEAD(&hs_req->queue);
 797        req->actual = 0;
 798        req->status = -EINPROGRESS;
 799
 800        ret = s3c_hsotg_handle_unaligned_buf_start(hs, hs_ep, hs_req);
 801        if (ret)
 802                return ret;
 803
 804        /* if we're using DMA, sync the buffers as necessary */
 805        if (using_dma(hs)) {
 806                ret = s3c_hsotg_map_dma(hs, hs_ep, req);
 807                if (ret)
 808                        return ret;
 809        }
 810
 811        first = list_empty(&hs_ep->queue);
 812        list_add_tail(&hs_req->queue, &hs_ep->queue);
 813
 814        if (first)
 815                s3c_hsotg_start_req(hs, hs_ep, hs_req, false);
 816
 817        return 0;
 818}
 819
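     /**
      * s3c_hsotg_ep_queue_lock - queue a request, taking the driver's lock
      * @ep: The endpoint to queue the request on.
      * @req: The request to queue.
      * @gfp_flags: Allocation flags.
      *
      * Wrapper around s3c_hsotg_ep_queue() which acquires the device lock
      * for callers that do not already hold it.
      */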
 820static int s3c_hsotg_ep_queue_lock(struct usb_ep *ep, struct usb_request *req,
 821                              gfp_t gfp_flags)
 822{
 823        struct s3c_hsotg_ep *hs_ep = our_ep(ep);
 824        struct dwc2_hsotg *hs = hs_ep->parent;
 825        unsigned long flags = 0;
 826        int ret = 0;
 827
 828        spin_lock_irqsave(&hs->lock, flags);
 829        ret = s3c_hsotg_ep_queue(ep, req, gfp_flags);
 830        spin_unlock_irqrestore(&hs->lock, flags);
 831
 832        return ret;
 833}
 834
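     /**
      * s3c_hsotg_ep_free_request - free a previously allocated request
      * @ep: The endpoint the request was allocated for.
      * @req: The request to free.
      */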
 835static void s3c_hsotg_ep_free_request(struct usb_ep *ep,
 836                                      struct usb_request *req)
 837{
 838        struct s3c_hsotg_req *hs_req = our_req(req);
 839
 840        kfree(hs_req);
 841}
 842
 843/**
 844 * s3c_hsotg_complete_oursetup - setup completion callback
 845 * @ep: The endpoint the request was on.
 846 * @req: The request completed.
 847 *
 848 * Called on completion of any requests the driver itself
 849 * submitted that need cleaning up.
 850 */
 851static void s3c_hsotg_complete_oursetup(struct usb_ep *ep,
 852                                        struct usb_request *req)
 853{
 854        struct s3c_hsotg_ep *hs_ep = our_ep(ep);
 855        struct dwc2_hsotg *hsotg = hs_ep->parent;
 856
 857        dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);
 858
 859        s3c_hsotg_ep_free_request(ep, req);
 860}
 861
 862/**
 863 * ep_from_windex - convert control wIndex value to endpoint
 864 * @hsotg: The driver state.
 865 * @windex: The control request wIndex field (in host order).
 866 *
  867 * Convert the given wIndex into a pointer to a driver endpoint
 868 * structure, or return NULL if it is not a valid endpoint.
 869 */
 870static struct s3c_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
 871                                           u32 windex)
 872{
 873        struct s3c_hsotg_ep *ep;
 874        int dir = (windex & USB_DIR_IN) ? 1 : 0;
 875        int idx = windex & 0x7F;
 876
 877        if (windex >= 0x100)
 878                return NULL;
 879
 880        if (idx > hsotg->num_of_eps)
 881                return NULL;
 882
 883        ep = index_to_ep(hsotg, idx, dir);
 884
 885        if (idx && ep->dir_in != dir)
 886                return NULL;
 887
 888        return ep;
 889}
 890
 891/**
  892 * s3c_hsotg_set_test_mode - Enable USB test modes
  893 * @hsotg: The driver state.
  894 * @testmode: requested USB test mode
  895 * Enable the USB test mode requested by the host.
 896 */
 897static int s3c_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode)
 898{
 899        int dctl = readl(hsotg->regs + DCTL);
 900
 901        dctl &= ~DCTL_TSTCTL_MASK;
 902        switch (testmode) {
 903        case TEST_J:
 904        case TEST_K:
 905        case TEST_SE0_NAK:
 906        case TEST_PACKET:
 907        case TEST_FORCE_EN:
 908                dctl |= testmode << DCTL_TSTCTL_SHIFT;
 909                break;
 910        default:
 911                return -EINVAL;
 912        }
 913        writel(dctl, hsotg->regs + DCTL);
 914        return 0;
 915}
 916
 917/**
 918 * s3c_hsotg_send_reply - send reply to control request
 919 * @hsotg: The device state
 920 * @ep: Endpoint 0
 921 * @buff: Buffer for request
 922 * @length: Length of reply.
 923 *
 924 * Create a request and queue it on the given endpoint. This is useful as
 925 * an internal method of sending replies to certain control requests, etc.
 926 */
 927static int s3c_hsotg_send_reply(struct dwc2_hsotg *hsotg,
 928                                struct s3c_hsotg_ep *ep,
 929                                void *buff,
 930                                int length)
 931{
 932        struct usb_request *req;
 933        int ret;
 934
 935        dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length);
 936
 937        req = s3c_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);
 938        hsotg->ep0_reply = req;
 939        if (!req) {
 940                dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__);
 941                return -ENOMEM;
 942        }
 943
 944        req->buf = hsotg->ep0_buff;
 945        req->length = length;
 946        /*
  947         * the zero flag is for sending a zlp in the DATA IN stage. It has no
  948         * impact on the STATUS stage.
 949         */
 950        req->zero = 0;
 951        req->complete = s3c_hsotg_complete_oursetup;
 952
 953        if (length)
 954                memcpy(req->buf, buff, length);
 955
 956        ret = s3c_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
 957        if (ret) {
 958                dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);
 959                return ret;
 960        }
 961
 962        return 0;
 963}
 964
 965/**
 966 * s3c_hsotg_process_req_status - process request GET_STATUS
 967 * @hsotg: The device state
 968 * @ctrl: USB control request
 969 */
 970static int s3c_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
 971                                        struct usb_ctrlrequest *ctrl)
 972{
 973        struct s3c_hsotg_ep *ep0 = hsotg->eps_out[0];
 974        struct s3c_hsotg_ep *ep;
 975        __le16 reply;
 976        int ret;
 977
 978        dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
 979
 980        if (!ep0->dir_in) {
 981                dev_warn(hsotg->dev, "%s: direction out?\n", __func__);
 982                return -EINVAL;
 983        }
 984
 985        switch (ctrl->bRequestType & USB_RECIP_MASK) {
 986        case USB_RECIP_DEVICE:
 987                reply = cpu_to_le16(0); /* bit 0 => self powered,
 988                                         * bit 1 => remote wakeup */
 989                break;
 990
 991        case USB_RECIP_INTERFACE:
 992                /* currently, the data result should be zero */
 993                reply = cpu_to_le16(0);
 994                break;
 995
 996        case USB_RECIP_ENDPOINT:
 997                ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
 998                if (!ep)
 999                        return -ENOENT;
1000
1001                reply = cpu_to_le16(ep->halted ? 1 : 0);
1002                break;
1003
1004        default:
1005                return 0;
1006        }
1007
1008        if (le16_to_cpu(ctrl->wLength) != 2)
1009                return -EINVAL;
1010
1011        ret = s3c_hsotg_send_reply(hsotg, ep0, &reply, 2);
1012        if (ret) {
1013                dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
1014                return ret;
1015        }
1016
1017        return 1;
1018}
1019
1020static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value);
1021
1022/**
1023 * get_ep_head - return the first request on the endpoint
 1024 * @hs_ep: The controller endpoint to get the request head from
1025 *
1026 * Get the first request on the endpoint.
1027 */
1028static struct s3c_hsotg_req *get_ep_head(struct s3c_hsotg_ep *hs_ep)
1029{
1030        if (list_empty(&hs_ep->queue))
1031                return NULL;
1032
1033        return list_first_entry(&hs_ep->queue, struct s3c_hsotg_req, queue);
1034}
1035
1036/**
1037 * s3c_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
1038 * @hsotg: The device state
1039 * @ctrl: USB control request
1040 */
1041static int s3c_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
1042                                         struct usb_ctrlrequest *ctrl)
1043{
1044        struct s3c_hsotg_ep *ep0 = hsotg->eps_out[0];
1045        struct s3c_hsotg_req *hs_req;
1046        bool restart;
1047        bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
1048        struct s3c_hsotg_ep *ep;
1049        int ret;
1050        bool halted;
1051        u32 recip;
1052        u32 wValue;
1053        u32 wIndex;
1054
1055        dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
1056                __func__, set ? "SET" : "CLEAR");
1057
1058        wValue = le16_to_cpu(ctrl->wValue);
1059        wIndex = le16_to_cpu(ctrl->wIndex);
1060        recip = ctrl->bRequestType & USB_RECIP_MASK;
1061
1062        switch (recip) {
1063        case USB_RECIP_DEVICE:
1064                switch (wValue) {
1065                case USB_DEVICE_TEST_MODE:
1066                        if ((wIndex & 0xff) != 0)
1067                                return -EINVAL;
1068                        if (!set)
1069                                return -EINVAL;
1070
1071                        hsotg->test_mode = wIndex >> 8;
1072                        ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
1073                        if (ret) {
1074                                dev_err(hsotg->dev,
1075                                        "%s: failed to send reply\n", __func__);
1076                                return ret;
1077                        }
1078                        break;
1079                default:
1080                        return -ENOENT;
1081                }
1082                break;
1083
1084        case USB_RECIP_ENDPOINT:
1085                ep = ep_from_windex(hsotg, wIndex);
1086                if (!ep) {
1087                        dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
1088                                __func__, wIndex);
1089                        return -ENOENT;
1090                }
1091
1092                switch (wValue) {
1093                case USB_ENDPOINT_HALT:
1094                        halted = ep->halted;
1095
1096                        s3c_hsotg_ep_sethalt(&ep->ep, set);
1097
1098                        ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
1099                        if (ret) {
1100                                dev_err(hsotg->dev,
1101                                        "%s: failed to send reply\n", __func__);
1102                                return ret;
1103                        }
1104
1105                        /*
1106                         * we have to complete all requests for ep if it was
1107                         * halted, and the halt was cleared by CLEAR_FEATURE
1108                         */
1109
1110                        if (!set && halted) {
1111                                /*
1112                                 * If we have request in progress,
1113                                 * then complete it
1114                                 */
1115                                if (ep->req) {
1116                                        hs_req = ep->req;
1117                                        ep->req = NULL;
1118                                        list_del_init(&hs_req->queue);
1119                                        if (hs_req->req.complete) {
1120                                                spin_unlock(&hsotg->lock);
1121                                                usb_gadget_giveback_request(
1122                                                        &ep->ep, &hs_req->req);
1123                                                spin_lock(&hsotg->lock);
1124                                        }
1125                                }
1126
1127                                /* If we have pending request, then start it */
1128                                if (!ep->req) {
1129                                        restart = !list_empty(&ep->queue);
1130                                        if (restart) {
1131                                                hs_req = get_ep_head(ep);
1132                                                s3c_hsotg_start_req(hsotg, ep,
1133                                                                hs_req, false);
1134                                        }
1135                                }
1136                        }
1137
1138                        break;
1139
1140                default:
1141                        return -ENOENT;
1142                }
1143                break;
1144        default:
1145                return -ENOENT;
1146        }
1147        return 1;
1148}
1149
1150static void s3c_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg);
1151
1152/**
1153 * s3c_hsotg_stall_ep0 - stall ep0
1154 * @hsotg: The device state
1155 *
1156 * Set stall for ep0 as response for setup request.
1157 */
1158static void s3c_hsotg_stall_ep0(struct dwc2_hsotg *hsotg)
1159{
1160        struct s3c_hsotg_ep *ep0 = hsotg->eps_out[0];
1161        u32 reg;
1162        u32 ctrl;
1163
1164        dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
1165        reg = (ep0->dir_in) ? DIEPCTL0 : DOEPCTL0;
1166
1167        /*
1168         * DxEPCTL_Stall will be cleared by EP once it has
1169         * taken effect, so no need to clear later.
1170         */
1171
1172        ctrl = readl(hsotg->regs + reg);
1173        ctrl |= DXEPCTL_STALL;
1174        ctrl |= DXEPCTL_CNAK;
1175        writel(ctrl, hsotg->regs + reg);
1176
1177        dev_dbg(hsotg->dev,
1178                "written DXEPCTL=0x%08x to %08x (DXEPCTL=0x%08x)\n",
1179                ctrl, reg, readl(hsotg->regs + reg));
1180
 1181        /*
 1182         * complete won't be called, so we enqueue
 1183         * setup request here
 1184         */
 1185        s3c_hsotg_enqueue_setup(hsotg);
1186}
1187
1188/**
1189 * s3c_hsotg_process_control - process a control request
1190 * @hsotg: The device state
1191 * @ctrl: The control request received
1192 *
1193 * The controller has received the SETUP phase of a control request, and
1194 * needs to work out what to do next (and whether to pass it on to the
1195 * gadget driver).
1196 */
1197static void s3c_hsotg_process_control(struct dwc2_hsotg *hsotg,
1198                                      struct usb_ctrlrequest *ctrl)
1199{
1200        struct s3c_hsotg_ep *ep0 = hsotg->eps_out[0];
1201        int ret = 0;
1202        u32 dcfg;
1203
1204        dev_dbg(hsotg->dev, "ctrl Req=%02x, Type=%02x, V=%04x, L=%04x\n",
1205                 ctrl->bRequest, ctrl->bRequestType,
1206                 ctrl->wValue, ctrl->wLength);
1207
1208        if (ctrl->wLength == 0) {
1209                ep0->dir_in = 1;
1210                hsotg->ep0_state = DWC2_EP0_STATUS_IN;
1211        } else if (ctrl->bRequestType & USB_DIR_IN) {
1212                ep0->dir_in = 1;
1213                hsotg->ep0_state = DWC2_EP0_DATA_IN;
1214        } else {
1215                ep0->dir_in = 0;
1216                hsotg->ep0_state = DWC2_EP0_DATA_OUT;
1217        }
1218
1219        if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1220                switch (ctrl->bRequest) {
1221                case USB_REQ_SET_ADDRESS:
1222                        hsotg->connected = 1;
1223                        dcfg = readl(hsotg->regs + DCFG);
1224                        dcfg &= ~DCFG_DEVADDR_MASK;
1225                        dcfg |= (le16_to_cpu(ctrl->wValue) <<
1226                                 DCFG_DEVADDR_SHIFT) & DCFG_DEVADDR_MASK;
1227                        writel(dcfg, hsotg->regs + DCFG);
1228
1229                        dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);
1230
1231                        ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
1232                        return;
1233
1234                case USB_REQ_GET_STATUS:
1235                        ret = s3c_hsotg_process_req_status(hsotg, ctrl);
1236                        break;
1237
1238                case USB_REQ_CLEAR_FEATURE:
1239                case USB_REQ_SET_FEATURE:
1240                        ret = s3c_hsotg_process_req_feature(hsotg, ctrl);
1241                        break;
1242                }
1243        }
1244
1245        /* as a fallback, try delivering it to the driver to deal with */
1246
1247        if (ret == 0 && hsotg->driver) {
1248                spin_unlock(&hsotg->lock);
1249                ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
1250                spin_lock(&hsotg->lock);
1251                if (ret < 0)
1252                        dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
1253        }
1254
1255        /*
 1256         * the request either cannot be handled or is not formatted correctly,
 1257         * so respond with a STALL for the status stage to indicate failure.
1258         */
1259
1260        if (ret < 0)
1261                s3c_hsotg_stall_ep0(hsotg);
1262}
1263
1264/**
1265 * s3c_hsotg_complete_setup - completion of a setup transfer
1266 * @ep: The endpoint the request was on.
1267 * @req: The request completed.
1268 *
1269 * Called on completion of any requests the driver itself submitted for
1270 * EP0 setup packets
1271 */
1272static void s3c_hsotg_complete_setup(struct usb_ep *ep,
1273                                     struct usb_request *req)
1274{
1275        struct s3c_hsotg_ep *hs_ep = our_ep(ep);
1276        struct dwc2_hsotg *hsotg = hs_ep->parent;
1277
1278        if (req->status < 0) {
1279                dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
1280                return;
1281        }
1282
1283        spin_lock(&hsotg->lock);
1284        if (req->actual == 0)
1285                s3c_hsotg_enqueue_setup(hsotg);
1286        else
1287                s3c_hsotg_process_control(hsotg, req->buf);
1288        spin_unlock(&hsotg->lock);
1289}
1290
1291/**
1292 * s3c_hsotg_enqueue_setup - start a request for EP0 packets
1293 * @hsotg: The device state.
1294 *
 1295 * Enqueue a request on EP0 if necessary to receive any SETUP packets
 1296 * sent from the host.
1297 */
1298static void s3c_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg)
1299{
1300        struct usb_request *req = hsotg->ctrl_req;
1301        struct s3c_hsotg_req *hs_req = our_req(req);
1302        int ret;
1303
1304        dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__);
1305
1306        req->zero = 0;
1307        req->length = 8;
1308        req->buf = hsotg->ctrl_buff;
1309        req->complete = s3c_hsotg_complete_setup;
1310
1311        if (!list_empty(&hs_req->queue)) {
1312                dev_dbg(hsotg->dev, "%s already queued???\n", __func__);
1313                return;
1314        }
1315
1316        hsotg->eps_out[0]->dir_in = 0;
1317        hsotg->eps_out[0]->send_zlp = 0;
1318        hsotg->ep0_state = DWC2_EP0_SETUP;
1319
1320        ret = s3c_hsotg_ep_queue(&hsotg->eps_out[0]->ep, req, GFP_ATOMIC);
1321        if (ret < 0) {
1322                dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret);
1323                /*
1324                 * Don't think there's much we can do other than watch the
1325                 * driver fail.
1326                 */
1327        }
1328}
1329
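     /**
      * s3c_hsotg_program_zlp - program a zero-length packet transfer
      * @hsotg: The device instance.
      * @hs_ep: The endpoint to send or receive the zero-length packet on.
      *
      * Set the endpoint's transfer size registers for a single zero-length
      * packet and enable the endpoint, clearing any NAK set by the core.
      */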
1330static void s3c_hsotg_program_zlp(struct dwc2_hsotg *hsotg,
1331                                        struct s3c_hsotg_ep *hs_ep)
1332{
1333        u32 ctrl;
1334        u8 index = hs_ep->index;
1335        u32 epctl_reg = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
1336        u32 epsiz_reg = hs_ep->dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
1337
1338        if (hs_ep->dir_in)
1339                dev_dbg(hsotg->dev, "Sending zero-length packet on ep%d\n",
1340                                                                        index);
1341        else
1342                dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n",
1343                                                                        index);
1344
1345        writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
1346                        DXEPTSIZ_XFERSIZE(0), hsotg->regs +
1347                        epsiz_reg);
1348
1349        ctrl = readl(hsotg->regs + epctl_reg);
1350        ctrl |= DXEPCTL_CNAK;  /* clear NAK set by core */
1351        ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
1352        ctrl |= DXEPCTL_USBACTEP;
1353        writel(ctrl, hsotg->regs + epctl_reg);
1354}
1355
1356/**
1357 * s3c_hsotg_complete_request - complete a request given to us
1358 * @hsotg: The device state.
1359 * @hs_ep: The endpoint the request was on.
1360 * @hs_req: The request to complete.
1361 * @result: The result code (0 => Ok, otherwise errno)
1362 *
1363 * The given request has finished, so call the necessary completion
1364 * if it has one and then look to see if we can start a new request
1365 * on the endpoint.
1366 *
1367 * Note, expects the ep to already be locked as appropriate.
1368 */
1369static void s3c_hsotg_complete_request(struct dwc2_hsotg *hsotg,
1370                                       struct s3c_hsotg_ep *hs_ep,
1371                                       struct s3c_hsotg_req *hs_req,
1372                                       int result)
1373{
1374        bool restart;
1375
1376        if (!hs_req) {
1377                dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
1378                return;
1379        }
1380
1381        dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
1382                hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);
1383
1384        /*
1385         * only replace the status if we've not already set an error
1386         * from a previous transaction
1387         */
1388
1389        if (hs_req->req.status == -EINPROGRESS)
1390                hs_req->req.status = result;
1391
1392        s3c_hsotg_handle_unaligned_buf_complete(hsotg, hs_ep, hs_req);
1393
1394        hs_ep->req = NULL;
1395        list_del_init(&hs_req->queue);
1396
1397        if (using_dma(hsotg))
1398                s3c_hsotg_unmap_dma(hsotg, hs_ep, hs_req);
1399
1400        /*
1401         * call the complete request with the locks off, just in case the
1402         * request tries to queue more work for this endpoint.
1403         */
1404
1405        if (hs_req->req.complete) {
1406                spin_unlock(&hsotg->lock);
1407                usb_gadget_giveback_request(&hs_ep->ep, &hs_req->req);
1408                spin_lock(&hsotg->lock);
1409        }
1410
1411        /*
1412         * Look to see if there is anything else to do. Note, the completion
1413         * of the previous request may have caused a new request to be started
1414         * so be careful when doing this.
1415         */
1416
1417        if (!hs_ep->req && result >= 0) {
1418                restart = !list_empty(&hs_ep->queue);
1419                if (restart) {
1420                        hs_req = get_ep_head(hs_ep);
1421                        s3c_hsotg_start_req(hsotg, hs_ep, hs_req, false);
1422                }
1423        }
1424}
1425
1426/**
1427 * s3c_hsotg_rx_data - receive data from the FIFO for an endpoint
1428 * @hsotg: The device state.
1429 * @ep_idx: The endpoint index for the data
1430 * @size: The size of data in the fifo, in bytes
1431 *
1432 * The FIFO status shows there is data to read from the FIFO for a given
1433 * endpoint, so sort out whether we need to read the data into a request
1434 * that has been made for that endpoint.
1435 */
1436static void s3c_hsotg_rx_data(struct dwc2_hsotg *hsotg, int ep_idx, int size)
1437{
1438        struct s3c_hsotg_ep *hs_ep = hsotg->eps_out[ep_idx];
1439        struct s3c_hsotg_req *hs_req = hs_ep->req;
1440        void __iomem *fifo = hsotg->regs + EPFIFO(ep_idx);
1441        int to_read;
1442        int max_req;
1443        int read_ptr;
1444
1445
1446        if (!hs_req) {
1447                u32 epctl = readl(hsotg->regs + DOEPCTL(ep_idx));
1448                int ptr;
1449
1450                dev_dbg(hsotg->dev,
 1451                 "%s: FIFO %d bytes on ep%d but no req (DXEPCTL=0x%08x)\n",
1452                         __func__, size, ep_idx, epctl);
1453
1454                /* dump the data from the FIFO, we've nothing we can do */
1455                for (ptr = 0; ptr < size; ptr += 4)
1456                        (void)readl(fifo);
1457
1458                return;
1459        }
1460
1461        to_read = size;
1462        read_ptr = hs_req->req.actual;
1463        max_req = hs_req->req.length - read_ptr;
1464
1465        dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
1466                __func__, to_read, max_req, read_ptr, hs_req->req.length);
1467
1468        if (to_read > max_req) {
1469                /*
 1470                 * more data appeared than we were willing
 1471                 * to deal with in this request.
 1472                 */
 1473
 1474                /* currently we don't deal with this */
1475                WARN_ON_ONCE(1);
1476        }
1477
1478        hs_ep->total_data += to_read;
1479        hs_req->req.actual += to_read;
1480        to_read = DIV_ROUND_UP(to_read, 4);
1481
1482        /*
1483         * note, we might over-write the buffer end by 3 bytes depending on
1484         * alignment of the data.
1485         */
1486        ioread32_rep(fifo, hs_req->req.buf + read_ptr, to_read);
1487}
1488
1489/**
1490 * s3c_hsotg_ep0_zlp - send/receive zero-length packet on control endpoint
1491 * @hsotg: The device instance
 1492 * @dir_in: True if the zero-length packet is in the IN direction
1493 *
1494 * Generate a zero-length IN packet request for terminating a SETUP
1495 * transaction.
1496 *
 1497 * Note, since we don't write any data to the TxFIFO, it is
 1498 * currently believed that we do not need to wait for any space in
1499 * the TxFIFO.
1500 */
1501static void s3c_hsotg_ep0_zlp(struct dwc2_hsotg *hsotg, bool dir_in)
1502{
1503        /* eps_out[0] is used in both directions */
1504        hsotg->eps_out[0]->dir_in = dir_in;
1505        hsotg->ep0_state = dir_in ? DWC2_EP0_STATUS_IN : DWC2_EP0_STATUS_OUT;
1506
1507        s3c_hsotg_program_zlp(hsotg, hsotg->eps_out[0]);
1508}
1509
1510/**
1511 * s3c_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
1512 * @hsotg: The device instance
1513 * @epnum: The endpoint received from
1514 *
1515 * The RXFIFO has delivered an OutDone event, which means that the data
1516 * transfer for an OUT endpoint has been completed, either by a short
1517 * packet or by the finish of a transfer.
1518 */
1519static void s3c_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
1520{
1521        u32 epsize = readl(hsotg->regs + DOEPTSIZ(epnum));
1522        struct s3c_hsotg_ep *hs_ep = hsotg->eps_out[epnum];
1523        struct s3c_hsotg_req *hs_req = hs_ep->req;
1524        struct usb_request *req = &hs_req->req;
1525        unsigned size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
1526        int result = 0;
1527
1528        if (!hs_req) {
1529                dev_dbg(hsotg->dev, "%s: no request active\n", __func__);
1530                return;
1531        }
1532
1533        if (epnum == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_OUT) {
1534                dev_dbg(hsotg->dev, "zlp packet received\n");
1535                s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
1536                s3c_hsotg_enqueue_setup(hsotg);
1537                return;
1538        }
1539
1540        if (using_dma(hsotg)) {
1541                unsigned size_done;
1542
1543                /*
1544                 * Calculate the size of the transfer by checking how much
1545                 * is left in the endpoint size register and then working it
1546                 * out from the amount we loaded for the transfer.
1547                 *
1548                 * We need to do this as DMA pointers are always 32bit aligned
1549                 * so may overshoot/undershoot the transfer.
1550                 */
1551
1552                size_done = hs_ep->size_loaded - size_left;
1553                size_done += hs_ep->last_load;
1554
1555                req->actual = size_done;
1556        }
1557
1558        /* if there is more of the request left to do, schedule a new transfer */
1559        if (req->actual < req->length && size_left == 0) {
1560                s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
1561                return;
1562        }
1563
1564        if (req->actual < req->length && req->short_not_ok) {
1565                dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
1566                        __func__, req->actual, req->length);
1567
1568                /*
1569                 * todo - decide what error should be returned here; at the
1570                 * moment nothing checks the status we would set.
1571                 */
1572        }
1573
1574        if (epnum == 0 && hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
1575                /* Move to STATUS IN */
1576                s3c_hsotg_ep0_zlp(hsotg, true);
1577                return;
1578        }
1579
1580        s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
1581}
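
/*
 * Illustrative sketch only, not part of this driver: the DMA branch above
 * works out how many bytes actually arrived from the residue left in the
 * endpoint size register. For example, with size_loaded = 512, size_left =
 * 112 and last_load = 0, the request received 400 bytes. Hypothetical
 * helper showing just that arithmetic:
 */
#if 0
static unsigned example_out_bytes_done(unsigned size_loaded,
                                       unsigned size_left,
                                       unsigned last_load)
{
        /* what we programmed, minus what the core says is still unfilled */
        return (size_loaded - size_left) + last_load;
}
#endif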
1582
1583/**
1584 * s3c_hsotg_read_frameno - read current frame number
1585 * @hsotg: The device instance
1586 *
1587 * Return the current frame number
1588 */
1589static u32 s3c_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
1590{
1591        u32 dsts;
1592
1593        dsts = readl(hsotg->regs + DSTS);
1594        dsts &= DSTS_SOFFN_MASK;
1595        dsts >>= DSTS_SOFFN_SHIFT;
1596
1597        return dsts;
1598}
1599
1600/**
1601 * s3c_hsotg_handle_rx - RX FIFO has data
1602 * @hsotg: The device instance
1603 *
1604 * The IRQ handler has detected that the RX FIFO has some data in it
1605 * that requires processing, so find out what is in there and do the
1606 * appropriate read.
1607 *
1608 * The RXFIFO is a true FIFO, the packets coming out are still in packet
1609 * chunks, so if you have x packets received on an endpoint you'll get x
1610 * FIFO events delivered, each with a packet's worth of data in it.
1611 *
1612 * When using DMA, we should not be processing events from the RXFIFO
1613 * as the actual data should be sent to the memory directly and we turn
1614 * on the completion interrupts to get notifications of transfer completion.
1615 */
1616static void s3c_hsotg_handle_rx(struct dwc2_hsotg *hsotg)
1617{
1618        u32 grxstsr = readl(hsotg->regs + GRXSTSP);
1619        u32 epnum, status, size;
1620
1621        WARN_ON(using_dma(hsotg));
1622
1623        epnum = grxstsr & GRXSTS_EPNUM_MASK;
1624        status = grxstsr & GRXSTS_PKTSTS_MASK;
1625
1626        size = grxstsr & GRXSTS_BYTECNT_MASK;
1627        size >>= GRXSTS_BYTECNT_SHIFT;
1628
1629        dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
1630                        __func__, grxstsr, size, epnum);
1631
1632        switch ((status & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT) {
1633        case GRXSTS_PKTSTS_GLOBALOUTNAK:
1634                dev_dbg(hsotg->dev, "GLOBALOUTNAK\n");
1635                break;
1636
1637        case GRXSTS_PKTSTS_OUTDONE:
1638                dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",
1639                        s3c_hsotg_read_frameno(hsotg));
1640
1641                if (!using_dma(hsotg))
1642                        s3c_hsotg_handle_outdone(hsotg, epnum);
1643                break;
1644
1645        case GRXSTS_PKTSTS_SETUPDONE:
1646                dev_dbg(hsotg->dev,
1647                        "SetupDone (Frame=0x%08x, DOEPCTL=0x%08x)\n",
1648                        s3c_hsotg_read_frameno(hsotg),
1649                        readl(hsotg->regs + DOEPCTL(0)));
1650                /*
1651                 * Call s3c_hsotg_handle_outdone here if it was not called from
1652                 * GRXSTS_PKTSTS_OUTDONE. That is, if the core didn't
1653                 * generate GRXSTS_PKTSTS_OUTDONE for setup packet.
1654                 */
1655                if (hsotg->ep0_state == DWC2_EP0_SETUP)
1656                        s3c_hsotg_handle_outdone(hsotg, epnum);
1657                break;
1658
1659        case GRXSTS_PKTSTS_OUTRX:
1660                s3c_hsotg_rx_data(hsotg, epnum, size);
1661                break;
1662
1663        case GRXSTS_PKTSTS_SETUPRX:
1664                dev_dbg(hsotg->dev,
1665                        "SetupRX (Frame=0x%08x, DOEPCTL=0x%08x)\n",
1666                        s3c_hsotg_read_frameno(hsotg),
1667                        readl(hsotg->regs + DOEPCTL(0)));
1668
1669                WARN_ON(hsotg->ep0_state != DWC2_EP0_SETUP);
1670
1671                s3c_hsotg_rx_data(hsotg, epnum, size);
1672                break;
1673
1674        default:
1675                dev_warn(hsotg->dev, "%s: unknown status %08x\n",
1676                         __func__, grxstsr);
1677
1678                s3c_hsotg_dump(hsotg);
1679                break;
1680        }
1681}
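
/*
 * Illustrative sketch only, not part of this driver: GRXSTSP packs the
 * endpoint number, byte count and packet status into one word, which the
 * handler above separates with the GRXSTS_* masks. A hypothetical helper
 * doing the same decode:
 */
#if 0
struct example_grxsts {
        u32 epnum;
        u32 bytes;
        u32 pktsts;
};

static struct example_grxsts example_decode_grxsts(u32 grxstsr)
{
        struct example_grxsts st;

        st.epnum = grxstsr & GRXSTS_EPNUM_MASK;
        st.bytes = (grxstsr & GRXSTS_BYTECNT_MASK) >> GRXSTS_BYTECNT_SHIFT;
        st.pktsts = (grxstsr & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT;

        return st;
}
#endif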
1682
1683/**
1684 * s3c_hsotg_ep0_mps - turn max packet size into register setting
1685 * @mps: The maximum packet size in bytes.
1686 */
1687static u32 s3c_hsotg_ep0_mps(unsigned int mps)
1688{
1689        switch (mps) {
1690        case 64:
1691                return D0EPCTL_MPS_64;
1692        case 32:
1693                return D0EPCTL_MPS_32;
1694        case 16:
1695                return D0EPCTL_MPS_16;
1696        case 8:
1697                return D0EPCTL_MPS_8;
1698        }
1699
1700        /* bad max packet size, warn and return invalid result */
1701        WARN_ON(1);
1702        return (u32)-1;
1703}
1704
1705/**
1706 * s3c_hsotg_set_ep_maxpacket - set endpoint's max-packet field
1707 * @hsotg: The driver state.
1708 * @ep: The index number of the endpoint
1709 * @mps: The maximum packet size in bytes
1710 *
1711 * Configure the maximum packet size for the given endpoint, updating
1712 * the hardware control registers to reflect this.
1713 */
1714static void s3c_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg,
1715                        unsigned int ep, unsigned int mps, unsigned int dir_in)
1716{
1717        struct s3c_hsotg_ep *hs_ep;
1718        void __iomem *regs = hsotg->regs;
1719        u32 mpsval;
1720        u32 mcval;
1721        u32 reg;
1722
1723        hs_ep = index_to_ep(hsotg, ep, dir_in);
1724        if (!hs_ep)
1725                return;
1726
1727        if (ep == 0) {
1728                /* EP0 is a special case */
1729                mpsval = s3c_hsotg_ep0_mps(mps);
1730                if (mpsval > 3)
1731                        goto bad_mps;
1732                hs_ep->ep.maxpacket = mps;
1733                hs_ep->mc = 1;
1734        } else {
1735                mpsval = mps & DXEPCTL_MPS_MASK;
1736                if (mpsval > 1024)
1737                        goto bad_mps;
1738                mcval = ((mps >> 11) & 0x3) + 1;
1739                hs_ep->mc = mcval;
1740                if (mcval > 3)
1741                        goto bad_mps;
1742                hs_ep->ep.maxpacket = mpsval;
1743        }
1744
1745        if (dir_in) {
1746                reg = readl(regs + DIEPCTL(ep));
1747                reg &= ~DXEPCTL_MPS_MASK;
1748                reg |= mpsval;
1749                writel(reg, regs + DIEPCTL(ep));
1750        } else {
1751                reg = readl(regs + DOEPCTL(ep));
1752                reg &= ~DXEPCTL_MPS_MASK;
1753                reg |= mpsval;
1754                writel(reg, regs + DOEPCTL(ep));
1755        }
1756
1757        return;
1758
1759bad_mps:
1760        dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps);
1761}
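
/*
 * Illustrative sketch only, not part of this driver: for non-control
 * endpoints the code above splits a USB 2.0 wMaxPacketSize value into the
 * packet size (bits 10:0) and the additional-transactions field (bits
 * 12:11), the latter becoming the multi count (mc). For example, 0x1400
 * decodes to 1024 bytes with mc = 3. Hypothetical helper:
 */
#if 0
static void example_decode_maxp(u16 maxp, unsigned *bytes, unsigned *mc)
{
        *bytes = maxp & 0x7ff;                  /* bits 10:0 */
        *mc = ((maxp >> 11) & 0x3) + 1;         /* bits 12:11, plus one */
}
#endif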
1762
1763/**
1764 * s3c_hsotg_txfifo_flush - flush Tx FIFO
1765 * @hsotg: The driver state
1766 * @idx: The index for the endpoint (0..15)
1767 */
1768static void s3c_hsotg_txfifo_flush(struct dwc2_hsotg *hsotg, unsigned int idx)
1769{
1770        int timeout;
1771        int val;
1772
1773        writel(GRSTCTL_TXFNUM(idx) | GRSTCTL_TXFFLSH,
1774                hsotg->regs + GRSTCTL);
1775
1776        /* wait until the fifo is flushed */
1777        timeout = 100;
1778
1779        while (1) {
1780                val = readl(hsotg->regs + GRSTCTL);
1781
1782                if ((val & (GRSTCTL_TXFFLSH)) == 0)
1783                        break;
1784
1785                if (--timeout == 0) {
1786                        dev_err(hsotg->dev,
1787                                "%s: timeout flushing fifo (GRSTCTL=%08x)\n",
1788                                __func__, val);
1789                        break;
1790                }
1791
1792                udelay(1);
1793        }
1794}
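
/*
 * Illustrative sketch only, not part of this driver: the flush above uses a
 * common pattern of setting the request bit, polling until the hardware
 * clears it and giving up after a bounded number of 1us waits. A generic
 * form of that poll (hypothetical helper, not a driver API):
 */
#if 0
static int example_poll_bit_clear(void __iomem *reg, u32 bit, int tries)
{
        while (readl(reg) & bit) {
                if (--tries == 0)
                        return -ETIMEDOUT;
                udelay(1);
        }

        return 0;
}
#endif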
1795
1796/**
1797 * s3c_hsotg_trytx - check to see if anything needs transmitting
1798 * @hsotg: The driver state
1799 * @hs_ep: The driver endpoint to check.
1800 *
1801 * Check to see if there is a request that has data to send, and if so
1802 * make an attempt to write data into the FIFO.
1803 */
1804static int s3c_hsotg_trytx(struct dwc2_hsotg *hsotg,
1805                           struct s3c_hsotg_ep *hs_ep)
1806{
1807        struct s3c_hsotg_req *hs_req = hs_ep->req;
1808
1809        if (!hs_ep->dir_in || !hs_req) {
1810                /*
1811                 * if no request is enqueued, disable the interrupts
1812                 * for this endpoint, except for ep0
1813                 */
1814                if (hs_ep->index != 0)
1815                        s3c_hsotg_ctrl_epint(hsotg, hs_ep->index,
1816                                             hs_ep->dir_in, 0);
1817                return 0;
1818        }
1819
1820        if (hs_req->req.actual < hs_req->req.length) {
1821                dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
1822                        hs_ep->index);
1823                return s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
1824        }
1825
1826        return 0;
1827}
1828
1829/**
1830 * s3c_hsotg_complete_in - complete IN transfer
1831 * @hsotg: The device state.
1832 * @hs_ep: The endpoint that has just completed.
1833 *
1834 * An IN transfer has been completed, update the transfer's state and then
1835 * call the relevant completion routines.
1836 */
1837static void s3c_hsotg_complete_in(struct dwc2_hsotg *hsotg,
1838                                  struct s3c_hsotg_ep *hs_ep)
1839{
1840        struct s3c_hsotg_req *hs_req = hs_ep->req;
1841        u32 epsize = readl(hsotg->regs + DIEPTSIZ(hs_ep->index));
1842        int size_left, size_done;
1843
1844        if (!hs_req) {
1845                dev_dbg(hsotg->dev, "XferCompl but no req\n");
1846                return;
1847        }
1848
1849        /* Finish ZLP handling for IN EP0 transactions */
1850        if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_IN) {
1851                dev_dbg(hsotg->dev, "zlp packet sent\n");
1852                s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
1853                if (hsotg->test_mode) {
1854                        int ret;
1855
1856                        ret = s3c_hsotg_set_test_mode(hsotg, hsotg->test_mode);
1857                        if (ret < 0) {
1858                                dev_dbg(hsotg->dev, "Invalid Test #%d\n",
1859                                                hsotg->test_mode);
1860                                s3c_hsotg_stall_ep0(hsotg);
1861                                return;
1862                        }
1863                }
1864                s3c_hsotg_enqueue_setup(hsotg);
1865                return;
1866        }
1867
1868        /*
1869         * Calculate the size of the transfer by checking how much is left
1870         * in the endpoint size register and then working it out from
1871         * the amount we loaded for the transfer.
1872         *
1873         * We do this even for DMA, as the transfer may have incremented
1874         * past the end of the buffer (DMA transfers are always 32bit
1875         * aligned).
1876         */
1877
1878        size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
1879
1880        size_done = hs_ep->size_loaded - size_left;
1881        size_done += hs_ep->last_load;
1882
1883        if (hs_req->req.actual != size_done)
1884                dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
1885                        __func__, hs_req->req.actual, size_done);
1886
1887        hs_req->req.actual = size_done;
1888        dev_dbg(hsotg->dev, "req->length:%d req->actual:%d req->zero:%d\n",
1889                hs_req->req.length, hs_req->req.actual, hs_req->req.zero);
1890
1891        if (!size_left && hs_req->req.actual < hs_req->req.length) {
1892                dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
1893                s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
1894                return;
1895        }
1896
1897        /* Zlp for all endpoints, for ep0 only in DATA IN stage */
1898        if (hs_ep->send_zlp) {
1899                s3c_hsotg_program_zlp(hsotg, hs_ep);
1900                hs_ep->send_zlp = 0;
1901                /* transfer will be completed on next complete interrupt */
1902                return;
1903        }
1904
1905        if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_DATA_IN) {
1906                /* Move to STATUS OUT */
1907                s3c_hsotg_ep0_zlp(hsotg, false);
1908                return;
1909        }
1910
1911        s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
1912}
1913
1914/**
1915 * s3c_hsotg_epint - handle an in/out endpoint interrupt
1916 * @hsotg: The driver state
1917 * @idx: The index for the endpoint (0..15)
1918 * @dir_in: Set if this is an IN endpoint
1919 *
1920 * Process and clear any interrupt pending for an individual endpoint
1921 */
1922static void s3c_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
1923                            int dir_in)
1924{
1925        struct s3c_hsotg_ep *hs_ep = index_to_ep(hsotg, idx, dir_in);
1926        u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
1927        u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
1928        u32 epsiz_reg = dir_in ? DIEPTSIZ(idx) : DOEPTSIZ(idx);
1929        u32 ints;
1930        u32 ctrl;
1931
1932        ints = readl(hsotg->regs + epint_reg);
1933        ctrl = readl(hsotg->regs + epctl_reg);
1934
1935        /* Clear endpoint interrupts */
1936        writel(ints, hsotg->regs + epint_reg);
1937
1938        if (!hs_ep) {
1939                dev_err(hsotg->dev, "%s:Interrupt for unconfigured ep%d(%s)\n",
1940                                        __func__, idx, dir_in ? "in" : "out");
1941                return;
1942        }
1943
1944        dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
1945                __func__, idx, dir_in ? "in" : "out", ints);
1946
1947        /* Don't process XferCompl interrupt if it is a setup packet */
1948        if (idx == 0 && (ints & (DXEPINT_SETUP | DXEPINT_SETUP_RCVD)))
1949                ints &= ~DXEPINT_XFERCOMPL;
1950
1951        if (ints & DXEPINT_XFERCOMPL) {
1952                if (hs_ep->isochronous && hs_ep->interval == 1) {
1953                        if (ctrl & DXEPCTL_EOFRNUM)
1954                                ctrl |= DXEPCTL_SETEVENFR;
1955                        else
1956                                ctrl |= DXEPCTL_SETODDFR;
1957                        writel(ctrl, hsotg->regs + epctl_reg);
1958                }
1959
1960                dev_dbg(hsotg->dev,
1961                        "%s: XferCompl: DxEPCTL=0x%08x, DXEPTSIZ=%08x\n",
1962                        __func__, readl(hsotg->regs + epctl_reg),
1963                        readl(hsotg->regs + epsiz_reg));
1964
1965                /*
1966                 * we get OutDone from the FIFO, so we only need to look
1967                 * at completing IN requests here
1968                 */
1969                if (dir_in) {
1970                        s3c_hsotg_complete_in(hsotg, hs_ep);
1971
1972                        if (idx == 0 && !hs_ep->req)
1973                                s3c_hsotg_enqueue_setup(hsotg);
1974                } else if (using_dma(hsotg)) {
1975                        /*
1976                         * We're using DMA, we need to fire an OutDone here
1977                         * as we ignore the RXFIFO.
1978                         */
1979
1980                        s3c_hsotg_handle_outdone(hsotg, idx);
1981                }
1982        }
1983
1984        if (ints & DXEPINT_EPDISBLD) {
1985                dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
1986
1987                if (dir_in) {
1988                        int epctl = readl(hsotg->regs + epctl_reg);
1989
1990                        s3c_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
1991
1992                        if ((epctl & DXEPCTL_STALL) &&
1993                                (epctl & DXEPCTL_EPTYPE_BULK)) {
1994                                int dctl = readl(hsotg->regs + DCTL);
1995
1996                                dctl |= DCTL_CGNPINNAK;
1997                                writel(dctl, hsotg->regs + DCTL);
1998                        }
1999                }
2000        }
2001
2002        if (ints & DXEPINT_AHBERR)
2003                dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);
2004
2005        if (ints & DXEPINT_SETUP) {  /* Setup or Timeout */
2006                dev_dbg(hsotg->dev, "%s: Setup/Timeout\n",  __func__);
2007
2008                if (using_dma(hsotg) && idx == 0) {
2009                        /*
2010                         * this is the notification we've received a
2011                         * setup packet. In non-DMA mode we'd get this
2012                         * from the RXFIFO, instead we need to process
2013                         * the setup here.
2014                         */
2015
2016                        if (dir_in)
2017                                WARN_ON_ONCE(1);
2018                        else
2019                                s3c_hsotg_handle_outdone(hsotg, 0);
2020                }
2021        }
2022
2023        if (ints & DXEPINT_BACK2BACKSETUP)
2024                dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);
2025
2026        if (dir_in && !hs_ep->isochronous) {
2027                /* not sure if this is important, but we'll clear it anyway */
2028                if (ints & DIEPMSK_INTKNTXFEMPMSK) {
2029                        dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
2030                                __func__, idx);
2031                }
2032
2033                /* this probably means something bad is happening */
2034                if (ints & DIEPMSK_INTKNEPMISMSK) {
2035                        dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
2036                                 __func__, idx);
2037                }
2038
2039                /* FIFO has space or is empty (see GAHBCFG) */
2040                if (hsotg->dedicated_fifos &&
2041                    ints & DIEPMSK_TXFIFOEMPTY) {
2042                        dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
2043                                __func__, idx);
2044                        if (!using_dma(hsotg))
2045                                s3c_hsotg_trytx(hsotg, hs_ep);
2046                }
2047        }
2048}
2049
2050/**
2051 * s3c_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
2052 * @hsotg: The device state.
2053 *
2054 * Handle updating the device settings after the enumeration phase has
2055 * been completed.
2056 */
2057static void s3c_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
2058{
2059        u32 dsts = readl(hsotg->regs + DSTS);
2060        int ep0_mps = 0, ep_mps = 8;
2061
2062        /*
2063         * This should signal the finish of the enumeration phase
2064         * of the USB handshaking, so we should now know what rate
2065         * we connected at.
2066         */
2067
2068        dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);
2069
2070        /*
2071         * note, since we're limited by the size of transfer on EP0, and
2072         * it seems IN transfers must be an even number of packets, we do
2073         * not advertise a 64byte MPS on EP0.
2074         */
2075
2076        /* catch both EnumSpd_FS and EnumSpd_FS48 */
2077        switch (dsts & DSTS_ENUMSPD_MASK) {
2078        case DSTS_ENUMSPD_FS:
2079        case DSTS_ENUMSPD_FS48:
2080                hsotg->gadget.speed = USB_SPEED_FULL;
2081                ep0_mps = EP0_MPS_LIMIT;
2082                ep_mps = 1023;
2083                break;
2084
2085        case DSTS_ENUMSPD_HS:
2086                hsotg->gadget.speed = USB_SPEED_HIGH;
2087                ep0_mps = EP0_MPS_LIMIT;
2088                ep_mps = 1024;
2089                break;
2090
2091        case DSTS_ENUMSPD_LS:
2092                hsotg->gadget.speed = USB_SPEED_LOW;
2093                /*
2094                 * note, we don't actually support LS in this driver at the
2095                 * moment, and the documentation seems to imply that it isn't
2096                 * supported by the PHYs on some of the devices.
2097                 */
2098                break;
2099        }
2100        dev_info(hsotg->dev, "new device is %s\n",
2101                 usb_speed_string(hsotg->gadget.speed));
2102
2103        /*
2104         * we should now know the maximum packet size for an
2105         * endpoint, so set the endpoints to a default value.
2106         */
2107
2108        if (ep0_mps) {
2109                int i;
2110                /* Initialize ep0 for both in and out directions */
2111                s3c_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 1);
2112                s3c_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0);
2113                for (i = 1; i < hsotg->num_of_eps; i++) {
2114                        if (hsotg->eps_in[i])
2115                                s3c_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, 1);
2116                        if (hsotg->eps_out[i])
2117                                s3c_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, 0);
2118                }
2119        }
2120
2121        /* ensure after enumeration our EP0 is active */
2122
2123        s3c_hsotg_enqueue_setup(hsotg);
2124
2125        dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
2126                readl(hsotg->regs + DIEPCTL0),
2127                readl(hsotg->regs + DOEPCTL0));
2128}
2129
2130/**
2131 * kill_all_requests - remove all requests from the endpoint's queue
2132 * @hsotg: The device state.
2133 * @ep: The endpoint the requests may be on.
2134 * @result: The result code to use.
2135 *
2136 * Go through the requests on the given endpoint and mark them
2137 * completed with the given result code.
2138 */
2139static void kill_all_requests(struct dwc2_hsotg *hsotg,
2140                              struct s3c_hsotg_ep *ep,
2141                              int result)
2142{
2143        struct s3c_hsotg_req *req, *treq;
2144        unsigned size;
2145
2146        ep->req = NULL;
2147
2148        list_for_each_entry_safe(req, treq, &ep->queue, queue)
2149                s3c_hsotg_complete_request(hsotg, ep, req,
2150                                           result);
2151
2152        if (!hsotg->dedicated_fifos)
2153                return;
2154        size = (readl(hsotg->regs + DTXFSTS(ep->index)) & 0xffff) * 4;
2155        if (size < ep->fifo_size)
2156                s3c_hsotg_txfifo_flush(hsotg, ep->fifo_index);
2157}
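
/*
 * Illustrative sketch only, not part of this driver: DTXFSTS reports the
 * free space in an IN endpoint's TxFIFO in 32bit words, which is why the
 * code above masks the low 16 bits and multiplies by 4 to get bytes before
 * deciding whether a flush is needed. Hypothetical helper:
 */
#if 0
static unsigned example_txfifo_free_bytes(struct dwc2_hsotg *hsotg,
                                          unsigned int ep_index)
{
        u32 words = readl(hsotg->regs + DTXFSTS(ep_index)) & 0xffff;

        return words * 4;
}
#endif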
2158
2159/**
2160 * s3c_hsotg_disconnect - disconnect service
2161 * @hsotg: The device state.
2162 *
2163 * The device has been disconnected. Remove all current
2164 * transactions and signal the gadget driver that this
2165 * has happened.
2166 */
2167void s3c_hsotg_disconnect(struct dwc2_hsotg *hsotg)
2168{
2169        unsigned ep;
2170
2171        if (!hsotg->connected)
2172                return;
2173
2174        hsotg->connected = 0;
2175        hsotg->test_mode = 0;
2176
2177        for (ep = 0; ep < hsotg->num_of_eps; ep++) {
2178                if (hsotg->eps_in[ep])
2179                        kill_all_requests(hsotg, hsotg->eps_in[ep],
2180                                                                -ESHUTDOWN);
2181                if (hsotg->eps_out[ep])
2182                        kill_all_requests(hsotg, hsotg->eps_out[ep],
2183                                                                -ESHUTDOWN);
2184        }
2185
2186        call_gadget(hsotg, disconnect);
2187}
2188EXPORT_SYMBOL_GPL(s3c_hsotg_disconnect);
2189
2190/**
2191 * s3c_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
2192 * @hsotg: The device state.
2193 * @periodic: True if this is a periodic FIFO interrupt
2194 */
2195static void s3c_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic)
2196{
2197        struct s3c_hsotg_ep *ep;
2198        int epno, ret;
2199
2200        /* look through for any more data to transmit */
2201        for (epno = 0; epno < hsotg->num_of_eps; epno++) {
2202                ep = index_to_ep(hsotg, epno, 1);
2203
2204                if (!ep)
2205                        continue;
2206
2207                if (!ep->dir_in)
2208                        continue;
2209
2210                if ((periodic && !ep->periodic) ||
2211                    (!periodic && ep->periodic))
2212                        continue;
2213
2214                ret = s3c_hsotg_trytx(hsotg, ep);
2215                if (ret < 0)
2216                        break;
2217        }
2218}
2219
2220/* IRQ flags which will trigger a retry around the IRQ loop */
2221#define IRQ_RETRY_MASK (GINTSTS_NPTXFEMP | \
2222                        GINTSTS_PTXFEMP |  \
2223                        GINTSTS_RXFLVL)
2224
2225/**
2226 * s3c_hsotg_corereset - issue softreset to the core
2227 * @hsotg: The device state
2228 *
2229 * Issue a soft reset to the core, and await the core finishing it.
2230 */
2231static int s3c_hsotg_corereset(struct dwc2_hsotg *hsotg)
2232{
2233        int timeout;
2234        u32 grstctl;
2235
2236        dev_dbg(hsotg->dev, "resetting core\n");
2237
2238        /* issue soft reset */
2239        writel(GRSTCTL_CSFTRST, hsotg->regs + GRSTCTL);
2240
2241        timeout = 10000;
2242        do {
2243                grstctl = readl(hsotg->regs + GRSTCTL);
2244        } while ((grstctl & GRSTCTL_CSFTRST) && timeout-- > 0);
2245
2246        if (grstctl & GRSTCTL_CSFTRST) {
2247                dev_err(hsotg->dev, "Failed to get CSftRst asserted\n");
2248                return -EINVAL;
2249        }
2250
2251        timeout = 10000;
2252
2253        while (1) {
2254                u32 grstctl = readl(hsotg->regs + GRSTCTL);
2255
2256                if (timeout-- < 0) {
2257                        dev_info(hsotg->dev,
2258                                 "%s: reset failed, GRSTCTL=%08x\n",
2259                                 __func__, grstctl);
2260                        return -ETIMEDOUT;
2261                }
2262
2263                if (!(grstctl & GRSTCTL_AHBIDLE))
2264                        continue;
2265
2266                break;          /* reset done */
2267        }
2268
2269        dev_dbg(hsotg->dev, "reset successful\n");
2270        return 0;
2271}
2272
2273/**
2274 * s3c_hsotg_core_init_disconnected - program the core for device mode
2275 * @hsotg: The device state
2276 * @is_usb_reset: True if called as part of USB reset handling, in which
2277 *	case the soft reset and soft-disconnect steps are skipped.
2278 */
2279void s3c_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
2280                                                bool is_usb_reset)
2281{
2282        u32 val;
2283
2284        if (!is_usb_reset)
2285                s3c_hsotg_corereset(hsotg);
2286
2287        /*
2288         * we must now enable ep0 ready for host detection and then
2289         * set configuration.
2290         */
2291
2292        /* set the PLL on, remove the HNP/SRP and set the PHY */
2293        val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
2294        writel(hsotg->phyif | GUSBCFG_TOUTCAL(7) |
2295               (val << GUSBCFG_USBTRDTIM_SHIFT), hsotg->regs + GUSBCFG);
2296
2297        s3c_hsotg_init_fifo(hsotg);
2298
2299        if (!is_usb_reset)
2300                __orr32(hsotg->regs + DCTL, DCTL_SFTDISCON);
2301
2302        writel(DCFG_EPMISCNT(1) | DCFG_DEVSPD_HS,  hsotg->regs + DCFG);
2303
2304        /* Clear any pending OTG interrupts */
2305        writel(0xffffffff, hsotg->regs + GOTGINT);
2306
2307        /* Clear any pending interrupts */
2308        writel(0xffffffff, hsotg->regs + GINTSTS);
2309
2310        writel(GINTSTS_ERLYSUSP | GINTSTS_SESSREQINT |
2311                GINTSTS_GOUTNAKEFF | GINTSTS_GINNAKEFF |
2312                GINTSTS_CONIDSTSCHNG | GINTSTS_USBRST |
2313                GINTSTS_ENUMDONE | GINTSTS_OTGINT |
2314                GINTSTS_USBSUSP | GINTSTS_WKUPINT,
2315                hsotg->regs + GINTMSK);
2316
2317        if (using_dma(hsotg))
2318                writel(GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN |
2319                       (GAHBCFG_HBSTLEN_INCR4 << GAHBCFG_HBSTLEN_SHIFT),
2320                       hsotg->regs + GAHBCFG);
2321        else
2322                writel(((hsotg->dedicated_fifos) ? (GAHBCFG_NP_TXF_EMP_LVL |
2323                                                    GAHBCFG_P_TXF_EMP_LVL) : 0) |
2324                       GAHBCFG_GLBL_INTR_EN,
2325                       hsotg->regs + GAHBCFG);
2326
2327        /*
2328         * If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts
2329         * when we have no data to transfer. Otherwise we get flooded by
2330         * interrupts.
2331         */
2332
2333        writel(((hsotg->dedicated_fifos && !using_dma(hsotg)) ?
2334                DIEPMSK_TXFIFOEMPTY | DIEPMSK_INTKNTXFEMPMSK : 0) |
2335                DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK |
2336                DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
2337                DIEPMSK_INTKNEPMISMSK,
2338                hsotg->regs + DIEPMSK);
2339
2340        /*
2341         * don't need XferCompl, we get that from RXFIFO in slave mode. In
2342         * DMA mode we may need this.
2343         */
2344        writel((using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK |
2345                                    DIEPMSK_TIMEOUTMSK) : 0) |
2346                DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK |
2347                DOEPMSK_SETUPMSK,
2348                hsotg->regs + DOEPMSK);
2349
2350        writel(0, hsotg->regs + DAINTMSK);
2351
2352        dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
2353                readl(hsotg->regs + DIEPCTL0),
2354                readl(hsotg->regs + DOEPCTL0));
2355
2356        /* enable in and out endpoint interrupts */
2357        s3c_hsotg_en_gsint(hsotg, GINTSTS_OEPINT | GINTSTS_IEPINT);
2358
2359        /*
2360         * Enable the RXFIFO when in slave mode, as this is how we collect
2361         * the data. In DMA mode, we get events from the FIFO but also
2362         * things we cannot process, so do not use it.
2363         */
2364        if (!using_dma(hsotg))
2365                s3c_hsotg_en_gsint(hsotg, GINTSTS_RXFLVL);
2366
2367        /* Enable interrupts for EP0 in and out */
2368        s3c_hsotg_ctrl_epint(hsotg, 0, 0, 1);
2369        s3c_hsotg_ctrl_epint(hsotg, 0, 1, 1);
2370
2371        if (!is_usb_reset) {
2372                __orr32(hsotg->regs + DCTL, DCTL_PWRONPRGDONE);
2373                udelay(10);  /* see openiboot */
2374                __bic32(hsotg->regs + DCTL, DCTL_PWRONPRGDONE);
2375        }
2376
2377        dev_dbg(hsotg->dev, "DCTL=0x%08x\n", readl(hsotg->regs + DCTL));
2378
2379        /*
2380         * DxEPCTL_USBActEp says RO in manual, but seems to be set by
2381         * writing to the EPCTL register.
2382         */
2383
2384        /* set to read 1 8byte packet */
2385        writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
2386               DXEPTSIZ_XFERSIZE(8), hsotg->regs + DOEPTSIZ0);
2387
2388        writel(s3c_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
2389               DXEPCTL_CNAK | DXEPCTL_EPENA |
2390               DXEPCTL_USBACTEP,
2391               hsotg->regs + DOEPCTL0);
2392
2393        /* enable, but don't activate EP0in */
2394        writel(s3c_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
2395               DXEPCTL_USBACTEP, hsotg->regs + DIEPCTL0);
2396
2397        s3c_hsotg_enqueue_setup(hsotg);
2398
2399        dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
2400                readl(hsotg->regs + DIEPCTL0),
2401                readl(hsotg->regs + DOEPCTL0));
2402
2403        /* clear global NAKs */
2404        val = DCTL_CGOUTNAK | DCTL_CGNPINNAK;
2405        if (!is_usb_reset)
2406                val |= DCTL_SFTDISCON;
2407        __orr32(hsotg->regs + DCTL, val);
2408
2409        /* must be at least 3ms to allow the bus to see the disconnect */
2410        mdelay(3);
2411
2412        hsotg->last_rst = jiffies;
2413}
2414
2415static void s3c_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
2416{
2417        /* set the soft-disconnect bit */
2418        __orr32(hsotg->regs + DCTL, DCTL_SFTDISCON);
2419}
2420
2421void s3c_hsotg_core_connect(struct dwc2_hsotg *hsotg)
2422{
2423        /* remove the soft-disconnect and let's go */
2424        __bic32(hsotg->regs + DCTL, DCTL_SFTDISCON);
2425}
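
/*
 * Illustrative sketch only, not part of this driver: soft connect and
 * disconnect are just the DCTL_SFTDISCON bit. The usual ordering, as used
 * by the pullup handler later in this file, is to (re)initialise the core
 * while still disconnected and only then clear soft-disconnect so the host
 * sees a clean attach. Hypothetical helper, assumed to run under the
 * driver's spinlock:
 */
#if 0
static void example_soft_connect_sequence(struct dwc2_hsotg *hsotg)
{
        /* program the core while the host still sees us as detached */
        s3c_hsotg_core_init_disconnected(hsotg, false);

        /* now remove soft-disconnect so enumeration can start */
        s3c_hsotg_core_connect(hsotg);
}
#endif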
2426
2427/**
2428 * s3c_hsotg_irq - handle device interrupt
2429 * @irq: The IRQ number triggered
2430 * @pw: The private data passed when the handler was registered.
2431 */
2432static irqreturn_t s3c_hsotg_irq(int irq, void *pw)
2433{
2434        struct dwc2_hsotg *hsotg = pw;
2435        int retry_count = 8;
2436        u32 gintsts;
2437        u32 gintmsk;
2438
2439        spin_lock(&hsotg->lock);
2440irq_retry:
2441        gintsts = readl(hsotg->regs + GINTSTS);
2442        gintmsk = readl(hsotg->regs + GINTMSK);
2443
2444        dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
2445                __func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);
2446
2447        gintsts &= gintmsk;
2448
2449        if (gintsts & GINTSTS_ENUMDONE) {
2450                writel(GINTSTS_ENUMDONE, hsotg->regs + GINTSTS);
2451
2452                s3c_hsotg_irq_enumdone(hsotg);
2453        }
2454
2455        if (gintsts & (GINTSTS_OEPINT | GINTSTS_IEPINT)) {
2456                u32 daint = readl(hsotg->regs + DAINT);
2457                u32 daintmsk = readl(hsotg->regs + DAINTMSK);
2458                u32 daint_out, daint_in;
2459                int ep;
2460
2461                daint &= daintmsk;
2462                daint_out = daint >> DAINT_OUTEP_SHIFT;
2463                daint_in = daint & ~(daint_out << DAINT_OUTEP_SHIFT);
2464
2465                dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);
2466
2467                for (ep = 0; ep < hsotg->num_of_eps && daint_out;
2468                                                ep++, daint_out >>= 1) {
2469                        if (daint_out & 1)
2470                                s3c_hsotg_epint(hsotg, ep, 0);
2471                }
2472
2473                for (ep = 0; ep < hsotg->num_of_eps  && daint_in;
2474                                                ep++, daint_in >>= 1) {
2475                        if (daint_in & 1)
2476                                s3c_hsotg_epint(hsotg, ep, 1);
2477                }
2478        }
2479
2480        if (gintsts & GINTSTS_USBRST) {
2481
2482                u32 usb_status = readl(hsotg->regs + GOTGCTL);
2483
2484                dev_dbg(hsotg->dev, "%s: USBRst\n", __func__);
2485                dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
2486                        readl(hsotg->regs + GNPTXSTS));
2487
2488                writel(GINTSTS_USBRST, hsotg->regs + GINTSTS);
2489
2490                /* Report disconnection if it is not already done. */
2491                s3c_hsotg_disconnect(hsotg);
2492
2493                if (usb_status & GOTGCTL_BSESVLD) {
2494                        if (time_after(jiffies, hsotg->last_rst +
2495                                       msecs_to_jiffies(200))) {
2496
2497                                kill_all_requests(hsotg, hsotg->eps_out[0],
2498                                                          -ECONNRESET);
2499
2500                                s3c_hsotg_core_init_disconnected(hsotg, true);
2501                        }
2502                }
2503        }
2504
2505        /* check both FIFOs */
2506
2507        if (gintsts & GINTSTS_NPTXFEMP) {
2508                dev_dbg(hsotg->dev, "NPTxFEmp\n");
2509
2510                /*
2511                 * Disable the interrupt to stop it happening again
2512                 * unless one of these endpoint routines decides that
2513                 * it needs re-enabling
2514                 */
2515
2516                s3c_hsotg_disable_gsint(hsotg, GINTSTS_NPTXFEMP);
2517                s3c_hsotg_irq_fifoempty(hsotg, false);
2518        }
2519
2520        if (gintsts & GINTSTS_PTXFEMP) {
2521                dev_dbg(hsotg->dev, "PTxFEmp\n");
2522
2523                /* See note in GINTSTS_NPTxFEmp */
2524
2525                s3c_hsotg_disable_gsint(hsotg, GINTSTS_PTXFEMP);
2526                s3c_hsotg_irq_fifoempty(hsotg, true);
2527        }
2528
2529        if (gintsts & GINTSTS_RXFLVL) {
2530                /*
2531                 * note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
2532                 * we need to retry s3c_hsotg_handle_rx if this is still
2533                 * set.
2534                 */
2535
2536                s3c_hsotg_handle_rx(hsotg);
2537        }
2538
2539        if (gintsts & GINTSTS_ERLYSUSP) {
2540                dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n");
2541                writel(GINTSTS_ERLYSUSP, hsotg->regs + GINTSTS);
2542        }
2543
2544        /*
2545         * these next two seem to crop up occasionally causing the core
2546         * to shut down the USB transfer, so try clearing them and logging
2547         * the occurrence.
2548         */
2549
2550        if (gintsts & GINTSTS_GOUTNAKEFF) {
2551                dev_info(hsotg->dev, "GOUTNakEff triggered\n");
2552
2553                writel(DCTL_CGOUTNAK, hsotg->regs + DCTL);
2554
2555                s3c_hsotg_dump(hsotg);
2556        }
2557
2558        if (gintsts & GINTSTS_GINNAKEFF) {
2559                dev_info(hsotg->dev, "GINNakEff triggered\n");
2560
2561                writel(DCTL_CGNPINNAK, hsotg->regs + DCTL);
2562
2563                s3c_hsotg_dump(hsotg);
2564        }
2565
2566        /*
2567         * if we've had fifo events, we should try and go around the
2568         * loop again to see if there's any point in returning yet.
2569         */
2570
2571        if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
2572                goto irq_retry;
2573
2574        spin_unlock(&hsotg->lock);
2575
2576        return IRQ_HANDLED;
2577}
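
/*
 * Illustrative sketch only, not part of this driver: in DAINT the OUT
 * endpoint bits live in the upper half-word and the IN endpoint bits in the
 * lower half, which is what the shift and mask in the handler above
 * separate. For example, DAINT = 0x00030001 splits into OUT endpoints 0 and
 * 1 plus IN endpoint 0. Hypothetical helper:
 */
#if 0
static void example_split_daint(u32 daint, u32 *out_bits, u32 *in_bits)
{
        *out_bits = daint >> DAINT_OUTEP_SHIFT;
        *in_bits = daint & ~(*out_bits << DAINT_OUTEP_SHIFT);
}
#endif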
2578
2579/**
2580 * s3c_hsotg_ep_enable - enable the given endpoint
2581 * @ep: The USB endpoint to configure
2582 * @desc: The USB endpoint descriptor to configure with.
2583 *
2584 * This is called from the USB gadget code's usb_ep_enable().
2585 */
2586static int s3c_hsotg_ep_enable(struct usb_ep *ep,
2587                               const struct usb_endpoint_descriptor *desc)
2588{
2589        struct s3c_hsotg_ep *hs_ep = our_ep(ep);
2590        struct dwc2_hsotg *hsotg = hs_ep->parent;
2591        unsigned long flags;
2592        unsigned int index = hs_ep->index;
2593        u32 epctrl_reg;
2594        u32 epctrl;
2595        u32 mps;
2596        unsigned int dir_in;
2597        unsigned int i, val, size;
2598        int ret = 0;
2599
2600        dev_dbg(hsotg->dev,
2601                "%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
2602                __func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
2603                desc->wMaxPacketSize, desc->bInterval);
2604
2605        /* not to be called for EP0 */
2606        WARN_ON(index == 0);
2607
2608        dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
2609        if (dir_in != hs_ep->dir_in) {
2610                dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
2611                return -EINVAL;
2612        }
2613
2614        mps = usb_endpoint_maxp(desc);
2615
2616        /* note, we handle this here instead of s3c_hsotg_set_ep_maxpacket */
2617
2618        epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
2619        epctrl = readl(hsotg->regs + epctrl_reg);
2620
2621        dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
2622                __func__, epctrl, epctrl_reg);
2623
2624        spin_lock_irqsave(&hsotg->lock, flags);
2625
2626        epctrl &= ~(DXEPCTL_EPTYPE_MASK | DXEPCTL_MPS_MASK);
2627        epctrl |= DXEPCTL_MPS(mps);
2628
2629        /*
2630         * mark the endpoint as active, otherwise the core may ignore
2631         * transactions entirely for this endpoint
2632         */
2633        epctrl |= DXEPCTL_USBACTEP;
2634
2635        /*
2636         * set the NAK status on the endpoint, otherwise we might try and
2637         * do something with data for which we do not yet have a request,
2638         * since the RXFIFO will take data for an endpoint even if the
2639         * size register hasn't been set.
2640         */
2641
2642        epctrl |= DXEPCTL_SNAK;
2643
2644        /* update the endpoint state */
2645        s3c_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, dir_in);
2646
2647        /* default, set to non-periodic */
2648        hs_ep->isochronous = 0;
2649        hs_ep->periodic = 0;
2650        hs_ep->halted = 0;
2651        hs_ep->interval = desc->bInterval;
2652
2653        if (hs_ep->interval > 1 && hs_ep->mc > 1)
2654                dev_err(hsotg->dev, "MC > 1 when interval is not 1\n");
2655
2656        switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
2657        case USB_ENDPOINT_XFER_ISOC:
2658                epctrl |= DXEPCTL_EPTYPE_ISO;
2659                epctrl |= DXEPCTL_SETEVENFR;
2660                hs_ep->isochronous = 1;
2661                if (dir_in)
2662                        hs_ep->periodic = 1;
2663                break;
2664
2665        case USB_ENDPOINT_XFER_BULK:
2666                epctrl |= DXEPCTL_EPTYPE_BULK;
2667                break;
2668
2669        case USB_ENDPOINT_XFER_INT:
2670                if (dir_in)
2671                        hs_ep->periodic = 1;
2672
2673                epctrl |= DXEPCTL_EPTYPE_INTERRUPT;
2674                break;
2675
2676        case USB_ENDPOINT_XFER_CONTROL:
2677                epctrl |= DXEPCTL_EPTYPE_CONTROL;
2678                break;
2679        }
2680
2681        /* If fifo is already allocated for this ep */
2682        if (hs_ep->fifo_index) {
2683                size =  hs_ep->ep.maxpacket * hs_ep->mc;
2684                /* If bigger fifo is required deallocate current one */
2685                if (size > hs_ep->fifo_size) {
2686                        hsotg->fifo_map &= ~(1 << hs_ep->fifo_index);
2687                        hs_ep->fifo_index = 0;
2688                        hs_ep->fifo_size = 0;
2689                }
2690        }
2691
2692        /*
2693         * if the hardware has dedicated fifos, we must give each IN EP
2694         * a unique tx-fifo even if it is non-periodic.
2695         */
2696        if (dir_in && hsotg->dedicated_fifos && !hs_ep->fifo_index) {
2697                u32 fifo_index = 0;
2698                u32 fifo_size = UINT_MAX;
2699                size = hs_ep->ep.maxpacket*hs_ep->mc;
2700                for (i = 1; i < hsotg->num_of_eps; ++i) {
2701                        if (hsotg->fifo_map & (1<<i))
2702                                continue;
2703                        val = readl(hsotg->regs + DPTXFSIZN(i));
2704                        val = (val >> FIFOSIZE_DEPTH_SHIFT)*4;
2705                        if (val < size)
2706                                continue;
2707                        /* Search for smallest acceptable fifo */
2708                        if (val < fifo_size) {
2709                                fifo_size = val;
2710                                fifo_index = i;
2711                        }
2712                }
2713                if (!fifo_index) {
2714                        dev_err(hsotg->dev,
2715                                "%s: No suitable fifo found\n", __func__);
2716                        ret = -ENOMEM;
2717                        goto error;
2718                }
2719                hsotg->fifo_map |= 1 << fifo_index;
2720                epctrl |= DXEPCTL_TXFNUM(fifo_index);
2721                hs_ep->fifo_index = fifo_index;
2722                hs_ep->fifo_size = fifo_size;
2723        }
2724
2725        /* for non control endpoints, set PID to D0 */
2726        if (index)
2727                epctrl |= DXEPCTL_SETD0PID;
2728
2729        dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
2730                __func__, epctrl);
2731
2732        writel(epctrl, hsotg->regs + epctrl_reg);
2733        dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
2734                __func__, readl(hsotg->regs + epctrl_reg));
2735
2736        /* enable the endpoint interrupt */
2737        s3c_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
2738
2739error:
2740        spin_unlock_irqrestore(&hsotg->lock, flags);
2741        return ret;
2742}
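
/*
 * Illustrative sketch only, not part of this driver: the dedicated-FIFO
 * branch above is a best-fit search; it skips FIFOs already claimed in
 * fifo_map, converts each DPTXFSIZN depth from words to bytes and keeps the
 * smallest FIFO that still holds maxpacket * mc bytes. The same search as a
 * hypothetical helper returning the chosen index (0 if none fits):
 */
#if 0
static u32 example_find_best_fit_txfifo(struct dwc2_hsotg *hsotg,
                                        unsigned int needed_bytes)
{
        u32 best_index = 0;
        u32 best_size = UINT_MAX;
        unsigned int i;
        u32 bytes;

        for (i = 1; i < hsotg->num_of_eps; ++i) {
                if (hsotg->fifo_map & (1 << i))
                        continue;       /* already assigned to another EP */

                bytes = (readl(hsotg->regs + DPTXFSIZN(i)) >>
                         FIFOSIZE_DEPTH_SHIFT) * 4;
                if (bytes < needed_bytes)
                        continue;       /* too small for this endpoint */

                if (bytes < best_size) {
                        best_size = bytes;
                        best_index = i;
                }
        }

        return best_index;
}
#endif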
2743
2744/**
2745 * s3c_hsotg_ep_disable_force - disable given endpoint
2746 * @ep: The endpoint to disable.
2747 * @force: Currently unused.
2747 */
2748static int s3c_hsotg_ep_disable_force(struct usb_ep *ep, bool force)
2749{
2750        struct s3c_hsotg_ep *hs_ep = our_ep(ep);
2751        struct dwc2_hsotg *hsotg = hs_ep->parent;
2752        int dir_in = hs_ep->dir_in;
2753        int index = hs_ep->index;
2754        unsigned long flags;
2755        u32 epctrl_reg;
2756        u32 ctrl;
2757
2758        dev_dbg(hsotg->dev, "%s(ep %p)\n", __func__, ep);
2759
2760        if (ep == &hsotg->eps_out[0]->ep) {
2761                dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
2762                return -EINVAL;
2763        }
2764
2765        epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
2766
2767        spin_lock_irqsave(&hsotg->lock, flags);
2768
2769        hsotg->fifo_map &= ~(1<<hs_ep->fifo_index);
2770        hs_ep->fifo_index = 0;
2771        hs_ep->fifo_size = 0;
2772
2773        ctrl = readl(hsotg->regs + epctrl_reg);
2774        ctrl &= ~DXEPCTL_EPENA;
2775        ctrl &= ~DXEPCTL_USBACTEP;
2776        ctrl |= DXEPCTL_SNAK;
2777
2778        dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
2779        writel(ctrl, hsotg->regs + epctrl_reg);
2780
2781        /* disable endpoint interrupts */
2782        s3c_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);
2783
2784        /* terminate all requests with shutdown */
2785        kill_all_requests(hsotg, hs_ep, -ESHUTDOWN);
2786
2787        spin_unlock_irqrestore(&hsotg->lock, flags);
2788        return 0;
2789}
2790
2791static int s3c_hsotg_ep_disable(struct usb_ep *ep)
2792{
2793        return s3c_hsotg_ep_disable_force(ep, false);
2794}
2795/**
2796 * on_list - check request is on the given endpoint
2797 * @ep: The endpoint to check.
2798 * @test: The request to test if it is on the endpoint.
2799 */
2800static bool on_list(struct s3c_hsotg_ep *ep, struct s3c_hsotg_req *test)
2801{
2802        struct s3c_hsotg_req *req, *treq;
2803
2804        list_for_each_entry_safe(req, treq, &ep->queue, queue) {
2805                if (req == test)
2806                        return true;
2807        }
2808
2809        return false;
2810}
2811
2812/**
2813 * s3c_hsotg_ep_dequeue - remove a request from an endpoint's queue
2814 * @ep: The endpoint the request is queued on.
2815 * @req: The request to be removed.
2816 */
2817static int s3c_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
2818{
2819        struct s3c_hsotg_req *hs_req = our_req(req);
2820        struct s3c_hsotg_ep *hs_ep = our_ep(ep);
2821        struct dwc2_hsotg *hs = hs_ep->parent;
2822        unsigned long flags;
2823
2824        dev_dbg(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);
2825
2826        spin_lock_irqsave(&hs->lock, flags);
2827
2828        if (!on_list(hs_ep, hs_req)) {
2829                spin_unlock_irqrestore(&hs->lock, flags);
2830                return -EINVAL;
2831        }
2832
2833        s3c_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
2834        spin_unlock_irqrestore(&hs->lock, flags);
2835
2836        return 0;
2837}
2838
2839/**
2840 * s3c_hsotg_ep_sethalt - set halt on a given endpoint
2841 * @ep: The endpoint to set halt.
2842 * @value: Set or unset the halt.
2843 */
2844static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value)
2845{
2846        struct s3c_hsotg_ep *hs_ep = our_ep(ep);
2847        struct dwc2_hsotg *hs = hs_ep->parent;
2848        int index = hs_ep->index;
2849        u32 epreg;
2850        u32 epctl;
2851        u32 xfertype;
2852
2853        dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);
2854
2855        if (index == 0) {
2856                if (value)
2857                        s3c_hsotg_stall_ep0(hs);
2858                else
2859                        dev_warn(hs->dev,
2860                                 "%s: can't clear halt on ep0\n", __func__);
2861                return 0;
2862        }
2863
2864        if (hs_ep->dir_in) {
2865                epreg = DIEPCTL(index);
2866                epctl = readl(hs->regs + epreg);
2867
2868                if (value) {
2869                        epctl |= DXEPCTL_STALL | DXEPCTL_SNAK;
2870                        if (epctl & DXEPCTL_EPENA)
2871                                epctl |= DXEPCTL_EPDIS;
2872                } else {
2873                        epctl &= ~DXEPCTL_STALL;
2874                        xfertype = epctl & DXEPCTL_EPTYPE_MASK;
2875                        if (xfertype == DXEPCTL_EPTYPE_BULK ||
2876                                xfertype == DXEPCTL_EPTYPE_INTERRUPT)
2877                                        epctl |= DXEPCTL_SETD0PID;
2878                }
2879                writel(epctl, hs->regs + epreg);
2880        } else {
2881
2882                epreg = DOEPCTL(index);
2883                epctl = readl(hs->regs + epreg);
2884
2885                if (value)
2886                        epctl |= DXEPCTL_STALL;
2887                else {
2888                        epctl &= ~DXEPCTL_STALL;
2889                        xfertype = epctl & DXEPCTL_EPTYPE_MASK;
2890                        if (xfertype == DXEPCTL_EPTYPE_BULK ||
2891                                xfertype == DXEPCTL_EPTYPE_INTERRUPT)
2892                                        epctl |= DXEPCTL_SETD0PID;
2893                }
2894                writel(epctl, hs->regs + epreg);
2895        }
2896
2897        hs_ep->halted = value;
2898
2899        return 0;
2900}
2901
2902/**
2903 * s3c_hsotg_ep_sethalt_lock - set halt on a given endpoint with lock held
2904 * @ep: The endpoint to set halt.
2905 * @value: Set or unset the halt.
2906 */
2907static int s3c_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
2908{
2909        struct s3c_hsotg_ep *hs_ep = our_ep(ep);
2910        struct dwc2_hsotg *hs = hs_ep->parent;
2911        unsigned long flags = 0;
2912        int ret = 0;
2913
2914        spin_lock_irqsave(&hs->lock, flags);
2915        ret = s3c_hsotg_ep_sethalt(ep, value);
2916        spin_unlock_irqrestore(&hs->lock, flags);
2917
2918        return ret;
2919}
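
/*
 * Illustrative sketch only, not part of this driver: function drivers do
 * not call s3c_hsotg_ep_sethalt() directly; they go through the gadget
 * core's usb_ep_set_halt()/usb_ep_clear_halt(), which dispatch via the
 * ep_ops table below. A hypothetical caller:
 */
#if 0
static void example_stall_then_clear(struct usb_ep *ep)
{
        usb_ep_set_halt(ep);    /* reaches s3c_hsotg_ep_sethalt_lock(ep, 1) */
        /* ... later, once the condition has been handled ... */
        usb_ep_clear_halt(ep);  /* reaches s3c_hsotg_ep_sethalt_lock(ep, 0) */
}
#endif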
2920
2921static struct usb_ep_ops s3c_hsotg_ep_ops = {
2922        .enable         = s3c_hsotg_ep_enable,
2923        .disable        = s3c_hsotg_ep_disable,
2924        .alloc_request  = s3c_hsotg_ep_alloc_request,
2925        .free_request   = s3c_hsotg_ep_free_request,
2926        .queue          = s3c_hsotg_ep_queue_lock,
2927        .dequeue        = s3c_hsotg_ep_dequeue,
2928        .set_halt       = s3c_hsotg_ep_sethalt_lock,
2929        /* note, we don't believe there is any need for the fifo routines */
2930};
2931
2932/**
2933 * s3c_hsotg_phy_enable - enable platform phy dev
2934 * @hsotg: The driver state
2935 *
2936 * A wrapper for platform code responsible for controlling
2937 * low-level USB code
2938 */
2939static void s3c_hsotg_phy_enable(struct dwc2_hsotg *hsotg)
2940{
2941        struct platform_device *pdev = to_platform_device(hsotg->dev);
2942
2943        dev_dbg(hsotg->dev, "pdev 0x%p\n", pdev);
2944
2945        if (hsotg->uphy)
2946                usb_phy_init(hsotg->uphy);
2947        else if (hsotg->plat && hsotg->plat->phy_init)
2948                hsotg->plat->phy_init(pdev, hsotg->plat->phy_type);
2949        else {
2950                phy_init(hsotg->phy);
2951                phy_power_on(hsotg->phy);
2952        }
2953}
2954
2955/**
2956 * s3c_hsotg_phy_disable - disable platform phy dev
2957 * @hsotg: The driver state
2958 *
2959 * A wrapper for platform code responsible for controlling
2960 * low-level USB code
2961 */
2962static void s3c_hsotg_phy_disable(struct dwc2_hsotg *hsotg)
2963{
2964        struct platform_device *pdev = to_platform_device(hsotg->dev);
2965
2966        if (hsotg->uphy)
2967                usb_phy_shutdown(hsotg->uphy);
2968        else if (hsotg->plat && hsotg->plat->phy_exit)
2969                hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type);
2970        else {
2971                phy_power_off(hsotg->phy);
2972                phy_exit(hsotg->phy);
2973        }
2974}
2975
2976/**
2977 * s3c_hsotg_init - initialize the usb core
2978 * @hsotg: The driver state
2979 */
2980static void s3c_hsotg_init(struct dwc2_hsotg *hsotg)
2981{
2982        u32 trdtim;
2983        /* unmask subset of endpoint interrupts */
2984
2985        writel(DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
2986                DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK,
2987                hsotg->regs + DIEPMSK);
2988
2989        writel(DOEPMSK_SETUPMSK | DOEPMSK_AHBERRMSK |
2990                DOEPMSK_EPDISBLDMSK | DOEPMSK_XFERCOMPLMSK,
2991                hsotg->regs + DOEPMSK);
2992
2993        writel(0, hsotg->regs + DAINTMSK);
2994
2995        /* Be in disconnected state until gadget is registered */
2996        __orr32(hsotg->regs + DCTL, DCTL_SFTDISCON);
2997
2998        /* setup fifos */
2999
3000        dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
3001                readl(hsotg->regs + GRXFSIZ),
3002                readl(hsotg->regs + GNPTXFSIZ));
3003
3004        s3c_hsotg_init_fifo(hsotg);
3005
3006        /* set the PLL on, remove the HNP/SRP and set the PHY */
3007        trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
3008        writel(hsotg->phyif | GUSBCFG_TOUTCAL(7) |
3009                (trdtim << GUSBCFG_USBTRDTIM_SHIFT),
3010                hsotg->regs + GUSBCFG);
3011
3012        if (using_dma(hsotg))
3013                __orr32(hsotg->regs + GAHBCFG, GAHBCFG_DMA_EN);
3014}
3015
3016/**
3017 * s3c_hsotg_udc_start - prepare the udc for work
3018 * @gadget: The usb gadget state
3019 * @driver: The usb gadget driver
3020 *
3021 * Perform the initialization needed to prepare the udc device and
3022 * driver for use.
3023 */
3024static int s3c_hsotg_udc_start(struct usb_gadget *gadget,
3025                           struct usb_gadget_driver *driver)
3026{
3027        struct dwc2_hsotg *hsotg = to_hsotg(gadget);
3028        unsigned long flags;
3029        int ret;
3030
3031        if (!hsotg) {
3032                pr_err("%s: called with no device\n", __func__);
3033                return -ENODEV;
3034        }
3035
3036        if (!driver) {
3037                dev_err(hsotg->dev, "%s: no driver\n", __func__);
3038                return -EINVAL;
3039        }
3040
3041        if (driver->max_speed < USB_SPEED_FULL)
3042                dev_err(hsotg->dev, "%s: bad speed\n", __func__);
3043
3044        if (!driver->setup) {
3045                dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
3046                return -EINVAL;
3047        }
3048
3049        mutex_lock(&hsotg->init_mutex);
3050        WARN_ON(hsotg->driver);
3051
3052        driver->driver.bus = NULL;
3053        hsotg->driver = driver;
3054        hsotg->gadget.dev.of_node = hsotg->dev->of_node;
3055        hsotg->gadget.speed = USB_SPEED_UNKNOWN;
3056
3057        clk_enable(hsotg->clk);
3058
3059        ret = regulator_bulk_enable(ARRAY_SIZE(hsotg->supplies),
3060                                    hsotg->supplies);
3061        if (ret) {
3062                dev_err(hsotg->dev, "failed to enable supplies: %d\n", ret);
3063                goto err;
3064        }
3065
3066        s3c_hsotg_phy_enable(hsotg);
3067        if (!IS_ERR_OR_NULL(hsotg->uphy))
3068                otg_set_peripheral(hsotg->uphy->otg, &hsotg->gadget);
3069
3070        spin_lock_irqsave(&hsotg->lock, flags);
3071        s3c_hsotg_init(hsotg);
3072        s3c_hsotg_core_init_disconnected(hsotg, false);
3073        hsotg->enabled = 0;
3074        spin_unlock_irqrestore(&hsotg->lock, flags);
3075
3076        dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);
3077
3078        mutex_unlock(&hsotg->init_mutex);
3079
3080        return 0;
3081
err:
        hsotg->driver = NULL;
        clk_disable(hsotg->clk);
        mutex_unlock(&hsotg->init_mutex);
        return ret;
3086}
3087
/**
 * s3c_hsotg_udc_stop - stop the udc
 * @gadget: The usb gadget state
 *
 * Stop the udc hw block and stay tuned for future transmissions
 */
3095static int s3c_hsotg_udc_stop(struct usb_gadget *gadget)
3096{
3097        struct dwc2_hsotg *hsotg = to_hsotg(gadget);
3098        unsigned long flags = 0;
3099        int ep;
3100
3101        if (!hsotg)
3102                return -ENODEV;
3103
3104        mutex_lock(&hsotg->init_mutex);
3105
3106        /* all endpoints should be shutdown */
3107        for (ep = 1; ep < hsotg->num_of_eps; ep++) {
3108                if (hsotg->eps_in[ep])
3109                        s3c_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
3110                if (hsotg->eps_out[ep])
3111                        s3c_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
3112        }
3113
3114        spin_lock_irqsave(&hsotg->lock, flags);
3115
3116        hsotg->driver = NULL;
3117        hsotg->gadget.speed = USB_SPEED_UNKNOWN;
3118        hsotg->enabled = 0;
3119
3120        spin_unlock_irqrestore(&hsotg->lock, flags);
3121
3122        if (!IS_ERR_OR_NULL(hsotg->uphy))
3123                otg_set_peripheral(hsotg->uphy->otg, NULL);
3124        s3c_hsotg_phy_disable(hsotg);
3125
3126        regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
3127
3128        clk_disable(hsotg->clk);
3129
3130        mutex_unlock(&hsotg->init_mutex);
3131
3132        return 0;
3133}
3134
3135/**
3136 * s3c_hsotg_gadget_getframe - read the frame number
3137 * @gadget: The usb gadget state
3138 *
3139 * Read the {micro} frame number
3140 */
3141static int s3c_hsotg_gadget_getframe(struct usb_gadget *gadget)
3142{
3143        return s3c_hsotg_read_frameno(to_hsotg(gadget));
3144}
3145
3146/**
3147 * s3c_hsotg_pullup - connect/disconnect the USB PHY
3148 * @gadget: The usb gadget state
 * @is_on: The desired state of the pullup
3150 *
3151 * Connect/Disconnect the USB PHY pullup
3152 */
3153static int s3c_hsotg_pullup(struct usb_gadget *gadget, int is_on)
3154{
3155        struct dwc2_hsotg *hsotg = to_hsotg(gadget);
3156        unsigned long flags = 0;
3157
3158        dev_dbg(hsotg->dev, "%s: is_on: %d\n", __func__, is_on);
3159
3160        mutex_lock(&hsotg->init_mutex);
3161        spin_lock_irqsave(&hsotg->lock, flags);
3162        if (is_on) {
3163                clk_enable(hsotg->clk);
3164                hsotg->enabled = 1;
3165                s3c_hsotg_core_init_disconnected(hsotg, false);
3166                s3c_hsotg_core_connect(hsotg);
3167        } else {
3168                s3c_hsotg_core_disconnect(hsotg);
3169                s3c_hsotg_disconnect(hsotg);
3170                hsotg->enabled = 0;
3171                clk_disable(hsotg->clk);
3172        }
3173
3174        hsotg->gadget.speed = USB_SPEED_UNKNOWN;
3175        spin_unlock_irqrestore(&hsotg->lock, flags);
3176        mutex_unlock(&hsotg->init_mutex);
3177
3178        return 0;
3179}
3180
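/**
 * s3c_hsotg_vbus_session - report a change in VBUS session state
 * @gadget: The usb gadget state
 * @is_active: Non-zero if a VBUS session is now active
 *
 * Typically called from transceiver or board code when VBUS appears or
 * disappears. On activation any pending ep0 requests are killed and the
 * core is reinitialised (and reconnected if the gadget is enabled); on
 * deactivation the core is disconnected.
 */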
3181static int s3c_hsotg_vbus_session(struct usb_gadget *gadget, int is_active)
3182{
3183        struct dwc2_hsotg *hsotg = to_hsotg(gadget);
3184        unsigned long flags;
3185
3186        dev_dbg(hsotg->dev, "%s: is_active: %d\n", __func__, is_active);
3187        spin_lock_irqsave(&hsotg->lock, flags);
3188
3189        if (is_active) {
3190                /* Kill any ep0 requests as controller will be reinitialized */
3191                kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
3192                s3c_hsotg_core_init_disconnected(hsotg, false);
3193                if (hsotg->enabled)
3194                        s3c_hsotg_core_connect(hsotg);
3195        } else {
3196                s3c_hsotg_core_disconnect(hsotg);
3197                s3c_hsotg_disconnect(hsotg);
3198        }
3199
3200        spin_unlock_irqrestore(&hsotg->lock, flags);
3201        return 0;
3202}
3203
3204/**
3205 * s3c_hsotg_vbus_draw - report bMaxPower field
3206 * @gadget: The usb gadget state
 * @mA: The maximum current to draw, in milliamps
 *
 * Report to the PHY how much current the device may draw.
3210 */
3211static int s3c_hsotg_vbus_draw(struct usb_gadget *gadget, unsigned mA)
3212{
3213        struct dwc2_hsotg *hsotg = to_hsotg(gadget);
3214
3215        if (IS_ERR_OR_NULL(hsotg->uphy))
3216                return -ENOTSUPP;
3217        return usb_phy_set_power(hsotg->uphy, mA);
3218}
3219
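/*
 * Operations handed to the UDC core (via usb_add_gadget_udc() in
 * dwc2_gadget_init() below); the gadget layer invokes them for function
 * driver binding, frame number queries, pullup control and VBUS events.
 */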
static const struct usb_gadget_ops s3c_hsotg_gadget_ops = {
        .get_frame      = s3c_hsotg_gadget_getframe,
        .udc_start      = s3c_hsotg_udc_start,
        .udc_stop       = s3c_hsotg_udc_stop,
        .pullup         = s3c_hsotg_pullup,
        .vbus_session   = s3c_hsotg_vbus_session,
        .vbus_draw      = s3c_hsotg_vbus_draw,
};
3228
3229/**
3230 * s3c_hsotg_initep - initialise a single endpoint
3231 * @hsotg: The device state.
3232 * @hs_ep: The endpoint to be initialised.
 * @epnum: The endpoint number
 * @dir_in: True if the endpoint is an IN endpoint
3234 *
3235 * Initialise the given endpoint (as part of the probe and device state
3236 * creation) to give to the gadget driver. Setup the endpoint name, any
3237 * direction information and other state that may be required.
3238 */
3239static void s3c_hsotg_initep(struct dwc2_hsotg *hsotg,
3240                                       struct s3c_hsotg_ep *hs_ep,
3241                                       int epnum,
3242                                       bool dir_in)
3243{
3244        char *dir;
3245
3246        if (epnum == 0)
3247                dir = "";
3248        else if (dir_in)
3249                dir = "in";
3250        else
3251                dir = "out";
3252
3253        hs_ep->dir_in = dir_in;
3254        hs_ep->index = epnum;
3255
3256        snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);
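        /* this produces names of the form "ep0", "ep1in", "ep2out", ... */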
3257
3258        INIT_LIST_HEAD(&hs_ep->queue);
3259        INIT_LIST_HEAD(&hs_ep->ep.ep_list);
3260
3261        /* add to the list of endpoints known by the gadget driver */
3262        if (epnum)
3263                list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);
3264
3265        hs_ep->parent = hsotg;
3266        hs_ep->ep.name = hs_ep->name;
3267        usb_ep_set_maxpacket_limit(&hs_ep->ep, epnum ? 1024 : EP0_MPS_LIMIT);
3268        hs_ep->ep.ops = &s3c_hsotg_ep_ops;
3269
3270        /*
3271         * if we're using dma, we need to set the next-endpoint pointer
3272         * to be something valid.
3273         */
3274
3275        if (using_dma(hsotg)) {
3276                u32 next = DXEPCTL_NEXTEP((epnum + 1) % 15);
3277                if (dir_in)
3278                        writel(next, hsotg->regs + DIEPCTL(epnum));
3279                else
3280                        writel(next, hsotg->regs + DOEPCTL(epnum));
3281        }
3282}
3283
3284/**
3285 * s3c_hsotg_hw_cfg - read HW configuration registers
 * @hsotg: The device state
3287 *
3288 * Read the USB core HW configuration registers
3289 */
3290static int s3c_hsotg_hw_cfg(struct dwc2_hsotg *hsotg)
3291{
3292        u32 cfg;
3293        u32 ep_type;
3294        u32 i;
3295
3296        /* check hardware configuration */
3297
3298        cfg = readl(hsotg->regs + GHWCFG2);
3299        hsotg->num_of_eps = (cfg >> GHWCFG2_NUM_DEV_EP_SHIFT) & 0xF;
3300        /* Add ep0 */
3301        hsotg->num_of_eps++;
3302
3303        hsotg->eps_in[0] = devm_kzalloc(hsotg->dev, sizeof(struct s3c_hsotg_ep),
3304                                                                GFP_KERNEL);
3305        if (!hsotg->eps_in[0])
3306                return -ENOMEM;
3307        /* Same s3c_hsotg_ep is used in both directions for ep0 */
3308        hsotg->eps_out[0] = hsotg->eps_in[0];
3309
3310        cfg = readl(hsotg->regs + GHWCFG1);
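        /*
         * GHWCFG1 holds a two bit direction field per endpoint; as decoded
         * below, 0 means bidirectional, 1 means IN only and 2 means OUT only.
         */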
3311        for (i = 1, cfg >>= 2; i < hsotg->num_of_eps; i++, cfg >>= 2) {
3312                ep_type = cfg & 3;
3313                /* Direction in or both */
3314                if (!(ep_type & 2)) {
3315                        hsotg->eps_in[i] = devm_kzalloc(hsotg->dev,
3316                                sizeof(struct s3c_hsotg_ep), GFP_KERNEL);
3317                        if (!hsotg->eps_in[i])
3318                                return -ENOMEM;
3319                }
3320                /* Direction out or both */
3321                if (!(ep_type & 1)) {
3322                        hsotg->eps_out[i] = devm_kzalloc(hsotg->dev,
3323                                sizeof(struct s3c_hsotg_ep), GFP_KERNEL);
3324                        if (!hsotg->eps_out[i])
3325                                return -ENOMEM;
3326                }
3327        }
3328
3329        cfg = readl(hsotg->regs + GHWCFG3);
3330        hsotg->fifo_mem = (cfg >> GHWCFG3_DFIFO_DEPTH_SHIFT);
3331
3332        cfg = readl(hsotg->regs + GHWCFG4);
3333        hsotg->dedicated_fifos = (cfg >> GHWCFG4_DED_FIFO_SHIFT) & 1;
3334
3335        dev_info(hsotg->dev, "EPs: %d, %s fifos, %d entries in SPRAM\n",
3336                 hsotg->num_of_eps,
3337                 hsotg->dedicated_fifos ? "dedicated" : "shared",
3338                 hsotg->fifo_mem);
3339        return 0;
3340}
3341
3342/**
3343 * s3c_hsotg_dump - dump state of the udc
 * @hsotg: The device state
3345 */
3346static void s3c_hsotg_dump(struct dwc2_hsotg *hsotg)
3347{
3348#ifdef DEBUG
3349        struct device *dev = hsotg->dev;
3350        void __iomem *regs = hsotg->regs;
3351        u32 val;
3352        int idx;
3353
        dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=0x%08x\n",
3355                 readl(regs + DCFG), readl(regs + DCTL),
3356                 readl(regs + DIEPMSK));
3357
3358        dev_info(dev, "GAHBCFG=0x%08x, GHWCFG1=0x%08x\n",
3359                 readl(regs + GAHBCFG), readl(regs + GHWCFG1));
3360
3361        dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
3362                 readl(regs + GRXFSIZ), readl(regs + GNPTXFSIZ));
3363
3364        /* show periodic fifo settings */
3365
3366        for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3367                val = readl(regs + DPTXFSIZN(idx));
3368                dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
3369                         val >> FIFOSIZE_DEPTH_SHIFT,
3370                         val & FIFOSIZE_STARTADDR_MASK);
3371        }
3372
3373        for (idx = 0; idx < hsotg->num_of_eps; idx++) {
3374                dev_info(dev,
3375                         "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
3376                         readl(regs + DIEPCTL(idx)),
3377                         readl(regs + DIEPTSIZ(idx)),
3378                         readl(regs + DIEPDMA(idx)));
3379
3380                val = readl(regs + DOEPCTL(idx));
3381                dev_info(dev,
3382                         "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
3383                         idx, readl(regs + DOEPCTL(idx)),
3384                         readl(regs + DOEPTSIZ(idx)),
3385                         readl(regs + DOEPDMA(idx)));
3386
3387        }
3388
        dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=0x%08x\n",
3390                 readl(regs + DVBUSDIS), readl(regs + DVBUSPULSE));
3391#endif
3392}
3393
/**
 * testmode_write - debugfs: change usb test mode
 * @file: The debugfs file being written to.
 * @ubuf: The userspace buffer holding the requested test mode.
 * @count: The number of bytes written.
 * @ppos: The file position (unused).
 *
 * This debugfs entry modifies the current usb test mode.
 */
static ssize_t testmode_write(struct file *file, const char __user *ubuf,
                              size_t count, loff_t *ppos)
3403{
3404        struct seq_file         *s = file->private_data;
3405        struct dwc2_hsotg       *hsotg = s->private;
3406        unsigned long           flags;
3407        u32                     testmode = 0;
        char                    buf[32];
        size_t                  len = min_t(size_t, sizeof(buf) - 1, count);

        if (copy_from_user(buf, ubuf, len))
                return -EFAULT;
        buf[len] = '\0';
3412
3413        if (!strncmp(buf, "test_j", 6))
3414                testmode = TEST_J;
3415        else if (!strncmp(buf, "test_k", 6))
3416                testmode = TEST_K;
3417        else if (!strncmp(buf, "test_se0_nak", 12))
3418                testmode = TEST_SE0_NAK;
3419        else if (!strncmp(buf, "test_packet", 11))
3420                testmode = TEST_PACKET;
3421        else if (!strncmp(buf, "test_force_enable", 17))
3422                testmode = TEST_FORCE_EN;
3423        else
3424                testmode = 0;
3425
3426        spin_lock_irqsave(&hsotg->lock, flags);
3427        s3c_hsotg_set_test_mode(hsotg, testmode);
3428        spin_unlock_irqrestore(&hsotg->lock, flags);
3429        return count;
3430}
3431
3432/**
3433 * testmode_show - debugfs: show usb test mode state
 * @s: The seq file to write to.
 * @unused: Unused parameter.
3436 *
3437 * This debugfs entry shows which usb test mode is currently enabled.
3438 */
3439static int testmode_show(struct seq_file *s, void *unused)
3440{
3441        struct dwc2_hsotg *hsotg = s->private;
3442        unsigned long flags;
3443        int dctl;
3444
3445        spin_lock_irqsave(&hsotg->lock, flags);
3446        dctl = readl(hsotg->regs + DCTL);
3447        dctl &= DCTL_TSTCTL_MASK;
3448        dctl >>= DCTL_TSTCTL_SHIFT;
3449        spin_unlock_irqrestore(&hsotg->lock, flags);
3450
3451        switch (dctl) {
3452        case 0:
3453                seq_puts(s, "no test\n");
3454                break;
3455        case TEST_J:
3456                seq_puts(s, "test_j\n");
3457                break;
3458        case TEST_K:
3459                seq_puts(s, "test_k\n");
3460                break;
3461        case TEST_SE0_NAK:
3462                seq_puts(s, "test_se0_nak\n");
3463                break;
3464        case TEST_PACKET:
3465                seq_puts(s, "test_packet\n");
3466                break;
3467        case TEST_FORCE_EN:
3468                seq_puts(s, "test_force_enable\n");
3469                break;
3470        default:
3471                seq_printf(s, "UNKNOWN %d\n", dctl);
3472        }
3473
3474        return 0;
3475}
3476
3477static int testmode_open(struct inode *inode, struct file *file)
3478{
3479        return single_open(file, testmode_show, inode->i_private);
3480}
3481
3482static const struct file_operations testmode_fops = {
3483        .owner          = THIS_MODULE,
3484        .open           = testmode_open,
3485        .write          = testmode_write,
3486        .read           = seq_read,
3487        .llseek         = seq_lseek,
3488        .release        = single_release,
3489};
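/*
 * Illustrative use of the "testmode" debugfs file from userspace (the exact
 * path depends on the device name and on where debugfs is mounted):
 *
 *      echo test_packet > /sys/kernel/debug/<dev>/testmode
 *      cat /sys/kernel/debug/<dev>/testmode
 */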
3490
3491/**
3492 * state_show - debugfs: show overall driver and device state.
3493 * @seq: The seq file to write to.
3494 * @v: Unused parameter.
3495 *
3496 * This debugfs entry shows the overall state of the hardware and
3497 * some general information about each of the endpoints available
3498 * to the system.
3499 */
3500static int state_show(struct seq_file *seq, void *v)
3501{
3502        struct dwc2_hsotg *hsotg = seq->private;
3503        void __iomem *regs = hsotg->regs;
3504        int idx;
3505
3506        seq_printf(seq, "DCFG=0x%08x, DCTL=0x%08x, DSTS=0x%08x\n",
3507                 readl(regs + DCFG),
3508                 readl(regs + DCTL),
3509                 readl(regs + DSTS));
3510
        seq_printf(seq, "DIEPMSK=0x%08x, DOEPMSK=0x%08x\n",
3512                   readl(regs + DIEPMSK), readl(regs + DOEPMSK));
3513
3514        seq_printf(seq, "GINTMSK=0x%08x, GINTSTS=0x%08x\n",
3515                   readl(regs + GINTMSK),
3516                   readl(regs + GINTSTS));
3517
3518        seq_printf(seq, "DAINTMSK=0x%08x, DAINT=0x%08x\n",
3519                   readl(regs + DAINTMSK),
3520                   readl(regs + DAINT));
3521
        seq_printf(seq, "GNPTXSTS=0x%08x, GRXSTSR=0x%08x\n",
3523                   readl(regs + GNPTXSTS),
3524                   readl(regs + GRXSTSR));
3525
3526        seq_puts(seq, "\nEndpoint status:\n");
3527
3528        for (idx = 0; idx < hsotg->num_of_eps; idx++) {
3529                u32 in, out;
3530
3531                in = readl(regs + DIEPCTL(idx));
3532                out = readl(regs + DOEPCTL(idx));
3533
3534                seq_printf(seq, "ep%d: DIEPCTL=0x%08x, DOEPCTL=0x%08x",
3535                           idx, in, out);
3536
3537                in = readl(regs + DIEPTSIZ(idx));
3538                out = readl(regs + DOEPTSIZ(idx));
3539
3540                seq_printf(seq, ", DIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x",
3541                           in, out);
3542
3543                seq_puts(seq, "\n");
3544        }
3545
3546        return 0;
3547}
3548
3549static int state_open(struct inode *inode, struct file *file)
3550{
3551        return single_open(file, state_show, inode->i_private);
3552}
3553
3554static const struct file_operations state_fops = {
3555        .owner          = THIS_MODULE,
3556        .open           = state_open,
3557        .read           = seq_read,
3558        .llseek         = seq_lseek,
3559        .release        = single_release,
3560};
3561
3562/**
3563 * fifo_show - debugfs: show the fifo information
3564 * @seq: The seq_file to write data to.
3565 * @v: Unused parameter.
3566 *
3567 * Show the FIFO information for the overall fifo and all the
3568 * periodic transmission FIFOs.
3569 */
3570static int fifo_show(struct seq_file *seq, void *v)
3571{
3572        struct dwc2_hsotg *hsotg = seq->private;
3573        void __iomem *regs = hsotg->regs;
3574        u32 val;
3575        int idx;
3576
3577        seq_puts(seq, "Non-periodic FIFOs:\n");
3578        seq_printf(seq, "RXFIFO: Size %d\n", readl(regs + GRXFSIZ));
3579
3580        val = readl(regs + GNPTXFSIZ);
3581        seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n",
3582                   val >> FIFOSIZE_DEPTH_SHIFT,
                   val & FIFOSIZE_STARTADDR_MASK);
3584
3585        seq_puts(seq, "\nPeriodic TXFIFOs:\n");
3586
3587        for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3588                val = readl(regs + DPTXFSIZN(idx));
3589
3590                seq_printf(seq, "\tDPTXFIFO%2d: Size %d, Start 0x%08x\n", idx,
3591                           val >> FIFOSIZE_DEPTH_SHIFT,
3592                           val & FIFOSIZE_STARTADDR_MASK);
3593        }
3594
3595        return 0;
3596}
3597
3598static int fifo_open(struct inode *inode, struct file *file)
3599{
3600        return single_open(file, fifo_show, inode->i_private);
3601}
3602
3603static const struct file_operations fifo_fops = {
3604        .owner          = THIS_MODULE,
3605        .open           = fifo_open,
3606        .read           = seq_read,
3607        .llseek         = seq_lseek,
3608        .release        = single_release,
3609};
3610
3611
3612static const char *decode_direction(int is_in)
3613{
3614        return is_in ? "in" : "out";
3615}
3616
3617/**
3618 * ep_show - debugfs: show the state of an endpoint.
3619 * @seq: The seq_file to write data to.
3620 * @v: Unused parameter.
3621 *
3622 * This debugfs entry shows the state of the given endpoint (one is
 * registered for each available endpoint).
3624 */
3625static int ep_show(struct seq_file *seq, void *v)
3626{
3627        struct s3c_hsotg_ep *ep = seq->private;
3628        struct dwc2_hsotg *hsotg = ep->parent;
3629        struct s3c_hsotg_req *req;
3630        void __iomem *regs = hsotg->regs;
3631        int index = ep->index;
3632        int show_limit = 15;
3633        unsigned long flags;
3634
        seq_printf(seq, "Endpoint index %d, named %s, dir %s:\n",
3636                   ep->index, ep->ep.name, decode_direction(ep->dir_in));
3637
3638        /* first show the register state */
3639
3640        seq_printf(seq, "\tDIEPCTL=0x%08x, DOEPCTL=0x%08x\n",
3641                   readl(regs + DIEPCTL(index)),
3642                   readl(regs + DOEPCTL(index)));
3643
3644        seq_printf(seq, "\tDIEPDMA=0x%08x, DOEPDMA=0x%08x\n",
3645                   readl(regs + DIEPDMA(index)),
3646                   readl(regs + DOEPDMA(index)));
3647
3648        seq_printf(seq, "\tDIEPINT=0x%08x, DOEPINT=0x%08x\n",
3649                   readl(regs + DIEPINT(index)),
3650                   readl(regs + DOEPINT(index)));
3651
3652        seq_printf(seq, "\tDIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x\n",
3653                   readl(regs + DIEPTSIZ(index)),
3654                   readl(regs + DOEPTSIZ(index)));
3655
3656        seq_puts(seq, "\n");
3657        seq_printf(seq, "mps %d\n", ep->ep.maxpacket);
3658        seq_printf(seq, "total_data=%ld\n", ep->total_data);
3659
3660        seq_printf(seq, "request list (%p,%p):\n",
3661                   ep->queue.next, ep->queue.prev);
3662
3663        spin_lock_irqsave(&hsotg->lock, flags);
3664
3665        list_for_each_entry(req, &ep->queue, queue) {
3666                if (--show_limit < 0) {
3667                        seq_puts(seq, "not showing more requests...\n");
3668                        break;
3669                }
3670
3671                seq_printf(seq, "%c req %p: %d bytes @%p, ",
3672                           req == ep->req ? '*' : ' ',
3673                           req, req->req.length, req->req.buf);
3674                seq_printf(seq, "%d done, res %d\n",
3675                           req->req.actual, req->req.status);
3676        }
3677
3678        spin_unlock_irqrestore(&hsotg->lock, flags);
3679
3680        return 0;
3681}
3682
3683static int ep_open(struct inode *inode, struct file *file)
3684{
3685        return single_open(file, ep_show, inode->i_private);
3686}
3687
3688static const struct file_operations ep_fops = {
3689        .owner          = THIS_MODULE,
3690        .open           = ep_open,
3691        .read           = seq_read,
3692        .llseek         = seq_lseek,
3693        .release        = single_release,
3694};
3695
3696/**
3697 * s3c_hsotg_create_debug - create debugfs directory and files
3698 * @hsotg: The driver state
3699 *
3700 * Create the debugfs files to allow the user to get information
3701 * about the state of the system. The directory name is created
3702 * with the same name as the device itself, in case we end up
3703 * with multiple blocks in future systems.
3704 */
3705static void s3c_hsotg_create_debug(struct dwc2_hsotg *hsotg)
3706{
3707        struct dentry *root;
3708        unsigned epidx;
3709
3710        root = debugfs_create_dir(dev_name(hsotg->dev), NULL);
3711        hsotg->debug_root = root;
3712        if (IS_ERR(root)) {
3713                dev_err(hsotg->dev, "cannot create debug root\n");
3714                return;
3715        }
3716
3717        /* create general state file */
3718
3719        hsotg->debug_file = debugfs_create_file("state", S_IRUGO, root,
3720                                                hsotg, &state_fops);
3721
3722        if (IS_ERR(hsotg->debug_file))
3723                dev_err(hsotg->dev, "%s: failed to create state\n", __func__);
3724
3725        hsotg->debug_testmode = debugfs_create_file("testmode",
3726                                        S_IRUGO | S_IWUSR, root,
3727                                        hsotg, &testmode_fops);
3728
3729        if (IS_ERR(hsotg->debug_testmode))
3730                dev_err(hsotg->dev, "%s: failed to create testmode\n",
3731                                __func__);
3732
3733        hsotg->debug_fifo = debugfs_create_file("fifo", S_IRUGO, root,
3734                                                hsotg, &fifo_fops);
3735
3736        if (IS_ERR(hsotg->debug_fifo))
3737                dev_err(hsotg->dev, "%s: failed to create fifo\n", __func__);
3738
3739        /* Create one file for each out endpoint */
3740        for (epidx = 0; epidx < hsotg->num_of_eps; epidx++) {
3741                struct s3c_hsotg_ep *ep;
3742
3743                ep = hsotg->eps_out[epidx];
3744                if (ep) {
3745                        ep->debugfs = debugfs_create_file(ep->name, S_IRUGO,
3746                                                          root, ep, &ep_fops);
3747
3748                        if (IS_ERR(ep->debugfs))
3749                                dev_err(hsotg->dev, "failed to create %s debug file\n",
3750                                        ep->name);
3751                }
3752        }
3753        /* Create one file for each in endpoint. EP0 is handled with out eps */
3754        for (epidx = 1; epidx < hsotg->num_of_eps; epidx++) {
3755                struct s3c_hsotg_ep *ep;
3756
3757                ep = hsotg->eps_in[epidx];
3758                if (ep) {
3759                        ep->debugfs = debugfs_create_file(ep->name, S_IRUGO,
3760                                                          root, ep, &ep_fops);
3761
3762                        if (IS_ERR(ep->debugfs))
3763                                dev_err(hsotg->dev, "failed to create %s debug file\n",
3764                                        ep->name);
3765                }
3766        }
3767}
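/*
 * With the files created above, the resulting layout looks roughly like
 * this (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *      /sys/kernel/debug/<dev>/state
 *      /sys/kernel/debug/<dev>/testmode
 *      /sys/kernel/debug/<dev>/fifo
 *      /sys/kernel/debug/<dev>/ep0
 *      /sys/kernel/debug/<dev>/ep1in
 *      /sys/kernel/debug/<dev>/ep1out
 *      ...
 */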
3768
3769/**
3770 * s3c_hsotg_delete_debug - cleanup debugfs entries
3771 * @hsotg: The driver state
3772 *
3773 * Cleanup (remove) the debugfs files for use on module exit.
3774 */
3775static void s3c_hsotg_delete_debug(struct dwc2_hsotg *hsotg)
3776{
3777        unsigned epidx;
3778
3779        for (epidx = 0; epidx < hsotg->num_of_eps; epidx++) {
3780                if (hsotg->eps_in[epidx])
3781                        debugfs_remove(hsotg->eps_in[epidx]->debugfs);
3782                if (hsotg->eps_out[epidx])
3783                        debugfs_remove(hsotg->eps_out[epidx]->debugfs);
3784        }
3785
3786        debugfs_remove(hsotg->debug_file);
3787        debugfs_remove(hsotg->debug_testmode);
3788        debugfs_remove(hsotg->debug_fifo);
3789        debugfs_remove(hsotg->debug_root);
3790}
3791
3792#ifdef CONFIG_OF
3793static void s3c_hsotg_of_probe(struct dwc2_hsotg *hsotg)
3794{
3795        struct device_node *np = hsotg->dev->of_node;
3796        u32 len = 0;
3797        u32 i = 0;
3798
3799        /* Enable dma if requested in device tree */
3800        hsotg->g_using_dma = of_property_read_bool(np, "g-use-dma");
3801
        /*
         * Register TX periodic fifo size per endpoint.
         * EP0 is excluded since it has no fifo configuration.
         */
3806        if (!of_find_property(np, "g-tx-fifo-size", &len))
3807                goto rx_fifo;
3808
3809        len /= sizeof(u32);
3810
3811        /* Read tx fifo sizes other than ep0 */
3812        if (of_property_read_u32_array(np, "g-tx-fifo-size",
3813                                                &hsotg->g_tx_fifo_sz[1], len))
3814                goto rx_fifo;
3815
3816        /* Add ep0 */
3817        len++;
3818
3819        /* Make remaining TX fifos unavailable */
3820        if (len < MAX_EPS_CHANNELS) {
3821                for (i = len; i < MAX_EPS_CHANNELS; i++)
3822                        hsotg->g_tx_fifo_sz[i] = 0;
3823        }
3824
3825rx_fifo:
3826        /* Register RX fifo size */
3827        of_property_read_u32(np, "g-rx-fifo-size", &hsotg->g_rx_fifo_sz);
3828
3829        /* Register NPTX fifo size */
3830        of_property_read_u32(np, "g-np-tx-fifo-size",
3831                                                &hsotg->g_np_g_tx_fifo_sz);
3832}
3833#else
3834static inline void s3c_hsotg_of_probe(struct dwc2_hsotg *hsotg) { }
3835#endif
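/*
 * A minimal, purely illustrative device tree fragment using the gadget
 * properties parsed above (node name and sizes are examples only):
 *
 *      usb@12480000 {
 *              g-use-dma;
 *              g-rx-fifo-size = <2048>;
 *              g-np-tx-fifo-size = <1024>;
 *              g-tx-fifo-size = <256 256 256 256>;
 *      };
 */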
3836
3837/**
3838 * dwc2_gadget_init - init function for gadget
 * @hsotg: The data structure for the DWC2 driver.
3840 * @irq: The IRQ number for the controller.
3841 */
3842int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
3843{
3844        struct device *dev = hsotg->dev;
3845        struct s3c_hsotg_plat *plat = dev->platform_data;
3846        int epnum;
3847        int ret;
3848        int i;
3849        u32 p_tx_fifo[] = DWC2_G_P_LEGACY_TX_FIFO_SIZE;
3850
3851        /* Set default UTMI width */
3852        hsotg->phyif = GUSBCFG_PHYIF16;
3853
3856        /* Initialize to legacy fifo configuration values */
3857        hsotg->g_rx_fifo_sz = 2048;
3858        hsotg->g_np_g_tx_fifo_sz = 1024;
3859        memcpy(&hsotg->g_tx_fifo_sz[1], p_tx_fifo, sizeof(p_tx_fifo));
3860        /* Device tree specific probe */
3861        s3c_hsotg_of_probe(hsotg);
3862        /* Dump fifo information */
3863        dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n",
3864                                                hsotg->g_np_g_tx_fifo_sz);
3865        dev_dbg(dev, "RXFIFO size: %d\n", hsotg->g_rx_fifo_sz);
3866        for (i = 0; i < MAX_EPS_CHANNELS; i++)
3867                dev_dbg(dev, "Periodic TXFIFO%2d size: %d\n", i,
3868                                                hsotg->g_tx_fifo_sz[i]);
3869        /*
3870         * If platform probe couldn't find a generic PHY or an old style
3871         * USB PHY, fall back to pdata
3872         */
3873        if (IS_ERR_OR_NULL(hsotg->phy) && IS_ERR_OR_NULL(hsotg->uphy)) {
3874                plat = dev_get_platdata(dev);
3875                if (!plat) {
                        dev_err(dev,
                                "no platform data or transceiver defined\n");
3878                        return -EPROBE_DEFER;
3879                }
3880                hsotg->plat = plat;
3881        } else if (hsotg->phy) {
3882                /*
3883                 * If using the generic PHY framework, check if the PHY bus
3884                 * width is 8-bit and set the phyif appropriately.
3885                 */
3886                if (phy_get_bus_width(hsotg->phy) == 8)
3887                        hsotg->phyif = GUSBCFG_PHYIF8;
3888        }
3889
3890        hsotg->clk = devm_clk_get(dev, "otg");
3891        if (IS_ERR(hsotg->clk)) {
3892                hsotg->clk = NULL;
3893                dev_dbg(dev, "cannot get otg clock\n");
3894        }
3895
3896        hsotg->gadget.max_speed = USB_SPEED_HIGH;
3897        hsotg->gadget.ops = &s3c_hsotg_gadget_ops;
3898        hsotg->gadget.name = dev_name(dev);
3899
3900        /* reset the system */
3901
3902        ret = clk_prepare_enable(hsotg->clk);
3903        if (ret) {
3904                dev_err(dev, "failed to enable otg clk\n");
3905                goto err_clk;
3906        }
3907
3908
3909        /* regulators */
3910
3911        for (i = 0; i < ARRAY_SIZE(hsotg->supplies); i++)
3912                hsotg->supplies[i].supply = s3c_hsotg_supply_names[i];
3913
3914        ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(hsotg->supplies),
3915                                 hsotg->supplies);
3916        if (ret) {
3917                dev_err(dev, "failed to request supplies: %d\n", ret);
3918                goto err_clk;
3919        }
3920
3921        ret = regulator_bulk_enable(ARRAY_SIZE(hsotg->supplies),
3922                                    hsotg->supplies);
3923
3924        if (ret) {
3925                dev_err(dev, "failed to enable supplies: %d\n", ret);
3926                goto err_clk;
3927        }
3928
3929        /* usb phy enable */
3930        s3c_hsotg_phy_enable(hsotg);
3931
3932        /*
3933         * Force Device mode before initialization.
3934         * This allows correctly configuring fifo for device mode.
3935         */
3936        __bic32(hsotg->regs + GUSBCFG, GUSBCFG_FORCEHOSTMODE);
3937        __orr32(hsotg->regs + GUSBCFG, GUSBCFG_FORCEDEVMODE);
3938
3939        /*
3940         * According to Synopsys databook, this sleep is needed for the force
3941         * device mode to take effect.
3942         */
3943        msleep(25);
3944
3945        s3c_hsotg_corereset(hsotg);
3946        ret = s3c_hsotg_hw_cfg(hsotg);
3947        if (ret) {
3948                dev_err(hsotg->dev, "Hardware configuration failed: %d\n", ret);
3949                goto err_clk;
3950        }
3951
3952        s3c_hsotg_init(hsotg);
3953
3954        /* Switch back to default configuration */
3955        __bic32(hsotg->regs + GUSBCFG, GUSBCFG_FORCEDEVMODE);
3956
3957        hsotg->ctrl_buff = devm_kzalloc(hsotg->dev,
3958                        DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
3959        if (!hsotg->ctrl_buff) {
3960                dev_err(dev, "failed to allocate ctrl request buff\n");
3961                ret = -ENOMEM;
3962                goto err_supplies;
3963        }
3964
3965        hsotg->ep0_buff = devm_kzalloc(hsotg->dev,
3966                        DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
3967        if (!hsotg->ep0_buff) {
3968                dev_err(dev, "failed to allocate ctrl reply buff\n");
3969                ret = -ENOMEM;
3970                goto err_supplies;
3971        }
3972
3973        ret = devm_request_irq(hsotg->dev, irq, s3c_hsotg_irq, IRQF_SHARED,
3974                                dev_name(hsotg->dev), hsotg);
3975        if (ret < 0) {
                dev_err(dev, "cannot claim IRQ for gadget\n");
                regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
                                       hsotg->supplies);
                goto err_supplies;
3982        }
3983
        /* hsotg->num_of_eps holds the number of EPs, including ep0 */
3985
3986        if (hsotg->num_of_eps == 0) {
3987                dev_err(dev, "wrong number of EPs (zero)\n");
3988                ret = -EINVAL;
3989                goto err_supplies;
3990        }
3991
3992        /* setup endpoint information */
3993
3994        INIT_LIST_HEAD(&hsotg->gadget.ep_list);
3995        hsotg->gadget.ep0 = &hsotg->eps_out[0]->ep;
3996
3997        /* allocate EP0 request */
3998
3999        hsotg->ctrl_req = s3c_hsotg_ep_alloc_request(&hsotg->eps_out[0]->ep,
4000                                                     GFP_KERNEL);
4001        if (!hsotg->ctrl_req) {
4002                dev_err(dev, "failed to allocate ctrl req\n");
4003                ret = -ENOMEM;
4004                goto err_supplies;
4005        }
4006
4007        /* initialise the endpoints now the core has been initialised */
4008        for (epnum = 0; epnum < hsotg->num_of_eps; epnum++) {
4009                if (hsotg->eps_in[epnum])
4010                        s3c_hsotg_initep(hsotg, hsotg->eps_in[epnum],
4011                                                                epnum, 1);
4012                if (hsotg->eps_out[epnum])
4013                        s3c_hsotg_initep(hsotg, hsotg->eps_out[epnum],
4014                                                                epnum, 0);
4015        }
4016
4017        /* disable power and clock */
4018        s3c_hsotg_phy_disable(hsotg);
4019
4020        ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
4021                                    hsotg->supplies);
4022        if (ret) {
4023                dev_err(dev, "failed to disable supplies: %d\n", ret);
                goto err_clk;
4025        }
4026
4027        ret = usb_add_gadget_udc(dev, &hsotg->gadget);
4028        if (ret)
                goto err_clk;
4030
4031        s3c_hsotg_create_debug(hsotg);
4032
4033        s3c_hsotg_dump(hsotg);
4034
4035        return 0;
4036
4037err_supplies:
4038        s3c_hsotg_phy_disable(hsotg);
4039err_clk:
4040        clk_disable_unprepare(hsotg->clk);
4041
4042        return ret;
4043}
4044EXPORT_SYMBOL_GPL(dwc2_gadget_init);
4045
4046/**
4047 * s3c_hsotg_remove - remove function for hsotg driver
 * @hsotg: The device state
4049 */
4050int s3c_hsotg_remove(struct dwc2_hsotg *hsotg)
4051{
4052        usb_del_gadget_udc(&hsotg->gadget);
4053        s3c_hsotg_delete_debug(hsotg);
4054        clk_disable_unprepare(hsotg->clk);
4055
4056        return 0;
4057}
4058EXPORT_SYMBOL_GPL(s3c_hsotg_remove);
4059
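/**
 * s3c_hsotg_suspend - suspend the gadget side of the controller
 * @hsotg: The device state
 *
 * If a function driver is bound, disconnect from the host, disable all
 * endpoints and power down the PHY, supplies and clock.
 */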
4060int s3c_hsotg_suspend(struct dwc2_hsotg *hsotg)
4061{
4062        unsigned long flags;
4063        int ret = 0;
4064
4065        mutex_lock(&hsotg->init_mutex);
4066
4067        if (hsotg->driver) {
4068                int ep;
4069
4070                dev_info(hsotg->dev, "suspending usb gadget %s\n",
4071                         hsotg->driver->driver.name);
4072
4073                spin_lock_irqsave(&hsotg->lock, flags);
4074                if (hsotg->enabled)
4075                        s3c_hsotg_core_disconnect(hsotg);
4076                s3c_hsotg_disconnect(hsotg);
4077                hsotg->gadget.speed = USB_SPEED_UNKNOWN;
4078                spin_unlock_irqrestore(&hsotg->lock, flags);
4079
4080                s3c_hsotg_phy_disable(hsotg);
4081
4082                for (ep = 0; ep < hsotg->num_of_eps; ep++) {
4083                        if (hsotg->eps_in[ep])
4084                                s3c_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
4085                        if (hsotg->eps_out[ep])
4086                                s3c_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
4087                }
4088
4089                ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
4090                                             hsotg->supplies);
4091                clk_disable(hsotg->clk);
4092        }
4093
4094        mutex_unlock(&hsotg->init_mutex);
4095
4096        return ret;
4097}
4098EXPORT_SYMBOL_GPL(s3c_hsotg_suspend);
4099
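/**
 * s3c_hsotg_resume - resume the gadget side of the controller
 * @hsotg: The device state
 *
 * Re-enable the clock, supplies and PHY, reinitialise the core and, if the
 * gadget was enabled before suspend, reconnect to the host.
 */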
4100int s3c_hsotg_resume(struct dwc2_hsotg *hsotg)
4101{
4102        unsigned long flags;
4103        int ret = 0;
4104
4105        mutex_lock(&hsotg->init_mutex);
4106
4107        if (hsotg->driver) {
4108                dev_info(hsotg->dev, "resuming usb gadget %s\n",
4109                         hsotg->driver->driver.name);
4110
4111                clk_enable(hsotg->clk);
4112                ret = regulator_bulk_enable(ARRAY_SIZE(hsotg->supplies),
4113                                            hsotg->supplies);
4114
4115                s3c_hsotg_phy_enable(hsotg);
4116
4117                spin_lock_irqsave(&hsotg->lock, flags);
4118                s3c_hsotg_core_init_disconnected(hsotg, false);
4119                if (hsotg->enabled)
4120                        s3c_hsotg_core_connect(hsotg);
4121                spin_unlock_irqrestore(&hsotg->lock, flags);
4122        }
4123        mutex_unlock(&hsotg->init_mutex);
4124
4125        return ret;
4126}
4127EXPORT_SYMBOL_GPL(s3c_hsotg_resume);
4128