linux/drivers/usb/gadget/amd5536udc.c
   1/*
   2 * amd5536.c -- AMD 5536 UDC high/full speed USB device controller
   3 *
   4 * Copyright (C) 2005-2007 AMD (http://www.amd.com)
   5 * Author: Thomas Dahlmann
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License as published by
   9 * the Free Software Foundation; either version 2 of the License, or
  10 * (at your option) any later version.
  11 *
  12 * This program is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  15 * GNU General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU General Public License
  18 * along with this program; if not, write to the Free Software
  19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  20 */
  21
  22/*
  23 * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536.
   24 * It is a high-speed, DMA-capable USB device controller. Besides ep0, it
   25 * provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
   26 *
   27 * Make sure that the UDC is assigned to port 4 by BIOS settings (the port
   28 * can also be used as host port) and that UOC bits PAD_EN and APU are set
   29 * (should be done by BIOS init).
   30 *
   31 * UDC DMA requires 32-bit aligned buffers, so DMA with gadget ether does
   32 * not work without updating NET_IP_ALIGN. Alternatively, PIO mode (module
   33 * param "use_dma=0") can be used with gadget ether.
  34 */
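
/*
 * For example, gadget ether can be run in PIO mode as suggested above
 * (illustrative commands, not part of the original source):
 *
 *	modprobe amd5536udc use_dma=0
 *	modprobe g_ether
 */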
  35
  36/* debug control */
  37/* #define UDC_VERBOSE */
  38
  39/* Driver strings */
  40#define UDC_MOD_DESCRIPTION             "AMD 5536 UDC - USB Device Controller"
  41#define UDC_DRIVER_VERSION_STRING       "01.00.0206 - $Revision: #3 $"
  42
  43/* system */
  44#include <linux/module.h>
  45#include <linux/pci.h>
  46#include <linux/kernel.h>
  47#include <linux/version.h>
  48#include <linux/delay.h>
  49#include <linux/ioport.h>
  50#include <linux/sched.h>
  51#include <linux/slab.h>
  52#include <linux/smp_lock.h>
  53#include <linux/errno.h>
  54#include <linux/init.h>
  55#include <linux/timer.h>
  56#include <linux/list.h>
  57#include <linux/interrupt.h>
  58#include <linux/ioctl.h>
  59#include <linux/fs.h>
  60#include <linux/dmapool.h>
  61#include <linux/moduleparam.h>
  62#include <linux/device.h>
  63#include <linux/io.h>
  64#include <linux/irq.h>
  65
  66#include <asm/byteorder.h>
  67#include <asm/system.h>
  68#include <asm/unaligned.h>
  69
  70/* gadget stack */
  71#include <linux/usb/ch9.h>
  72#include <linux/usb/gadget.h>
  73
  74/* udc specific */
  75#include "amd5536udc.h"
  76
  77
  78static void udc_tasklet_disconnect(unsigned long);
  79static void empty_req_queue(struct udc_ep *);
  80static int udc_probe(struct udc *dev);
  81static void udc_basic_init(struct udc *dev);
  82static void udc_setup_endpoints(struct udc *dev);
  83static void udc_soft_reset(struct udc *dev);
  84static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
  85static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);
  86static int udc_free_dma_chain(struct udc *dev, struct udc_request *req);
  87static int udc_create_dma_chain(struct udc_ep *ep, struct udc_request *req,
  88                                unsigned long buf_len, gfp_t gfp_flags);
  89static int udc_remote_wakeup(struct udc *dev);
  90static int udc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
  91static void udc_pci_remove(struct pci_dev *pdev);
  92
  93/* description */
  94static const char mod_desc[] = UDC_MOD_DESCRIPTION;
  95static const char name[] = "amd5536udc";
  96
  97/* structure to hold endpoint function pointers */
  98static const struct usb_ep_ops udc_ep_ops;
  99
 100/* received setup data */
 101static union udc_setup_data setup_data;
 102
 103/* pointer to device object */
 104static struct udc *udc;
 105
 106/* irq spin lock for soft reset */
 107static DEFINE_SPINLOCK(udc_irq_spinlock);
 108/* stall spin lock */
 109static DEFINE_SPINLOCK(udc_stall_spinlock);
 110
  111/*
  112 * slave mode: pending bytes in rx fifo after nyet,
  113 * used if EPIN irq came but no req was available
  114 */
 115static unsigned int udc_rxfifo_pending;
 116
 117/* count soft resets after suspend to avoid loop */
 118static int soft_reset_occured;
 119static int soft_reset_after_usbreset_occured;
 120
 121/* timer */
 122static struct timer_list udc_timer;
 123static int stop_timer;
 124
  125/* set_rde -- used to control enabling of RX DMA. The problem is
  126 * that the UDC has only one bit (RDE) to enable/disable RX DMA for
  127 * all OUT endpoints. So we have to handle race conditions like
  128 * when OUT data reaches the fifo but no request was queued yet.
  129 * This cannot be solved by leaving RX DMA disabled until a
  130 * request gets queued, because there may be other OUT packets
  131 * in the FIFO (important for not blocking control traffic).
  132 * The value of set_rde controls the corresponding timer.
  133 *
  134 * set_rde -1 == not used, means it is allowed to be set to 0 or 1
  135 * set_rde  0 == do not touch RDE, do not start the RDE timer
  136 * set_rde  1 == timer function will look whether FIFO has data
  137 * set_rde  2 == set by timer function to enable RX DMA on next call
  138 */
 139static int set_rde = -1;
 140
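/*
 * Condensed sketch (hypothetical, illustration only) of the handshake
 * described above; the real logic lives in the RDE timer function
 * further down in this file, and rxfifo_has_data() is a made-up
 * placeholder for the FIFO-empty check:
 *
 *	if (set_rde == 1 && rxfifo_has_data())
 *		set_rde = 2;		(enable RX DMA on next run)
 *	else if (set_rde == 2)
 *		udc_set_rde(dev);	(actually set the RDE bit)
 */
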
 141static DECLARE_COMPLETION(on_exit);
 142static struct timer_list udc_pollstall_timer;
 143static int stop_pollstall_timer;
 144static DECLARE_COMPLETION(on_pollstall_exit);
 145
 146/* tasklet for usb disconnect */
 147static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
 148                (unsigned long) &udc);
 149
 150
 151/* endpoint names used for print */
 152static const char ep0_string[] = "ep0in";
 153static const char *ep_string[] = {
 154        ep0_string,
 155        "ep1in-int", "ep2in-bulk", "ep3in-bulk", "ep4in-bulk", "ep5in-bulk",
 156        "ep6in-bulk", "ep7in-bulk", "ep8in-bulk", "ep9in-bulk", "ep10in-bulk",
 157        "ep11in-bulk", "ep12in-bulk", "ep13in-bulk", "ep14in-bulk",
 158        "ep15in-bulk", "ep0out", "ep1out-bulk", "ep2out-bulk", "ep3out-bulk",
 159        "ep4out-bulk", "ep5out-bulk", "ep6out-bulk", "ep7out-bulk",
 160        "ep8out-bulk", "ep9out-bulk", "ep10out-bulk", "ep11out-bulk",
 161        "ep12out-bulk", "ep13out-bulk", "ep14out-bulk", "ep15out-bulk"
 162};
 163
 164/* DMA usage flag */
 165static int use_dma = 1;
 166/* packet per buffer dma */
 167static int use_dma_ppb = 1;
 168/* with per descr. update */
 169static int use_dma_ppb_du;
 170/* buffer fill mode */
 171static int use_dma_bufferfill_mode;
 172/* full speed only mode */
 173static int use_fullspeed;
 174/* tx buffer size for high speed */
 175static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;
 176
 177/* module parameters */
 178module_param(use_dma, bool, S_IRUGO);
 179MODULE_PARM_DESC(use_dma, "true for DMA");
 180module_param(use_dma_ppb, bool, S_IRUGO);
 181MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode");
 182module_param(use_dma_ppb_du, bool, S_IRUGO);
 183MODULE_PARM_DESC(use_dma_ppb_du,
 184        "true for DMA in packet per buffer mode with descriptor update");
 185module_param(use_fullspeed, bool, S_IRUGO);
 186MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
 187
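/*
 * With S_IRUGO the current values can be read back at runtime through
 * the standard module sysfs layout, e.g. (illustrative path):
 *
 *	cat /sys/module/amd5536udc/parameters/use_dma
 */
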
 188/*---------------------------------------------------------------------------*/
 189/* Prints UDC device registers and endpoint irq registers */
 190static void print_regs(struct udc *dev)
 191{
 192        DBG(dev, "------- Device registers -------\n");
 193        DBG(dev, "dev config     = %08x\n", readl(&dev->regs->cfg));
 194        DBG(dev, "dev control    = %08x\n", readl(&dev->regs->ctl));
 195        DBG(dev, "dev status     = %08x\n", readl(&dev->regs->sts));
 196        DBG(dev, "\n");
 197        DBG(dev, "dev int's      = %08x\n", readl(&dev->regs->irqsts));
 198        DBG(dev, "dev intmask    = %08x\n", readl(&dev->regs->irqmsk));
 199        DBG(dev, "\n");
 200        DBG(dev, "dev ep int's   = %08x\n", readl(&dev->regs->ep_irqsts));
 201        DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
 202        DBG(dev, "\n");
 203        DBG(dev, "USE DMA        = %d\n", use_dma);
 204        if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
 205                DBG(dev, "DMA mode       = PPBNDU (packet per buffer "
 206                        "WITHOUT desc. update)\n");
 207                dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU");
  208        } else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
 209                DBG(dev, "DMA mode       = PPBDU (packet per buffer "
 210                        "WITH desc. update)\n");
 211                dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU");
 212        }
 213        if (use_dma && use_dma_bufferfill_mode) {
 214                DBG(dev, "DMA mode       = BF (buffer fill mode)\n");
 215                dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF");
 216        }
 217        if (!use_dma) {
 218                dev_info(&dev->pdev->dev, "FIFO mode\n");
 219        }
 220        DBG(dev, "-------------------------------------------------------\n");
 221}
 222
 223/* Masks unused interrupts */
 224static int udc_mask_unused_interrupts(struct udc *dev)
 225{
 226        u32 tmp;
 227
 228        /* mask all dev interrupts */
 229        tmp =   AMD_BIT(UDC_DEVINT_SVC) |
 230                AMD_BIT(UDC_DEVINT_ENUM) |
 231                AMD_BIT(UDC_DEVINT_US) |
 232                AMD_BIT(UDC_DEVINT_UR) |
 233                AMD_BIT(UDC_DEVINT_ES) |
 234                AMD_BIT(UDC_DEVINT_SI) |
 235                AMD_BIT(UDC_DEVINT_SOF)|
 236                AMD_BIT(UDC_DEVINT_SC);
 237        writel(tmp, &dev->regs->irqmsk);
 238
 239        /* mask all ep interrupts */
 240        writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);
 241
 242        return 0;
 243}
 244
 245/* Enables endpoint 0 interrupts */
 246static int udc_enable_ep0_interrupts(struct udc *dev)
 247{
 248        u32 tmp;
 249
 250        DBG(dev, "udc_enable_ep0_interrupts()\n");
 251
 252        /* read irq mask */
 253        tmp = readl(&dev->regs->ep_irqmsk);
 254        /* enable ep0 irq's */
 255        tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
 256                & AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
 257        writel(tmp, &dev->regs->ep_irqmsk);
 258
 259        return 0;
 260}
 261
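/*
 * The ep/dev irq mask registers hold "disable" bits, so enabling an
 * interrupt means clearing its bit. A minimal sketch of the recurring
 * read-modify-write pattern (assuming AMD_UNMASK_BIT(b) expands to
 * ~(1 << b)); the helper name is made up for illustration:
 */
static inline void udc_irq_unmask_sketch(u32 __iomem *msk_reg, u32 bit)
{
        u32 tmp = readl(msk_reg);       /* current mask */

        tmp &= ~(1U << bit);            /* clear bit == unmask == enable */
        writel(tmp, msk_reg);
}
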
 262/* Enables device interrupts for SET_INTF and SET_CONFIG */
 263static int udc_enable_dev_setup_interrupts(struct udc *dev)
 264{
 265        u32 tmp;
 266
 267        DBG(dev, "enable device interrupts for setup data\n");
 268
 269        /* read irq mask */
 270        tmp = readl(&dev->regs->irqmsk);
 271
 272        /* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
 273        tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
 274                & AMD_UNMASK_BIT(UDC_DEVINT_SC)
 275                & AMD_UNMASK_BIT(UDC_DEVINT_UR)
 276                & AMD_UNMASK_BIT(UDC_DEVINT_SVC)
 277                & AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
 278        writel(tmp, &dev->regs->irqmsk);
 279
 280        return 0;
 281}
 282
  283/* Calculates fifo start address of an endpoint based on preceding endpoints */
 284static int udc_set_txfifo_addr(struct udc_ep *ep)
 285{
 286        struct udc      *dev;
 287        u32 tmp;
 288        int i;
 289
 290        if (!ep || !(ep->in))
 291                return -EINVAL;
 292
 293        dev = ep->dev;
 294        ep->txfifo = dev->txfifo;
 295
 296        /* traverse ep's */
 297        for (i = 0; i < ep->num; i++) {
 298                if (dev->ep[i].regs) {
 299                        /* read fifo size */
 300                        tmp = readl(&dev->ep[i].regs->bufin_framenum);
 301                        tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
 302                        ep->txfifo += tmp;
 303                }
 304        }
 305        return 0;
 306}
 307
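/*
 * Worked example (assuming 512-byte bulk IN endpoints with double
 * buffering): ep1in and ep2in each occupy 512 * 2 / 4 = 256 dwords of
 * TX FIFO, so ep3in's FIFO base ends up at dev->txfifo + 512 dwords.
 */
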
 308/* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
 309static u32 cnak_pending;
 310
 311static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
 312{
 313        if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
 314                DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
 315                cnak_pending |= 1 << (num);
 316                ep->naking = 1;
 317        } else
 318                cnak_pending = cnak_pending & (~(1 << (num)));
 319}
 320
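/*
 * Sketch of how a pending CNAK could be retried later (illustration
 * only; the actual retry happens in the ISR paths of this driver):
 *
 *	if (cnak_pending & (1 << num)) {
 *		tmp = readl(&ep->regs->ctl);
 *		writel(tmp | AMD_BIT(UDC_EPCTL_CNAK), &ep->regs->ctl);
 *	}
 */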
 321
 322/* Enables endpoint, is called by gadget driver */
 323static int
 324udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
 325{
 326        struct udc_ep           *ep;
 327        struct udc              *dev;
 328        u32                     tmp;
 329        unsigned long           iflags;
 330        u8 udc_csr_epix;
 331
 332        if (!usbep
 333                        || usbep->name == ep0_string
 334                        || !desc
 335                        || desc->bDescriptorType != USB_DT_ENDPOINT)
 336                return -EINVAL;
 337
 338        ep = container_of(usbep, struct udc_ep, ep);
 339        dev = ep->dev;
 340
 341        DBG(dev, "udc_ep_enable() ep %d\n", ep->num);
 342
 343        if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
 344                return -ESHUTDOWN;
 345
 346        spin_lock_irqsave(&dev->lock, iflags);
 347        ep->desc = desc;
 348
 349        ep->halted = 0;
 350
 351        /* set traffic type */
 352        tmp = readl(&dev->ep[ep->num].regs->ctl);
 353        tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
 354        writel(tmp, &dev->ep[ep->num].regs->ctl);
 355
 356        /* set max packet size */
 357        tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
 358        tmp = AMD_ADDBITS(tmp, desc->wMaxPacketSize, UDC_EP_MAX_PKT_SIZE);
 359        ep->ep.maxpacket = desc->wMaxPacketSize;
 360        writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);
 361
 362        /* IN ep */
 363        if (ep->in) {
 364
 365                /* ep ix in UDC CSR register space */
 366                udc_csr_epix = ep->num;
 367
 368                /* set buffer size (tx fifo entries) */
 369                tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
 370                /* double buffering: fifo size = 2 x max packet size */
 371                tmp = AMD_ADDBITS(
 372                                tmp,
 373                                desc->wMaxPacketSize * UDC_EPIN_BUFF_SIZE_MULT
 374                                                / UDC_DWORD_BYTES,
 375                                UDC_EPIN_BUFF_SIZE);
 376                writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);
 377
 378                /* calc. tx fifo base addr */
 379                udc_set_txfifo_addr(ep);
 380
 381                /* flush fifo */
 382                tmp = readl(&ep->regs->ctl);
 383                tmp |= AMD_BIT(UDC_EPCTL_F);
 384                writel(tmp, &ep->regs->ctl);
 385
 386        /* OUT ep */
 387        } else {
 388                /* ep ix in UDC CSR register space */
 389                udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
 390
 391                /* set max packet size UDC CSR  */
 392                tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
 393                tmp = AMD_ADDBITS(tmp, desc->wMaxPacketSize,
 394                                        UDC_CSR_NE_MAX_PKT);
 395                writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
 396
 397                if (use_dma && !ep->in) {
 398                        /* alloc and init BNA dummy request */
 399                        ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
 400                        ep->bna_occurred = 0;
 401                }
 402
 403                if (ep->num != UDC_EP0OUT_IX)
 404                        dev->data_ep_enabled = 1;
 405        }
 406
 407        /* set ep values */
 408        tmp = readl(&dev->csr->ne[udc_csr_epix]);
 409        /* max packet */
 410        tmp = AMD_ADDBITS(tmp, desc->wMaxPacketSize, UDC_CSR_NE_MAX_PKT);
 411        /* ep number */
 412        tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
 413        /* ep direction */
 414        tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
 415        /* ep type */
 416        tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
 417        /* ep config */
 418        tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
 419        /* ep interface */
 420        tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
 421        /* ep alt */
 422        tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
 423        /* write reg */
 424        writel(tmp, &dev->csr->ne[udc_csr_epix]);
 425
 426        /* enable ep irq */
 427        tmp = readl(&dev->regs->ep_irqmsk);
 428        tmp &= AMD_UNMASK_BIT(ep->num);
 429        writel(tmp, &dev->regs->ep_irqmsk);
 430
 431        /*
 432         * clear NAK by writing CNAK
 433         * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
 434         */
 435        if (!use_dma || ep->in) {
 436                tmp = readl(&ep->regs->ctl);
 437                tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 438                writel(tmp, &ep->regs->ctl);
 439                ep->naking = 0;
 440                UDC_QUEUE_CNAK(ep, ep->num);
 441        }
 442        tmp = desc->bEndpointAddress;
 443        DBG(dev, "%s enabled\n", usbep->name);
 444
 445        spin_unlock_irqrestore(&dev->lock, iflags);
 446        return 0;
 447}
 448
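/*
 * Worked example for the IN FIFO sizing above, assuming
 * UDC_EPIN_BUFF_SIZE_MULT == 2 (double buffering) and
 * UDC_DWORD_BYTES == 4: a high speed bulk endpoint with
 * wMaxPacketSize = 512 gets 512 * 2 / 4 = 256 dword FIFO entries.
 */
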
 449/* Resets endpoint */
 450static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
 451{
 452        u32             tmp;
 453
 454        VDBG(ep->dev, "ep-%d reset\n", ep->num);
 455        ep->desc = NULL;
 456        ep->ep.ops = &udc_ep_ops;
 457        INIT_LIST_HEAD(&ep->queue);
 458
 459        ep->ep.maxpacket = (u16) ~0;
 460        /* set NAK */
 461        tmp = readl(&ep->regs->ctl);
 462        tmp |= AMD_BIT(UDC_EPCTL_SNAK);
 463        writel(tmp, &ep->regs->ctl);
 464        ep->naking = 1;
 465
 466        /* disable interrupt */
 467        tmp = readl(&regs->ep_irqmsk);
 468        tmp |= AMD_BIT(ep->num);
 469        writel(tmp, &regs->ep_irqmsk);
 470
 471        if (ep->in) {
 472                /* unset P and IN bit of potential former DMA */
 473                tmp = readl(&ep->regs->ctl);
 474                tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
 475                writel(tmp, &ep->regs->ctl);
 476
 477                tmp = readl(&ep->regs->sts);
 478                tmp |= AMD_BIT(UDC_EPSTS_IN);
 479                writel(tmp, &ep->regs->sts);
 480
 481                /* flush the fifo */
 482                tmp = readl(&ep->regs->ctl);
 483                tmp |= AMD_BIT(UDC_EPCTL_F);
 484                writel(tmp, &ep->regs->ctl);
 485
 486        }
 487        /* reset desc pointer */
 488        writel(0, &ep->regs->desptr);
 489}
 490
 491/* Disables endpoint, is called by gadget driver */
 492static int udc_ep_disable(struct usb_ep *usbep)
 493{
 494        struct udc_ep   *ep = NULL;
 495        unsigned long   iflags;
 496
 497        if (!usbep)
 498                return -EINVAL;
 499
 500        ep = container_of(usbep, struct udc_ep, ep);
 501        if (usbep->name == ep0_string || !ep->desc)
 502                return -EINVAL;
 503
 504        DBG(ep->dev, "Disable ep-%d\n", ep->num);
 505
 506        spin_lock_irqsave(&ep->dev->lock, iflags);
 507        udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
 508        empty_req_queue(ep);
 509        ep_init(ep->dev->regs, ep);
 510        spin_unlock_irqrestore(&ep->dev->lock, iflags);
 511
 512        return 0;
 513}
 514
 515/* Allocates request packet, called by gadget driver */
 516static struct usb_request *
 517udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
 518{
 519        struct udc_request      *req;
 520        struct udc_data_dma     *dma_desc;
 521        struct udc_ep   *ep;
 522
 523        if (!usbep)
 524                return NULL;
 525
 526        ep = container_of(usbep, struct udc_ep, ep);
 527
 528        VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
 529        req = kzalloc(sizeof(struct udc_request), gfp);
 530        if (!req)
 531                return NULL;
 532
 533        req->req.dma = DMA_DONT_USE;
 534        INIT_LIST_HEAD(&req->queue);
 535
 536        if (ep->dma) {
 537                /* ep0 in requests are allocated from data pool here */
 538                dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
 539                                                &req->td_phys);
 540                if (!dma_desc) {
 541                        kfree(req);
 542                        return NULL;
 543                }
 544
 545                VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
 546                                "td_phys = %lx\n",
 547                                req, dma_desc,
 548                                (unsigned long)req->td_phys);
  549        /* prevent desc. from being used yet - set HOST BUSY */
 550                dma_desc->status = AMD_ADDBITS(dma_desc->status,
 551                                                UDC_DMA_STP_STS_BS_HOST_BUSY,
 552                                                UDC_DMA_STP_STS_BS);
 553                dma_desc->bufptr = __constant_cpu_to_le32(DMA_DONT_USE);
 554                req->td_data = dma_desc;
 555                req->td_data_last = NULL;
 556                req->chain_len = 1;
 557        }
 558
 559        return &req->req;
 560}
 561
 562/* Frees request packet, called by gadget driver */
 563static void
 564udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
 565{
 566        struct udc_ep   *ep;
 567        struct udc_request      *req;
 568
 569        if (!usbep || !usbreq)
 570                return;
 571
 572        ep = container_of(usbep, struct udc_ep, ep);
 573        req = container_of(usbreq, struct udc_request, req);
 574        VDBG(ep->dev, "free_req req=%p\n", req);
 575        BUG_ON(!list_empty(&req->queue));
 576        if (req->td_data) {
 577                VDBG(ep->dev, "req->td_data=%p\n", req->td_data);
 578
 579                /* free dma chain if created */
 580                if (req->chain_len > 1) {
 581                        udc_free_dma_chain(ep->dev, req);
 582                }
 583
 584                pci_pool_free(ep->dev->data_requests, req->td_data,
 585                                                        req->td_phys);
 586        }
 587        kfree(req);
 588}
 589
 590/* Init BNA dummy descriptor for HOST BUSY and pointing to itself */
 591static void udc_init_bna_dummy(struct udc_request *req)
 592{
 593        if (req) {
 594                /* set last bit */
 595                req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
 596                /* set next pointer to itself */
 597                req->td_data->next = req->td_phys;
  598                /* set DMA DONE status so the desc. will not be used */
 599                req->td_data->status
 600                        = AMD_ADDBITS(req->td_data->status,
 601                                        UDC_DMA_STP_STS_BS_DMA_DONE,
 602                                        UDC_DMA_STP_STS_BS);
 603#ifdef UDC_VERBOSE
 604                pr_debug("bna desc = %p, sts = %08x\n",
 605                        req->td_data, req->td_data->status);
 606#endif
 607        }
 608}
 609
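/*
 * The dummy descriptor initialized above is a one-entry ring: next
 * points to its own physical address, the L bit marks it as last, and
 * the DMA_DONE buffer status keeps the controller from using it, so a
 * BNA condition can park DMA here without touching real request memory.
 */
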
 610/* Allocate BNA dummy descriptor */
 611static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
 612{
 613        struct udc_request *req = NULL;
 614        struct usb_request *_req = NULL;
 615
 616        /* alloc the dummy request */
 617        _req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
 618        if (_req) {
 619                req = container_of(_req, struct udc_request, req);
 620                ep->bna_dummy_req = req;
 621                udc_init_bna_dummy(req);
 622        }
 623        return req;
 624}
 625
 626/* Write data to TX fifo for IN packets */
 627static void
 628udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
 629{
 630        u8                      *req_buf;
 631        u32                     *buf;
 632        int                     i, j;
 633        unsigned                bytes = 0;
 634        unsigned                remaining = 0;
 635
 636        if (!req || !ep)
 637                return;
 638
 639        req_buf = req->buf + req->actual;
 640        prefetch(req_buf);
 641        remaining = req->length - req->actual;
 642
 643        buf = (u32 *) req_buf;
 644
 645        bytes = ep->ep.maxpacket;
 646        if (bytes > remaining)
 647                bytes = remaining;
 648
 649        /* dwords first */
 650        for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) {
 651                writel(*(buf + i), ep->txfifo);
 652        }
 653
 654        /* remaining bytes must be written by byte access */
 655        for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
 656                writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
 657                                                        ep->txfifo);
 658        }
 659
 660        /* dummy write confirm */
 661        writel(0, &ep->regs->confirm);
 662}
 663
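/*
 * Example of the dword/byte split above: a 515-byte payload is written
 * as 515 / 4 = 128 dword writes plus 515 % 4 = 3 single byte writes
 * taken from the last, partially filled dword.
 */
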
 664/* Read dwords from RX fifo for OUT transfers */
 665static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
 666{
 667        int i;
 668
 669        VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);
 670
 671        for (i = 0; i < dwords; i++) {
 672                *(buf + i) = readl(dev->rxfifo);
 673        }
 674        return 0;
 675}
 676
 677/* Read bytes from RX fifo for OUT transfers */
 678static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
 679{
 680        int i, j;
 681        u32 tmp;
 682
 683        VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);
 684
 685        /* dwords first */
 686        for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) {
 687                *((u32 *)(buf + (i<<2))) = readl(dev->rxfifo);
 688        }
 689
 690        /* remaining bytes must be read by byte access */
 691        if (bytes % UDC_DWORD_BYTES) {
 692                tmp = readl(dev->rxfifo);
 693                for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
 694                        *(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK);
 695                        tmp = tmp >> UDC_BITS_PER_BYTE;
 696                }
 697        }
 698
 699        return 0;
 700}
 701
 702/* Read data from RX fifo for OUT transfers */
 703static int
 704udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
 705{
 706        u8 *buf;
 707        unsigned buf_space;
 708        unsigned bytes = 0;
 709        unsigned finished = 0;
 710
  711        /* number of received bytes */
 712        bytes = readl(&ep->regs->sts);
 713        bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);
 714
 715        buf_space = req->req.length - req->req.actual;
 716        buf = req->req.buf + req->req.actual;
 717        if (bytes > buf_space) {
 718                if ((buf_space % ep->ep.maxpacket) != 0) {
 719                        DBG(ep->dev,
 720                                "%s: rx %d bytes, rx-buf space = %d bytesn\n",
 721                                ep->ep.name, bytes, buf_space);
 722                        req->req.status = -EOVERFLOW;
 723                }
 724                bytes = buf_space;
 725        }
 726        req->req.actual += bytes;
 727
 728        /* last packet ? */
 729        if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
 730                || ((req->req.actual == req->req.length) && !req->req.zero))
 731                finished = 1;
 732
 733        /* read rx fifo bytes */
 734        VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
 735        udc_rxfifo_read_bytes(ep->dev, buf, bytes);
 736
 737        return finished;
 738}
 739
 740/* create/re-init a DMA descriptor or a DMA descriptor chain */
 741static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
 742{
 743        int     retval = 0;
 744        u32     tmp;
 745
 746        VDBG(ep->dev, "prep_dma\n");
 747        VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
 748                        ep->num, req->td_data);
 749
 750        /* set buffer pointer */
 751        req->td_data->bufptr = req->req.dma;
 752
 753        /* set last bit */
 754        req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
 755
 756        /* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
 757        if (use_dma_ppb) {
 758
 759                retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
 760                if (retval != 0) {
 761                        if (retval == -ENOMEM)
 762                                DBG(ep->dev, "Out of DMA memory\n");
 763                        return retval;
 764                }
 765                if (ep->in) {
 766                        if (req->req.length == ep->ep.maxpacket) {
 767                                /* write tx bytes */
 768                                req->td_data->status =
 769                                        AMD_ADDBITS(req->td_data->status,
 770                                                ep->ep.maxpacket,
 771                                                UDC_DMA_IN_STS_TXBYTES);
 772
 773                        }
 774                }
 775
 776        }
 777
 778        if (ep->in) {
 779                VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
 780                                "maxpacket=%d ep%d\n",
 781                                use_dma_ppb, req->req.length,
 782                                ep->ep.maxpacket, ep->num);
 783                /*
 784                 * if bytes < max packet then tx bytes must
 785                 * be written in packet per buffer mode
 786                 */
 787                if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
 788                                || ep->num == UDC_EP0OUT_IX
 789                                || ep->num == UDC_EP0IN_IX) {
 790                        /* write tx bytes */
 791                        req->td_data->status =
 792                                AMD_ADDBITS(req->td_data->status,
 793                                                req->req.length,
 794                                                UDC_DMA_IN_STS_TXBYTES);
 795                        /* reset frame num */
 796                        req->td_data->status =
 797                                AMD_ADDBITS(req->td_data->status,
 798                                                0,
 799                                                UDC_DMA_IN_STS_FRAMENUM);
 800                }
 801                /* set HOST BUSY */
 802                req->td_data->status =
 803                        AMD_ADDBITS(req->td_data->status,
 804                                UDC_DMA_STP_STS_BS_HOST_BUSY,
 805                                UDC_DMA_STP_STS_BS);
 806        } else {
 807                VDBG(ep->dev, "OUT set host ready\n");
 808                /* set HOST READY */
 809                req->td_data->status =
 810                        AMD_ADDBITS(req->td_data->status,
 811                                UDC_DMA_STP_STS_BS_HOST_READY,
 812                                UDC_DMA_STP_STS_BS);
 813
 814
  815                /* clear NAK by writing CNAK */
  816                if (ep->naking) {
  817                        tmp = readl(&ep->regs->ctl);
  818                        tmp |= AMD_BIT(UDC_EPCTL_CNAK);
  819                        writel(tmp, &ep->regs->ctl);
  820                        ep->naking = 0;
  821                        UDC_QUEUE_CNAK(ep, ep->num);
  822                }
 823
 824        }
 825
 826        return retval;
 827}
 828
 829/* Completes request packet ... caller MUST hold lock */
 830static void
 831complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
 832__releases(ep->dev->lock)
 833__acquires(ep->dev->lock)
 834{
 835        struct udc              *dev;
 836        unsigned                halted;
 837
 838        VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);
 839
 840        dev = ep->dev;
 841        /* unmap DMA */
 842        if (req->dma_mapping) {
 843                if (ep->in)
 844                        pci_unmap_single(dev->pdev,
 845                                        req->req.dma,
 846                                        req->req.length,
 847                                        PCI_DMA_TODEVICE);
 848                else
 849                        pci_unmap_single(dev->pdev,
 850                                        req->req.dma,
 851                                        req->req.length,
 852                                        PCI_DMA_FROMDEVICE);
 853                req->dma_mapping = 0;
 854                req->req.dma = DMA_DONT_USE;
 855        }
 856
 857        halted = ep->halted;
 858        ep->halted = 1;
 859
 860        /* set new status if pending */
 861        if (req->req.status == -EINPROGRESS)
 862                req->req.status = sts;
 863
 864        /* remove from ep queue */
 865        list_del_init(&req->queue);
 866
 867        VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
 868                &req->req, req->req.length, ep->ep.name, sts);
 869
 870        spin_unlock(&dev->lock);
 871        req->req.complete(&ep->ep, &req->req);
 872        spin_lock(&dev->lock);
 873        ep->halted = halted;
 874}
 875
 876/* frees pci pool descriptors of a DMA chain */
 877static int udc_free_dma_chain(struct udc *dev, struct udc_request *req)
 878{
 879
 880        int ret_val = 0;
 881        struct udc_data_dma     *td;
 882        struct udc_data_dma     *td_last = NULL;
 883        unsigned int i;
 884
 885        DBG(dev, "free chain req = %p\n", req);
 886
 887        /* do not free first desc., will be done by free for request */
 888        td_last = req->td_data;
 889        td = phys_to_virt(td_last->next);
 890
 891        for (i = 1; i < req->chain_len; i++) {
 892
 893                pci_pool_free(dev->data_requests, td,
 894                                (dma_addr_t) td_last->next);
 895                td_last = td;
 896                td = phys_to_virt(td_last->next);
 897        }
 898
 899        return ret_val;
 900}
 901
 902/* Iterates to the end of a DMA chain and returns last descriptor */
 903static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
 904{
 905        struct udc_data_dma     *td;
 906
 907        td = req->td_data;
 908        while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
 909                td = phys_to_virt(td->next);
 910        }
 911
 912        return td;
 913
 914}
 915
 916/* Iterates to the end of a DMA chain and counts bytes received */
 917static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
 918{
 919        struct udc_data_dma     *td;
 920        u32 count;
 921
 922        td = req->td_data;
  923        /* number of received bytes */
 924        count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);
 925
 926        while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
 927                td = phys_to_virt(td->next);
  928                /* number of received bytes */
 929                if (td) {
 930                        count += AMD_GETBITS(td->status,
 931                                UDC_DMA_OUT_STS_RXBYTES);
 932                }
 933        }
 934
 935        return count;
 936
 937}
 938
 939/* Creates or re-inits a DMA chain */
 940static int udc_create_dma_chain(
 941        struct udc_ep *ep,
 942        struct udc_request *req,
 943        unsigned long buf_len, gfp_t gfp_flags
 944)
 945{
 946        unsigned long bytes = req->req.length;
 947        unsigned int i;
 948        dma_addr_t dma_addr;
 949        struct udc_data_dma     *td = NULL;
 950        struct udc_data_dma     *last = NULL;
 951        unsigned long txbytes;
 952        unsigned create_new_chain = 0;
 953        unsigned len;
 954
 955        VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
 956                        bytes, buf_len);
 957        dma_addr = DMA_DONT_USE;
 958
 959        /* unset L bit in first desc for OUT */
 960        if (!ep->in) {
 961                req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);
 962        }
 963
 964        /* alloc only new desc's if not already available */
 965        len = req->req.length / ep->ep.maxpacket;
 966        if (req->req.length % ep->ep.maxpacket) {
 967                len++;
 968        }
 969
 970        if (len > req->chain_len) {
  971                /* a shorter chain was allocated before - free it */
 972                if (req->chain_len > 1) {
 973                        udc_free_dma_chain(ep->dev, req);
 974                }
 975                req->chain_len = len;
 976                create_new_chain = 1;
 977        }
 978
 979        td = req->td_data;
 980        /* gen. required number of descriptors and buffers */
 981        for (i = buf_len; i < bytes; i += buf_len) {
 982                /* create or determine next desc. */
 983                if (create_new_chain) {
 984
 985                        td = pci_pool_alloc(ep->dev->data_requests,
 986                                        gfp_flags, &dma_addr);
 987                        if (!td)
 988                                return -ENOMEM;
 989
 990                        td->status = 0;
 991                } else if (i == buf_len) {
 992                        /* first td */
 993                        td = (struct udc_data_dma *) phys_to_virt(
 994                                                req->td_data->next);
 995                        td->status = 0;
 996                } else {
 997                        td = (struct udc_data_dma *) phys_to_virt(last->next);
 998                        td->status = 0;
 999                }
1000
1001
1002                if (td)
1003                        td->bufptr = req->req.dma + i; /* assign buffer */
1004                else
1005                        break;
1006
1007                /* short packet ? */
1008                if ((bytes - i) >= buf_len) {
1009                        txbytes = buf_len;
1010                } else {
1011                        /* short packet */
1012                        txbytes = bytes - i;
1013                }
1014
1015                /* link td and assign tx bytes */
1016                if (i == buf_len) {
1017                        if (create_new_chain) {
1018                                req->td_data->next = dma_addr;
1019                        } else {
1020                                /* req->td_data->next = virt_to_phys(td); */
1021                        }
1022                        /* write tx bytes */
1023                        if (ep->in) {
1024                                /* first desc */
1025                                req->td_data->status =
1026                                        AMD_ADDBITS(req->td_data->status,
1027                                                        ep->ep.maxpacket,
1028                                                        UDC_DMA_IN_STS_TXBYTES);
1029                                /* second desc */
1030                                td->status = AMD_ADDBITS(td->status,
1031                                                        txbytes,
1032                                                        UDC_DMA_IN_STS_TXBYTES);
1033                        }
1034                } else {
1035                        if (create_new_chain) {
1036                                last->next = dma_addr;
1037                        } else {
1038                                /* last->next = virt_to_phys(td); */
1039                        }
1040                        if (ep->in) {
1041                                /* write tx bytes */
1042                                td->status = AMD_ADDBITS(td->status,
1043                                                        txbytes,
1044                                                        UDC_DMA_IN_STS_TXBYTES);
1045                        }
1046                }
1047                last = td;
1048        }
1049        /* set last bit */
1050        if (td) {
1051                td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
1052                /* last desc. points to itself */
1053                req->td_data_last = td;
1054        }
1055
1056        return 0;
1057}
1058
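/*
 * Example of the chain sizing above: an 8192-byte request on a 512-byte
 * bulk endpoint needs len = 8192 / 512 = 16 descriptors; req->td_data
 * is reused as the first one and the remaining 15 are taken from (or
 * already present in) the PCI pool.
 */
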
 1059/* Enables RX DMA */
1060static void udc_set_rde(struct udc *dev)
1061{
1062        u32 tmp;
1063
1064        VDBG(dev, "udc_set_rde()\n");
1065        /* stop RDE timer */
1066        if (timer_pending(&udc_timer)) {
1067                set_rde = 0;
1068                mod_timer(&udc_timer, jiffies - 1);
1069        }
1070        /* set RDE */
1071        tmp = readl(&dev->regs->ctl);
1072        tmp |= AMD_BIT(UDC_DEVCTL_RDE);
1073        writel(tmp, &dev->regs->ctl);
1074}
1075
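/*
 * Note on the "jiffies - 1" idiom above: re-arming a pending timer with
 * an already expired time makes it fire on the next timer tick; with
 * set_rde = 0 the callback then does nothing, which effectively cancels
 * the RDE polling.
 */
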
1076/* Queues a request packet, called by gadget driver */
1077static int
1078udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
1079{
1080        int                     retval = 0;
1081        u8                      open_rxfifo = 0;
1082        unsigned long           iflags;
1083        struct udc_ep           *ep;
1084        struct udc_request      *req;
1085        struct udc              *dev;
1086        u32                     tmp;
1087
1088        /* check the inputs */
1089        req = container_of(usbreq, struct udc_request, req);
1090
1091        if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
1092                        || !list_empty(&req->queue))
1093                return -EINVAL;
1094
1095        ep = container_of(usbep, struct udc_ep, ep);
1096        if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
1097                return -EINVAL;
1098
1099        VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
1100        dev = ep->dev;
1101
1102        if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
1103                return -ESHUTDOWN;
1104
1105        /* map dma (usually done before) */
1106        if (ep->dma && usbreq->length != 0
1107                        && (usbreq->dma == DMA_DONT_USE || usbreq->dma == 0)) {
1108                VDBG(dev, "DMA map req %p\n", req);
1109                if (ep->in)
1110                        usbreq->dma = pci_map_single(dev->pdev,
1111                                                usbreq->buf,
1112                                                usbreq->length,
1113                                                PCI_DMA_TODEVICE);
1114                else
1115                        usbreq->dma = pci_map_single(dev->pdev,
1116                                                usbreq->buf,
1117                                                usbreq->length,
1118                                                PCI_DMA_FROMDEVICE);
1119                req->dma_mapping = 1;
1120        }
1121
1122        VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
1123                        usbep->name, usbreq, usbreq->length,
1124                        req->td_data, usbreq->buf);
1125
1126        spin_lock_irqsave(&dev->lock, iflags);
1127        usbreq->actual = 0;
1128        usbreq->status = -EINPROGRESS;
1129        req->dma_done = 0;
1130
1131        /* on empty queue just do first transfer */
1132        if (list_empty(&ep->queue)) {
1133                /* zlp */
1134                if (usbreq->length == 0) {
1135                        /* IN zlp's are handled by hardware */
1136                        complete_req(ep, req, 0);
1137                        VDBG(dev, "%s: zlp\n", ep->ep.name);
1138                        /*
1139                         * if set_config or set_intf is waiting for ack by zlp
1140                         * then set CSR_DONE
1141                         */
1142                        if (dev->set_cfg_not_acked) {
1143                                tmp = readl(&dev->regs->ctl);
1144                                tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
1145                                writel(tmp, &dev->regs->ctl);
1146                                dev->set_cfg_not_acked = 0;
1147                        }
1148                        /* setup command is ACK'ed now by zlp */
1149                        if (dev->waiting_zlp_ack_ep0in) {
1150                                /* clear NAK by writing CNAK in EP0_IN */
1151                                tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1152                                tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1153                                writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1154                                dev->ep[UDC_EP0IN_IX].naking = 0;
1155                                UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
1156                                                        UDC_EP0IN_IX);
1157                                dev->waiting_zlp_ack_ep0in = 0;
1158                        }
1159                        goto finished;
1160                }
1161                if (ep->dma) {
1162                        retval = prep_dma(ep, req, gfp);
1163                        if (retval != 0)
1164                                goto finished;
1165                        /* write desc pointer to enable DMA */
1166                        if (ep->in) {
1167                                /* set HOST READY */
1168                                req->td_data->status =
1169                                        AMD_ADDBITS(req->td_data->status,
1170                                                UDC_DMA_IN_STS_BS_HOST_READY,
1171                                                UDC_DMA_IN_STS_BS);
1172                        }
1173
 1174                        /* disable RX DMA while updating descriptors */
1175                        if (!ep->in) {
1176                                /* stop RDE timer */
1177                                if (timer_pending(&udc_timer)) {
1178                                        set_rde = 0;
1179                                        mod_timer(&udc_timer, jiffies - 1);
1180                                }
1181                                /* clear RDE */
1182                                tmp = readl(&dev->regs->ctl);
1183                                tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
1184                                writel(tmp, &dev->regs->ctl);
1185                                open_rxfifo = 1;
1186
1187                                /*
1188                                 * if BNA occurred then let BNA dummy desc.
1189                                 * point to current desc.
1190                                 */
1191                                if (ep->bna_occurred) {
1192                                        VDBG(dev, "copy to BNA dummy desc.\n");
1193                                        memcpy(ep->bna_dummy_req->td_data,
1194                                                req->td_data,
1195                                                sizeof(struct udc_data_dma));
1196                                }
1197                        }
1198                        /* write desc pointer */
1199                        writel(req->td_phys, &ep->regs->desptr);
1200
1201                        /* clear NAK by writing CNAK */
1202                        if (ep->naking) {
1203                                tmp = readl(&ep->regs->ctl);
1204                                tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1205                                writel(tmp, &ep->regs->ctl);
1206                                ep->naking = 0;
1207                                UDC_QUEUE_CNAK(ep, ep->num);
1208                        }
1209
1210                        if (ep->in) {
1211                                /* enable ep irq */
1212                                tmp = readl(&dev->regs->ep_irqmsk);
1213                                tmp &= AMD_UNMASK_BIT(ep->num);
1214                                writel(tmp, &dev->regs->ep_irqmsk);
1215                        }
1216                }
1217
1218        } else if (ep->dma) {
1219
 1220                /*
 1221                 * prep_dma is not used for queued OUT ep's; it is not
 1222                 * possible in PPB modes, for chain creation reasons
 1223                 */
1224                if (ep->in) {
1225                        retval = prep_dma(ep, req, gfp);
1226                        if (retval != 0)
1227                                goto finished;
1228                }
1229        }
1230        VDBG(dev, "list_add\n");
1231        /* add request to ep queue */
1232        if (req) {
1233
1234                list_add_tail(&req->queue, &ep->queue);
1235
1236                /* open rxfifo if out data queued */
1237                if (open_rxfifo) {
1238                        /* enable DMA */
1239                        req->dma_going = 1;
1240                        udc_set_rde(dev);
1241                        if (ep->num != UDC_EP0OUT_IX)
1242                                dev->data_ep_queued = 1;
1243                }
1244                /* stop OUT naking */
1245                if (!ep->in) {
1246                        if (!use_dma && udc_rxfifo_pending) {
 1247                                DBG(dev, "udc_queue(): pending bytes in "
 1248                                        "rxfifo after nyet\n");
 1249                                /*
 1250                                 * read pending bytes after nyet:
 1251                                 * as done in the isr
 1252                                 */
1253                                if (udc_rxfifo_read(ep, req)) {
1254                                        /* finish */
1255                                        complete_req(ep, req, 0);
1256                                }
1257                                udc_rxfifo_pending = 0;
1258
1259                        }
1260                }
1261        }
1262
1263finished:
1264        spin_unlock_irqrestore(&dev->lock, iflags);
1265        return retval;
1266}
1267
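/*
 * Caller-side view (illustrative sketch of the gadget API, not part of
 * this driver): a gadget driver submits requests through usb_ep_queue(),
 * which lands in udc_queue() above. The names below are hypothetical.
 */
static inline void example_complete(struct usb_ep *ep,
                                    struct usb_request *req)
{
        /* req->status and req->actual are valid here */
}

static inline int example_submit(struct usb_ep *ep, struct usb_request *req)
{
        req->complete = example_complete;
        return usb_ep_queue(ep, req, GFP_ATOMIC);
}
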
1268/* Empty request queue of an endpoint; caller holds spinlock */
1269static void empty_req_queue(struct udc_ep *ep)
1270{
1271        struct udc_request      *req;
1272
1273        ep->halted = 1;
1274        while (!list_empty(&ep->queue)) {
1275                req = list_entry(ep->queue.next,
1276                        struct udc_request,
1277                        queue);
1278                complete_req(ep, req, -ESHUTDOWN);
1279        }
1280}
1281
1282/* Dequeues a request packet, called by gadget driver */
1283static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
1284{
1285        struct udc_ep           *ep;
1286        struct udc_request      *req;
1287        unsigned                halted;
1288        unsigned long           iflags;
1289
1290        ep = container_of(usbep, struct udc_ep, ep);
1291        if (!usbep || !usbreq || (!ep->desc && (ep->num != 0
1292                                && ep->num != UDC_EP0OUT_IX)))
1293                return -EINVAL;
1294
1295        req = container_of(usbreq, struct udc_request, req);
1296
1297        spin_lock_irqsave(&ep->dev->lock, iflags);
1298        halted = ep->halted;
1299        ep->halted = 1;
1300        /* request in processing or next one */
1301        if (ep->queue.next == &req->queue) {
1302                if (ep->dma && req->dma_going) {
1303                        if (ep->in)
1304                                ep->cancel_transfer = 1;
1305                        else {
1306                                u32 tmp;
1307                                u32 dma_sts;
1308                                /* stop potential receive DMA */
1309                                tmp = readl(&udc->regs->ctl);
1310                                writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
1311                                                        &udc->regs->ctl);
1312                                /*
1313                                 * Cancel transfer later in ISR
1314                                 * if descriptor was touched.
1315                                 */
1316                                dma_sts = AMD_GETBITS(req->td_data->status,
1317                                                        UDC_DMA_OUT_STS_BS);
1318                                if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
1319                                        ep->cancel_transfer = 1;
1320                                else {
1321                                        udc_init_bna_dummy(ep->req);
1322                                        writel(ep->bna_dummy_req->td_phys,
1323                                                &ep->regs->desptr);
1324                                }
1325                                writel(tmp, &udc->regs->ctl);
1326                        }
1327                }
1328        }
1329        complete_req(ep, req, -ECONNRESET);
1330        ep->halted = halted;
1331
1332        spin_unlock_irqrestore(&ep->dev->lock, iflags);
1333        return 0;
1334}
1335
1336/* Halt or clear halt of endpoint */
1337static int
1338udc_set_halt(struct usb_ep *usbep, int halt)
1339{
1340        struct udc_ep   *ep;
1341        u32 tmp;
1342        unsigned long iflags;
1343        int retval = 0;
1344
1345        if (!usbep)
1346                return -EINVAL;
1347
1348        pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);
1349
1350        ep = container_of(usbep, struct udc_ep, ep);
1351        if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
1352                return -EINVAL;
1353        if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1354                return -ESHUTDOWN;
1355
1356        spin_lock_irqsave(&udc_stall_spinlock, iflags);
1357        /* halt or clear halt */
1358        if (halt) {
1359                if (ep->num == 0)
1360                        ep->dev->stall_ep0in = 1;
1361                else {
1362                        /*
1363                         * set STALL
 1364                         * rxfifo empty not taken into account
1365                         */
1366                        tmp = readl(&ep->regs->ctl);
1367                        tmp |= AMD_BIT(UDC_EPCTL_S);
1368                        writel(tmp, &ep->regs->ctl);
1369                        ep->halted = 1;
1370
1371                        /* setup poll timer */
1372                        if (!timer_pending(&udc_pollstall_timer)) {
1373                                udc_pollstall_timer.expires = jiffies +
1374                                        HZ * UDC_POLLSTALL_TIMER_USECONDS
1375                                        / (1000 * 1000);
1376                                if (!stop_pollstall_timer) {
1377                                        DBG(ep->dev, "start polltimer\n");
1378                                        add_timer(&udc_pollstall_timer);
1379                                }
1380                        }
1381                }
1382        } else {
1383                /* ep is halted by set_halt() before */
1384                if (ep->halted) {
1385                        tmp = readl(&ep->regs->ctl);
1386                        /* clear stall bit */
1387                        tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
1388                        /* clear NAK by writing CNAK */
1389                        tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1390                        writel(tmp, &ep->regs->ctl);
1391                        ep->halted = 0;
1392                        UDC_QUEUE_CNAK(ep, ep->num);
1393                }
1394        }
1395        spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
1396        return retval;
1397}
1398
1399/* gadget interface */
1400static const struct usb_ep_ops udc_ep_ops = {
1401        .enable         = udc_ep_enable,
1402        .disable        = udc_ep_disable,
1403
1404        .alloc_request  = udc_alloc_request,
1405        .free_request   = udc_free_request,
1406
1407        .queue          = udc_queue,
1408        .dequeue        = udc_dequeue,
1409
1410        .set_halt       = udc_set_halt,
1411        /* fifo ops not implemented */
1412};
1413
1414/*-------------------------------------------------------------------------*/
1415
1416/* Get frame counter (not implemented) */
1417static int udc_get_frame(struct usb_gadget *gadget)
1418{
1419        return -EOPNOTSUPP;
1420}
1421
1422/* Remote wakeup gadget interface */
1423static int udc_wakeup(struct usb_gadget *gadget)
1424{
1425        struct udc              *dev;
1426
1427        if (!gadget)
1428                return -EINVAL;
1429        dev = container_of(gadget, struct udc, gadget);
1430        udc_remote_wakeup(dev);
1431
1432        return 0;
1433}
1434
1435/* gadget operations */
1436static const struct usb_gadget_ops udc_ops = {
1437        .wakeup         = udc_wakeup,
1438        .get_frame      = udc_get_frame,
1439};
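
    /*
     * Only two ops are provided: wakeup, which funnels into
     * udc_remote_wakeup(), and get_frame, which always fails with
     * -EOPNOTSUPP, so usb_gadget_frame_number() reports an error for
     * this controller.
     */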
1440
1441/* Sets up endpoint parameters, adds endpoints to the gadget's linked list */
1442static void make_ep_lists(struct udc *dev)
1443{
1444        /* make gadget ep lists */
1445        INIT_LIST_HEAD(&dev->gadget.ep_list);
1446        list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
1447                                                &dev->gadget.ep_list);
1448        list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
1449                                                &dev->gadget.ep_list);
1450        list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
1451                                                &dev->gadget.ep_list);
1452
1453        /* fifo config */
1454        dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
1455        if (dev->gadget.speed == USB_SPEED_FULL)
1456                dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
1457        else if (dev->gadget.speed == USB_SPEED_HIGH)
1458                dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
1459        dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
1460}
1461
1462/* init registers at driver load time */
1463static int startup_registers(struct udc *dev)
1464{
1465        u32 tmp;
1466
1467        /* init controller by soft reset */
1468        udc_soft_reset(dev);
1469
1470        /* mask unneeded interrupts */
1471        udc_mask_unused_interrupts(dev);
1472
1473        /* put into initial config */
1474        udc_basic_init(dev);
1475        /* link up all endpoints */
1476        udc_setup_endpoints(dev);
1477
1478        /* program speed */
1479        tmp = readl(&dev->regs->cfg);
1480        if (use_fullspeed) {
1481                tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
1482        } else {
1483                tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
1484        }
1485        writel(tmp, &dev->regs->cfg);
1486
1487        return 0;
1488}
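
    /*
     * AMD_ADDBITS(reg, val, FIELD), as used above, replaces the FIELD
     * bitfield inside reg with val (here: the SPD field of the device
     * config register), and AMD_GETBITS() is the matching extractor;
     * both are bitfield helpers built on the per-field mask/offset
     * constants from amd5536udc.h.
     */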
1489
1490/* Inits UDC context */
1491static void udc_basic_init(struct udc *dev)
1492{
1493        u32     tmp;
1494
1495        DBG(dev, "udc_basic_init()\n");
1496
1497        dev->gadget.speed = USB_SPEED_UNKNOWN;
1498
1499        /* stop RDE timer */
1500        if (timer_pending(&udc_timer)) {
1501                set_rde = 0;
1502                mod_timer(&udc_timer, jiffies - 1);
1503        }
1504        /* stop poll stall timer */
1505        if (timer_pending(&udc_pollstall_timer)) {
1506                mod_timer(&udc_pollstall_timer, jiffies - 1);
1507        }
1508        /* disable DMA */
1509        tmp = readl(&dev->regs->ctl);
1510        tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
1511        tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
1512        writel(tmp, &dev->regs->ctl);
1513
1514        /* enable dynamic CSR programming */
1515        tmp = readl(&dev->regs->cfg);
1516        tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
1517        /* set self powered */
1518        tmp |= AMD_BIT(UDC_DEVCFG_SP);
1519        /* allow remote wakeup */
1520        tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
1521        writel(tmp, &dev->regs->cfg);
1522
1523        make_ep_lists(dev);
1524
1525        dev->data_ep_enabled = 0;
1526        dev->data_ep_queued = 0;
1527}
1528
1529/* Sets initial endpoint parameters */
1530static void udc_setup_endpoints(struct udc *dev)
1531{
1532        struct udc_ep   *ep;
1533        u32     tmp;
1534        u32     reg;
1535
1536        DBG(dev, "udc_setup_endpoints()\n");
1537
1538        /* read enum speed */
1539        tmp = readl(&dev->regs->sts);
1540        tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
1541        if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH) {
1542                dev->gadget.speed = USB_SPEED_HIGH;
1543        } else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL) {
1544                dev->gadget.speed = USB_SPEED_FULL;
1545        }
1546
1547        /* set basic ep parameters */
1548        for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
1549                ep = &dev->ep[tmp];
1550                ep->dev = dev;
1551                ep->ep.name = ep_string[tmp];
1552                ep->num = tmp;
1553                /* txfifo size is calculated at enable time */
1554                ep->txfifo = dev->txfifo;
1555
1556                /* fifo size */
1557                if (tmp < UDC_EPIN_NUM) {
1558                        ep->fifo_depth = UDC_TXFIFO_SIZE;
1559                        ep->in = 1;
1560                } else {
1561                        ep->fifo_depth = UDC_RXFIFO_SIZE;
1562                        ep->in = 0;
1563
1564                }
1565                ep->regs = &dev->ep_regs[tmp];
1566                /*
1567                 * the ep is reset only if it was not enabled before, to
1568                 * avoid disabling ep interrupts when the ENUM interrupt
1569                 * occurs but the ep is not enabled by the gadget driver
1570                 */
1571                if (!ep->desc) {
1572                        ep_init(dev->regs, ep);
1573                }
1574
1575                if (use_dma) {
1576                        /*
1577                         * ep->dma is not really used, it only indicates
1578                         * that DMA is active (dma regs = dev control
1579                         * regs); candidate for removal
1580                         */
1581                        ep->dma = &dev->regs->ctl;
1582
1583                        /* nak OUT endpoints until enable - not for ep0 */
1584                        if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
1585                                                && tmp > UDC_EPIN_NUM) {
1586                                /* set NAK */
1587                                reg = readl(&dev->ep[tmp].regs->ctl);
1588                                reg |= AMD_BIT(UDC_EPCTL_SNAK);
1589                                writel(reg, &dev->ep[tmp].regs->ctl);
1590                                dev->ep[tmp].naking = 1;
1591
1592                        }
1593                }
1594        }
1595        /* EP0 max packet */
1596        if (dev->gadget.speed == USB_SPEED_FULL) {
1597                dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_FS_EP0IN_MAX_PKT_SIZE;
1598                dev->ep[UDC_EP0OUT_IX].ep.maxpacket =
1599                                                UDC_FS_EP0OUT_MAX_PKT_SIZE;
1600        } else if (dev->gadget.speed == USB_SPEED_HIGH) {
1601                dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
1602                dev->ep[UDC_EP0OUT_IX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
1603        }
1604
1605        /*
1606         * with suspend bug workaround, ep0 params for gadget driver
1607         * are set at gadget driver bind() call
1608         */
1609        dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
1610        dev->ep[UDC_EP0IN_IX].halted = 0;
1611        INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1612
1613        /* init cfg/alt/int */
1614        dev->cur_config = 0;
1615        dev->cur_intf = 0;
1616        dev->cur_alt = 0;
1617}
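
    /*
     * Note the ep[] index layout assumed by the loop above: entries
     * below UDC_EPIN_NUM are IN endpoints (ep->in = 1), the remaining
     * entries up to UDC_EP_NUM are OUT endpoints, and ep0 occupies the
     * two fixed slots UDC_EP0IN_IX and UDC_EP0OUT_IX.
     */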
1618
1619/* Bringup after Connect event: initial setup to be ready for ep0 events */
1620static void usb_connect(struct udc *dev)
1621{
1622
1623        dev_info(&dev->pdev->dev, "USB Connect\n");
1624
1625        dev->connected = 1;
1626
1627        /* put into initial config */
1628        udc_basic_init(dev);
1629
1630        /* enable device setup interrupts */
1631        udc_enable_dev_setup_interrupts(dev);
1632}
1633
1634/*
1635 * Calls the gadget with a disconnect event, resets the UDC and does the
1636 * initial bringup so that it is ready for ep0 events again
1637 */
1638static void usb_disconnect(struct udc *dev)
1639{
1640
1641        dev_info(&dev->pdev->dev, "USB Disconnect\n");
1642
1643        dev->connected = 0;
1644
1645        /* mask interrupts */
1646        udc_mask_unused_interrupts(dev);
1647
1648        /* REVISIT there doesn't seem to be a point to having this
1649         * talk to a tasklet ... do it directly, we already hold
1650         * the spinlock needed to process the disconnect.
1651         */
1652
1653        tasklet_schedule(&disconnect_tasklet);
1654}
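
    /*
     * The tasklet defers the gadget disconnect callback out of the
     * interrupt path: udc_tasklet_disconnect() below drops dev->lock
     * around driver->disconnect(), as gadget callbacks must not be
     * invoked with the device lock held.
     */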
1655
1656/* Tasklet for disconnect to be outside of interrupt context */
1657static void udc_tasklet_disconnect(unsigned long par)
1658{
1659        struct udc *dev = (struct udc *)(*((struct udc **) par));
1660        u32 tmp;
1661
1662        DBG(dev, "Tasklet disconnect\n");
1663        spin_lock_irq(&dev->lock);
1664
1665        if (dev->driver) {
1666                spin_unlock(&dev->lock);
1667                dev->driver->disconnect(&dev->gadget);
1668                spin_lock(&dev->lock);
1669
1670                /* empty queues */
1671                for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
1672                        empty_req_queue(&dev->ep[tmp]);
1673                }
1674
1675        }
1676
1677        /* disable ep0 */
1678        ep_init(dev->regs,
1679                        &dev->ep[UDC_EP0IN_IX]);
1680
1681
1682        if (!soft_reset_occured) {
1683                /* init controller by soft reset */
1684                udc_soft_reset(dev);
1685                soft_reset_occured++;
1686        }
1687
1688        /* re-enable dev interrupts */
1689        udc_enable_dev_setup_interrupts(dev);
1690        /* back to full speed ? */
1691        if (use_fullspeed) {
1692                tmp = readl(&dev->regs->cfg);
1693                tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
1694                writel(tmp, &dev->regs->cfg);
1695        }
1696
1697        spin_unlock_irq(&dev->lock);
1698}
1699
1700/* Reset the UDC core */
1701static void udc_soft_reset(struct udc *dev)
1702{
1703        unsigned long   flags;
1704
1705        DBG(dev, "Soft reset\n");
1706        /*
1707         * clear possibly pending interrupts, because interrupt
1708         * status is lost after soft reset;
1709         * first reset ep interrupt status
1710         */
1711        writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
1712        /* device int. status reset */
1713        writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);
1714
1715        spin_lock_irqsave(&udc_irq_spinlock, flags);
1716        writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
1717        readl(&dev->regs->cfg);
1718        spin_unlock_irqrestore(&udc_irq_spinlock, flags);
1719
1720}
1721
1722/* RDE timer callback to set RDE bit */
1723static void udc_timer_function(unsigned long v)
1724{
1725        u32 tmp;
1726
1727        spin_lock_irq(&udc_irq_spinlock);
1728
1729        if (set_rde > 0) {
1730                /*
1731                 * conditionally open the fifo, i.e. only if it was
1732                 * already filled on the last timer call
1733                 */
1734                if (set_rde > 1) {
1735                        /* set RDE to receive setup data */
1736                        tmp = readl(&udc->regs->ctl);
1737                        tmp |= AMD_BIT(UDC_DEVCTL_RDE);
1738                        writel(tmp, &udc->regs->ctl);
1739                        set_rde = -1;
1740                } else if (readl(&udc->regs->sts)
1741                                & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
1742                        /*
1743                         * if the fifo is empty, set up polling; do not
1744                         * just open the fifo
1745                         */
1746                        udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
1747                        if (!stop_timer) {
1748                                add_timer(&udc_timer);
1749                        }
1750                } else {
1751                        /*
1752                         * fifo contains data now: set up the timer to open
1753                         * the fifo when it expires, so that setup packets
1754                         * can still be received; when data packets get
1755                         * queued by the gadget layer, the timer is forced
1756                         * to expire with set_rde=0 (RDE is set in udc_queue())
1757                         */
1758                        set_rde++;
1760                        udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
1761                        if (!stop_timer) {
1762                                add_timer(&udc_timer);
1763                        }
1764                }
1765
1766        } else
1767                set_rde = -1; /* RDE was set by udc_queue() */
1768        spin_unlock_irq(&udc_irq_spinlock);
1769        if (stop_timer)
1770                complete(&on_exit);
1771
1772}
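
    /*
     * set_rde thus acts as a small state machine for the RDE (receive
     * DMA enable) bit, shared with udc_queue() and udc_ep0_set_rde():
     *    -1 = idle, RDE already set or handled elsewhere
     *     0 = stop, timer was kicked only to expire without action
     *     1 = poll, rxfifo still empty, re-check on the next expiry
     *    >1 = rxfifo has data, open the fifo on the next expiry
     */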
1773
1774/* Handle halt state, used in stall poll timer */
1775static void udc_handle_halt_state(struct udc_ep *ep)
1776{
1777        u32 tmp;
1778        /* re-set STALL for as long as the endpoint is halted */
1779        if (ep->halted == 1) {
1780                tmp = readl(&ep->regs->ctl);
1781                /* STALL cleared ? */
1782                if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
1783                        /*
1784                         * FIXME: MSC spec requires that stall remains
1785                         * even on receiving of CLEAR_FEATURE HALT. So
1786                         * we would set STALL again here to be compliant.
1787                         * But with current mass storage drivers this does
1788                         * not work (would produce endless host retries).
1789                         * So we clear halt on CLEAR_FEATURE.
1790                         *
1791                        DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
1792                        tmp |= AMD_BIT(UDC_EPCTL_S);
1793                        writel(tmp, &ep->regs->ctl);*/
1794
1795                        /* clear NAK by writing CNAK */
1796                        tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1797                        writel(tmp, &ep->regs->ctl);
1798                        ep->halted = 0;
1799                        UDC_QUEUE_CNAK(ep, ep->num);
1800                }
1801        }
1802}
1803
1804/* Stall timer callback to poll the S bit and handle halt state changes */
1805static void udc_pollstall_timer_function(unsigned long v)
1806{
1807        struct udc_ep *ep;
1808        int halted = 0;
1809
1810        spin_lock_irq(&udc_stall_spinlock);
1811        /*
1812         * only one IN and one OUT endpoint are handled:
1813         * IN poll stall
1814         */
1815        ep = &udc->ep[UDC_EPIN_IX];
1816        udc_handle_halt_state(ep);
1817        if (ep->halted)
1818                halted = 1;
1819        /* OUT poll stall */
1820        ep = &udc->ep[UDC_EPOUT_IX];
1821        udc_handle_halt_state(ep);
1822        if (ep->halted)
1823                halted = 1;
1824
1825        /* setup timer again when still halted */
1826        if (!stop_pollstall_timer && halted) {
1827                udc_pollstall_timer.expires = jiffies +
1828                                        HZ * UDC_POLLSTALL_TIMER_USECONDS
1829                                        / (1000 * 1000);
1830                add_timer(&udc_pollstall_timer);
1831        }
1832        spin_unlock_irq(&udc_stall_spinlock);
1833
1834        if (stop_pollstall_timer)
1835                complete(&on_pollstall_exit);
1836}
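
    /*
     * The poll period above, HZ * UDC_POLLSTALL_TIMER_USECONDS /
     * (1000 * 1000), converts a microsecond constant into jiffies;
     * completing on_pollstall_exit when stop_pollstall_timer is set
     * lets the teardown path wait for the last timer run to finish.
     */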
1837
1838/* Inits endpoint 0 so that SETUP packets are processed */
1839static void activate_control_endpoints(struct udc *dev)
1840{
1841        u32 tmp;
1842
1843        DBG(dev, "activate_control_endpoints\n");
1844
1845        /* flush fifo */
1846        tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1847        tmp |= AMD_BIT(UDC_EPCTL_F);
1848        writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1849
1850        /* set ep0 directions */
1851        dev->ep[UDC_EP0IN_IX].in = 1;
1852        dev->ep[UDC_EP0OUT_IX].in = 0;
1853
1854        /* set buffer size (tx fifo entries) of EP0_IN */
1855        tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
1856        if (dev->gadget.speed == USB_SPEED_FULL)
1857                tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
1858                                        UDC_EPIN_BUFF_SIZE);
1859        else if (dev->gadget.speed == USB_SPEED_HIGH)
1860                tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
1861                                        UDC_EPIN_BUFF_SIZE);
1862        writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
1863
1864        /* set max packet size of EP0_IN */
1865        tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
1866        if (dev->gadget.speed == USB_SPEED_FULL)
1867                tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
1868                                        UDC_EP_MAX_PKT_SIZE);
1869        else if (dev->gadget.speed == USB_SPEED_HIGH)
1870                tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
1871                                UDC_EP_MAX_PKT_SIZE);
1872        writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
1873
1874        /* set max packet size of EP0_OUT */
1875        tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
1876        if (dev->gadget.speed == USB_SPEED_FULL)
1877                tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
1878                                        UDC_EP_MAX_PKT_SIZE);
1879        else if (dev->gadget.speed == USB_SPEED_HIGH)
1880                tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
1881                                        UDC_EP_MAX_PKT_SIZE);
1882        writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
1883
1884        /* set max packet size of EP0 in UDC CSR */
1885        tmp = readl(&dev->csr->ne[0]);
1886        if (dev->gadget.speed == USB_SPEED_FULL)
1887                tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
1888                                        UDC_CSR_NE_MAX_PKT);
1889        else if (dev->gadget.speed == USB_SPEED_HIGH)
1890                tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
1891                                        UDC_CSR_NE_MAX_PKT);
1892        writel(tmp, &dev->csr->ne[0]);
1893
1894        if (use_dma) {
1895                dev->ep[UDC_EP0OUT_IX].td->status |=
1896                        AMD_BIT(UDC_DMA_OUT_STS_L);
1897                /* write dma desc address */
1898                writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
1899                        &dev->ep[UDC_EP0OUT_IX].regs->subptr);
1900                writel(dev->ep[UDC_EP0OUT_IX].td_phys,
1901                        &dev->ep[UDC_EP0OUT_IX].regs->desptr);
1902                /* stop RDE timer */
1903                if (timer_pending(&udc_timer)) {
1904                        set_rde = 0;
1905                        mod_timer(&udc_timer, jiffies - 1);
1906                }
1907                /* stop pollstall timer */
1908                if (timer_pending(&udc_pollstall_timer)) {
1909                        mod_timer(&udc_pollstall_timer, jiffies - 1);
1910                }
1911                /* enable DMA */
1912                tmp = readl(&dev->regs->ctl);
1913                tmp |= AMD_BIT(UDC_DEVCTL_MODE)
1914                                | AMD_BIT(UDC_DEVCTL_RDE)
1915                                | AMD_BIT(UDC_DEVCTL_TDE);
1916                if (use_dma_bufferfill_mode) {
1917                        tmp |= AMD_BIT(UDC_DEVCTL_BF);
1918                } else if (use_dma_ppb_du) {
1919                        tmp |= AMD_BIT(UDC_DEVCTL_DU);
1920                }
1921                writel(tmp, &dev->regs->ctl);
1922        }
1923
1924        /* clear NAK by writing CNAK for EP0IN */
1925        tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1926        tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1927        writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1928        dev->ep[UDC_EP0IN_IX].naking = 0;
1929        UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
1930
1931        /* clear NAK by writing CNAK for EP0OUT */
1932        tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
1933        tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1934        writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
1935        dev->ep[UDC_EP0OUT_IX].naking = 0;
1936        UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
1937}
1938
1939/* Make endpoint 0 ready for control traffic */
1940static int setup_ep0(struct udc *dev)
1941{
1942        activate_control_endpoints(dev);
1943        /* enable ep0 interrupts */
1944        udc_enable_ep0_interrupts(dev);
1945        /* enable device setup interrupts */
1946        udc_enable_dev_setup_interrupts(dev);
1947
1948        return 0;
1949}
1950
1951/* Called by gadget driver to register itself */
1952int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1953{
1954        struct udc              *dev = udc;
1955        int                     retval;
1956        u32 tmp;
1957
1958        if (!driver || !driver->bind || !driver->setup
1959                        || driver->speed != USB_SPEED_HIGH)
1960                return -EINVAL;
1961        if (!dev)
1962                return -ENODEV;
1963        if (dev->driver)
1964                return -EBUSY;
1965
1966        driver->driver.bus = NULL;
1967        dev->driver = driver;
1968        dev->gadget.dev.driver = &driver->driver;
1969
1970        retval = driver->bind(&dev->gadget);
1971
1972        /* Some gadget drivers use both ep0 directions.
1973         * NOTE: to gadget driver, ep0 is just one endpoint...
1974         */
1975        dev->ep[UDC_EP0OUT_IX].ep.driver_data =
1976                dev->ep[UDC_EP0IN_IX].ep.driver_data;
1977
1978        if (retval) {
1979                DBG(dev, "binding to %s returning %d\n",
1980                                driver->driver.name, retval);
1981                dev->driver = NULL;
1982                dev->gadget.dev.driver = NULL;
1983                return retval;
1984        }
1985
1986        /* get ready for ep0 traffic */
1987        setup_ep0(dev);
1988
1989        /* clear SD */
1990        tmp = readl(&dev->regs->ctl);
1991        tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
1992        writel(tmp, &dev->regs->ctl);
1993
1994        usb_connect(dev);
1995
1996        return 0;
1997}
1998EXPORT_SYMBOL(usb_gadget_register_driver);
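
    /*
     * Minimal registration sketch from a gadget driver's init path;
     * every name except the API call itself is hypothetical:
     *
     *    static struct usb_gadget_driver my_driver = {
     *            .speed  = USB_SPEED_HIGH,  (anything else is rejected)
     *            .bind   = my_bind,         (mandatory, checked above)
     *            .setup  = my_setup,        (mandatory, checked above)
     *            .unbind = my_unbind,       (needed for unregistering)
     *            .disconnect = my_disconnect,
     *    };
     *    retval = usb_gadget_register_driver(&my_driver);
     */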
1999
2000/* shutdown requests and disconnect from gadget */
2001static void
2002shutdown(struct udc *dev, struct usb_gadget_driver *driver)
2003__releases(dev->lock)
2004__acquires(dev->lock)
2005{
2006        int tmp;
2007
2008        /* empty queues and init hardware */
2009        udc_basic_init(dev);
2010        for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
2011                empty_req_queue(&dev->ep[tmp]);
2012        }
2013
2014        if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
2015                spin_unlock(&dev->lock);
2016                driver->disconnect(&dev->gadget);
2017                spin_lock(&dev->lock);
2018        }
2019        /* init */
2020        udc_setup_endpoints(dev);
2021}
2022
2023/* Called by gadget driver to unregister itself */
2024int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
2025{
2026        struct udc      *dev = udc;
2027        unsigned long   flags;
2028        u32 tmp;
2029
2030        if (!dev)
2031                return -ENODEV;
2032        if (!driver || driver != dev->driver || !driver->unbind)
2033                return -EINVAL;
2034
2035        spin_lock_irqsave(&dev->lock, flags);
2036        udc_mask_unused_interrupts(dev);
2037        shutdown(dev, driver);
2038        spin_unlock_irqrestore(&dev->lock, flags);
2039
2040        driver->unbind(&dev->gadget);
2041        dev->driver = NULL;
2042
2043        /* set SD */
2044        tmp = readl(&dev->regs->ctl);
2045        tmp |= AMD_BIT(UDC_DEVCTL_SD);
2046        writel(tmp, &dev->regs->ctl);
2047
2048
2049        DBG(dev, "%s: unregistered\n", driver->driver.name);
2050
2051        return 0;
2052}
2053EXPORT_SYMBOL(usb_gadget_unregister_driver);
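
    /*
     * Setting the SD bit here (soft disconnect, judging by its use)
     * drops the connection to the host in hardware;
     * usb_gadget_register_driver() clears it again once a new gadget
     * driver has bound.
     */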
2054
2055
2056/* Clear pending NAK bits */
2057static void udc_process_cnak_queue(struct udc *dev)
2058{
2059        u32 tmp;
2060        u32 reg;
2061
2062        /* check IN endpoints */
2063        DBG(dev, "CNAK pending queue processing\n");
2064        for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
2065                if (cnak_pending & (1 << tmp)) {
2066                        DBG(dev, "CNAK pending for ep%d\n", tmp);
2067                        /* clear NAK by writing CNAK */
2068                        reg = readl(&dev->ep[tmp].regs->ctl);
2069                        reg |= AMD_BIT(UDC_EPCTL_CNAK);
2070                        writel(reg, &dev->ep[tmp].regs->ctl);
2071                        dev->ep[tmp].naking = 0;
2072                        UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
2073                }
2074        }
2075        /* ...  and ep0out */
2076        if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
2077                DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
2078                /* clear NAK by writing CNAK */
2079                reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2080                reg |= AMD_BIT(UDC_EPCTL_CNAK);
2081                writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2082                dev->ep[UDC_EP0OUT_IX].naking = 0;
2083                UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
2084                                dev->ep[UDC_EP0OUT_IX].num);
2085        }
2086}
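
    /*
     * cnak_pending appears to be a bitmask of endpoints whose NAK could
     * not be cleared immediately: UDC_QUEUE_CNAK() (see amd5536udc.h)
     * tracks endpoints in it based on their naking state, and the
     * interrupt handlers below drain the queue once the rxfifo is empty.
     */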
2087
2088/* Enable RX DMA after a setup packet */
2089static void udc_ep0_set_rde(struct udc *dev)
2090{
2091        if (use_dma) {
2092                /*
2093                 * only enable RXDMA when no data endpoint enabled
2094                 * or data is queued
2095                 */
2096                if (!dev->data_ep_enabled || dev->data_ep_queued) {
2097                        udc_set_rde(dev);
2098                } else {
2099                        /*
2100                         * setup timer for enabling RDE (to not enable
2101                         * RXFIFO DMA for data endpoints too early)
2102                         */
2103                        if (set_rde != 0 && !timer_pending(&udc_timer)) {
2104                                udc_timer.expires =
2105                                        jiffies + HZ/UDC_RDE_TIMER_DIV;
2106                                set_rde = 1;
2107                                if (!stop_timer) {
2108                                        add_timer(&udc_timer);
2109                                }
2110                        }
2111                }
2112        }
2113}
2114
2115
2116/* Interrupt handler for data OUT traffic */
2117static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
2118{
2119        irqreturn_t             ret_val = IRQ_NONE;
2120        u32                     tmp;
2121        struct udc_ep           *ep;
2122        struct udc_request      *req;
2123        unsigned int            count;
2124        struct udc_data_dma     *td = NULL;
2125        unsigned                dma_done;
2126
2127        VDBG(dev, "ep%d irq\n", ep_ix);
2128        ep = &dev->ep[ep_ix];
2129
2130        tmp = readl(&ep->regs->sts);
2131        if (use_dma) {
2132                /* BNA event ? */
2133                if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
2134                        DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
2135                                        ep->num, readl(&ep->regs->desptr));
2136                        /* clear BNA */
2137                        writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
2138                        if (!ep->cancel_transfer)
2139                                ep->bna_occurred = 1;
2140                        else
2141                                ep->cancel_transfer = 0;
2142                        ret_val = IRQ_HANDLED;
2143                        goto finished;
2144                }
2145        }
2146        /* HE event ? */
2147        if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
2148                dev_err(&dev->pdev->dev, "HE ep%dout occurred\n", ep->num);
2149
2150                /* clear HE */
2151                writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
2152                ret_val = IRQ_HANDLED;
2153                goto finished;
2154        }
2155
2156        if (!list_empty(&ep->queue)) {
2157
2158                /* next request */
2159                req = list_entry(ep->queue.next,
2160                        struct udc_request, queue);
2161        } else {
2162                req = NULL;
2163                udc_rxfifo_pending = 1;
2164        }
2165        VDBG(dev, "req = %p\n", req);
2166        /* fifo mode */
2167        if (!use_dma) {
2168
2169                /* read fifo */
2170                if (req && udc_rxfifo_read(ep, req)) {
2171                        ret_val = IRQ_HANDLED;
2172
2173                        /* finish */
2174                        complete_req(ep, req, 0);
2175                        /* next request */
2176                        if (!list_empty(&ep->queue) && !ep->halted) {
2177                                req = list_entry(ep->queue.next,
2178                                        struct udc_request, queue);
2179                        } else
2180                                req = NULL;
2181                }
2182
2183        /* DMA */
2184        } else if (!ep->cancel_transfer && req != NULL) {
2185                ret_val = IRQ_HANDLED;
2186
2187                /* check for DMA done */
2188                if (!use_dma_ppb) {
2189                        dma_done = AMD_GETBITS(req->td_data->status,
2190                                                UDC_DMA_OUT_STS_BS);
2191                /* packet per buffer mode - rx bytes */
2192                } else {
2193                        /*
2194                         * if BNA occurred then recover desc. from
2195                         * BNA dummy desc.
2196                         */
2197                        if (ep->bna_occurred) {
2198                                VDBG(dev, "Recover desc. from BNA dummy\n");
2199                                memcpy(req->td_data, ep->bna_dummy_req->td_data,
2200                                                sizeof(struct udc_data_dma));
2201                                ep->bna_occurred = 0;
2202                                udc_init_bna_dummy(ep->req);
2203                        }
2204                        td = udc_get_last_dma_desc(req);
2205                        dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
2206                }
2207                if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
2208                        /* buffer fill mode - rx bytes */
2209                        if (!use_dma_ppb) {
2210                                /* number of bytes received */
2211                                count = AMD_GETBITS(req->td_data->status,
2212                                                UDC_DMA_OUT_STS_RXBYTES);
2213                                VDBG(dev, "rx bytes=%u\n", count);
2214                        /* packet per buffer mode - rx bytes */
2215                        } else {
2216                                VDBG(dev, "req->td_data=%p\n", req->td_data);
2217                                VDBG(dev, "last desc = %p\n", td);
2218                                /* number of bytes received */
2219                                if (use_dma_ppb_du) {
2220                                        /* every desc. counts bytes */
2221                                        count = udc_get_ppbdu_rxbytes(req);
2222                                } else {
2223                                        /* last desc. counts bytes */
2224                                        count = AMD_GETBITS(td->status,
2225                                                UDC_DMA_OUT_STS_RXBYTES);
2226                                        if (!count && req->req.length
2227                                                == UDC_DMA_MAXPACKET) {
2228                                                /*
2229                                                 * on 64k packets the RXBYTES
2230                                                 * field is zero
2231                                                 */
2232                                                count = UDC_DMA_MAXPACKET;
2233                                        }
2234                                }
2235                                VDBG(dev, "last desc rx bytes=%u\n", count);
2236                        }
2237
2238                        tmp = req->req.length - req->req.actual;
2239                        if (count > tmp) {
2240                                if ((tmp % ep->ep.maxpacket) != 0) {
2241                                        DBG(dev, "%s: rx %db, space=%db\n",
2242                                                ep->ep.name, count, tmp);
2243                                        req->req.status = -EOVERFLOW;
2244                                }
2245                                count = tmp;
2246                        }
2247                        req->req.actual += count;
2248                        req->dma_going = 0;
2249                        /* complete request */
2250                        complete_req(ep, req, 0);
2251
2252                        /* next request */
2253                        if (!list_empty(&ep->queue) && !ep->halted) {
2254                                req = list_entry(ep->queue.next,
2255                                        struct udc_request,
2256                                        queue);
2257                                /*
2258                                 * DMA may be already started by udc_queue()
2259                                 * called by the gadget driver's completion
2260                                 * routine. This happens when queue
2261                                 * holds one request only.
2262                                 */
2263                                if (req->dma_going == 0) {
2264                                        /* next dma */
2265                                        if (prep_dma(ep, req, GFP_ATOMIC) != 0)
2266                                                goto finished;
2267                                        /* write desc pointer */
2268                                        writel(req->td_phys,
2269                                                &ep->regs->desptr);
2270                                        req->dma_going = 1;
2271                                        /* enable DMA */
2272                                        udc_set_rde(dev);
2273                                }
2274                        } else {
2275                                /*
2276                                 * implant BNA dummy descriptor to allow
2277                                 * RXFIFO opening by RDE
2278                                 */
2279                                if (ep->bna_dummy_req) {
2280                                        /* write desc pointer */
2281                                        writel(ep->bna_dummy_req->td_phys,
2282                                                &ep->regs->desptr);
2283                                        ep->bna_occurred = 0;
2284                                }
2285
2286                                /*
2287                                 * schedule timer for setting RDE if queue
2288                                 * remains empty, to allow ep0 packets to pass
2289                                 * through
2290                                 */
2291                                if (set_rde != 0
2292                                                && !timer_pending(&udc_timer)) {
2293                                        udc_timer.expires =
2294                                                jiffies
2295                                                + HZ*UDC_RDE_TIMER_SECONDS;
2296                                        set_rde = 1;
2297                                        if (!stop_timer) {
2298                                                add_timer(&udc_timer);
2299                                        }
2300                                }
2301                                if (ep->num != UDC_EP0OUT_IX)
2302                                        dev->data_ep_queued = 0;
2303                        }
2304
2305                } else {
2306                        /*
2307                         * RX DMA must be reenabled for each desc in PPBDU mode
2308                         * and must be enabled for PPBNDU mode in case of BNA
2309                        */
2310                        udc_set_rde(dev);
2311                }
2312
2313        } else if (ep->cancel_transfer) {
2314                ret_val = IRQ_HANDLED;
2315                ep->cancel_transfer = 0;
2316        }
2317
2318        /* check pending CNAKS */
2319        if (cnak_pending) {
2320                /* CNAK processing only when rxfifo is empty */
2321                if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
2322                        udc_process_cnak_queue(dev);
2323                }
2324        }
2325
2326        /* clear OUT bits in ep status */
2327        writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
2328finished:
2329        return ret_val;
2330}
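
    /*
     * RX byte counting in the handler above depends on the DMA mode:
     * in buffer fill mode the controller writes RXBYTES into the single
     * descriptor's status; in packet-per-buffer mode either every
     * descriptor counts bytes (PPB-DU, summed via
     * udc_get_ppbdu_rxbytes()) or only the last descriptor does
     * (PPB-NDU), where RXBYTES reads 0 for a full 64k
     * (UDC_DMA_MAXPACKET) transfer.
     */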
2331
2332/* Interrupt handler for data IN traffic */
2333static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
2334{
2335        irqreturn_t ret_val = IRQ_NONE;
2336        u32 tmp;
2337        u32 epsts;
2338        struct udc_ep *ep;
2339        struct udc_request *req;
2340        struct udc_data_dma *td;
2341        unsigned dma_done;
2342        unsigned len;
2343
2344        ep = &dev->ep[ep_ix];
2345
2346        epsts = readl(&ep->regs->sts);
2347        if (use_dma) {
2348                /* BNA ? */
2349                if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
2350                        dev_err(&dev->pdev->dev,
2351                                "BNA ep%din occurred - DESPTR = %08lx\n",
2352                                ep->num,
2353                                (unsigned long) readl(&ep->regs->desptr));
2354
2355                        /* clear BNA */
2356                        writel(epsts, &ep->regs->sts);
2357                        ret_val = IRQ_HANDLED;
2358                        goto finished;
2359                }
2360        }
2361        /* HE event ? */
2362        if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
2363                dev_err(&dev->pdev->dev,
2364                        "HE ep%din occurred - DESPTR = %08lx\n",
2365                        ep->num, (unsigned long) readl(&ep->regs->desptr));
2366
2367                /* clear HE */
2368                writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
2369                ret_val = IRQ_HANDLED;
2370                goto finished;
2371        }
2372
2373        /* DMA completion */
2374        if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
2375                VDBG(dev, "TDC set- completion\n");
2376                ret_val = IRQ_HANDLED;
2377                if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
2378                        req = list_entry(ep->queue.next,
2379                                        struct udc_request, queue);
2380                        if (req) {
2381                                /*
2382                                 * length bytes transferred;
2383                                 * check dma done of last desc. in PPBDU mode
2384                                 */
2385                                if (use_dma_ppb_du) {
2386                                        td = udc_get_last_dma_desc(req);
2387                                        if (td) {
2388                                                dma_done =
2389                                                        AMD_GETBITS(td->status,
2390                                                        UDC_DMA_IN_STS_BS);
2391                                                /* don't care DMA done */
2392                                                req->req.actual =
2393                                                        req->req.length;
2394                                        }
2395                                } else {
2396                                        /* assume all bytes transferred */
2397                                        req->req.actual = req->req.length;
2398                                }
2399
2400                                if (req->req.actual == req->req.length) {
2401                                        /* complete req */
2402                                        complete_req(ep, req, 0);
2403                                        req->dma_going = 0;
2404                                        /* further request available ? */
2405                                        if (list_empty(&ep->queue)) {
2406                                                /* disable interrupt */
2407                                                tmp = readl(
2408                                                        &dev->regs->ep_irqmsk);
2409                                                tmp |= AMD_BIT(ep->num);
2410                                                writel(tmp,
2411                                                        &dev->regs->ep_irqmsk);
2412                                        }
2413
2414                                }
2415                        }
2416                }
2417                ep->cancel_transfer = 0;
2418
2419        }
2420        /*
2421         * status reg has IN bit set and TDC not set: if TDC was handled,
2422         * IN must not be handled (UDC defect?)
2423         */
2424        if ((epsts & AMD_BIT(UDC_EPSTS_IN))
2425                        && !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
2426                ret_val = IRQ_HANDLED;
2427                if (!list_empty(&ep->queue)) {
2428                        /* next request */
2429                        req = list_entry(ep->queue.next,
2430                                        struct udc_request, queue);
2431                        /* FIFO mode */
2432                        if (!use_dma) {
2433                                /* write fifo */
2434                                udc_txfifo_write(ep, &req->req);
2435                                len = req->req.length - req->req.actual;
2436                                if (len > ep->ep.maxpacket)
2437                                        len = ep->ep.maxpacket;
2438                                req->req.actual += len;
2439                                if (req->req.actual == req->req.length
2440                                        || (len != ep->ep.maxpacket)) {
2441                                        /* complete req */
2442                                        complete_req(ep, req, 0);
2443                                }
2444                        /* DMA */
2445                        } else if (req && !req->dma_going) {
2446                                VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
2447                                        req, req->td_data);
2448                                if (req->td_data) {
2449
2450                                        req->dma_going = 1;
2451
2452                                        /*
2453                                         * unset L bit of first desc.
2454                                         * for chain
2455                                         */
2456                                        if (use_dma_ppb && req->req.length >
2457                                                        ep->ep.maxpacket) {
2458                                                req->td_data->status &=
2459                                                        AMD_CLEAR_BIT(
2460                                                        UDC_DMA_IN_STS_L);
2461                                        }
2462
2463                                        /* write desc pointer */
2464                                        writel(req->td_phys, &ep->regs->desptr);
2465
2466                                        /* set HOST READY */
2467                                        req->td_data->status =
2468                                                AMD_ADDBITS(
2469                                                req->td_data->status,
2470                                                UDC_DMA_IN_STS_BS_HOST_READY,
2471                                                UDC_DMA_IN_STS_BS);
2472
2473                                        /* set poll demand bit */
2474                                        tmp = readl(&ep->regs->ctl);
2475                                        tmp |= AMD_BIT(UDC_EPCTL_P);
2476                                        writel(tmp, &ep->regs->ctl);
2477                                }
2478                        }
2479
2480                }
2481        }
2482        /* clear status bits */
2483        writel(epsts, &ep->regs->sts);
2484
2485finished:
2486        return ret_val;
2487
2488}
2489
2490/* Interrupt handler for Control OUT traffic */
2491static irqreturn_t udc_control_out_isr(struct udc *dev)
2492__releases(dev->lock)
2493__acquires(dev->lock)
2494{
2495        irqreturn_t ret_val = IRQ_NONE;
2496        u32 tmp;
2497        int setup_supported;
2498        u32 count;
2499        int set = 0;
2500        struct udc_ep   *ep;
2501        struct udc_ep   *ep_tmp;
2502
2503        ep = &dev->ep[UDC_EP0OUT_IX];
2504
2505        /* clear irq */
2506        writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);
2507
2508        tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2509        /* check BNA and clear if set */
2510        if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
2511                VDBG(dev, "ep0: BNA set\n");
2512                writel(AMD_BIT(UDC_EPSTS_BNA),
2513                        &dev->ep[UDC_EP0OUT_IX].regs->sts);
2514                ep->bna_occurred = 1;
2515                ret_val = IRQ_HANDLED;
2516                goto finished;
2517        }
2518
2519        /* type of data: SETUP or DATA 0 bytes */
2520        tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
2521        VDBG(dev, "data_typ = %x\n", tmp);
2522
2523        /* setup data */
2524        if (tmp == UDC_EPSTS_OUT_SETUP) {
2525                ret_val = IRQ_HANDLED;
2526
2527                ep->dev->stall_ep0in = 0;
2528                dev->waiting_zlp_ack_ep0in = 0;
2529
2530                /* set NAK for EP0_IN */
2531                tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2532                tmp |= AMD_BIT(UDC_EPCTL_SNAK);
2533                writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2534                dev->ep[UDC_EP0IN_IX].naking = 1;
2535                /* get setup data */
2536                if (use_dma) {
2537
2538                        /* clear OUT bits in ep status */
2539                        writel(UDC_EPSTS_OUT_CLEAR,
2540                                &dev->ep[UDC_EP0OUT_IX].regs->sts);
2541
2542                        setup_data.data[0] =
2543                                dev->ep[UDC_EP0OUT_IX].td_stp->data12;
2544                        setup_data.data[1] =
2545                                dev->ep[UDC_EP0OUT_IX].td_stp->data34;
2546                        /* set HOST READY */
2547                        dev->ep[UDC_EP0OUT_IX].td_stp->status =
2548                                        UDC_DMA_STP_STS_BS_HOST_READY;
2549                } else {
2550                        /* read fifo */
2551                        udc_rxfifo_read_dwords(dev, setup_data.data, 2);
2552                }
2553
2554                /* determine direction of control data */
2555                if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
2556                        dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
2557                        /* enable RDE */
2558                        udc_ep0_set_rde(dev);
2559                        set = 0;
2560                } else {
2561                        dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
2562                        /*
2563                         * implant BNA dummy descriptor to allow RXFIFO opening
2564                         * by RDE
2565                         */
2566                        if (ep->bna_dummy_req) {
2567                                /* write desc pointer */
2568                                writel(ep->bna_dummy_req->td_phys,
2569                                        &dev->ep[UDC_EP0OUT_IX].regs->desptr);
2570                                ep->bna_occurred = 0;
2571                        }
2572
2573                        set = 1;
2574                        dev->ep[UDC_EP0OUT_IX].naking = 1;
2575                        /*
2576                         * setup timer for enabling RDE (to not enable
2577                         * RXFIFO DMA for data too early)
2578                         */
2579                        set_rde = 1;
2580                        if (!timer_pending(&udc_timer)) {
2581                                udc_timer.expires = jiffies +
2582                                                        HZ/UDC_RDE_TIMER_DIV;
2583                                if (!stop_timer) {
2584                                        add_timer(&udc_timer);
2585                                }
2586                        }
2587                }
2588
2589                /*
2590                 * mass storage reset must be processed here because
2591                 * next packet may be a CLEAR_FEATURE HALT which would not
2592                 * clear the stall bit when no STALL handshake was received
2593                 * before (autostall can cause this)
2594                 */
2595                if (setup_data.data[0] == UDC_MSCRES_DWORD0
2596                                && setup_data.data[1] == UDC_MSCRES_DWORD1) {
2597                        DBG(dev, "MSC Reset\n");
2598                        /*
2599                         * clear stall bits
2600                         * only one IN and OUT endpoints are handled
2601                         */
2602                        ep_tmp = &udc->ep[UDC_EPIN_IX];
2603                        udc_set_halt(&ep_tmp->ep, 0);
2604                        ep_tmp = &udc->ep[UDC_EPOUT_IX];
2605                        udc_set_halt(&ep_tmp->ep, 0);
2606                }
2607
2608                /* call gadget with setup data received */
2609                spin_unlock(&dev->lock);
2610                setup_supported = dev->driver->setup(&dev->gadget,
2611                                                &setup_data.request);
2612                spin_lock(&dev->lock);
2613
2614                tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2615                /* ep0 in returns data (not zlp) on IN phase */
2616                if (setup_supported >= 0 && setup_supported <
2617                                UDC_EP0IN_MAXPACKET) {
2618                        /* clear NAK by writing CNAK in EP0_IN */
2619                        tmp |= AMD_BIT(UDC_EPCTL_CNAK);
2620                        writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2621                        dev->ep[UDC_EP0IN_IX].naking = 0;
2622                        UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
2623
2624                /* if unsupported request then stall */
2625                } else if (setup_supported < 0) {
2626                        tmp |= AMD_BIT(UDC_EPCTL_S);
2627                        writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2628                } else
2629                        dev->waiting_zlp_ack_ep0in = 1;
2630
2631
2632                /* clear NAK by writing CNAK in EP0_OUT */
2633                if (!set) {
2634                        tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2635                        tmp |= AMD_BIT(UDC_EPCTL_CNAK);
2636                        writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2637                        dev->ep[UDC_EP0OUT_IX].naking = 0;
2638                        UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
2639                }
2640
2641                if (!use_dma) {
2642                        /* clear OUT bits in ep status */
2643                        writel(UDC_EPSTS_OUT_CLEAR,
2644                                &dev->ep[UDC_EP0OUT_IX].regs->sts);
2645                }
2646
2647        /* data packet 0 bytes */
2648        } else if (tmp == UDC_EPSTS_OUT_DATA) {
2649                /* clear OUT bits in ep status */
2650                writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);
2651
2652                /* get setup data: only 0 packet */
2653                if (use_dma) {
2654                        /* no req if 0 packet, just reactivate */
2655                        if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
2656                                VDBG(dev, "ZLP\n");
2657
2658                                /* set HOST READY */
2659                                dev->ep[UDC_EP0OUT_IX].td->status =
2660                                        AMD_ADDBITS(
2661                                        dev->ep[UDC_EP0OUT_IX].td->status,
2662                                        UDC_DMA_OUT_STS_BS_HOST_READY,
2663                                        UDC_DMA_OUT_STS_BS);
2664                                /* enable RDE */
2665                                udc_ep0_set_rde(dev);
2666                                ret_val = IRQ_HANDLED;
2667
2668                        } else {
2669                                /* control write */
2670                                ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
2671                                /* re-program desc. pointer for possible ZLPs */
2672                                writel(dev->ep[UDC_EP0OUT_IX].td_phys,
2673                                        &dev->ep[UDC_EP0OUT_IX].regs->desptr);
2674                                /* enable RDE */
2675                                udc_ep0_set_rde(dev);
2676                        }
2677                } else {
2678
2679                        /* number of bytes received */
2680                        count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2681                        count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
2682                        /* out data for fifo mode not working */
2683                        count = 0;
2684
2685                        /* 0 packet or real data ? */
2686                        if (count != 0) {
2687                                ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
2688                        } else {
2689                                /* dummy read confirm */
2690                                readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
2691                                ret_val = IRQ_HANDLED;
2692                        }
2693                }
2694        }
2695
2696        /* check pending CNAKS */
2697        if (cnak_pending) {
2698                /* CNAK processing only when rxfifo is empty */
2699                if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
2700                        udc_process_cnak_queue(dev);
2701                }
2702        }
2703
2704finished:
2705        return ret_val;
2706}
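
    /*
     * setup_data, as read above, is a union overlaying the raw 8 setup
     * bytes with the decoded control request, presumably along these
     * lines (see amd5536udc.h):
     *
     *    union udc_setup_data {
     *            u32                     data[2];
     *            struct usb_ctrlrequest  request;
     *    };
     */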
2707
2708/* Interrupt handler for Control IN traffic */
2709static irqreturn_t udc_control_in_isr(struct udc *dev)
2710{
2711        irqreturn_t ret_val = IRQ_NONE;
2712        u32 tmp;
2713        struct udc_ep *ep;
2714        struct udc_request *req;
2715        unsigned len;
2716
2717        ep = &dev->ep[UDC_EP0IN_IX];
2718
2719        /* clear irq */
2720        writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);
2721
2722        tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
2723        /* DMA completion */
2724        if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
2725                VDBG(dev, "isr: TDC clear\n");
2726                ret_val = IRQ_HANDLED;
2727
2728                /* clear TDC bit */
2729                writel(AMD_BIT(UDC_EPSTS_TDC),
2730                                &dev->ep[UDC_EP0IN_IX].regs->sts);
2731
2732        /* status reg has IN bit set ? */
2733        } else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
2734                ret_val = IRQ_HANDLED;
2735
2736                if (ep->dma) {
2737                        /* clear IN bit */
2738                        writel(AMD_BIT(UDC_EPSTS_IN),
2739                                &dev->ep[UDC_EP0IN_IX].regs->sts);
2740                }
2741                if (dev->stall_ep0in) {
2742                        DBG(dev, "stall ep0in\n");
2743                        /* halt ep0in */
2744                        tmp = readl(&ep->regs->ctl);
2745                        tmp |= AMD_BIT(UDC_EPCTL_S);
2746                        writel(tmp, &ep->regs->ctl);
2747                } else {
2748                        if (!list_empty(&ep->queue)) {
2749                                /* next request */
2750                                req = list_entry(ep->queue.next,
2751                                                struct udc_request, queue);
2752
2753                                if (ep->dma) {
2754                                        /* write desc pointer */
2755                                        writel(req->td_phys, &ep->regs->desptr);
2756                                        /* set HOST READY */
2757                                        req->td_data->status =
2758                                                AMD_ADDBITS(
2759                                                req->td_data->status,
2760                                                UDC_DMA_STP_STS_BS_HOST_READY,
2761                                                UDC_DMA_STP_STS_BS);
2762
2763                                        /* set poll demand bit */
2764                                        tmp = readl(
2765                                            &dev->ep[UDC_EP0IN_IX].regs->ctl);
2766                                        tmp |= AMD_BIT(UDC_EPCTL_P);
2767                                        writel(tmp,
2768                                            &dev->ep[UDC_EP0IN_IX].regs->ctl);
2769
2770                                        /* all bytes will be transferred */
2771                                        req->req.actual = req->req.length;
2772
2773                                        /* complete req */
2774                                        complete_req(ep, req, 0);
2775
2776                                } else {
2777                                        /* write fifo */
2778                                        udc_txfifo_write(ep, &req->req);
2779
2780                                        /* length bytes transferred */
2781                                        len = req->req.length - req->req.actual;
2782                                        if (len > ep->ep.maxpacket)
2783                                                len = ep->ep.maxpacket;
2784
2785                                        req->req.actual += len;
2786                                        if (req->req.actual == req->req.length
2787                                                || (len != ep->ep.maxpacket)) {
2788                                                /* complete req */
2789                                                complete_req(ep, req, 0);
2790                                        }
2791                                }
2792
2793                        }
2794                }
2795                ep->halted = 0;
2796                dev->stall_ep0in = 0;
2797                if (!ep->dma) {
2798                        /* clear IN bit */
2799                        writel(AMD_BIT(UDC_EPSTS_IN),
2800                                &dev->ep[UDC_EP0IN_IX].regs->sts);
2801                }
2802        }
2803
2804        return ret_val;
2805}
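    /*
     * The PIO branch above follows the usual completion rule; as a
     * sketch (pseudo-code, not driver code):
     *
     *     len = min(req.length - req.actual, ep.maxpacket);
     *     req.actual += len;
     *     if (req.actual == req.length || len < ep.maxpacket)
     *             complete_req(ep, req, 0);
     *
     * i.e. a request completes once all bytes are written, or early
     * when a short packet (less than maxpacket) ends the transfer.
     */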
2806
2807
2808/* Interrupt handler for global device events */
2809static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
2810__releases(dev->lock)
2811__acquires(dev->lock)
2812{
2813        irqreturn_t ret_val = IRQ_NONE;
2814        u32 tmp;
2815        u32 cfg;
2816        struct udc_ep *ep;
2817        u16 i;
2818        u8 udc_csr_epix;
2819
2820        /* SET_CONFIG irq ? */
2821        if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
2822                ret_val = IRQ_HANDLED;
2823
2824                /* read config value */
2825                tmp = readl(&dev->regs->sts);
2826                cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
2827                DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
2828                dev->cur_config = cfg;
2829                dev->set_cfg_not_acked = 1;
2830
2831                /* make usb request for gadget driver */
2832                memset(&setup_data, 0, sizeof(union udc_setup_data));
2833                setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
2834                setup_data.request.wValue = dev->cur_config;
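                    /*
                     * The hardware has already accepted SET_CONFIGURATION
                     * on its own; an equivalent control request is
                     * synthesized here so the gadget driver can update its
                     * state.  All fields not assigned above remain zero
                     * from the memset().
                     */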
2835
2836                /* program the NE registers */
2837                for (i = 0; i < UDC_EP_NUM; i++) {
2838                        ep = &dev->ep[i];
2839                        if (ep->in) {
2840
2841                                /* ep ix in UDC CSR register space */
2842                                udc_csr_epix = ep->num;
2843
2844
2845                        /* OUT ep */
2846                        } else {
2847                                /* ep ix in UDC CSR register space */
2848                                udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2849                        }
2850
2851                        tmp = readl(&dev->csr->ne[udc_csr_epix]);
2852                        /* ep cfg */
2853                        tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
2854                                                UDC_CSR_NE_CFG);
2855                        /* write reg */
2856                        writel(tmp, &dev->csr->ne[udc_csr_epix]);
2857
2858                        /* clear stall bits */
2859                        ep->halted = 0;
2860                        tmp = readl(&ep->regs->ctl);
2861                        tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
2862                        writel(tmp, &ep->regs->ctl);
2863                }
2864                /* hand the synthesized setup data to the gadget driver */
2865                spin_unlock(&dev->lock);
2866                tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
2867                spin_lock(&dev->lock);
2868
2869        } /* SET_INTERFACE ? */
2870        if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
2871                ret_val = IRQ_HANDLED;
2872
2873                dev->set_cfg_not_acked = 1;
2874                /* read interface and alt setting values */
2875                tmp = readl(&dev->regs->sts);
2876                dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
2877                dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);
2878
2879                /* make usb request for gadget driver */
2880                memset(&setup_data, 0, sizeof(union udc_setup_data));
2881                setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
2882                setup_data.request.bRequestType = USB_RECIP_INTERFACE;
2883                setup_data.request.wValue = dev->cur_alt;
2884                setup_data.request.wIndex = dev->cur_intf;
2885
2886                DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
2887                                dev->cur_alt, dev->cur_intf);
2888
2889                /* program the NE registers */
2890                for (i = 0; i < UDC_EP_NUM; i++) {
2891                        ep = &dev->ep[i];
2892                        if (ep->in) {
2893
2894                                /* ep ix in UDC CSR register space */
2895                                udc_csr_epix = ep->num;
2896
2897
2898                        /* OUT ep */
2899                        } else {
2900                                /* ep ix in UDC CSR register space */
2901                                udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2902                        }
2903
2904                        /* UDC CSR reg */
2905                        /* set ep values */
2906                        tmp = readl(&dev->csr->ne[udc_csr_epix]);
2907                        /* ep interface */
2908                        tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
2909                                                UDC_CSR_NE_INTF);
2910                        /* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
2911                        /* ep alt */
2912                        tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
2913                                                UDC_CSR_NE_ALT);
2914                        /* write reg */
2915                        writel(tmp, &dev->csr->ne[udc_csr_epix]);
2916
2917                        /* clear stall bits */
2918                        ep->halted = 0;
2919                        tmp = readl(&ep->regs->ctl);
2920                        tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
2921                        writel(tmp, &ep->regs->ctl);
2922                }
2923
2924                /* hand the synthesized setup data to the gadget driver */
2925                spin_unlock(&dev->lock);
2926                tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
2927                spin_lock(&dev->lock);
2928
2929        } /* USB reset */
2930        if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
2931                DBG(dev, "USB Reset interrupt\n");
2932                ret_val = IRQ_HANDLED;
2933
2934                /* allow soft reset when suspend occurs */
2935                soft_reset_occured = 0;
2936
2937                dev->waiting_zlp_ack_ep0in = 0;
2938                dev->set_cfg_not_acked = 0;
2939
2940                /* mask not needed interrupts */
2941                udc_mask_unused_interrupts(dev);
2942
2943                /* call gadget to resume and reset configs etc. */
2944                spin_unlock(&dev->lock);
2945                if (dev->sys_suspended && dev->driver->resume) {
2946                        dev->driver->resume(&dev->gadget);
2947                        dev->sys_suspended = 0;
2948                }
2949                dev->driver->disconnect(&dev->gadget);
2950                spin_lock(&dev->lock);
2951
2952                /* disable ep0 to empty req queue */
2953                empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
2954                ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
2955
2956                /* soft reset when rxfifo not empty */
2957                tmp = readl(&dev->regs->sts);
2958                if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
2959                                && !soft_reset_after_usbreset_occured) {
2960                        udc_soft_reset(dev);
2961                        soft_reset_after_usbreset_occured++;
2962                }
2963
2964                /*
2965                 * DMA reset to kill potential old DMA hw hang,
2966                 * POLL bit is already reset by ep_init() through
2967                 * disconnect()
2968                 */
2969                DBG(dev, "DMA machine reset\n");
2970                tmp = readl(&dev->regs->cfg);
2971                writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
2972                writel(tmp, &dev->regs->cfg);
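                    /* DMARST was pulsed: set, then restore the old cfg value */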
2973
2974                /* put into initial config */
2975                udc_basic_init(dev);
2976
2977                /* enable device setup interrupts */
2978                udc_enable_dev_setup_interrupts(dev);
2979
2980                /* enable suspend interrupt */
2981                tmp = readl(&dev->regs->irqmsk);
2982                tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
2983                writel(tmp, &dev->regs->irqmsk);
2984
2985        } /* USB suspend */
2986        if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
2987                DBG(dev, "USB Suspend interrupt\n");
2988                ret_val = IRQ_HANDLED;
2989                if (dev->driver->suspend) {
2990                        spin_unlock(&dev->lock);
2991                        dev->sys_suspended = 1;
2992                        dev->driver->suspend(&dev->gadget);
2993                        spin_lock(&dev->lock);
2994                }
2995        } /* new speed ? */
2996        if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
2997                DBG(dev, "ENUM interrupt\n");
2998                ret_val = IRQ_HANDLED;
2999                soft_reset_after_usbreset_occured = 0;
3000
3001                /* disable ep0 to empty req queue */
3002                empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
3003                ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
3004
3005                /* link up all endpoints */
3006                udc_setup_endpoints(dev);
3007                if (dev->gadget.speed == USB_SPEED_HIGH) {
3008                        dev_info(&dev->pdev->dev, "Connect: speed = %s\n",
3009                                "high");
3010                } else if (dev->gadget.speed == USB_SPEED_FULL) {
3011                        dev_info(&dev->pdev->dev, "Connect: speed = %s\n",
3012                                "full");
3013                }
3014
3015                /* init ep 0 */
3016                activate_control_endpoints(dev);
3017
3018                /* enable ep0 interrupts */
3019                udc_enable_ep0_interrupts(dev);
3020        }
3021        /* session valid change interrupt */
3022        if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
3023                DBG(dev, "USB SVC interrupt\n");
3024                ret_val = IRQ_HANDLED;
3025
3026                /* check that session is not valid to detect disconnect */
3027                tmp = readl(&dev->regs->sts);
3028                if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
3029                        /* disable suspend interrupt */
3030                        tmp = readl(&dev->regs->irqmsk);
3031                        tmp |= AMD_BIT(UDC_DEVINT_US);
3032                        writel(tmp, &dev->regs->irqmsk);
3033                        DBG(dev, "USB Disconnect (session valid low)\n");
3034                        /* cleanup on disconnect */
3035                        usb_disconnect(udc);
3036                }
3037
3038        }
3039
3040        return ret_val;
3041}
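    /*
     * Note the handlers above use "if", not "else if": a single irqsts
     * read can flag several device events, and each set bit is serviced
     * in turn before returning.
     */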
3042
3043/* Interrupt service routine; "pdev" is the cookie passed to request_irq() */
3044static irqreturn_t udc_irq(int irq, void *pdev)
3045{
3046        struct udc *dev = pdev;
3047        u32 reg;
3048        u16 i;
3049        u32 ep_irq;
3050        irqreturn_t ret_val = IRQ_NONE;
3051
3052        spin_lock(&dev->lock);
3053
3054        /* check for ep irq */
3055        reg = readl(&dev->regs->ep_irqsts);
3056        if (reg) {
3057                if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
3058                        ret_val |= udc_control_out_isr(dev);
3059                if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
3060                        ret_val |= udc_control_in_isr(dev);
3061
3062                /*
3063                 * data endpoints: iterate remaining irq bits (bits above
3064                 * UDC_EPIN_NUM flag OUT eps, lower bits IN eps)
3065                 */
3066                for (i = 1; i < UDC_EP_NUM; i++) {
3067                        ep_irq = 1 << i;
3068                        if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
3069                                continue;
3070
3071                        /* clear irq status */
3072                        writel(ep_irq, &dev->regs->ep_irqsts);
3073
3074                        /* irq for out ep ? */
3075                        if (i > UDC_EPIN_NUM)
3076                                ret_val |= udc_data_out_isr(dev, i);
3077                        else
3078                                ret_val |= udc_data_in_isr(dev, i);
3079                }
3080
3081        }
3082
3083
3084        /* check for dev irq */
3085        reg = readl(&dev->regs->irqsts);
3086        if (reg) {
3087                /* clear irq */
3088                writel(reg, &dev->regs->irqsts);
3089                ret_val |= udc_dev_isr(dev, reg);
3090        }
3091
3092
3093        spin_unlock(&dev->lock);
3094        return ret_val;
3095}
3096
3097/* Frees the udc structure once the gadget device is released */
3098static void gadget_release(struct device *pdev)
3099{
3100        struct udc *dev = dev_get_drvdata(pdev);
3101        kfree(dev);
3102}
3103
3104/* Cleanup on device remove */
3105static void udc_remove(struct udc *dev)
3106{
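            /*
             * Teardown protocol: bumping stop_timer/stop_pollstall_timer
             * asks a pending callback to complete(&on_exit) (resp.
             * &on_pollstall_exit) instead of re-arming; .data doubles as
             * an "initialized" flag, set in udc_probe(), so that only
             * live timers are del_timer_sync()'d.
             */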
3107        /* remove timer */
3108        stop_timer++;
3109        if (timer_pending(&udc_timer))
3110                wait_for_completion(&on_exit);
3111        if (udc_timer.data)
3112                del_timer_sync(&udc_timer);
3113        /* remove pollstall timer */
3114        stop_pollstall_timer++;
3115        if (timer_pending(&udc_pollstall_timer))
3116                wait_for_completion(&on_pollstall_exit);
3117        if (udc_pollstall_timer.data)
3118                del_timer_sync(&udc_pollstall_timer);
3119        udc = NULL;
3120}
3121
3122/* Reset all pci context */
3123static void udc_pci_remove(struct pci_dev *pdev)
3124{
3125        struct udc              *dev;
3126
3127        dev = pci_get_drvdata(pdev);
3128
3129        /* gadget driver must not be registered */
3130        BUG_ON(dev->driver != NULL);
3131
3132        /* dma pool cleanup */
3133        if (dev->data_requests)
3134                pci_pool_destroy(dev->data_requests);
3135
3136        if (dev->stp_requests) {
3137                /* cleanup DMA desc's for ep0in */
3138                pci_pool_free(dev->stp_requests,
3139                        dev->ep[UDC_EP0OUT_IX].td_stp,
3140                        dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3141                pci_pool_free(dev->stp_requests,
3142                        dev->ep[UDC_EP0OUT_IX].td,
3143                        dev->ep[UDC_EP0OUT_IX].td_phys);
3144
3145                pci_pool_destroy(dev->stp_requests);
3146        }
3147
3148        /* reset controller; dev->regs is NULL if probe failed early */
3149        if (dev->regs)
                    writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
3150        if (dev->irq_registered)
3151                free_irq(pdev->irq, dev);
3152        if (dev->regs)
3153                iounmap(dev->regs);
3154        if (dev->mem_region)
3155                release_mem_region(pci_resource_start(pdev, 0),
3156                                pci_resource_len(pdev, 0));
3157        if (dev->active)
3158                pci_disable_device(pdev);
3159
3160        device_unregister(&dev->gadget.dev);
3161        pci_set_drvdata(pdev, NULL);
3162
3163        udc_remove(dev);
3164}
3165
3166/* create dma pools on init */
3167static int init_dma_pools(struct udc *dev)
3168{
3169        struct udc_stp_dma      *td_stp;
3170        struct udc_data_dma     *td_data;
3171        int retval;
3172
3173        /* consistent DMA mode setting ? */
3174        if (use_dma_ppb) {
3175                use_dma_bufferfill_mode = 0;
3176        } else {
3177                use_dma_ppb_du = 0;
3178                use_dma_bufferfill_mode = 1;
3179        }
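            /*
             * The controller supports two OUT DMA schemes: packet-per-
             * buffer (use_dma_ppb, optionally with descriptor update,
             * use_dma_ppb_du) and buffer-fill mode.  They are mutually
             * exclusive, hence the fix-up above.
             */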
3180
3181        /* DMA setup */
3182        dev->data_requests = dma_pool_create("data_requests", NULL,
3183                sizeof(struct udc_data_dma), 0, 0);
3184        if (!dev->data_requests) {
3185                DBG(dev, "can't get request data pool\n");
3186                retval = -ENOMEM;
3187                goto finished;
3188        }
3189
3190        /* EP0 in dma regs = dev control regs */
3191        dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;
3192
3193        /* dma desc for setup data */
3194        dev->stp_requests = dma_pool_create("setup requests", NULL,
3195                sizeof(struct udc_stp_dma), 0, 0);
3196        if (!dev->stp_requests) {
3197                DBG(dev, "can't get stp request pool\n");
3198                retval = -ENOMEM;
3199                goto finished;
3200        }
3201        /* setup */
3202        td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
3203                                &dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3204        if (td_stp == NULL) {
3205                retval = -ENOMEM;
3206                goto finished;
3207        }
3208        dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;
3209
3210        /* data descriptor for ep0-out, also used for zero-length packets */
3211        td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
3212                                &dev->ep[UDC_EP0OUT_IX].td_phys);
3213        if (td_data == NULL) {
3214                retval = -ENOMEM;
3215                goto finished;
3216        }
3217        dev->ep[UDC_EP0OUT_IX].td = td_data;
3218        return 0;
3219
3220finished:
3221        return retval;
3222}
3223
3224/* Called by pci bus driver to init pci context */
3225static int udc_pci_probe(
3226        struct pci_dev *pdev,
3227        const struct pci_device_id *id
3228)
3229{
3230        struct udc              *dev;
3231        unsigned long           resource;
3232        unsigned long           len;
3233        int                     retval = 0;
3234
3235        /* one udc only */
3236        if (udc) {
3237                dev_dbg(&pdev->dev, "already probed\n");
3238                return -EBUSY;
3239        }
3240
3241        /* init */
3242        dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
3243        if (!dev) {
3244                retval = -ENOMEM;
3245                goto finished;
3246        }
3247        pci_set_drvdata(pdev, dev); /* early: udc_pci_remove() needs it */
3248        /* pci setup */
3249        if (pci_enable_device(pdev) < 0) {
3250                retval = -ENODEV;
3251                goto finished;
3252        }
3253        dev->active = 1;
3254
3255        /* PCI resource allocation */
3256        resource = pci_resource_start(pdev, 0);
3257        len = pci_resource_len(pdev, 0);
3258
3259        if (!request_mem_region(resource, len, name)) {
3260                dev_dbg(&pdev->dev, "pci device used already\n");
3261                retval = -EBUSY;
3262                goto finished;
3263        }
3264        dev->mem_region = 1;
3265
3266        dev->virt_addr = ioremap_nocache(resource, len);
3267        if (dev->virt_addr == NULL) {
3268                dev_dbg(&pdev->dev, "start address cannot be mapped\n");
3269                retval = -EFAULT;
3270                goto finished;
3271        }
3272
3273        if (!pdev->irq) {
3274                dev_err(&pdev->dev, "irq not set\n");
3275                retval = -ENODEV;
3276                goto finished;
3277        }
3278
3279        if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
3280                dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq);
3281                retval = -EBUSY;
3282                goto finished;
3283        }
3284        dev->irq_registered = 1;
3285
3288        /* chip revision of the HS AMD5536 */
3289        dev->chiprev = pdev->revision;
3290
3291        pci_set_master(pdev);
3292        pci_try_set_mwi(pdev);
3293
3294        /* init dma pools */
3295        if (use_dma) {
3296                retval = init_dma_pools(dev);
3297                if (retval != 0)
3298                        goto finished;
3299        }
3300
3301        dev->phys_addr = resource;
3302        dev->irq = pdev->irq;
3303        dev->pdev = pdev;
3304        dev->gadget.dev.parent = &pdev->dev;
3305        dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
3306
3307        /* general probing */
3308        if (udc_probe(dev) == 0)
3309                return 0;
3310
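            /*
             * Error path: udc_pci_remove() unwinds only what was set up,
             * keyed off dev->active, dev->mem_region, dev->irq_registered
             * and the dma pool pointers.
             */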
3311finished:
3312        if (dev)
3313                udc_pci_remove(pdev);
3314        return retval;
3315}
3316
3317/* general probe */
3318static int udc_probe(struct udc *dev)
3319{
3320        char            tmp[128];
3321        u32             reg;
3322        int             retval;
3323
3324        /* mark timer as not initialized */
3325        udc_timer.data = 0;
3326        udc_pollstall_timer.data = 0;
3327
3328        /* device struct setup */
3329        spin_lock_init(&dev->lock);
3330        dev->gadget.ops = &udc_ops;
3331
3332        strcpy(dev->gadget.dev.bus_id, "gadget");
3333        dev->gadget.dev.release = gadget_release;
3334        dev->gadget.name = name;
3336        dev->gadget.is_dualspeed = 1;
3337
3338        /* udc csr registers base */
3339        dev->csr = dev->virt_addr + UDC_CSR_ADDR;
3340        /* dev registers base */
3341        dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
3342        /* ep registers base */
3343        dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
3344        /* fifo's base */
3345        dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
3346        dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
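            /*
             * All register blocks live in BAR 0; the UDC_*_ADDR values are
             * fixed offsets from the mapped base, defined in amd5536udc.h.
             */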
3347
3348        /* init registers, interrupts, ... */
3349        startup_registers(dev);
3350
3351        dev_info(&dev->pdev->dev, "%s\n", mod_desc);
3352
3353        snprintf(tmp, sizeof tmp, "%d", dev->irq);
3354        dev_info(&dev->pdev->dev,
3355                "irq %s, pci mem %08lx, chip rev %02x (Geode5536 %s)\n",
3356                tmp, dev->phys_addr, dev->chiprev,
3357                (dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1");
3358        strcpy(tmp, UDC_DRIVER_VERSION_STRING);
3359        if (dev->chiprev == UDC_HSA0_REV) {
3360                dev_err(&dev->pdev->dev, "chip revision is A0; too old\n");
3361                retval = -ENODEV;
3362                goto finished;
3363        }
3364        dev_info(&dev->pdev->dev,
3365                "driver version: %s (for Geode5536 B1)\n", tmp);
3366        udc = dev;
3367
3368        retval = device_register(&dev->gadget.dev);
3369        if (retval)
3370                goto finished;
3371
3372        /* timer init */
3373        init_timer(&udc_timer);
3374        udc_timer.function = udc_timer_function;
3375        udc_timer.data = 1;
3376        /* timer pollstall init */
3377        init_timer(&udc_pollstall_timer);
3378        udc_pollstall_timer.function = udc_pollstall_timer_function;
3379        udc_pollstall_timer.data = 1;
3380
3381        /* set SD (soft disconnect) */
3382        reg = readl(&dev->regs->ctl);
3383        reg |= AMD_BIT(UDC_DEVCTL_SD);
3384        writel(reg, &dev->regs->ctl);
3385
3386        /* print dev register info */
3387        print_regs(dev);
3388
3389        return 0;
3390
3391finished:
3392        return retval;
3393}
3394
3395/* Initiates a remote wakeup */
3396static int udc_remote_wakeup(struct udc *dev)
3397{
3398        unsigned long flags;
3399        u32 tmp;
3400
3401        DBG(dev, "UDC initiates remote wakeup\n");
3402
3403        spin_lock_irqsave(&dev->lock, flags);
3404
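            /*
             * Resume signalling is a pulse of the RES bit; the controller
             * is assumed to stretch this to the minimum resume time on
             * the bus (not verified against the CS5536 data book).
             */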
3405        tmp = readl(&dev->regs->ctl);
3406        tmp |= AMD_BIT(UDC_DEVCTL_RES);
3407        writel(tmp, &dev->regs->ctl);
3408        tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
3409        writel(tmp, &dev->regs->ctl);
3410
3411        spin_unlock_irqrestore(&dev->lock, flags);
3412        return 0;
3413}
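    /*
     * This is presumably reached through the gadget's wakeup op; with the
     * gadget API a function driver would trigger it roughly like this
     * (illustrative sketch):
     *
     *     if (gadget->ops->wakeup)
     *             gadget->ops->wakeup(gadget);   // ends up here
     *
     * which the gadget framework wraps as usb_gadget_wakeup(gadget).
     */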
3414
3415/* PCI device parameters */
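/*
 * The match is by class code 0x0c03fe: serial bus controller, USB,
 * programming interface 0xfe ("USB device", i.e. not a host controller),
 * limited to the AMD vendor id and the CS5536 UDC function 0x2096.
 */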
3416static const struct pci_device_id pci_id[] = {
3417        {
3418                PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
3419                .class =        (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
3420                .class_mask =   0xffffffff,
3421        },
3422        {},
3423};
3424MODULE_DEVICE_TABLE(pci, pci_id);
3425
3426/* PCI functions */
3427static struct pci_driver udc_pci_driver = {
3428        .name =         (char *) name,
3429        .id_table =     pci_id,
3430        .probe =        udc_pci_probe,
3431        .remove =       udc_pci_remove,
3432};
3433
3434/* Inits driver */
3435static int __init init(void)
3436{
3437        return pci_register_driver(&udc_pci_driver);
3438}
3439module_init(init);
3440
3441/* Cleans driver */
3442static void __exit cleanup(void)
3443{
3444        pci_unregister_driver(&udc_pci_driver);
3445}
3446module_exit(cleanup);
3447
3448MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
3449MODULE_AUTHOR("Thomas Dahlmann");
3450MODULE_LICENSE("GPL");
3451
3452