linux/drivers/usb/gadget/amd5536udc.c
<<
>>
Prefs
   1/*
   2 * amd5536.c -- AMD 5536 UDC high/full speed USB device controller
   3 *
   4 * Copyright (C) 2005-2007 AMD (http://www.amd.com)
   5 * Author: Thomas Dahlmann
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License as published by
   9 * the Free Software Foundation; either version 2 of the License, or
  10 * (at your option) any later version.
  11 *
  12 * This program is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  15 * GNU General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU General Public License
  18 * along with this program; if not, write to the Free Software
  19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  20 */
  21
  22/*
  23 * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536.
  24 * It is a USB Highspeed DMA capable USB device controller. Beside ep0 it
  25 * provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
  26 *
  27 * Make sure that UDC is assigned to port 4 by BIOS settings (port can also
  28 * be used as host port) and UOC bits PAD_EN and APU are set (should be done
  29 * by BIOS init).
  30 *
  31 * UDC DMA requires 32-bit aligned buffers so DMA with gadget ether does not
  32 * work without updating NET_IP_ALIGN. Or PIO mode (module param "use_dma=0")
  33 * can be used with gadget ether.
  34 */
  35
  36/* debug control */
  37/* #define UDC_VERBOSE */
  38
  39/* Driver strings */
  40#define UDC_MOD_DESCRIPTION             "AMD 5536 UDC - USB Device Controller"
  41#define UDC_DRIVER_VERSION_STRING       "01.00.0206 - $Revision: #3 $"
  42
  43/* system */
  44#include <linux/module.h>
  45#include <linux/pci.h>
  46#include <linux/kernel.h>
  47#include <linux/delay.h>
  48#include <linux/ioport.h>
  49#include <linux/sched.h>
  50#include <linux/slab.h>
  51#include <linux/errno.h>
  52#include <linux/init.h>
  53#include <linux/timer.h>
  54#include <linux/list.h>
  55#include <linux/interrupt.h>
  56#include <linux/ioctl.h>
  57#include <linux/fs.h>
  58#include <linux/dmapool.h>
  59#include <linux/moduleparam.h>
  60#include <linux/device.h>
  61#include <linux/io.h>
  62#include <linux/irq.h>
  63
  64#include <asm/byteorder.h>
  65#include <asm/system.h>
  66#include <asm/unaligned.h>
  67
  68/* gadget stack */
  69#include <linux/usb/ch9.h>
  70#include <linux/usb/gadget.h>
  71
  72/* udc specific */
  73#include "amd5536udc.h"
  74
  75
  76static void udc_tasklet_disconnect(unsigned long);
  77static void empty_req_queue(struct udc_ep *);
  78static int udc_probe(struct udc *dev);
  79static void udc_basic_init(struct udc *dev);
  80static void udc_setup_endpoints(struct udc *dev);
  81static void udc_soft_reset(struct udc *dev);
  82static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
  83static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);
  84static int udc_free_dma_chain(struct udc *dev, struct udc_request *req);
  85static int udc_create_dma_chain(struct udc_ep *ep, struct udc_request *req,
  86                                unsigned long buf_len, gfp_t gfp_flags);
  87static int udc_remote_wakeup(struct udc *dev);
  88static int udc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
  89static void udc_pci_remove(struct pci_dev *pdev);
  90
  91/* description */
  92static const char mod_desc[] = UDC_MOD_DESCRIPTION;
  93static const char name[] = "amd5536udc";
  94
  95/* structure to hold endpoint function pointers */
  96static const struct usb_ep_ops udc_ep_ops;
  97
  98/* received setup data */
  99static union udc_setup_data setup_data;
 100
 101/* pointer to device object */
 102static struct udc *udc;
 103
 104/* irq spin lock for soft reset */
 105static DEFINE_SPINLOCK(udc_irq_spinlock);
 106/* stall spin lock */
 107static DEFINE_SPINLOCK(udc_stall_spinlock);
 108
 109/*
 110* slave mode: pending bytes in rx fifo after nyet,
 111* used if EPIN irq came but no req was available
 112*/
 113static unsigned int udc_rxfifo_pending;
 114
 115/* count soft resets after suspend to avoid loop */
 116static int soft_reset_occured;
 117static int soft_reset_after_usbreset_occured;
 118
 119/* timer */
 120static struct timer_list udc_timer;
 121static int stop_timer;
 122
 123/* set_rde -- Is used to control enabling of RX DMA. Problem is
 124 * that UDC has only one bit (RDE) to enable/disable RX DMA for
 125 * all OUT endpoints. So we have to handle race conditions like
 126 * when OUT data reaches the fifo but no request was queued yet.
 127 * This cannot be solved by letting the RX DMA disabled until a
 128 * request gets queued because there may be other OUT packets
 129 * in the FIFO (important for not blocking control traffic).
  130 * The value of set_rde controls the corresponding timer.
 131 *
  132 * set_rde -1 == not used, means it is allowed to be set to 0 or 1
 133 * set_rde  0 == do not touch RDE, do no start the RDE timer
 134 * set_rde  1 == timer function will look whether FIFO has data
 135 * set_rde  2 == set by timer function to enable RX DMA on next call
 136 */
 137static int set_rde = -1;
 138
 139static DECLARE_COMPLETION(on_exit);
 140static struct timer_list udc_pollstall_timer;
 141static int stop_pollstall_timer;
 142static DECLARE_COMPLETION(on_pollstall_exit);
 143
 144/* tasklet for usb disconnect */
 145static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
 146                (unsigned long) &udc);
 147
 148
 149/* endpoint names used for print */
 150static const char ep0_string[] = "ep0in";
 151static const char *ep_string[] = {
 152        ep0_string,
 153        "ep1in-int", "ep2in-bulk", "ep3in-bulk", "ep4in-bulk", "ep5in-bulk",
 154        "ep6in-bulk", "ep7in-bulk", "ep8in-bulk", "ep9in-bulk", "ep10in-bulk",
 155        "ep11in-bulk", "ep12in-bulk", "ep13in-bulk", "ep14in-bulk",
 156        "ep15in-bulk", "ep0out", "ep1out-bulk", "ep2out-bulk", "ep3out-bulk",
 157        "ep4out-bulk", "ep5out-bulk", "ep6out-bulk", "ep7out-bulk",
 158        "ep8out-bulk", "ep9out-bulk", "ep10out-bulk", "ep11out-bulk",
 159        "ep12out-bulk", "ep13out-bulk", "ep14out-bulk", "ep15out-bulk"
 160};
 161
 162/* DMA usage flag */
 163static int use_dma = 1;
 164/* packet per buffer dma */
 165static int use_dma_ppb = 1;
 166/* with per descr. update */
 167static int use_dma_ppb_du;
 168/* buffer fill mode */
 169static int use_dma_bufferfill_mode;
 170/* full speed only mode */
 171static int use_fullspeed;
 172/* tx buffer size for high speed */
 173static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;
 174
 175/* module parameters */
 176module_param(use_dma, bool, S_IRUGO);
 177MODULE_PARM_DESC(use_dma, "true for DMA");
 178module_param(use_dma_ppb, bool, S_IRUGO);
 179MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode");
 180module_param(use_dma_ppb_du, bool, S_IRUGO);
 181MODULE_PARM_DESC(use_dma_ppb_du,
 182        "true for DMA in packet per buffer mode with descriptor update");
 183module_param(use_fullspeed, bool, S_IRUGO);
 184MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
 185
 186/*---------------------------------------------------------------------------*/
 187/* Prints UDC device registers and endpoint irq registers */
 188static void print_regs(struct udc *dev)
 189{
 190        DBG(dev, "------- Device registers -------\n");
 191        DBG(dev, "dev config     = %08x\n", readl(&dev->regs->cfg));
 192        DBG(dev, "dev control    = %08x\n", readl(&dev->regs->ctl));
 193        DBG(dev, "dev status     = %08x\n", readl(&dev->regs->sts));
 194        DBG(dev, "\n");
 195        DBG(dev, "dev int's      = %08x\n", readl(&dev->regs->irqsts));
 196        DBG(dev, "dev intmask    = %08x\n", readl(&dev->regs->irqmsk));
 197        DBG(dev, "\n");
 198        DBG(dev, "dev ep int's   = %08x\n", readl(&dev->regs->ep_irqsts));
 199        DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
 200        DBG(dev, "\n");
 201        DBG(dev, "USE DMA        = %d\n", use_dma);
 202        if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
 203                DBG(dev, "DMA mode       = PPBNDU (packet per buffer "
 204                        "WITHOUT desc. update)\n");
 205                dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU");
 206        } else if (use_dma && use_dma_ppb_du && use_dma_ppb_du) {
 207                DBG(dev, "DMA mode       = PPBDU (packet per buffer "
 208                        "WITH desc. update)\n");
 209                dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU");
 210        }
 211        if (use_dma && use_dma_bufferfill_mode) {
 212                DBG(dev, "DMA mode       = BF (buffer fill mode)\n");
 213                dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF");
 214        }
 215        if (!use_dma) {
 216                dev_info(&dev->pdev->dev, "FIFO mode\n");
 217        }
 218        DBG(dev, "-------------------------------------------------------\n");
 219}
 220
 221/* Masks unused interrupts */
 222static int udc_mask_unused_interrupts(struct udc *dev)
 223{
 224        u32 tmp;
 225
 226        /* mask all dev interrupts */
 227        tmp =   AMD_BIT(UDC_DEVINT_SVC) |
 228                AMD_BIT(UDC_DEVINT_ENUM) |
 229                AMD_BIT(UDC_DEVINT_US) |
 230                AMD_BIT(UDC_DEVINT_UR) |
 231                AMD_BIT(UDC_DEVINT_ES) |
 232                AMD_BIT(UDC_DEVINT_SI) |
 233                AMD_BIT(UDC_DEVINT_SOF)|
 234                AMD_BIT(UDC_DEVINT_SC);
 235        writel(tmp, &dev->regs->irqmsk);
 236
 237        /* mask all ep interrupts */
 238        writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);
 239
 240        return 0;
 241}
 242
 243/* Enables endpoint 0 interrupts */
 244static int udc_enable_ep0_interrupts(struct udc *dev)
 245{
 246        u32 tmp;
 247
 248        DBG(dev, "udc_enable_ep0_interrupts()\n");
 249
 250        /* read irq mask */
 251        tmp = readl(&dev->regs->ep_irqmsk);
 252        /* enable ep0 irq's */
 253        tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
 254                & AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
 255        writel(tmp, &dev->regs->ep_irqmsk);
 256
 257        return 0;
 258}
 259
 260/* Enables device interrupts for SET_INTF and SET_CONFIG */
 261static int udc_enable_dev_setup_interrupts(struct udc *dev)
 262{
 263        u32 tmp;
 264
 265        DBG(dev, "enable device interrupts for setup data\n");
 266
 267        /* read irq mask */
 268        tmp = readl(&dev->regs->irqmsk);
 269
 270        /* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
 271        tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
 272                & AMD_UNMASK_BIT(UDC_DEVINT_SC)
 273                & AMD_UNMASK_BIT(UDC_DEVINT_UR)
 274                & AMD_UNMASK_BIT(UDC_DEVINT_SVC)
 275                & AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
 276        writel(tmp, &dev->regs->irqmsk);
 277
 278        return 0;
 279}
 280
 281/* Calculates fifo start of endpoint based on preceeding endpoints */
 282static int udc_set_txfifo_addr(struct udc_ep *ep)
 283{
 284        struct udc      *dev;
 285        u32 tmp;
 286        int i;
 287
 288        if (!ep || !(ep->in))
 289                return -EINVAL;
 290
 291        dev = ep->dev;
 292        ep->txfifo = dev->txfifo;
 293
 294        /* traverse ep's */
 295        for (i = 0; i < ep->num; i++) {
 296                if (dev->ep[i].regs) {
 297                        /* read fifo size */
 298                        tmp = readl(&dev->ep[i].regs->bufin_framenum);
 299                        tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
 300                        ep->txfifo += tmp;
 301                }
 302        }
 303        return 0;
 304}
 305
 306/* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
 307static u32 cnak_pending;
 308
 309static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
 310{
 311        if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
 312                DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
 313                cnak_pending |= 1 << (num);
 314                ep->naking = 1;
 315        } else
 316                cnak_pending = cnak_pending & (~(1 << (num)));
 317}
 318
 319
/*
 * udc_ep_enable - usb_ep_ops.enable callback, called by the gadget driver.
 * Programs the endpoint's traffic type, max packet size, TX fifo layout
 * (IN eps) and the UDC CSR "ne" register, then unmasks its interrupt.
 * Not valid for ep0 (handled internally by the driver).
 * Returns -EINVAL for ep0 or a bad descriptor, -ESHUTDOWN when no gadget
 * driver is bound or bus speed is unknown.
 */
static int
udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
{
        struct udc_ep           *ep;
        struct udc              *dev;
        u32                     tmp;
        unsigned long           iflags;
        u8 udc_csr_epix;
        unsigned                maxpacket;

        if (!usbep
                        || usbep->name == ep0_string
                        || !desc
                        || desc->bDescriptorType != USB_DT_ENDPOINT)
                return -EINVAL;

        ep = container_of(usbep, struct udc_ep, ep);
        dev = ep->dev;

        DBG(dev, "udc_ep_enable() ep %d\n", ep->num);

        if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
                return -ESHUTDOWN;

        spin_lock_irqsave(&dev->lock, iflags);
        ep->desc = desc;

        ep->halted = 0;

        /* set traffic type */
        tmp = readl(&dev->ep[ep->num].regs->ctl);
        tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
        writel(tmp, &dev->ep[ep->num].regs->ctl);

        /* set max packet size */
        maxpacket = le16_to_cpu(desc->wMaxPacketSize);
        tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
        tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
        ep->ep.maxpacket = maxpacket;
        writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);

        /* IN ep */
        if (ep->in) {

                /* ep ix in UDC CSR register space */
                udc_csr_epix = ep->num;

                /* set buffer size (tx fifo entries) */
                tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
                /* double buffering: fifo size = 2 x max packet size */
                tmp = AMD_ADDBITS(
                                tmp,
                                maxpacket * UDC_EPIN_BUFF_SIZE_MULT
                                          / UDC_DWORD_BYTES,
                                UDC_EPIN_BUFF_SIZE);
                writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);

                /* calc. tx fifo base addr */
                udc_set_txfifo_addr(ep);

                /* flush fifo */
                tmp = readl(&ep->regs->ctl);
                tmp |= AMD_BIT(UDC_EPCTL_F);
                writel(tmp, &ep->regs->ctl);

        /* OUT ep */
        } else {
                /* ep ix in UDC CSR register space: OUT eps start at offset */
                udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;

                /* set max packet size UDC CSR  */
                tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
                tmp = AMD_ADDBITS(tmp, maxpacket,
                                        UDC_CSR_NE_MAX_PKT);
                writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);

                if (use_dma && !ep->in) {
                        /* alloc and init BNA dummy request */
                        ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
                        ep->bna_occurred = 0;
                }

                if (ep->num != UDC_EP0OUT_IX)
                        dev->data_ep_enabled = 1;
        }

        /* set ep values */
        tmp = readl(&dev->csr->ne[udc_csr_epix]);
        /* max packet */
        tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT);
        /* ep number */
        tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
        /* ep direction */
        tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
        /* ep type */
        tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
        /* ep config */
        tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
        /* ep interface */
        tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
        /* ep alt */
        tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
        /* write reg */
        writel(tmp, &dev->csr->ne[udc_csr_epix]);

        /* enable ep irq */
        tmp = readl(&dev->regs->ep_irqmsk);
        tmp &= AMD_UNMASK_BIT(ep->num);
        writel(tmp, &dev->regs->ep_irqmsk);

        /*
         * clear NAK by writing CNAK
         * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
         */
        if (!use_dma || ep->in) {
                tmp = readl(&ep->regs->ctl);
                tmp |= AMD_BIT(UDC_EPCTL_CNAK);
                writel(tmp, &ep->regs->ctl);
                ep->naking = 0;
                UDC_QUEUE_CNAK(ep, ep->num);
        }
        /* NOTE(review): dead store -- tmp is not used after this point */
        tmp = desc->bEndpointAddress;
        DBG(dev, "%s enabled\n", usbep->name);

        spin_unlock_irqrestore(&dev->lock, iflags);
        return 0;
}
 448
/*
 * ep_init - resets an endpoint to its quiescent state: NAK set,
 * interrupt masked, request queue emptied, DMA descriptor pointer
 * cleared.  Used on disable and during controller (re)initialization.
 */
static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
{
        u32             tmp;

        VDBG(ep->dev, "ep-%d reset\n", ep->num);
        ep->desc = NULL;
        ep->ep.ops = &udc_ep_ops;
        INIT_LIST_HEAD(&ep->queue);

        /* dummy maxpacket until the ep is enabled with a descriptor */
        ep->ep.maxpacket = (u16) ~0;
        /* set NAK */
        tmp = readl(&ep->regs->ctl);
        tmp |= AMD_BIT(UDC_EPCTL_SNAK);
        writel(tmp, &ep->regs->ctl);
        ep->naking = 1;

        /* disable interrupt */
        tmp = readl(&regs->ep_irqmsk);
        tmp |= AMD_BIT(ep->num);
        writel(tmp, &regs->ep_irqmsk);

        if (ep->in) {
                /* unset P and IN bit of potential former DMA */
                tmp = readl(&ep->regs->ctl);
                tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
                writel(tmp, &ep->regs->ctl);

                /* clear IN status (write-1-to-clear per this pattern) */
                tmp = readl(&ep->regs->sts);
                tmp |= AMD_BIT(UDC_EPSTS_IN);
                writel(tmp, &ep->regs->sts);

                /* flush the fifo */
                tmp = readl(&ep->regs->ctl);
                tmp |= AMD_BIT(UDC_EPCTL_F);
                writel(tmp, &ep->regs->ctl);

        }
        /* reset desc pointer */
        writel(0, &ep->regs->desptr);
}
 490
 491/* Disables endpoint, is called by gadget driver */
 492static int udc_ep_disable(struct usb_ep *usbep)
 493{
 494        struct udc_ep   *ep = NULL;
 495        unsigned long   iflags;
 496
 497        if (!usbep)
 498                return -EINVAL;
 499
 500        ep = container_of(usbep, struct udc_ep, ep);
 501        if (usbep->name == ep0_string || !ep->desc)
 502                return -EINVAL;
 503
 504        DBG(ep->dev, "Disable ep-%d\n", ep->num);
 505
 506        spin_lock_irqsave(&ep->dev->lock, iflags);
 507        udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
 508        empty_req_queue(ep);
 509        ep_init(ep->dev->regs, ep);
 510        spin_unlock_irqrestore(&ep->dev->lock, iflags);
 511
 512        return 0;
 513}
 514
 515/* Allocates request packet, called by gadget driver */
 516static struct usb_request *
 517udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
 518{
 519        struct udc_request      *req;
 520        struct udc_data_dma     *dma_desc;
 521        struct udc_ep   *ep;
 522
 523        if (!usbep)
 524                return NULL;
 525
 526        ep = container_of(usbep, struct udc_ep, ep);
 527
 528        VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
 529        req = kzalloc(sizeof(struct udc_request), gfp);
 530        if (!req)
 531                return NULL;
 532
 533        req->req.dma = DMA_DONT_USE;
 534        INIT_LIST_HEAD(&req->queue);
 535
 536        if (ep->dma) {
 537                /* ep0 in requests are allocated from data pool here */
 538                dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
 539                                                &req->td_phys);
 540                if (!dma_desc) {
 541                        kfree(req);
 542                        return NULL;
 543                }
 544
 545                VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
 546                                "td_phys = %lx\n",
 547                                req, dma_desc,
 548                                (unsigned long)req->td_phys);
 549                /* prevent from using desc. - set HOST BUSY */
 550                dma_desc->status = AMD_ADDBITS(dma_desc->status,
 551                                                UDC_DMA_STP_STS_BS_HOST_BUSY,
 552                                                UDC_DMA_STP_STS_BS);
 553                dma_desc->bufptr = cpu_to_le32(DMA_DONT_USE);
 554                req->td_data = dma_desc;
 555                req->td_data_last = NULL;
 556                req->chain_len = 1;
 557        }
 558
 559        return &req->req;
 560}
 561
 562/* Frees request packet, called by gadget driver */
 563static void
 564udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
 565{
 566        struct udc_ep   *ep;
 567        struct udc_request      *req;
 568
 569        if (!usbep || !usbreq)
 570                return;
 571
 572        ep = container_of(usbep, struct udc_ep, ep);
 573        req = container_of(usbreq, struct udc_request, req);
 574        VDBG(ep->dev, "free_req req=%p\n", req);
 575        BUG_ON(!list_empty(&req->queue));
 576        if (req->td_data) {
 577                VDBG(ep->dev, "req->td_data=%p\n", req->td_data);
 578
 579                /* free dma chain if created */
 580                if (req->chain_len > 1) {
 581                        udc_free_dma_chain(ep->dev, req);
 582                }
 583
 584                pci_pool_free(ep->dev->data_requests, req->td_data,
 585                                                        req->td_phys);
 586        }
 587        kfree(req);
 588}
 589
/*
 * udc_init_bna_dummy - initialize the BNA dummy descriptor: last bit
 * set, next pointer looping back to itself so the controller never
 * walks past it.
 */
static void udc_init_bna_dummy(struct udc_request *req)
{
        if (req) {
                /* set last bit */
                req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
                /* set next pointer to itself */
                req->td_data->next = req->td_phys;
                /* NOTE(review): comment originally said "set HOST BUSY"
                 * but the value written is DMA_DONE -- confirm which
                 * buffer state is intended for the dummy descriptor */
                req->td_data->status
                        = AMD_ADDBITS(req->td_data->status,
                                        UDC_DMA_STP_STS_BS_DMA_DONE,
                                        UDC_DMA_STP_STS_BS);
#ifdef UDC_VERBOSE
                pr_debug("bna desc = %p, sts = %08x\n",
                        req->td_data, req->td_data->status);
#endif
        }
}
 609
 610/* Allocate BNA dummy descriptor */
 611static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
 612{
 613        struct udc_request *req = NULL;
 614        struct usb_request *_req = NULL;
 615
 616        /* alloc the dummy request */
 617        _req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
 618        if (_req) {
 619                req = container_of(_req, struct udc_request, req);
 620                ep->bna_dummy_req = req;
 621                udc_init_bna_dummy(req);
 622        }
 623        return req;
 624}
 625
 626/* Write data to TX fifo for IN packets */
 627static void
 628udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
 629{
 630        u8                      *req_buf;
 631        u32                     *buf;
 632        int                     i, j;
 633        unsigned                bytes = 0;
 634        unsigned                remaining = 0;
 635
 636        if (!req || !ep)
 637                return;
 638
 639        req_buf = req->buf + req->actual;
 640        prefetch(req_buf);
 641        remaining = req->length - req->actual;
 642
 643        buf = (u32 *) req_buf;
 644
 645        bytes = ep->ep.maxpacket;
 646        if (bytes > remaining)
 647                bytes = remaining;
 648
 649        /* dwords first */
 650        for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) {
 651                writel(*(buf + i), ep->txfifo);
 652        }
 653
 654        /* remaining bytes must be written by byte access */
 655        for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
 656                writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
 657                                                        ep->txfifo);
 658        }
 659
 660        /* dummy write confirm */
 661        writel(0, &ep->regs->confirm);
 662}
 663
 664/* Read dwords from RX fifo for OUT transfers */
 665static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
 666{
 667        int i;
 668
 669        VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);
 670
 671        for (i = 0; i < dwords; i++) {
 672                *(buf + i) = readl(dev->rxfifo);
 673        }
 674        return 0;
 675}
 676
 677/* Read bytes from RX fifo for OUT transfers */
 678static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
 679{
 680        int i, j;
 681        u32 tmp;
 682
 683        VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);
 684
 685        /* dwords first */
 686        for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) {
 687                *((u32 *)(buf + (i<<2))) = readl(dev->rxfifo);
 688        }
 689
 690        /* remaining bytes must be read by byte access */
 691        if (bytes % UDC_DWORD_BYTES) {
 692                tmp = readl(dev->rxfifo);
 693                for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
 694                        *(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK);
 695                        tmp = tmp >> UDC_BITS_PER_BYTE;
 696                }
 697        }
 698
 699        return 0;
 700}
 701
 702/* Read data from RX fifo for OUT transfers */
 703static int
 704udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
 705{
 706        u8 *buf;
 707        unsigned buf_space;
 708        unsigned bytes = 0;
 709        unsigned finished = 0;
 710
 711        /* received number bytes */
 712        bytes = readl(&ep->regs->sts);
 713        bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);
 714
 715        buf_space = req->req.length - req->req.actual;
 716        buf = req->req.buf + req->req.actual;
 717        if (bytes > buf_space) {
 718                if ((buf_space % ep->ep.maxpacket) != 0) {
 719                        DBG(ep->dev,
 720                                "%s: rx %d bytes, rx-buf space = %d bytesn\n",
 721                                ep->ep.name, bytes, buf_space);
 722                        req->req.status = -EOVERFLOW;
 723                }
 724                bytes = buf_space;
 725        }
 726        req->req.actual += bytes;
 727
 728        /* last packet ? */
 729        if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
 730                || ((req->req.actual == req->req.length) && !req->req.zero))
 731                finished = 1;
 732
 733        /* read rx fifo bytes */
 734        VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
 735        udc_rxfifo_read_bytes(ep->dev, buf, bytes);
 736
 737        return finished;
 738}
 739
/*
 * prep_dma - create/re-init a DMA descriptor or a DMA descriptor chain
 * for @req.  IN descriptors are left in HOST BUSY state (armed later);
 * OUT descriptors are set HOST READY immediately and NAK is cleared so
 * the host may send data.  Returns 0 or a negative errno from chain
 * creation.
 */
static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
{
        int     retval = 0;
        u32     tmp;

        VDBG(ep->dev, "prep_dma\n");
        VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
                        ep->num, req->td_data);

        /* set buffer pointer */
        req->td_data->bufptr = req->req.dma;

        /* set last bit */
        req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);

        /* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
        if (use_dma_ppb) {

                retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
                if (retval != 0) {
                        if (retval == -ENOMEM)
                                DBG(ep->dev, "Out of DMA memory\n");
                        return retval;
                }
                if (ep->in) {
                        if (req->req.length == ep->ep.maxpacket) {
                                /* exactly one packet: write tx bytes */
                                req->td_data->status =
                                        AMD_ADDBITS(req->td_data->status,
                                                ep->ep.maxpacket,
                                                UDC_DMA_IN_STS_TXBYTES);

                        }
                }

        }

        if (ep->in) {
                VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
                                "maxpacket=%d ep%d\n",
                                use_dma_ppb, req->req.length,
                                ep->ep.maxpacket, ep->num);
                /*
                 * if bytes < max packet then tx bytes must
                 * be written in packet per buffer mode
                 */
                if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
                                || ep->num == UDC_EP0OUT_IX
                                || ep->num == UDC_EP0IN_IX) {
                        /* write tx bytes */
                        req->td_data->status =
                                AMD_ADDBITS(req->td_data->status,
                                                req->req.length,
                                                UDC_DMA_IN_STS_TXBYTES);
                        /* reset frame num */
                        req->td_data->status =
                                AMD_ADDBITS(req->td_data->status,
                                                0,
                                                UDC_DMA_IN_STS_FRAMENUM);
                }
                /* set HOST BUSY: descriptor armed later when tx starts */
                req->td_data->status =
                        AMD_ADDBITS(req->td_data->status,
                                UDC_DMA_STP_STS_BS_HOST_BUSY,
                                UDC_DMA_STP_STS_BS);
        } else {
                VDBG(ep->dev, "OUT set host ready\n");
                /* set HOST READY so the controller may fill the buffer */
                req->td_data->status =
                        AMD_ADDBITS(req->td_data->status,
                                UDC_DMA_STP_STS_BS_HOST_READY,
                                UDC_DMA_STP_STS_BS);

                /* clear NAK by writing CNAK, now that the desc is ready */
                if (ep->naking) {
                        tmp = readl(&ep->regs->ctl);
                        tmp |= AMD_BIT(UDC_EPCTL_CNAK);
                        writel(tmp, &ep->regs->ctl);
                        ep->naking = 0;
                        UDC_QUEUE_CNAK(ep, ep->num);
                }

        }

        return retval;
}
 828
/*
 * complete_req - finish @req on @ep with status @sts: unmap DMA,
 * dequeue, then invoke the gadget's completion callback with the
 * device lock dropped.  Caller MUST hold dev->lock.
 */
static void
complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
__releases(ep->dev->lock)
__acquires(ep->dev->lock)
{
        struct udc              *dev;
        unsigned                halted;

        VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);

        dev = ep->dev;
        /* unmap DMA only if this driver created the mapping */
        if (req->dma_mapping) {
                if (ep->in)
                        pci_unmap_single(dev->pdev,
                                        req->req.dma,
                                        req->req.length,
                                        PCI_DMA_TODEVICE);
                else
                        pci_unmap_single(dev->pdev,
                                        req->req.dma,
                                        req->req.length,
                                        PCI_DMA_FROMDEVICE);
                req->dma_mapping = 0;
                req->req.dma = DMA_DONT_USE;
        }

        /* mark ep halted across the callback, restore afterwards */
        halted = ep->halted;
        ep->halted = 1;

        /* set new status if pending */
        if (req->req.status == -EINPROGRESS)
                req->req.status = sts;

        /* remove from ep queue */
        list_del_init(&req->queue);

        VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
                &req->req, req->req.length, ep->ep.name, sts);

        /* drop the lock: the callback may re-queue or sleep-free */
        spin_unlock(&dev->lock);
        req->req.complete(&ep->ep, &req->req);
        spin_lock(&dev->lock);
        ep->halted = halted;
}
 875
 876/* frees pci pool descriptors of a DMA chain */
 877static int udc_free_dma_chain(struct udc *dev, struct udc_request *req)
 878{
 879
 880        int ret_val = 0;
 881        struct udc_data_dma     *td;
 882        struct udc_data_dma     *td_last = NULL;
 883        unsigned int i;
 884
 885        DBG(dev, "free chain req = %p\n", req);
 886
 887        /* do not free first desc., will be done by free for request */
 888        td_last = req->td_data;
 889        td = phys_to_virt(td_last->next);
 890
 891        for (i = 1; i < req->chain_len; i++) {
 892
 893                pci_pool_free(dev->data_requests, td,
 894                                (dma_addr_t) td_last->next);
 895                td_last = td;
 896                td = phys_to_virt(td_last->next);
 897        }
 898
 899        return ret_val;
 900}
 901
 902/* Iterates to the end of a DMA chain and returns last descriptor */
 903static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
 904{
 905        struct udc_data_dma     *td;
 906
 907        td = req->td_data;
 908        while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
 909                td = phys_to_virt(td->next);
 910        }
 911
 912        return td;
 913
 914}
 915
 916/* Iterates to the end of a DMA chain and counts bytes received */
 917static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
 918{
 919        struct udc_data_dma     *td;
 920        u32 count;
 921
 922        td = req->td_data;
 923        /* received number bytes */
 924        count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);
 925
 926        while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
 927                td = phys_to_virt(td->next);
 928                /* received number bytes */
 929                if (td) {
 930                        count += AMD_GETBITS(td->status,
 931                                UDC_DMA_OUT_STS_RXBYTES);
 932                }
 933        }
 934
 935        return count;
 936
 937}
 938
/*
 * Creates or re-inits a DMA descriptor chain for a request whose length
 * exceeds one buffer (buf_len bytes, usually ep->ep.maxpacket).
 * An already allocated chain of sufficient length is reused; a shorter
 * one is freed and replaced.  Returns 0 or -ENOMEM.
 *
 * NOTE(review): on -ENOMEM req->chain_len has already been updated to
 * the new length although the chain is only partially built — a later
 * udc_free_dma_chain() could walk unallocated links; confirm callers
 * release the request in that case.
 */
static int udc_create_dma_chain(
	struct udc_ep *ep,
	struct udc_request *req,
	unsigned long buf_len, gfp_t gfp_flags
)
{
	unsigned long bytes = req->req.length;
	unsigned int i;
	dma_addr_t dma_addr;
	struct udc_data_dma	*td = NULL;
	struct udc_data_dma	*last = NULL;
	unsigned long txbytes;
	unsigned create_new_chain = 0;
	unsigned len;

	VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
			bytes, buf_len);
	dma_addr = DMA_DONT_USE;

	/*
	 * unset L bit in first desc for OUT
	 * (IN/OUT status layouts presumably share the L bit position —
	 * UDC_DMA_IN_STS_L is used for both directions here)
	 */
	if (!ep->in) {
		req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);
	}

	/* alloc only new desc's if not already available */
	len = req->req.length / ep->ep.maxpacket;
	if (req->req.length % ep->ep.maxpacket) {
		len++;
	}

	if (len > req->chain_len) {
		/* shorter chain already allocated before */
		if (req->chain_len > 1) {
			udc_free_dma_chain(ep->dev, req);
		}
		req->chain_len = len;
		create_new_chain = 1;
	}

	td = req->td_data;
	/* gen. required number of descriptors and buffers */
	for (i = buf_len; i < bytes; i += buf_len) {
		/* create or determine next desc. */
		if (create_new_chain) {

			td = pci_pool_alloc(ep->dev->data_requests,
					gfp_flags, &dma_addr);
			if (!td)
				return -ENOMEM;

			td->status = 0;
		} else if (i == buf_len) {
			/* first td (reuse link stored in the head desc.) */
			td = (struct udc_data_dma *) phys_to_virt(
						req->td_data->next);
			td->status = 0;
		} else {
			/* reuse the link stored in the previous desc. */
			td = (struct udc_data_dma *) phys_to_virt(last->next);
			td->status = 0;
		}


		if (td)
			td->bufptr = req->req.dma + i; /* assign buffer */
		else
			break;

		/* short packet ? */
		if ((bytes - i) >= buf_len) {
			txbytes = buf_len;
		} else {
			/* short packet */
			txbytes = bytes - i;
		}

		/* link td and assign tx bytes */
		if (i == buf_len) {
			if (create_new_chain) {
				req->td_data->next = dma_addr;
			} else {
				/* req->td_data->next = virt_to_phys(td); */
			}
			/* write tx bytes */
			if (ep->in) {
				/* first desc */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
							ep->ep.maxpacket,
							UDC_DMA_IN_STS_TXBYTES);
				/* second desc */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		} else {
			if (create_new_chain) {
				last->next = dma_addr;
			} else {
				/* last->next = virt_to_phys(td); */
			}
			if (ep->in) {
				/* write tx bytes */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		}
		last = td;
	}
	/* set last bit */
	if (td) {
		td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* last desc. points to itself */
		req->td_data_last = td;
	}

	return 0;
}
1058
1059/* Enabling RX DMA */
1060static void udc_set_rde(struct udc *dev)
1061{
1062        u32 tmp;
1063
1064        VDBG(dev, "udc_set_rde()\n");
1065        /* stop RDE timer */
1066        if (timer_pending(&udc_timer)) {
1067                set_rde = 0;
1068                mod_timer(&udc_timer, jiffies - 1);
1069        }
1070        /* set RDE */
1071        tmp = readl(&dev->regs->ctl);
1072        tmp |= AMD_BIT(UDC_DEVCTL_RDE);
1073        writel(tmp, &dev->regs->ctl);
1074}
1075
/*
 * Queues a request packet, called by gadget driver.
 *
 * Validates the request, creates a DMA mapping if the caller did not,
 * and either starts the transfer directly (empty ep queue) or appends
 * the request for later processing by the ISR.  Zero-length packets on
 * an empty queue are completed immediately (IN zlp's are produced by
 * hardware) and may acknowledge pending set_config/set_intf handling.
 * Returns 0 or a negative errno.
 */
static int
udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
{
	int			retval = 0;
	u8			open_rxfifo = 0;
	unsigned long		iflags;
	struct udc_ep		*ep;
	struct udc_request	*req;
	struct udc		*dev;
	u32			tmp;

	/* check the inputs */
	req = container_of(usbreq, struct udc_request, req);

	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
			|| !list_empty(&req->queue))
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;

	VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
	dev = ep->dev;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* map dma (usually done before by the gadget driver) */
	if (ep->dma && usbreq->length != 0
			&& (usbreq->dma == DMA_DONT_USE || usbreq->dma == 0)) {
		VDBG(dev, "DMA map req %p\n", req);
		if (ep->in)
			usbreq->dma = pci_map_single(dev->pdev,
						usbreq->buf,
						usbreq->length,
						PCI_DMA_TODEVICE);
		else
			usbreq->dma = pci_map_single(dev->pdev,
						usbreq->buf,
						usbreq->length,
						PCI_DMA_FROMDEVICE);
		/* remember that complete_req() must unmap */
		req->dma_mapping = 1;
	}

	VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
			usbep->name, usbreq, usbreq->length,
			req->td_data, usbreq->buf);

	spin_lock_irqsave(&dev->lock, iflags);
	usbreq->actual = 0;
	usbreq->status = -EINPROGRESS;
	req->dma_done = 0;

	/* on empty queue just do first transfer */
	if (list_empty(&ep->queue)) {
		/* zlp */
		if (usbreq->length == 0) {
			/* IN zlp's are handled by hardware */
			complete_req(ep, req, 0);
			VDBG(dev, "%s: zlp\n", ep->ep.name);
			/*
			 * if set_config or set_intf is waiting for ack by zlp
			 * then set CSR_DONE
			 */
			if (dev->set_cfg_not_acked) {
				tmp = readl(&dev->regs->ctl);
				tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
				writel(tmp, &dev->regs->ctl);
				dev->set_cfg_not_acked = 0;
			}
			/* setup command is ACK'ed now by zlp */
			if (dev->waiting_zlp_ack_ep0in) {
				/* clear NAK by writing CNAK in EP0_IN */
				tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
				dev->ep[UDC_EP0IN_IX].naking = 0;
				UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
							UDC_EP0IN_IX);
				dev->waiting_zlp_ack_ep0in = 0;
			}
			goto finished;
		}
		if (ep->dma) {
			/* build/refresh the descriptor (chain) */
			retval = prep_dma(ep, req, gfp);
			if (retval != 0)
				goto finished;
			/* write desc pointer to enable DMA */
			if (ep->in) {
				/* set HOST READY */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);
			}

			/* disabled rx dma while descriptor update */
			if (!ep->in) {
				/* stop RDE timer */
				if (timer_pending(&udc_timer)) {
					set_rde = 0;
					mod_timer(&udc_timer, jiffies - 1);
				}
				/* clear RDE */
				tmp = readl(&dev->regs->ctl);
				tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
				writel(tmp, &dev->regs->ctl);
				/* re-enable RDE after the request is queued */
				open_rxfifo = 1;

				/*
				 * if BNA occurred then let BNA dummy desc.
				 * point to current desc.
				 */
				if (ep->bna_occurred) {
					VDBG(dev, "copy to BNA dummy desc.\n");
					memcpy(ep->bna_dummy_req->td_data,
						req->td_data,
						sizeof(struct udc_data_dma));
				}
			}
			/* write desc pointer */
			writel(req->td_phys, &ep->regs->desptr);

			/* clear NAK by writing CNAK */
			if (ep->naking) {
				tmp = readl(&ep->regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &ep->regs->ctl);
				ep->naking = 0;
				UDC_QUEUE_CNAK(ep, ep->num);
			}

			if (ep->in) {
				/* enable ep irq */
				tmp = readl(&dev->regs->ep_irqmsk);
				tmp &= AMD_UNMASK_BIT(ep->num);
				writel(tmp, &dev->regs->ep_irqmsk);
			}
		} else if (ep->in) {
				/* enable ep irq (PIO mode) */
				tmp = readl(&dev->regs->ep_irqmsk);
				tmp &= AMD_UNMASK_BIT(ep->num);
				writel(tmp, &dev->regs->ep_irqmsk);
			}

	} else if (ep->dma) {

		/*
		 * prep_dma not used for OUT ep's, this is not possible
		 * for PPB modes, because of chain creation reasons
		 */
		if (ep->in) {
			retval = prep_dma(ep, req, gfp);
			if (retval != 0)
				goto finished;
		}
	}
	VDBG(dev, "list_add\n");
	/* add request to ep queue */
	if (req) {

		list_add_tail(&req->queue, &ep->queue);

		/* open rxfifo if out data queued */
		if (open_rxfifo) {
			/* enable DMA */
			req->dma_going = 1;
			udc_set_rde(dev);
			if (ep->num != UDC_EP0OUT_IX)
				dev->data_ep_queued = 1;
		}
		/* stop OUT naking */
		if (!ep->in) {
			if (!use_dma && udc_rxfifo_pending) {
				DBG(dev, "udc_queue(): pending bytes in "
					"rxfifo after nyet\n");
				/*
				 * read pending bytes afer nyet:
				 * referring to isr
				 */
				if (udc_rxfifo_read(ep, req)) {
					/* finish */
					complete_req(ep, req, 0);
				}
				udc_rxfifo_pending = 0;

			}
		}
	}

finished:
	spin_unlock_irqrestore(&dev->lock, iflags);
	return retval;
}
1272
1273/* Empty request queue of an endpoint; caller holds spinlock */
1274static void empty_req_queue(struct udc_ep *ep)
1275{
1276        struct udc_request      *req;
1277
1278        ep->halted = 1;
1279        while (!list_empty(&ep->queue)) {
1280                req = list_entry(ep->queue.next,
1281                        struct udc_request,
1282                        queue);
1283                complete_req(ep, req, -ESHUTDOWN);
1284        }
1285}
1286
/*
 * Dequeues a request packet, called by gadget driver.
 *
 * If the request is at the head of the queue and a DMA transfer is in
 * flight, the transfer is cancelled: for IN ep's the ISR is told to
 * cancel later; for OUT ep's receive DMA is paused and, if the
 * descriptor is still untouched (HOST_READY), redirected to the BNA
 * dummy descriptor.  The request completes with -ECONNRESET.
 */
static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep		*ep;
	struct udc_request	*req;
	unsigned		halted;
	unsigned long		iflags;

	/* container_of is pointer arithmetic only; !usbep is checked below */
	ep = container_of(usbep, struct udc_ep, ep);
	if (!usbep || !usbreq || (!ep->desc && (ep->num != 0
				&& ep->num != UDC_EP0OUT_IX)))
		return -EINVAL;

	req = container_of(usbreq, struct udc_request, req);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	halted = ep->halted;
	ep->halted = 1;
	/* request in processing or next one */
	if (ep->queue.next == &req->queue) {
		if (ep->dma && req->dma_going) {
			if (ep->in)
				ep->cancel_transfer = 1;
			else {
				u32 tmp;
				u32 dma_sts;
				/* stop potential receive DMA */
				tmp = readl(&udc->regs->ctl);
				writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
							&udc->regs->ctl);
				/*
				 * Cancel transfer later in ISR
				 * if descriptor was touched.
				 */
				dma_sts = AMD_GETBITS(req->td_data->status,
							UDC_DMA_OUT_STS_BS);
				if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
					ep->cancel_transfer = 1;
				else {
					udc_init_bna_dummy(ep->req);
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
				}
				/* restore previous RDE state */
				writel(tmp, &udc->regs->ctl);
			}
		}
	}
	complete_req(ep, req, -ECONNRESET);
	ep->halted = halted;

	spin_unlock_irqrestore(&ep->dev->lock, iflags);
	return 0;
}
1340
/*
 * Halt or clear halt of endpoint, called by gadget driver.
 *
 * halt != 0: sets the STALL bit (ep0 IN stall is deferred via
 * dev->stall_ep0in) and arms the poll-stall timer which presumably
 * re-checks/clears the stall condition later.  halt == 0: clears the
 * STALL bit and un-NAKs the endpoint.  Returns 0 or a negative errno.
 */
static int
udc_set_halt(struct usb_ep *usbep, int halt)
{
	struct udc_ep	*ep;
	u32 tmp;
	unsigned long iflags;
	int retval = 0;

	if (!usbep)
		return -EINVAL;

	pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	/* halt or clear halt */
	if (halt) {
		if (ep->num == 0)
			ep->dev->stall_ep0in = 1;
		else {
			/*
			 * set STALL
			 * rxfifo empty not taken into acount
			 */
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 1;

			/* setup poll timer */
			if (!timer_pending(&udc_pollstall_timer)) {
				udc_pollstall_timer.expires = jiffies +
					HZ * UDC_POLLSTALL_TIMER_USECONDS
					/ (1000 * 1000);
				if (!stop_pollstall_timer) {
					DBG(ep->dev, "start polltimer\n");
					add_timer(&udc_pollstall_timer);
				}
			}
		}
	} else {
		/* ep is halted by set_halt() before */
		if (ep->halted) {
			tmp = readl(&ep->regs->ctl);
			/* clear stall bit */
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return retval;
}
1403
/* gadget interface: endpoint operations exported via struct usb_ep */
static const struct usb_ep_ops udc_ep_ops = {
	.enable		= udc_ep_enable,
	.disable	= udc_ep_disable,

	.alloc_request	= udc_alloc_request,
	.free_request	= udc_free_request,

	.queue		= udc_queue,
	.dequeue	= udc_dequeue,

	.set_halt	= udc_set_halt,
	/* fifo ops not implemented */
};
1418
1419/*-------------------------------------------------------------------------*/
1420
/*
 * Get frame counter (not implemented): always reports -EOPNOTSUPP
 * to the gadget driver.
 */
static int udc_get_frame(struct usb_gadget *gadget)
{
	return -EOPNOTSUPP;
}
1426
1427/* Remote wakeup gadget interface */
1428static int udc_wakeup(struct usb_gadget *gadget)
1429{
1430        struct udc              *dev;
1431
1432        if (!gadget)
1433                return -EINVAL;
1434        dev = container_of(gadget, struct udc, gadget);
1435        udc_remote_wakeup(dev);
1436
1437        return 0;
1438}
1439
/* gadget operations exported via struct usb_gadget */
static const struct usb_gadget_ops udc_ops = {
	.wakeup		= udc_wakeup,
	.get_frame	= udc_get_frame,
};
1445
/*
 * Setups endpoint parameters, adds endpoints to linked list.
 * List order (status-IN, data-IN, data-OUT) determines the order in
 * which the gadget driver discovers the endpoints.
 */
static void make_ep_lists(struct udc *dev)
{
	/* make gadget ep lists */
	INIT_LIST_HEAD(&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
						&dev->gadget.ep_list);

	/* fifo config: IN buffer depends on enumerated speed */
	dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
	if (dev->gadget.speed == USB_SPEED_FULL)
		dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
	dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
}
1466
1467/* init registers at driver load time */
1468static int startup_registers(struct udc *dev)
1469{
1470        u32 tmp;
1471
1472        /* init controller by soft reset */
1473        udc_soft_reset(dev);
1474
1475        /* mask not needed interrupts */
1476        udc_mask_unused_interrupts(dev);
1477
1478        /* put into initial config */
1479        udc_basic_init(dev);
1480        /* link up all endpoints */
1481        udc_setup_endpoints(dev);
1482
1483        /* program speed */
1484        tmp = readl(&dev->regs->cfg);
1485        if (use_fullspeed) {
1486                tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
1487        } else {
1488                tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
1489        }
1490        writel(tmp, &dev->regs->cfg);
1491
1492        return 0;
1493}
1494
/*
 * Inits UDC context: stops driver timers, disables DMA, and programs
 * the basic device configuration (dynamic CSR, self powered, remote
 * wakeup capable).  Also (re)builds the gadget ep lists.
 */
static void udc_basic_init(struct udc *dev)
{
	u32	tmp;

	DBG(dev, "udc_basic_init()\n");

	dev->gadget.speed = USB_SPEED_UNKNOWN;

	/* stop RDE timer (make a pending timer expire immediately) */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* stop poll stall timer */
	if (timer_pending(&udc_pollstall_timer)) {
		mod_timer(&udc_pollstall_timer, jiffies - 1);
	}
	/* disable DMA (both receive and transmit) */
	tmp = readl(&dev->regs->ctl);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
	writel(tmp, &dev->regs->ctl);

	/* enable dynamic CSR programming */
	tmp = readl(&dev->regs->cfg);
	tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
	/* set self powered */
	tmp |= AMD_BIT(UDC_DEVCFG_SP);
	/* set remote wakeupable */
	tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
	writel(tmp, &dev->regs->cfg);

	make_ep_lists(dev);

	dev->data_ep_enabled = 0;
	dev->data_ep_queued = 0;
}
1533
/*
 * Sets initial endpoint parameters: reads the enumerated speed,
 * initializes per-ep bookkeeping (name, fifo depth, direction,
 * register pointers), NAKs not-yet-enabled OUT ep's in DMA mode, and
 * sets the ep0 max packet sizes for the enumerated speed.
 */
static void udc_setup_endpoints(struct udc *dev)
{
	struct udc_ep	*ep;
	u32	tmp;
	u32	reg;

	DBG(dev, "udc_setup_endpoints()\n");

	/* read enum speed from device status register */
	tmp = readl(&dev->regs->sts);
	tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
	if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH) {
		dev->gadget.speed = USB_SPEED_HIGH;
	} else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL) {
		dev->gadget.speed = USB_SPEED_FULL;
	}

	/* set basic ep parameters */
	for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
		ep = &dev->ep[tmp];
		ep->dev = dev;
		ep->ep.name = ep_string[tmp];
		ep->num = tmp;
		/* txfifo size is calculated at enable time */
		ep->txfifo = dev->txfifo;

		/* fifo size: first UDC_EPIN_NUM ep's are the IN ep's */
		if (tmp < UDC_EPIN_NUM) {
			ep->fifo_depth = UDC_TXFIFO_SIZE;
			ep->in = 1;
		} else {
			ep->fifo_depth = UDC_RXFIFO_SIZE;
			ep->in = 0;

		}
		ep->regs = &dev->ep_regs[tmp];
		/*
		 * ep will be reset only if ep was not enabled before to avoid
		 * disabling ep interrupts when ENUM interrupt occurs but ep is
		 * not enabled by gadget driver
		 */
		if (!ep->desc) {
			ep_init(dev->regs, ep);
		}

		if (use_dma) {
			/*
			 * ep->dma is not really used, just to indicate that
			 * DMA is active: remove this
			 * dma regs = dev control regs
			 */
			ep->dma = &dev->regs->ctl;

			/* nak OUT endpoints until enable - not for ep0 */
			if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
						&& tmp > UDC_EPIN_NUM) {
				/* set NAK */
				reg = readl(&dev->ep[tmp].regs->ctl);
				reg |= AMD_BIT(UDC_EPCTL_SNAK);
				writel(reg, &dev->ep[tmp].regs->ctl);
				dev->ep[tmp].naking = 1;

			}
		}
	}
	/* EP0 max packet sizes for the enumerated speed */
	if (dev->gadget.speed == USB_SPEED_FULL) {
		dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_FS_EP0IN_MAX_PKT_SIZE;
		dev->ep[UDC_EP0OUT_IX].ep.maxpacket =
						UDC_FS_EP0OUT_MAX_PKT_SIZE;
	} else if (dev->gadget.speed == USB_SPEED_HIGH) {
		dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
		dev->ep[UDC_EP0OUT_IX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
	}

	/*
	 * with suspend bug workaround, ep0 params for gadget driver
	 * are set at gadget driver bind() call
	 */
	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
	dev->ep[UDC_EP0IN_IX].halted = 0;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* init cfg/alt/int */
	dev->cur_config = 0;
	dev->cur_intf = 0;
	dev->cur_alt = 0;
}
1623
/*
 * Bringup after Connect event, initial bringup to be ready for ep0 events
 *
 * Marks the device as connected, re-runs the basic controller init and
 * unmasks the device-level setup interrupts so that ENUM/SETUP events
 * can be processed.  Call order matters: the controller must be in its
 * initial configuration before setup interrupts are enabled.
 */
static void usb_connect(struct udc *dev)
{

	dev_info(&dev->pdev->dev, "USB Connect\n");

	dev->connected = 1;

	/* put into initial config */
	udc_basic_init(dev);

	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);
}
1638
/*
 * Calls gadget with disconnect event and resets the UDC and makes
 * initial bringup to be ready for ep0 events
 *
 * Only masks interrupts and schedules the disconnect tasklet here; the
 * actual gadget-driver callback, queue flushing and soft reset happen in
 * udc_tasklet_disconnect() outside of hard-irq context.
 */
static void usb_disconnect(struct udc *dev)
{

	dev_info(&dev->pdev->dev, "USB Disconnect\n");

	dev->connected = 0;

	/* mask interrupts */
	udc_mask_unused_interrupts(dev);

	/* REVISIT there doesn't seem to be a point to having this
	 * talk to a tasklet ... do it directly, we already hold
	 * the spinlock needed to process the disconnect.
	 */

	tasklet_schedule(&disconnect_tasklet);
}
1660
/*
 * Tasklet for disconnect to be outside of interrupt context
 *
 * @par: pointer to a (struct udc *) cell, i.e. the address of the global
 *       udc pointer (double indirection, dereferenced below).
 *
 * Notifies the gadget driver, empties all request queues, disables ep0,
 * performs a one-time soft reset (guarded by the soft_reset_occured
 * flag), re-enables setup interrupts and optionally forces the device
 * config back to full speed.
 */
static void udc_tasklet_disconnect(unsigned long par)
{
	struct udc *dev = (struct udc *)(*((struct udc **) par));
	u32 tmp;

	DBG(dev, "Tasklet disconnect\n");
	spin_lock_irq(&dev->lock);

	if (dev->driver) {
		/*
		 * drop only the spinlock (irqs stay disabled) while
		 * calling into the gadget driver, which may re-enter
		 * the UDC via ep ops that take dev->lock
		 */
		spin_unlock(&dev->lock);
		dev->driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);

		/* empty queues */
		for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
			empty_req_queue(&dev->ep[tmp]);
		}

	}

	/* disable ep0 */
	ep_init(dev->regs,
			&dev->ep[UDC_EP0IN_IX]);


	if (!soft_reset_occured) {
		/* init controller by soft reset */
		udc_soft_reset(dev);
		soft_reset_occured++;
	}

	/* re-enable dev interrupts */
	udc_enable_dev_setup_interrupts(dev);
	/* back to full speed ? */
	if (use_fullspeed) {
		tmp = readl(&dev->regs->cfg);
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
		writel(tmp, &dev->regs->cfg);
	}

	spin_unlock_irq(&dev->lock);
}
1704
/*
 * Reset the UDC core
 *
 * Clears any pending endpoint and device interrupt status first (status
 * would be lost across the reset anyway), then pulses the SOFTRESET bit
 * in the device config register under udc_irq_spinlock.  The trailing
 * readl() flushes the posted write to the hardware.
 */
static void udc_soft_reset(struct udc *dev)
{
	unsigned long	flags;

	DBG(dev, "Soft reset\n");
	/*
	 * reset possible waiting interrupts, because int.
	 * status is lost after soft reset,
	 * ep int. status reset
	 */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
	/* device int. status reset */
	writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);

	spin_lock_irqsave(&udc_irq_spinlock, flags);
	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
	readl(&dev->regs->cfg);
	spin_unlock_irqrestore(&udc_irq_spinlock, flags);

}
1726
/*
 * RDE timer callback to set RDE bit
 *
 * Implements the set_rde state machine (see udc_queue()/udc_ep0_set_rde):
 *   set_rde >  1: RX fifo held data on the previous expiry - open the
 *                 fifo now by setting DEVCTL RDE, then go idle (-1)
 *   set_rde == 1: check fifo; if empty, re-arm a short poll, otherwise
 *                 bump to 2 and wait a long interval before opening
 *   set_rde <= 0: RDE was already set by udc_queue(), nothing to do
 * Completes on_exit when stop_timer is set so module unload can wait.
 */
static void udc_timer_function(unsigned long v)
{
	u32 tmp;

	spin_lock_irq(&udc_irq_spinlock);

	if (set_rde > 0) {
		/*
		 * open the fifo if fifo was filled on last timer call
		 * conditionally
		 */
		if (set_rde > 1) {
			/* set RDE to receive setup data */
			tmp = readl(&udc->regs->ctl);
			tmp |= AMD_BIT(UDC_DEVCTL_RDE);
			writel(tmp, &udc->regs->ctl);
			set_rde = -1;
		} else if (readl(&udc->regs->sts)
				& AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
			/*
			 * if fifo empty setup polling, do not just
			 * open the fifo
			 */
			udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
			if (!stop_timer) {
				add_timer(&udc_timer);
			}
		} else {
			/*
			 * fifo contains data now, setup timer for opening
			 * the fifo when timer expires to be able to receive
			 * setup packets, when data packets gets queued by
			 * gadget layer then timer will forced to expire with
			 * set_rde=0 (RDE is set in udc_queue())
			 */
			set_rde++;
			/* debug: lhadmot_timer_start = 221070 */
			udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
			if (!stop_timer) {
				add_timer(&udc_timer);
			}
		}

	} else
		set_rde = -1; /* RDE was set by udc_queue() */
	spin_unlock_irq(&udc_irq_spinlock);
	if (stop_timer)
		complete(&on_exit);

}
1778
1779/* Handle halt state, used in stall poll timer */
1780static void udc_handle_halt_state(struct udc_ep *ep)
1781{
1782        u32 tmp;
1783        /* set stall as long not halted */
1784        if (ep->halted == 1) {
1785                tmp = readl(&ep->regs->ctl);
1786                /* STALL cleared ? */
1787                if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
1788                        /*
1789                         * FIXME: MSC spec requires that stall remains
1790                         * even on receivng of CLEAR_FEATURE HALT. So
1791                         * we would set STALL again here to be compliant.
1792                         * But with current mass storage drivers this does
1793                         * not work (would produce endless host retries).
1794                         * So we clear halt on CLEAR_FEATURE.
1795                         *
1796                        DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
1797                        tmp |= AMD_BIT(UDC_EPCTL_S);
1798                        writel(tmp, &ep->regs->ctl);*/
1799
1800                        /* clear NAK by writing CNAK */
1801                        tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1802                        writel(tmp, &ep->regs->ctl);
1803                        ep->halted = 0;
1804                        UDC_QUEUE_CNAK(ep, ep->num);
1805                }
1806        }
1807}
1808
1809/* Stall timer callback to poll S bit and set it again after */
1810static void udc_pollstall_timer_function(unsigned long v)
1811{
1812        struct udc_ep *ep;
1813        int halted = 0;
1814
1815        spin_lock_irq(&udc_stall_spinlock);
1816        /*
1817         * only one IN and OUT endpoints are handled
1818         * IN poll stall
1819         */
1820        ep = &udc->ep[UDC_EPIN_IX];
1821        udc_handle_halt_state(ep);
1822        if (ep->halted)
1823                halted = 1;
1824        /* OUT poll stall */
1825        ep = &udc->ep[UDC_EPOUT_IX];
1826        udc_handle_halt_state(ep);
1827        if (ep->halted)
1828                halted = 1;
1829
1830        /* setup timer again when still halted */
1831        if (!stop_pollstall_timer && halted) {
1832                udc_pollstall_timer.expires = jiffies +
1833                                        HZ * UDC_POLLSTALL_TIMER_USECONDS
1834                                        / (1000 * 1000);
1835                add_timer(&udc_pollstall_timer);
1836        }
1837        spin_unlock_irq(&udc_stall_spinlock);
1838
1839        if (stop_pollstall_timer)
1840                complete(&on_pollstall_exit);
1841}
1842
/*
 * Inits endpoint 0 so that SETUP packets are processed
 *
 * Flushes the ep0-IN fifo, programs ep0 buffer/packet sizes for the
 * current enumerated speed (full or high), primes the ep0-OUT DMA
 * descriptors when DMA is in use, enables the global DMA mode bits and
 * finally un-NAKs both ep0 directions.  Also cancels any pending RDE
 * and pollstall timers, since ep0-OUT DMA takes over fifo handling.
 */
static void activate_control_endpoints(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "activate_control_endpoints\n");

	/* flush fifo */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_F);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);

	/* set ep0 directions */
	dev->ep[UDC_EP0IN_IX].in = 1;
	dev->ep[UDC_EP0OUT_IX].in = 0;

	/* set buffer size (tx fifo entries) of EP0_IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
					UDC_EPIN_BUFF_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
					UDC_EPIN_BUFF_SIZE);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);

	/* set max packet size of EP0_IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
				UDC_EP_MAX_PKT_SIZE);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);

	/* set max packet size of EP0_OUT */
	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);

	/* set max packet size of EP0 in UDC CSR */
	tmp = readl(&dev->csr->ne[0]);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
					UDC_CSR_NE_MAX_PKT);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
					UDC_CSR_NE_MAX_PKT);
	writel(tmp, &dev->csr->ne[0]);

	if (use_dma) {
		/* mark ep0-OUT descriptor as last in its chain */
		dev->ep[UDC_EP0OUT_IX].td->status |=
			AMD_BIT(UDC_DMA_OUT_STS_L);
		/* write dma desc address */
		writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
			&dev->ep[UDC_EP0OUT_IX].regs->subptr);
		writel(dev->ep[UDC_EP0OUT_IX].td_phys,
			&dev->ep[UDC_EP0OUT_IX].regs->desptr);
		/* stop RDE timer: force immediate expiry via past jiffies */
		if (timer_pending(&udc_timer)) {
			set_rde = 0;
			mod_timer(&udc_timer, jiffies - 1);
		}
		/* stop pollstall timer */
		if (timer_pending(&udc_pollstall_timer)) {
			mod_timer(&udc_pollstall_timer, jiffies - 1);
		}
		/* enable DMA */
		tmp = readl(&dev->regs->ctl);
		tmp |= AMD_BIT(UDC_DEVCTL_MODE)
				| AMD_BIT(UDC_DEVCTL_RDE)
				| AMD_BIT(UDC_DEVCTL_TDE);
		if (use_dma_bufferfill_mode) {
			tmp |= AMD_BIT(UDC_DEVCTL_BF);
		} else if (use_dma_ppb_du) {
			tmp |= AMD_BIT(UDC_DEVCTL_DU);
		}
		writel(tmp, &dev->regs->ctl);
	}

	/* clear NAK by writing CNAK for EP0IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
	dev->ep[UDC_EP0IN_IX].naking = 0;
	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);

	/* clear NAK by writing CNAK for EP0OUT */
	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
	dev->ep[UDC_EP0OUT_IX].naking = 0;
	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
}
1943
/*
 * Make endpoint 0 ready for control traffic
 *
 * Activates the ep0 hardware first, then unmasks ep0 and device setup
 * interrupts in that order.  Always returns 0.
 */
static int setup_ep0(struct udc *dev)
{
	activate_control_endpoints(dev);
	/* enable ep0 interrupts */
	udc_enable_ep0_interrupts(dev);
	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);

	return 0;
}
1955
/*
 * Called by gadget driver to register itself
 *
 * @driver: gadget driver; must provide bind() and setup() and declare
 *          USB_SPEED_HIGH (this controller is high-speed capable).
 *
 * Returns 0 on success, -EINVAL for an unusable driver, -ENODEV when
 * the controller was not probed, -EBUSY when a driver is already bound,
 * or the driver's own bind() error.  On success ep0 is set up, the
 * soft-disconnect bit is cleared and the connect sequence is started.
 */
int usb_gadget_register_driver(struct usb_gadget_driver *driver)
{
	struct udc		*dev = udc;
	int			retval;
	u32 tmp;

	if (!driver || !driver->bind || !driver->setup
			|| driver->speed != USB_SPEED_HIGH)
		return -EINVAL;
	if (!dev)
		return -ENODEV;
	if (dev->driver)
		return -EBUSY;

	driver->driver.bus = NULL;
	dev->driver = driver;
	dev->gadget.dev.driver = &driver->driver;

	retval = driver->bind(&dev->gadget);

	/* Some gadget drivers use both ep0 directions.
	 * NOTE: to gadget driver, ep0 is just one endpoint...
	 */
	dev->ep[UDC_EP0OUT_IX].ep.driver_data =
		dev->ep[UDC_EP0IN_IX].ep.driver_data;

	if (retval) {
		/* bind failed: undo the bookkeeping done above */
		DBG(dev, "binding to %s returning %d\n",
				driver->driver.name, retval);
		dev->driver = NULL;
		dev->gadget.dev.driver = NULL;
		return retval;
	}

	/* get ready for ep0 traffic */
	setup_ep0(dev);

	/* clear SD (soft disconnect) so the host can see the device */
	tmp = readl(&dev->regs->ctl);
	tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
	writel(tmp, &dev->regs->ctl);

	usb_connect(dev);

	return 0;
}
EXPORT_SYMBOL(usb_gadget_register_driver);
2004
/*
 * shutdown requests and disconnect from gadget
 *
 * Caller must hold dev->lock; the lock is dropped only around the
 * gadget driver's disconnect() callback (sparse annotations below).
 * Afterwards the hardware is re-initialized, all endpoint request
 * queues are emptied and the endpoint bookkeeping is reset.
 */
static void
shutdown(struct udc *dev, struct usb_gadget_driver *driver)
__releases(dev->lock)
__acquires(dev->lock)
{
	int tmp;

	if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
		spin_unlock(&dev->lock);
		driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);
	}

	/* empty queues and init hardware */
	udc_basic_init(dev);
	for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
		empty_req_queue(&dev->ep[tmp]);

	udc_setup_endpoints(dev);
}
2026
/*
 * Called by gadget driver to unregister itself
 *
 * @driver: must match the currently bound driver and provide unbind().
 *
 * Masks interrupts, runs the shared shutdown() path under dev->lock,
 * calls the driver's unbind() outside the lock, then sets the SD
 * (soft disconnect) bit so the host sees the device go away.
 * Returns 0 on success, -ENODEV/-EINVAL on bad arguments.
 */
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	struct udc	*dev = udc;
	unsigned long	flags;
	u32 tmp;

	if (!dev)
		return -ENODEV;
	if (!driver || driver != dev->driver || !driver->unbind)
		return -EINVAL;

	spin_lock_irqsave(&dev->lock, flags);
	udc_mask_unused_interrupts(dev);
	shutdown(dev, driver);
	spin_unlock_irqrestore(&dev->lock, flags);

	driver->unbind(&dev->gadget);
	dev->gadget.dev.driver = NULL;
	dev->driver = NULL;

	/* set SD */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_SD);
	writel(tmp, &dev->regs->ctl);


	DBG(dev, "%s: unregistered\n", driver->driver.name);

	return 0;
}
EXPORT_SYMBOL(usb_gadget_unregister_driver);
2059
2060
2061/* Clear pending NAK bits */
2062static void udc_process_cnak_queue(struct udc *dev)
2063{
2064        u32 tmp;
2065        u32 reg;
2066
2067        /* check epin's */
2068        DBG(dev, "CNAK pending queue processing\n");
2069        for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
2070                if (cnak_pending & (1 << tmp)) {
2071                        DBG(dev, "CNAK pending for ep%d\n", tmp);
2072                        /* clear NAK by writing CNAK */
2073                        reg = readl(&dev->ep[tmp].regs->ctl);
2074                        reg |= AMD_BIT(UDC_EPCTL_CNAK);
2075                        writel(reg, &dev->ep[tmp].regs->ctl);
2076                        dev->ep[tmp].naking = 0;
2077                        UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
2078                }
2079        }
2080        /* ...  and ep0out */
2081        if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
2082                DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
2083                /* clear NAK by writing CNAK */
2084                reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2085                reg |= AMD_BIT(UDC_EPCTL_CNAK);
2086                writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2087                dev->ep[UDC_EP0OUT_IX].naking = 0;
2088                UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
2089                                dev->ep[UDC_EP0OUT_IX].num);
2090        }
2091}
2092
2093/* Enabling RX DMA after setup packet */
2094static void udc_ep0_set_rde(struct udc *dev)
2095{
2096        if (use_dma) {
2097                /*
2098                 * only enable RXDMA when no data endpoint enabled
2099                 * or data is queued
2100                 */
2101                if (!dev->data_ep_enabled || dev->data_ep_queued) {
2102                        udc_set_rde(dev);
2103                } else {
2104                        /*
2105                         * setup timer for enabling RDE (to not enable
2106                         * RXFIFO DMA for data endpoints to early)
2107                         */
2108                        if (set_rde != 0 && !timer_pending(&udc_timer)) {
2109                                udc_timer.expires =
2110                                        jiffies + HZ/UDC_RDE_TIMER_DIV;
2111                                set_rde = 1;
2112                                if (!stop_timer) {
2113                                        add_timer(&udc_timer);
2114                                }
2115                        }
2116                }
2117        }
2118}
2119
2120
/*
 * Interrupt handler for data OUT traffic
 *
 * @dev:   controller instance
 * @ep_ix: index of the OUT endpoint that raised the interrupt
 *
 * Handles BNA (buffer-not-available) and HE (host error) conditions,
 * then completes the request at the head of the endpoint queue either
 * by PIO fifo read or by evaluating the DMA descriptor status (buffer
 * fill vs. packet-per-buffer modes), starts DMA for the next queued
 * request or implants the BNA dummy descriptor when the queue is empty,
 * processes pending CNAKs and finally clears the OUT status bits.
 * Returns IRQ_HANDLED when the event was consumed, IRQ_NONE otherwise.
 */
static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
{
	irqreturn_t		ret_val = IRQ_NONE;
	u32			tmp;
	struct udc_ep		*ep;
	struct udc_request	*req;
	unsigned int		count;
	struct udc_data_dma	*td = NULL;
	unsigned		dma_done;

	VDBG(dev, "ep%d irq\n", ep_ix);
	ep = &dev->ep[ep_ix];

	tmp = readl(&ep->regs->sts);
	if (use_dma) {
		/* BNA event ? */
		if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
			DBG(dev, "BNA ep%dout occured - DESPTR = %x \n",
					ep->num, readl(&ep->regs->desptr));
			/* clear BNA */
			writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
			/* a BNA during dequeue is expected, not an error */
			if (!ep->cancel_transfer)
				ep->bna_occurred = 1;
			else
				ep->cancel_transfer = 0;
			ret_val = IRQ_HANDLED;
			goto finished;
		}
	}
	/* HE event ? */
	if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
		dev_err(&dev->pdev->dev, "HE ep%dout occured\n", ep->num);

		/* clear HE */
		writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	if (!list_empty(&ep->queue)) {

		/* next request */
		req = list_entry(ep->queue.next,
			struct udc_request, queue);
	} else {
		/* no request queued: remember that rx data is waiting */
		req = NULL;
		udc_rxfifo_pending = 1;
	}
	VDBG(dev, "req = %p\n", req);
	/* fifo mode */
	if (!use_dma) {

		/* read fifo */
		if (req && udc_rxfifo_read(ep, req)) {
			ret_val = IRQ_HANDLED;

			/* finish */
			complete_req(ep, req, 0);
			/* next request */
			if (!list_empty(&ep->queue) && !ep->halted) {
				req = list_entry(ep->queue.next,
					struct udc_request, queue);
			} else
				req = NULL;
		}

	/* DMA */
	} else if (!ep->cancel_transfer && req != NULL) {
		ret_val = IRQ_HANDLED;

		/* check for DMA done */
		if (!use_dma_ppb) {
			dma_done = AMD_GETBITS(req->td_data->status,
						UDC_DMA_OUT_STS_BS);
		/* packet per buffer mode - rx bytes */
		} else {
			/*
			 * if BNA occurred then recover desc. from
			 * BNA dummy desc.
			 */
			if (ep->bna_occurred) {
				VDBG(dev, "Recover desc. from BNA dummy\n");
				memcpy(req->td_data, ep->bna_dummy_req->td_data,
						sizeof(struct udc_data_dma));
				ep->bna_occurred = 0;
				udc_init_bna_dummy(ep->req);
			}
			td = udc_get_last_dma_desc(req);
			dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
		}
		if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
			/* buffer fill mode - rx bytes */
			if (!use_dma_ppb) {
				/* received number bytes */
				count = AMD_GETBITS(req->td_data->status,
						UDC_DMA_OUT_STS_RXBYTES);
				VDBG(dev, "rx bytes=%u\n", count);
			/* packet per buffer mode - rx bytes */
			} else {
				VDBG(dev, "req->td_data=%p\n", req->td_data);
				VDBG(dev, "last desc = %p\n", td);
				/* received number bytes */
				if (use_dma_ppb_du) {
					/* every desc. counts bytes */
					count = udc_get_ppbdu_rxbytes(req);
				} else {
					/* last desc. counts bytes */
					count = AMD_GETBITS(td->status,
						UDC_DMA_OUT_STS_RXBYTES);
					if (!count && req->req.length
						== UDC_DMA_MAXPACKET) {
						/*
						 * on 64k packets the RXBYTES
						 * field is zero
						 */
						count = UDC_DMA_MAXPACKET;
					}
				}
				VDBG(dev, "last desc rx bytes=%u\n", count);
			}

			/* clamp to remaining buffer space; a short-packet
			 * mismatch signals overflow to the gadget */
			tmp = req->req.length - req->req.actual;
			if (count > tmp) {
				if ((tmp % ep->ep.maxpacket) != 0) {
					DBG(dev, "%s: rx %db, space=%db\n",
						ep->ep.name, count, tmp);
					req->req.status = -EOVERFLOW;
				}
				count = tmp;
			}
			req->req.actual += count;
			req->dma_going = 0;
			/* complete request */
			complete_req(ep, req, 0);

			/* next request */
			if (!list_empty(&ep->queue) && !ep->halted) {
				req = list_entry(ep->queue.next,
					struct udc_request,
					queue);
				/*
				 * DMA may be already started by udc_queue()
				 * called by gadget drivers completion
				 * routine. This happens when queue
				 * holds one request only.
				 */
				if (req->dma_going == 0) {
					/* next dma */
					if (prep_dma(ep, req, GFP_ATOMIC) != 0)
						goto finished;
					/* write desc pointer */
					writel(req->td_phys,
						&ep->regs->desptr);
					req->dma_going = 1;
					/* enable DMA */
					udc_set_rde(dev);
				}
			} else {
				/*
				 * implant BNA dummy descriptor to allow
				 * RXFIFO opening by RDE
				 */
				if (ep->bna_dummy_req) {
					/* write desc pointer */
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
					ep->bna_occurred = 0;
				}

				/*
				 * schedule timer for setting RDE if queue
				 * remains empty to allow ep0 packets pass
				 * through
				 */
				if (set_rde != 0
						&& !timer_pending(&udc_timer)) {
					udc_timer.expires =
						jiffies
						+ HZ*UDC_RDE_TIMER_SECONDS;
					set_rde = 1;
					if (!stop_timer) {
						add_timer(&udc_timer);
					}
				}
				if (ep->num != UDC_EP0OUT_IX)
					dev->data_ep_queued = 0;
			}

		} else {
			/*
			* RX DMA must be reenabled for each desc in PPBDU mode
			* and must be enabled for PPBNDU mode in case of BNA
			*/
			udc_set_rde(dev);
		}

	} else if (ep->cancel_transfer) {
		ret_val = IRQ_HANDLED;
		ep->cancel_transfer = 0;
	}

	/* check pending CNAKS */
	if (cnak_pending) {
		/* CNAk processing when rxfifo empty only */
		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
			udc_process_cnak_queue(dev);
		}
	}

	/* clear OUT bits in ep status */
	writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
finished:
	return ret_val;
}
2336
2337/* Interrupt handler for data IN traffic */
2338static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
2339{
2340        irqreturn_t ret_val = IRQ_NONE;
2341        u32 tmp;
2342        u32 epsts;
2343        struct udc_ep *ep;
2344        struct udc_request *req;
2345        struct udc_data_dma *td;
2346        unsigned dma_done;
2347        unsigned len;
2348
2349        ep = &dev->ep[ep_ix];
2350
2351        epsts = readl(&ep->regs->sts);
2352        if (use_dma) {
2353                /* BNA ? */
2354                if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
2355                        dev_err(&dev->pdev->dev,
2356                                "BNA ep%din occured - DESPTR = %08lx \n",
2357                                ep->num,
2358                                (unsigned long) readl(&ep->regs->desptr));
2359
2360                        /* clear BNA */
2361                        writel(epsts, &ep->regs->sts);
2362                        ret_val = IRQ_HANDLED;
2363                        goto finished;
2364                }
2365        }
2366        /* HE event ? */
2367        if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
2368                dev_err(&dev->pdev->dev,
2369                        "HE ep%dn occured - DESPTR = %08lx \n",
2370                        ep->num, (unsigned long) readl(&ep->regs->desptr));
2371
2372                /* clear HE */
2373                writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
2374                ret_val = IRQ_HANDLED;
2375                goto finished;
2376        }
2377
2378        /* DMA completion */
2379        if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
2380                VDBG(dev, "TDC set- completion\n");
2381                ret_val = IRQ_HANDLED;
2382                if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
2383                        req = list_entry(ep->queue.next,
2384                                        struct udc_request, queue);
2385                        /*
2386                         * length bytes transfered
2387                         * check dma done of last desc. in PPBDU mode
2388                         */
2389                        if (use_dma_ppb_du) {
2390                                td = udc_get_last_dma_desc(req);
2391                                if (td) {
2392                                        dma_done =
2393                                                AMD_GETBITS(td->status,
2394                                                UDC_DMA_IN_STS_BS);
2395                                        /* don't care DMA done */
2396                                        req->req.actual = req->req.length;
2397                                }
2398                        } else {
2399                                /* assume all bytes transferred */
2400                                req->req.actual = req->req.length;
2401                        }
2402
2403                        if (req->req.actual == req->req.length) {
2404                                /* complete req */
2405                                complete_req(ep, req, 0);
2406                                req->dma_going = 0;
2407                                /* further request available ? */
2408                                if (list_empty(&ep->queue)) {
2409                                        /* disable interrupt */
2410                                        tmp = readl(&dev->regs->ep_irqmsk);
2411                                        tmp |= AMD_BIT(ep->num);
2412                                        writel(tmp, &dev->regs->ep_irqmsk);
2413                                }
2414                        }
2415                }
2416                ep->cancel_transfer = 0;
2417
2418        }
2419        /*
2420         * status reg has IN bit set and TDC not set (if TDC was handled,
2421         * IN must not be handled (UDC defect) ?
2422         */
2423        if ((epsts & AMD_BIT(UDC_EPSTS_IN))
2424                        && !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
2425                ret_val = IRQ_HANDLED;
2426                if (!list_empty(&ep->queue)) {
2427                        /* next request */
2428                        req = list_entry(ep->queue.next,
2429                                        struct udc_request, queue);
2430                        /* FIFO mode */
2431                        if (!use_dma) {
2432                                /* write fifo */
2433                                udc_txfifo_write(ep, &req->req);
2434                                len = req->req.length - req->req.actual;
2435                                                if (len > ep->ep.maxpacket)
2436                                                        len = ep->ep.maxpacket;
2437                                                req->req.actual += len;
2438                                if (req->req.actual == req->req.length
2439                                        || (len != ep->ep.maxpacket)) {
2440                                        /* complete req */
2441                                        complete_req(ep, req, 0);
2442                                }
2443                        /* DMA */
2444                        } else if (req && !req->dma_going) {
2445                                VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
2446                                        req, req->td_data);
2447                                if (req->td_data) {
2448
2449                                        req->dma_going = 1;
2450
2451                                        /*
2452                                         * unset L bit of first desc.
2453                                         * for chain
2454                                         */
2455                                        if (use_dma_ppb && req->req.length >
2456                                                        ep->ep.maxpacket) {
2457                                                req->td_data->status &=
2458                                                        AMD_CLEAR_BIT(
2459                                                        UDC_DMA_IN_STS_L);
2460                                        }
2461
2462                                        /* write desc pointer */
2463                                        writel(req->td_phys, &ep->regs->desptr);
2464
2465                                        /* set HOST READY */
2466                                        req->td_data->status =
2467                                                AMD_ADDBITS(
2468                                                req->td_data->status,
2469                                                UDC_DMA_IN_STS_BS_HOST_READY,
2470                                                UDC_DMA_IN_STS_BS);
2471
2472                                        /* set poll demand bit */
2473                                        tmp = readl(&ep->regs->ctl);
2474                                        tmp |= AMD_BIT(UDC_EPCTL_P);
2475                                        writel(tmp, &ep->regs->ctl);
2476                                }
2477                        }
2478
2479                } else if (!use_dma && ep->in) {
2480                        /* disable interrupt */
2481                        tmp = readl(
2482                                &dev->regs->ep_irqmsk);
2483                        tmp |= AMD_BIT(ep->num);
2484                        writel(tmp,
2485                                &dev->regs->ep_irqmsk);
2486                }
2487        }
2488        /* clear status bits */
2489        writel(epsts, &ep->regs->sts);
2490
2491finished:
2492        return ret_val;
2493
2494}
2495
/*
 * Interrupt handler for Control OUT traffic (ep0 OUT).
 *
 * Distinguishes SETUP packets from 0-byte DATA packets via the OUT bits
 * of the endpoint status register.  For SETUP: reads the 8 setup bytes
 * (from the DMA setup descriptor or the RX FIFO), handles the MSC reset
 * special case, and forwards the request to the gadget driver with
 * dev->lock temporarily released.  For DATA: reactivates ep0 OUT DMA for
 * ZLPs or delegates to udc_data_out_isr() for control-write data.
 * Called with dev->lock held from udc_irq().
 */
static irqreturn_t udc_control_out_isr(struct udc *dev)
__releases(dev->lock)
__acquires(dev->lock)
{
        irqreturn_t ret_val = IRQ_NONE;
        u32 tmp;
        int setup_supported;
        u32 count;
        int set = 0;
        struct udc_ep   *ep;
        struct udc_ep   *ep_tmp;

        ep = &dev->ep[UDC_EP0OUT_IX];

        /* clear irq */
        writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);

        tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
        /* check BNA and clear if set; remember it for later recovery */
        if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
                VDBG(dev, "ep0: BNA set\n");
                writel(AMD_BIT(UDC_EPSTS_BNA),
                        &dev->ep[UDC_EP0OUT_IX].regs->sts);
                ep->bna_occurred = 1;
                ret_val = IRQ_HANDLED;
                goto finished;
        }

        /* type of data: SETUP or DATA 0 bytes */
        tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
        VDBG(dev, "data_typ = %x\n", tmp);

        /* setup data */
        if (tmp == UDC_EPSTS_OUT_SETUP) {
                ret_val = IRQ_HANDLED;

                /* a new SETUP cancels any pending ep0 IN stall / ZLP wait */
                ep->dev->stall_ep0in = 0;
                dev->waiting_zlp_ack_ep0in = 0;

                /* set NAK for EP0_IN until the gadget driver has answered */
                tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
                tmp |= AMD_BIT(UDC_EPCTL_SNAK);
                writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
                dev->ep[UDC_EP0IN_IX].naking = 1;
                /* get setup data */
                if (use_dma) {

                        /* clear OUT bits in ep status */
                        writel(UDC_EPSTS_OUT_CLEAR,
                                &dev->ep[UDC_EP0OUT_IX].regs->sts);

                        /* the setup descriptor holds the 8 setup bytes */
                        setup_data.data[0] =
                                dev->ep[UDC_EP0OUT_IX].td_stp->data12;
                        setup_data.data[1] =
                                dev->ep[UDC_EP0OUT_IX].td_stp->data34;
                        /* set HOST READY so hw can reuse the descriptor */
                        dev->ep[UDC_EP0OUT_IX].td_stp->status =
                                        UDC_DMA_STP_STS_BS_HOST_READY;
                } else {
                        /* read fifo: two dwords = 8 setup bytes */
                        udc_rxfifo_read_dwords(dev, setup_data.data, 2);
                }

                /* determine direction of control data */
                if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
                        dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
                        /* enable RDE */
                        udc_ep0_set_rde(dev);
                        set = 0;
                } else {
                        dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
                        /*
                         * implant BNA dummy descriptor to allow RXFIFO opening
                         * by RDE
                         */
                        if (ep->bna_dummy_req) {
                                /* write desc pointer */
                                writel(ep->bna_dummy_req->td_phys,
                                        &dev->ep[UDC_EP0OUT_IX].regs->desptr);
                                ep->bna_occurred = 0;
                        }

                        set = 1;
                        dev->ep[UDC_EP0OUT_IX].naking = 1;
                        /*
                         * setup timer for enabling RDE (to not enable
                         * RXFIFO DMA for data too early)
                         */
                        set_rde = 1;
                        if (!timer_pending(&udc_timer)) {
                                udc_timer.expires = jiffies +
                                                        HZ/UDC_RDE_TIMER_DIV;
                                if (!stop_timer) {
                                        add_timer(&udc_timer);
                                }
                        }
                }

                /*
                 * mass storage reset must be processed here because
                 * next packet may be a CLEAR_FEATURE HALT which would not
                 * clear the stall bit when no STALL handshake was received
                 * before (autostall can cause this)
                 */
                if (setup_data.data[0] == UDC_MSCRES_DWORD0
                                && setup_data.data[1] == UDC_MSCRES_DWORD1) {
                        DBG(dev, "MSC Reset\n");
                        /*
                         * clear stall bits
                         * only one IN and OUT endpoints are handled
                         */
                        ep_tmp = &udc->ep[UDC_EPIN_IX];
                        udc_set_halt(&ep_tmp->ep, 0);
                        ep_tmp = &udc->ep[UDC_EPOUT_IX];
                        udc_set_halt(&ep_tmp->ep, 0);
                }

                /* call gadget with setup data received; drop lock to allow
                 * the gadget driver to queue requests from its callback */
                spin_unlock(&dev->lock);
                setup_supported = dev->driver->setup(&dev->gadget,
                                                &setup_data.request);
                spin_lock(&dev->lock);

                tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
                /* ep0 in returns data (not zlp) on IN phase */
                if (setup_supported >= 0 && setup_supported <
                                UDC_EP0IN_MAXPACKET) {
                        /* clear NAK by writing CNAK in EP0_IN */
                        tmp |= AMD_BIT(UDC_EPCTL_CNAK);
                        writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
                        dev->ep[UDC_EP0IN_IX].naking = 0;
                        UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);

                /* if unsupported request then stall */
                } else if (setup_supported < 0) {
                        tmp |= AMD_BIT(UDC_EPCTL_S);
                        writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
                } else
                        dev->waiting_zlp_ack_ep0in = 1;


                /* clear NAK by writing CNAK in EP0_OUT */
                if (!set) {
                        tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
                        tmp |= AMD_BIT(UDC_EPCTL_CNAK);
                        writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
                        dev->ep[UDC_EP0OUT_IX].naking = 0;
                        UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
                }

                if (!use_dma) {
                        /* clear OUT bits in ep status */
                        writel(UDC_EPSTS_OUT_CLEAR,
                                &dev->ep[UDC_EP0OUT_IX].regs->sts);
                }

        /* data packet 0 bytes */
        } else if (tmp == UDC_EPSTS_OUT_DATA) {
                /* clear OUT bits in ep status */
                writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);

                /* get setup data: only 0 packet */
                if (use_dma) {
                        /* no req if 0 packet, just reactivate */
                        if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
                                VDBG(dev, "ZLP\n");

                                /* set HOST READY */
                                dev->ep[UDC_EP0OUT_IX].td->status =
                                        AMD_ADDBITS(
                                        dev->ep[UDC_EP0OUT_IX].td->status,
                                        UDC_DMA_OUT_STS_BS_HOST_READY,
                                        UDC_DMA_OUT_STS_BS);
                                /* enable RDE */
                                udc_ep0_set_rde(dev);
                                ret_val = IRQ_HANDLED;

                        } else {
                                /* control write */
                                ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
                                /* re-program desc. pointer for possible ZLPs */
                                writel(dev->ep[UDC_EP0OUT_IX].td_phys,
                                        &dev->ep[UDC_EP0OUT_IX].regs->desptr);
                                /* enable RDE */
                                udc_ep0_set_rde(dev);
                        }
                } else {

                        /* received number bytes */
                        count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
                        count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
                        /* out data for fifo mode not working; force 0-packet
                         * path below (count is intentionally overwritten) */
                        count = 0;

                        /* 0 packet or real data ? */
                        if (count != 0) {
                                ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
                        } else {
                                /* dummy read confirm */
                                readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
                                ret_val = IRQ_HANDLED;
                        }
                }
        }

        /* check pending CNAKs */
        if (cnak_pending) {
                /* CNAK processing when rxfifo empty only */
                if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
                        udc_process_cnak_queue(dev);
                }
        }

finished:
        return ret_val;
}
2713
/*
 * Interrupt handler for Control IN traffic (ep0 IN).
 *
 * Acknowledges the ep0 IN irq, then handles either DMA completion (TDC)
 * or an IN token event: stalls ep0 IN if requested, otherwise starts the
 * next queued request via DMA (poll demand) or by writing the TX FIFO.
 * Called with dev->lock held from udc_irq().
 */
static irqreturn_t udc_control_in_isr(struct udc *dev)
{
        irqreturn_t ret_val = IRQ_NONE;
        u32 tmp;
        struct udc_ep *ep;
        struct udc_request *req;
        unsigned len;

        ep = &dev->ep[UDC_EP0IN_IX];

        /* clear irq */
        writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);

        tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
        /* DMA completion */
        if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
                VDBG(dev, "isr: TDC clear \n");
                ret_val = IRQ_HANDLED;

                /* clear TDC bit */
                writel(AMD_BIT(UDC_EPSTS_TDC),
                                &dev->ep[UDC_EP0IN_IX].regs->sts);

        /* status reg has IN bit set ? */
        } else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
                ret_val = IRQ_HANDLED;

                if (ep->dma) {
                        /* clear IN bit before starting DMA;
                         * FIFO mode clears it after the FIFO write (below) */
                        writel(AMD_BIT(UDC_EPSTS_IN),
                                &dev->ep[UDC_EP0IN_IX].regs->sts);
                }
                if (dev->stall_ep0in) {
                        DBG(dev, "stall ep0in\n");
                        /* halt ep0in */
                        tmp = readl(&ep->regs->ctl);
                        tmp |= AMD_BIT(UDC_EPCTL_S);
                        writel(tmp, &ep->regs->ctl);
                } else {
                        if (!list_empty(&ep->queue)) {
                                /* next request */
                                req = list_entry(ep->queue.next,
                                                struct udc_request, queue);

                                if (ep->dma) {
                                        /* write desc pointer */
                                        writel(req->td_phys, &ep->regs->desptr);
                                        /* set HOST READY */
                                        req->td_data->status =
                                                AMD_ADDBITS(
                                                req->td_data->status,
                                                UDC_DMA_STP_STS_BS_HOST_READY,
                                                UDC_DMA_STP_STS_BS);

                                        /* set poll demand bit */
                                        tmp =
                                        readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
                                        tmp |= AMD_BIT(UDC_EPCTL_P);
                                        writel(tmp,
                                        &dev->ep[UDC_EP0IN_IX].regs->ctl);

                                        /* all bytes will be transferred */
                                        req->req.actual = req->req.length;

                                        /* complete req */
                                        complete_req(ep, req, 0);

                                } else {
                                        /* write fifo */
                                        udc_txfifo_write(ep, &req->req);

                                        /* length bytes transferred
                                         * (at most one packet) */
                                        len = req->req.length - req->req.actual;
                                        if (len > ep->ep.maxpacket)
                                                len = ep->ep.maxpacket;

                                        req->req.actual += len;
                                        /* done on last byte or short packet */
                                        if (req->req.actual == req->req.length
                                                || (len != ep->ep.maxpacket)) {
                                                /* complete req */
                                                complete_req(ep, req, 0);
                                        }
                                }

                        }
                }
                ep->halted = 0;
                dev->stall_ep0in = 0;
                if (!ep->dma) {
                        /* clear IN bit */
                        writel(AMD_BIT(UDC_EPSTS_IN),
                                &dev->ep[UDC_EP0IN_IX].regs->sts);
                }
        }

        return ret_val;
}
2812
2813
/*
 * Interrupt handler for global device events: SET_CONFIG, SET_INTERFACE,
 * USB reset, USB suspend, speed enumeration and session-valid change.
 *
 * For SET_CONFIG/SET_INTERFACE the hardware has already accepted the
 * request; this handler synthesizes the corresponding setup packet and
 * forwards it to the gadget driver with dev->lock temporarily released.
 * Called with dev->lock held from udc_irq().
 */
static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
__releases(dev->lock)
__acquires(dev->lock)
{
        irqreturn_t ret_val = IRQ_NONE;
        u32 tmp;
        u32 cfg;
        struct udc_ep *ep;
        u16 i;
        u8 udc_csr_epix;

        /* SET_CONFIG irq ? */
        if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
                ret_val = IRQ_HANDLED;

                /* read config value */
                tmp = readl(&dev->regs->sts);
                cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
                DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
                dev->cur_config = cfg;
                dev->set_cfg_not_acked = 1;

                /* make usb request for gadget driver */
                memset(&setup_data, 0 , sizeof(union udc_setup_data));
                setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
                setup_data.request.wValue = cpu_to_le16(dev->cur_config);

                /* program the NE registers: propagate new config to each ep */
                for (i = 0; i < UDC_EP_NUM; i++) {
                        ep = &dev->ep[i];
                        if (ep->in) {

                                /* ep ix in UDC CSR register space */
                                udc_csr_epix = ep->num;


                        /* OUT ep */
                        } else {
                                /* ep ix in UDC CSR register space */
                                udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
                        }

                        tmp = readl(&dev->csr->ne[udc_csr_epix]);
                        /* ep cfg */
                        tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
                                                UDC_CSR_NE_CFG);
                        /* write reg */
                        writel(tmp, &dev->csr->ne[udc_csr_epix]);

                        /* clear stall bits */
                        ep->halted = 0;
                        tmp = readl(&ep->regs->ctl);
                        tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
                        writel(tmp, &ep->regs->ctl);
                }
                /* call gadget zero with setup data received;
                 * drop lock for the callback */
                spin_unlock(&dev->lock);
                tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
                spin_lock(&dev->lock);

        } /* SET_INTERFACE ? */
        if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
                ret_val = IRQ_HANDLED;

                dev->set_cfg_not_acked = 1;
                /* read interface and alt setting values */
                tmp = readl(&dev->regs->sts);
                dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
                dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);

                /* make usb request for gadget driver */
                memset(&setup_data, 0 , sizeof(union udc_setup_data));
                setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
                setup_data.request.bRequestType = USB_RECIP_INTERFACE;
                setup_data.request.wValue = cpu_to_le16(dev->cur_alt);
                setup_data.request.wIndex = cpu_to_le16(dev->cur_intf);

                DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
                                dev->cur_alt, dev->cur_intf);

                /* program the NE registers: propagate intf/alt to each ep */
                for (i = 0; i < UDC_EP_NUM; i++) {
                        ep = &dev->ep[i];
                        if (ep->in) {

                                /* ep ix in UDC CSR register space */
                                udc_csr_epix = ep->num;


                        /* OUT ep */
                        } else {
                                /* ep ix in UDC CSR register space */
                                udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
                        }

                        /* UDC CSR reg */
                        /* set ep values */
                        tmp = readl(&dev->csr->ne[udc_csr_epix]);
                        /* ep interface */
                        tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
                                                UDC_CSR_NE_INTF);
                        /* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
                        /* ep alt */
                        tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
                                                UDC_CSR_NE_ALT);
                        /* write reg */
                        writel(tmp, &dev->csr->ne[udc_csr_epix]);

                        /* clear stall bits */
                        ep->halted = 0;
                        tmp = readl(&ep->regs->ctl);
                        tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
                        writel(tmp, &ep->regs->ctl);
                }

                /* call gadget zero with setup data received;
                 * drop lock for the callback */
                spin_unlock(&dev->lock);
                tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
                spin_lock(&dev->lock);

        } /* USB reset */
        if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
                DBG(dev, "USB Reset interrupt\n");
                ret_val = IRQ_HANDLED;

                /* allow soft reset when suspend occurs */
                soft_reset_occured = 0;

                dev->waiting_zlp_ack_ep0in = 0;
                dev->set_cfg_not_acked = 0;

                /* mask not needed interrupts */
                udc_mask_unused_interrupts(dev);

                /* call gadget to resume and reset configs etc.;
                 * callbacks run without the lock held */
                spin_unlock(&dev->lock);
                if (dev->sys_suspended && dev->driver->resume) {
                        dev->driver->resume(&dev->gadget);
                        dev->sys_suspended = 0;
                }
                dev->driver->disconnect(&dev->gadget);
                spin_lock(&dev->lock);

                /* disable ep0 to empty req queue */
                empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
                ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);

                /* soft reset when rxfifo not empty */
                tmp = readl(&dev->regs->sts);
                if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
                                && !soft_reset_after_usbreset_occured) {
                        udc_soft_reset(dev);
                        soft_reset_after_usbreset_occured++;
                }

                /*
                 * DMA reset to kill potential old DMA hw hang,
                 * POLL bit is already reset by ep_init() through
                 * disconnect()
                 */
                DBG(dev, "DMA machine reset\n");
                tmp = readl(&dev->regs->cfg);
                writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
                writel(tmp, &dev->regs->cfg);

                /* put into initial config */
                udc_basic_init(dev);

                /* enable device setup interrupts */
                udc_enable_dev_setup_interrupts(dev);

                /* enable suspend interrupt */
                tmp = readl(&dev->regs->irqmsk);
                tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
                writel(tmp, &dev->regs->irqmsk);

        } /* USB suspend */
        if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
                DBG(dev, "USB Suspend interrupt\n");
                ret_val = IRQ_HANDLED;
                if (dev->driver->suspend) {
                        spin_unlock(&dev->lock);
                        dev->sys_suspended = 1;
                        dev->driver->suspend(&dev->gadget);
                        spin_lock(&dev->lock);
                }
        } /* new speed ? */
        if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
                DBG(dev, "ENUM interrupt\n");
                ret_val = IRQ_HANDLED;
                soft_reset_after_usbreset_occured = 0;

                /* disable ep0 to empty req queue */
                empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
                ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);

                /* link up all endpoints */
                udc_setup_endpoints(dev);
                if (dev->gadget.speed == USB_SPEED_HIGH) {
                        dev_info(&dev->pdev->dev, "Connect: speed = %s\n",
                                "high");
                } else if (dev->gadget.speed == USB_SPEED_FULL) {
                        dev_info(&dev->pdev->dev, "Connect: speed = %s\n",
                                "full");
                }

                /* init ep 0 */
                activate_control_endpoints(dev);

                /* enable ep0 interrupts */
                udc_enable_ep0_interrupts(dev);
        }
        /* session valid change interrupt */
        if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
                DBG(dev, "USB SVC interrupt\n");
                ret_val = IRQ_HANDLED;

                /* check that session is not valid to detect disconnect */
                tmp = readl(&dev->regs->sts);
                if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
                        /* disable suspend interrupt */
                        tmp = readl(&dev->regs->irqmsk);
                        tmp |= AMD_BIT(UDC_DEVINT_US);
                        writel(tmp, &dev->regs->irqmsk);
                        DBG(dev, "USB Disconnect (session valid low)\n");
                        /* cleanup on disconnect */
                        usb_disconnect(udc);
                }

        }

        return ret_val;
}
3048
/*
 * Interrupt Service Routine, see Linux Kernel Doc for parameters.
 * Demultiplexes the shared UDC interrupt under dev->lock: endpoint
 * interrupts first (ep0 control in/out, then the data endpoints),
 * then device-level events via udc_dev_isr().
 */
static irqreturn_t udc_irq(int irq, void *pdev)
{
	struct udc *dev = pdev;
	u32 reg;
	u16 i;
	u32 ep_irq;
	irqreturn_t ret_val = IRQ_NONE;

	spin_lock(&dev->lock);

	/* check for ep irq */
	reg = readl(&dev->regs->ep_irqsts);
	if (reg) {
		/* ep0 control traffic has dedicated handlers */
		if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
			ret_val |= udc_control_out_isr(dev);
		if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
			ret_val |= udc_control_in_isr(dev);

		/*
		 * data endpoint
		 * iterate ep's
		 */
		for (i = 1; i < UDC_EP_NUM; i++) {
			ep_irq = 1 << i;
			/* skip eps that did not interrupt, and the ep0-out bit
			 * (already handled above) */
			if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
				continue;

			/* clear irq status (ack the event we just read) */
			writel(ep_irq, &dev->regs->ep_irqsts);

			/* irq for out ep ? -- IN eps occupy the low bit
			 * indices, OUT eps the ones above UDC_EPIN_NUM */
			if (i > UDC_EPIN_NUM)
				ret_val |= udc_data_out_isr(dev, i);
			else
				ret_val |= udc_data_in_isr(dev, i);
		}

	}


	/* check for dev irq */
	reg = readl(&dev->regs->irqsts);
	if (reg) {
		/* clear irq (ack) before dispatching the events */
		writel(reg, &dev->regs->irqsts);
		ret_val |= udc_dev_isr(dev, reg);
	}


	spin_unlock(&dev->lock);
	return ret_val;
}
3102
/*
 * Tears down device: release() callback of the gadget struct device;
 * frees the driver context when the last device reference is dropped.
 */
static void gadget_release(struct device *pdev)
{
	/* NOTE(review): declared as 'struct amd5536udc' while the rest of
	 * this file uses 'struct udc' -- presumably an alias from the
	 * header; verify. */
	struct amd5536udc *dev = dev_get_drvdata(pdev);
	kfree(dev);
}
3109
/*
 * Cleanup on device remove: stops the two driver-global timers and
 * clears the singleton 'udc' pointer.  The stop_* counters tell the
 * timer callbacks to signal on_exit/on_pollstall_exit instead of
 * re-arming, so a pending callback is waited for before deletion.
 */
static void udc_remove(struct udc *dev)
{
	/* remove timer */
	stop_timer++;
	/* if the callback is still queued, wait until it has completed */
	if (timer_pending(&udc_timer))
		wait_for_completion(&on_exit);
	/* .data doubles as an "initialized" flag (set to 1 in udc_probe) */
	if (udc_timer.data)
		del_timer_sync(&udc_timer);
	/* remove pollstall timer */
	stop_pollstall_timer++;
	if (timer_pending(&udc_pollstall_timer))
		wait_for_completion(&on_pollstall_exit);
	if (udc_pollstall_timer.data)
		del_timer_sync(&udc_pollstall_timer);
	udc = NULL;
}
3127
3128/* Reset all pci context */
3129static void udc_pci_remove(struct pci_dev *pdev)
3130{
3131        struct udc              *dev;
3132
3133        dev = pci_get_drvdata(pdev);
3134
3135        /* gadget driver must not be registered */
3136        BUG_ON(dev->driver != NULL);
3137
3138        /* dma pool cleanup */
3139        if (dev->data_requests)
3140                pci_pool_destroy(dev->data_requests);
3141
3142        if (dev->stp_requests) {
3143                /* cleanup DMA desc's for ep0in */
3144                pci_pool_free(dev->stp_requests,
3145                        dev->ep[UDC_EP0OUT_IX].td_stp,
3146                        dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3147                pci_pool_free(dev->stp_requests,
3148                        dev->ep[UDC_EP0OUT_IX].td,
3149                        dev->ep[UDC_EP0OUT_IX].td_phys);
3150
3151                pci_pool_destroy(dev->stp_requests);
3152        }
3153
3154        /* reset controller */
3155        writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
3156        if (dev->irq_registered)
3157                free_irq(pdev->irq, dev);
3158        if (dev->regs)
3159                iounmap(dev->regs);
3160        if (dev->mem_region)
3161                release_mem_region(pci_resource_start(pdev, 0),
3162                                pci_resource_len(pdev, 0));
3163        if (dev->active)
3164                pci_disable_device(pdev);
3165
3166        device_unregister(&dev->gadget.dev);
3167        pci_set_drvdata(pdev, NULL);
3168
3169        udc_remove(dev);
3170}
3171
/*
 * create dma pools on init
 * Creates the DMA pools for data and setup descriptors, and allocates
 * the two permanent ep0-out descriptors (setup + data) from them.
 * Returns 0 on success or a negative errno; partially created pools
 * are released by the caller's error path (udc_pci_remove).
 */
static int init_dma_pools(struct udc *dev)
{
	struct udc_stp_dma	*td_stp;
	struct udc_data_dma	*td_data;
	int retval;

	/* consistent DMA mode setting ? -- packet-per-buffer mode and
	 * buffer-fill mode are mutually exclusive */
	if (use_dma_ppb) {
		use_dma_bufferfill_mode = 0;
	} else {
		use_dma_ppb_du = 0;
		use_dma_bufferfill_mode = 1;
	}

	/* DMA setup */
	dev->data_requests = dma_pool_create("data_requests", NULL,
		sizeof(struct udc_data_dma), 0, 0);
	if (!dev->data_requests) {
		DBG(dev, "can't get request data pool\n");
		retval = -ENOMEM;
		goto finished;
	}

	/* EP0 in dma regs = dev control regs */
	dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;

	/* dma desc for setup data */
	dev->stp_requests = dma_pool_create("setup requests", NULL,
		sizeof(struct udc_stp_dma), 0, 0);
	if (!dev->stp_requests) {
		DBG(dev, "can't get stp request pool\n");
		retval = -ENOMEM;
		goto finished;
	}
	/* setup */
	td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
				&dev->ep[UDC_EP0OUT_IX].td_stp_dma);
	if (td_stp == NULL) {
		retval = -ENOMEM;
		goto finished;
	}
	dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;

	/* data: 0 packets !? */
	/* NOTE(review): the data descriptor is allocated from the *setup*
	 * pool; presumably udc_data_dma fits inside udc_stp_dma -- verify
	 * against the header. */
	td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
				&dev->ep[UDC_EP0OUT_IX].td_phys);
	if (td_data == NULL) {
		retval = -ENOMEM;
		goto finished;
	}
	dev->ep[UDC_EP0OUT_IX].td = td_data;
	return 0;

finished:
	return retval;
}
3229
3230/* Called by pci bus driver to init pci context */
3231static int udc_pci_probe(
3232        struct pci_dev *pdev,
3233        const struct pci_device_id *id
3234)
3235{
3236        struct udc              *dev;
3237        unsigned long           resource;
3238        unsigned long           len;
3239        int                     retval = 0;
3240
3241        /* one udc only */
3242        if (udc) {
3243                dev_dbg(&pdev->dev, "already probed\n");
3244                return -EBUSY;
3245        }
3246
3247        /* init */
3248        dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
3249        if (!dev) {
3250                retval = -ENOMEM;
3251                goto finished;
3252        }
3253
3254        /* pci setup */
3255        if (pci_enable_device(pdev) < 0) {
3256                kfree(dev);
3257                dev = NULL;
3258                retval = -ENODEV;
3259                goto finished;
3260        }
3261        dev->active = 1;
3262
3263        /* PCI resource allocation */
3264        resource = pci_resource_start(pdev, 0);
3265        len = pci_resource_len(pdev, 0);
3266
3267        if (!request_mem_region(resource, len, name)) {
3268                dev_dbg(&pdev->dev, "pci device used already\n");
3269                kfree(dev);
3270                dev = NULL;
3271                retval = -EBUSY;
3272                goto finished;
3273        }
3274        dev->mem_region = 1;
3275
3276        dev->virt_addr = ioremap_nocache(resource, len);
3277        if (dev->virt_addr == NULL) {
3278                dev_dbg(&pdev->dev, "start address cannot be mapped\n");
3279                kfree(dev);
3280                dev = NULL;
3281                retval = -EFAULT;
3282                goto finished;
3283        }
3284
3285        if (!pdev->irq) {
3286                dev_err(&dev->pdev->dev, "irq not set\n");
3287                kfree(dev);
3288                dev = NULL;
3289                retval = -ENODEV;
3290                goto finished;
3291        }
3292
3293        spin_lock_init(&dev->lock);
3294        /* udc csr registers base */
3295        dev->csr = dev->virt_addr + UDC_CSR_ADDR;
3296        /* dev registers base */
3297        dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
3298        /* ep registers base */
3299        dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
3300        /* fifo's base */
3301        dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
3302        dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
3303
3304        if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
3305                dev_dbg(&dev->pdev->dev, "request_irq(%d) fail\n", pdev->irq);
3306                kfree(dev);
3307                dev = NULL;
3308                retval = -EBUSY;
3309                goto finished;
3310        }
3311        dev->irq_registered = 1;
3312
3313        pci_set_drvdata(pdev, dev);
3314
3315        /* chip revision for Hs AMD5536 */
3316        dev->chiprev = pdev->revision;
3317
3318        pci_set_master(pdev);
3319        pci_try_set_mwi(pdev);
3320
3321        /* init dma pools */
3322        if (use_dma) {
3323                retval = init_dma_pools(dev);
3324                if (retval != 0)
3325                        goto finished;
3326        }
3327
3328        dev->phys_addr = resource;
3329        dev->irq = pdev->irq;
3330        dev->pdev = pdev;
3331        dev->gadget.dev.parent = &pdev->dev;
3332        dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
3333
3334        /* general probing */
3335        if (udc_probe(dev) == 0)
3336                return 0;
3337
3338finished:
3339        if (dev)
3340                udc_pci_remove(pdev);
3341        return retval;
3342}
3343
3344/* general probe */
3345static int udc_probe(struct udc *dev)
3346{
3347        char            tmp[128];
3348        u32             reg;
3349        int             retval;
3350
3351        /* mark timer as not initialized */
3352        udc_timer.data = 0;
3353        udc_pollstall_timer.data = 0;
3354
3355        /* device struct setup */
3356        dev->gadget.ops = &udc_ops;
3357
3358        dev_set_name(&dev->gadget.dev, "gadget");
3359        dev->gadget.dev.release = gadget_release;
3360        dev->gadget.name = name;
3361        dev->gadget.name = name;
3362        dev->gadget.is_dualspeed = 1;
3363
3364        /* init registers, interrupts, ... */
3365        startup_registers(dev);
3366
3367        dev_info(&dev->pdev->dev, "%s\n", mod_desc);
3368
3369        snprintf(tmp, sizeof tmp, "%d", dev->irq);
3370        dev_info(&dev->pdev->dev,
3371                "irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n",
3372                tmp, dev->phys_addr, dev->chiprev,
3373                (dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1");
3374        strcpy(tmp, UDC_DRIVER_VERSION_STRING);
3375        if (dev->chiprev == UDC_HSA0_REV) {
3376                dev_err(&dev->pdev->dev, "chip revision is A0; too old\n");
3377                retval = -ENODEV;
3378                goto finished;
3379        }
3380        dev_info(&dev->pdev->dev,
3381                "driver version: %s(for Geode5536 B1)\n", tmp);
3382        udc = dev;
3383
3384        retval = device_register(&dev->gadget.dev);
3385        if (retval)
3386                goto finished;
3387
3388        /* timer init */
3389        init_timer(&udc_timer);
3390        udc_timer.function = udc_timer_function;
3391        udc_timer.data = 1;
3392        /* timer pollstall init */
3393        init_timer(&udc_pollstall_timer);
3394        udc_pollstall_timer.function = udc_pollstall_timer_function;
3395        udc_pollstall_timer.data = 1;
3396
3397        /* set SD */
3398        reg = readl(&dev->regs->ctl);
3399        reg |= AMD_BIT(UDC_DEVCTL_SD);
3400        writel(reg, &dev->regs->ctl);
3401
3402        /* print dev register info */
3403        print_regs(dev);
3404
3405        return 0;
3406
3407finished:
3408        return retval;
3409}
3410
/*
 * Initiates a remote wakeup: pulses the RES bit in the device control
 * register (set then clear) under the device lock.
 * Returns 0 (always succeeds).
 */
static int udc_remote_wakeup(struct udc *dev)
{
	unsigned long flags;
	u32 tmp;

	DBG(dev, "UDC initiates remote wakeup\n");

	spin_lock_irqsave(&dev->lock, flags);

	/* set RES, then clear it again to generate the resume pulse */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);
	tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);

	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}
3430
/*
 * PCI device parameters: match the AMD CS5536 UDC (device id 0x2096)
 * only when its class code is USB serial bus with programming
 * interface 0xfe (device-mode controller, not a host controller).
 */
static const struct pci_device_id pci_id[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
		.class =	(PCI_CLASS_SERIAL_USB << 8) | 0xfe,
		.class_mask =	0xffffffff,
	},
	{},
};
MODULE_DEVICE_TABLE(pci, pci_id);
3441
/* PCI functions: probe/remove hooks registered with the PCI core */
static struct pci_driver udc_pci_driver = {
	.name =		(char *) name,
	.id_table =	pci_id,
	.probe =	udc_pci_probe,
	.remove =	udc_pci_remove,
};
3449
/* Inits driver: module entry point, registers the PCI driver */
static int __init init(void)
{
	return pci_register_driver(&udc_pci_driver);
}
module_init(init);
3456
/* Cleans driver: module exit point, unregisters the PCI driver */
static void __exit cleanup(void)
{
	pci_unregister_driver(&udc_pci_driver);
}
module_exit(cleanup);
3463
3464MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
3465MODULE_AUTHOR("Thomas Dahlmann");
3466MODULE_LICENSE("GPL");
3467
3468