linux/drivers/usb/musb/musb_core.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * MUSB OTG driver core code
   4 *
   5 * Copyright 2005 Mentor Graphics Corporation
   6 * Copyright (C) 2005-2006 by Texas Instruments
   7 * Copyright (C) 2006-2007 Nokia Corporation
   8 */
   9
  10/*
  11 * Inventra (Multipoint) Dual-Role Controller Driver for Linux.
  12 *
  13 * This consists of a Host Controller Driver (HCD) and a peripheral
  14 * controller driver implementing the "Gadget" API; OTG support is
  15 * in the works.  These are normal Linux-USB controller drivers which
  16 * use IRQs and have no dedicated thread.
  17 *
  18 * This version of the driver has only been used with products from
  19 * Texas Instruments.  Those products integrate the Inventra logic
  20 * with other DMA, IRQ, and bus modules, as well as other logic that
  21 * needs to be reflected in this driver.
  22 *
  23 *
  24 * NOTE:  the original Mentor code here was pretty much a collection
  25 * of mechanisms that don't seem to have been fully integrated/working
   26 * for *any* Linux kernel version.  This version targets current Linux
   27 * kernels.  Key open issues include:
  28 *
  29 *  - Lack of host-side transaction scheduling, for all transfer types.
  30 *    The hardware doesn't do it; instead, software must.
  31 *
  32 *    This is not an issue for OTG devices that don't support external
  33 *    hubs, but for more "normal" USB hosts it's a user issue that the
  34 *    "multipoint" support doesn't scale in the expected ways.  That
  35 *    includes DaVinci EVM in a common non-OTG mode.
  36 *
  37 *      * Control and bulk use dedicated endpoints, and there's as
  38 *        yet no mechanism to either (a) reclaim the hardware when
  39 *        peripherals are NAKing, which gets complicated with bulk
  40 *        endpoints, or (b) use more than a single bulk endpoint in
  41 *        each direction.
  42 *
  43 *        RESULT:  one device may be perceived as blocking another one.
  44 *
  45 *      * Interrupt and isochronous will dynamically allocate endpoint
  46 *        hardware, but (a) there's no record keeping for bandwidth;
  47 *        (b) in the common case that few endpoints are available, there
  48 *        is no mechanism to reuse endpoints to talk to multiple devices.
  49 *
  50 *        RESULT:  At one extreme, bandwidth can be overcommitted in
   51 *        some hardware configurations, and no faults will be reported.
  52 *        At the other extreme, the bandwidth capabilities which do
  53 *        exist tend to be severely undercommitted.  You can't yet hook
  54 *        up both a keyboard and a mouse to an external USB hub.
  55 */
  56
  57/*
  58 * This gets many kinds of configuration information:
  59 *      - Kconfig for everything user-configurable
  60 *      - platform_device for addressing, irq, and platform_data
  61 *      - platform_data is mostly for board-specific information
   62 *        (plus recently, SoC or family details)
  63 *
  64 * Most of the conditional compilation will (someday) vanish.
  65 */
  66
  67#include <linux/module.h>
  68#include <linux/kernel.h>
  69#include <linux/sched.h>
  70#include <linux/slab.h>
  71#include <linux/list.h>
  72#include <linux/kobject.h>
  73#include <linux/prefetch.h>
  74#include <linux/platform_device.h>
  75#include <linux/io.h>
  76#include <linux/dma-mapping.h>
  77#include <linux/usb.h>
  78#include <linux/usb/of.h>
  79
  80#include "musb_core.h"
  81#include "musb_trace.h"
  82
  83#define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON)
  84
  85
  86#define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia"
  87#define DRIVER_DESC "Inventra Dual-Role USB Controller Driver"
  88
  89#define MUSB_VERSION "6.0"
  90
  91#define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION
  92
  93#define MUSB_DRIVER_NAME "musb-hdrc"
  94const char musb_driver_name[] = MUSB_DRIVER_NAME;
  95
  96MODULE_DESCRIPTION(DRIVER_INFO);
  97MODULE_AUTHOR(DRIVER_AUTHOR);
  98MODULE_LICENSE("GPL");
  99MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
 100
 101
 102/*-------------------------------------------------------------------------*/
 103
 104static inline struct musb *dev_to_musb(struct device *dev)
 105{
 106        return dev_get_drvdata(dev);
 107}
 108
 109enum musb_mode musb_get_mode(struct device *dev)
 110{
 111        enum usb_dr_mode mode;
 112
 113        mode = usb_get_dr_mode(dev);
 114        switch (mode) {
 115        case USB_DR_MODE_HOST:
 116                return MUSB_HOST;
 117        case USB_DR_MODE_PERIPHERAL:
 118                return MUSB_PERIPHERAL;
 119        case USB_DR_MODE_OTG:
 120        case USB_DR_MODE_UNKNOWN:
 121        default:
 122                return MUSB_OTG;
 123        }
 124}
 125EXPORT_SYMBOL_GPL(musb_get_mode);
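
/*
 * For illustration: musb_get_mode() simply maps the generic dr_mode setting
 * onto MUSB's own enum.  Assuming a hypothetical board's device tree snippet
 * (not part of this file):
 *
 *	&usb0 {
 *		dr_mode = "peripheral";
 *	};
 *
 * usb_get_dr_mode() would return USB_DR_MODE_PERIPHERAL, which this helper
 * translates to MUSB_PERIPHERAL; "host" maps to MUSB_HOST, while "otg" or a
 * missing property falls back to MUSB_OTG.
 */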
 126
 127/*-------------------------------------------------------------------------*/
 128
 129static int musb_ulpi_read(struct usb_phy *phy, u32 reg)
 130{
 131        void __iomem *addr = phy->io_priv;
 132        int     i = 0;
 133        u8      r;
 134        u8      power;
 135        int     ret;
 136
 137        pm_runtime_get_sync(phy->io_dev);
 138
 139        /* Make sure the transceiver is not in low power mode */
 140        power = musb_readb(addr, MUSB_POWER);
 141        power &= ~MUSB_POWER_SUSPENDM;
 142        musb_writeb(addr, MUSB_POWER, power);
 143
 144        /* REVISIT: musbhdrc_ulpi_an.pdf recommends setting the
 145         * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
 146         */
 147
 148        musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
 149        musb_writeb(addr, MUSB_ULPI_REG_CONTROL,
 150                        MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);
 151
 152        while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
 153                                & MUSB_ULPI_REG_CMPLT)) {
 154                i++;
 155                if (i == 10000) {
 156                        ret = -ETIMEDOUT;
 157                        goto out;
 158                }
 159
 160        }
 161        r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
 162        r &= ~MUSB_ULPI_REG_CMPLT;
 163        musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
 164
 165        ret = musb_readb(addr, MUSB_ULPI_REG_DATA);
 166
 167out:
 168        pm_runtime_put(phy->io_dev);
 169
 170        return ret;
 171}
 172
 173static int musb_ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
 174{
 175        void __iomem *addr = phy->io_priv;
 176        int     i = 0;
 177        u8      r = 0;
 178        u8      power;
 179        int     ret = 0;
 180
 181        pm_runtime_get_sync(phy->io_dev);
 182
 183        /* Make sure the transceiver is not in low power mode */
 184        power = musb_readb(addr, MUSB_POWER);
 185        power &= ~MUSB_POWER_SUSPENDM;
 186        musb_writeb(addr, MUSB_POWER, power);
 187
 188        musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
 189        musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)val);
 190        musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ);
 191
 192        while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
 193                                & MUSB_ULPI_REG_CMPLT)) {
 194                i++;
 195                if (i == 10000) {
 196                        ret = -ETIMEDOUT;
 197                        goto out;
 198                }
 199        }
 200
 201        r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
 202        r &= ~MUSB_ULPI_REG_CMPLT;
 203        musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
 204
 205out:
 206        pm_runtime_put(phy->io_dev);
 207
 208        return ret;
 209}
 210
 211static struct usb_phy_io_ops musb_ulpi_access = {
 212        .read = musb_ulpi_read,
 213        .write = musb_ulpi_write,
 214};
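
/*
 * Usage sketch (not from this file): these accessors are published through
 * the PHY's io_ops, and they expect phy->io_priv to point at the MUSB
 * register base and phy->io_dev at the controller device, as the functions
 * above assume.  A transceiver user could then do, for example:
 *
 *	val = usb_phy_io_read(musb->xceiv, reg);
 *	usb_phy_io_write(musb->xceiv, val, reg);
 *
 * where "reg" is an illustrative ULPI register address.
 */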
 215
 216/*-------------------------------------------------------------------------*/
 217
 218static u32 musb_default_fifo_offset(u8 epnum)
 219{
 220        return 0x20 + (epnum * 4);
 221}
 222
 223/* "flat" mapping: each endpoint has its own i/o address */
 224static void musb_flat_ep_select(void __iomem *mbase, u8 epnum)
 225{
 226}
 227
 228static u32 musb_flat_ep_offset(u8 epnum, u16 offset)
 229{
 230        return 0x100 + (0x10 * epnum) + offset;
 231}
 232
 233/* "indexed" mapping: INDEX register controls register bank select */
 234static void musb_indexed_ep_select(void __iomem *mbase, u8 epnum)
 235{
 236        musb_writeb(mbase, MUSB_INDEX, epnum);
 237}
 238
 239static u32 musb_indexed_ep_offset(u8 epnum, u16 offset)
 240{
 241        return 0x10 + offset;
 242}
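
/*
 * For example (derived from the helpers above): endpoint 3's registers live
 * at 0x100 + 0x10 * 3 = 0x130 with the flat model, while the indexed model
 * first writes 3 to MUSB_INDEX and then uses the shared window at
 * 0x10 + offset for whichever endpoint is currently selected.
 */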
 243
 244static u32 musb_default_busctl_offset(u8 epnum, u16 offset)
 245{
 246        return 0x80 + (0x08 * epnum) + offset;
 247}
 248
 249static u8 musb_default_readb(const void __iomem *addr, unsigned offset)
 250{
 251        u8 data =  __raw_readb(addr + offset);
 252
 253        trace_musb_readb(__builtin_return_address(0), addr, offset, data);
 254        return data;
 255}
 256
 257static void musb_default_writeb(void __iomem *addr, unsigned offset, u8 data)
 258{
 259        trace_musb_writeb(__builtin_return_address(0), addr, offset, data);
 260        __raw_writeb(data, addr + offset);
 261}
 262
 263static u16 musb_default_readw(const void __iomem *addr, unsigned offset)
 264{
 265        u16 data = __raw_readw(addr + offset);
 266
 267        trace_musb_readw(__builtin_return_address(0), addr, offset, data);
 268        return data;
 269}
 270
 271static void musb_default_writew(void __iomem *addr, unsigned offset, u16 data)
 272{
 273        trace_musb_writew(__builtin_return_address(0), addr, offset, data);
 274        __raw_writew(data, addr + offset);
 275}
 276
 277/*
 278 * Load an endpoint's FIFO
 279 */
 280static void musb_default_write_fifo(struct musb_hw_ep *hw_ep, u16 len,
 281                                    const u8 *src)
 282{
 283        struct musb *musb = hw_ep->musb;
 284        void __iomem *fifo = hw_ep->fifo;
 285
 286        if (unlikely(len == 0))
 287                return;
 288
 289        prefetch((u8 *)src);
 290
 291        dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
 292                        'T', hw_ep->epnum, fifo, len, src);
 293
 294        /* we can't assume unaligned reads work */
 295        if (likely((0x01 & (unsigned long) src) == 0)) {
 296                u16     index = 0;
 297
 298                /* best case is 32bit-aligned source address */
 299                if ((0x02 & (unsigned long) src) == 0) {
 300                        if (len >= 4) {
 301                                iowrite32_rep(fifo, src + index, len >> 2);
 302                                index += len & ~0x03;
 303                        }
 304                        if (len & 0x02) {
 305                                __raw_writew(*(u16 *)&src[index], fifo);
 306                                index += 2;
 307                        }
 308                } else {
 309                        if (len >= 2) {
 310                                iowrite16_rep(fifo, src + index, len >> 1);
 311                                index += len & ~0x01;
 312                        }
 313                }
 314                if (len & 0x01)
 315                        __raw_writeb(src[index], fifo);
 316        } else  {
 317                /* byte aligned */
 318                iowrite8_rep(fifo, src, len);
 319        }
 320}
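
/*
 * Example of the alignment cascade above: with a 32-bit-aligned source and
 * len == 7, the FIFO is loaded with one 32-bit word (iowrite32_rep), one
 * 16-bit write, and a final byte write; an odd source address falls back to
 * iowrite8_rep for the whole buffer.
 */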
 321
 322/*
 323 * Unload an endpoint's FIFO
 324 */
 325static void musb_default_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
 326{
 327        struct musb *musb = hw_ep->musb;
 328        void __iomem *fifo = hw_ep->fifo;
 329
 330        if (unlikely(len == 0))
 331                return;
 332
 333        dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
 334                        'R', hw_ep->epnum, fifo, len, dst);
 335
 336        /* we can't assume unaligned writes work */
 337        if (likely((0x01 & (unsigned long) dst) == 0)) {
 338                u16     index = 0;
 339
 340                /* best case is 32bit-aligned destination address */
 341                if ((0x02 & (unsigned long) dst) == 0) {
 342                        if (len >= 4) {
 343                                ioread32_rep(fifo, dst, len >> 2);
 344                                index = len & ~0x03;
 345                        }
 346                        if (len & 0x02) {
 347                                *(u16 *)&dst[index] = __raw_readw(fifo);
 348                                index += 2;
 349                        }
 350                } else {
 351                        if (len >= 2) {
 352                                ioread16_rep(fifo, dst, len >> 1);
 353                                index = len & ~0x01;
 354                        }
 355                }
 356                if (len & 0x01)
 357                        dst[index] = __raw_readb(fifo);
 358        } else  {
 359                /* byte aligned */
 360                ioread8_rep(fifo, dst, len);
 361        }
 362}
 363
 364/*
 365 * Old style IO functions
 366 */
 367u8 (*musb_readb)(const void __iomem *addr, unsigned offset);
 368EXPORT_SYMBOL_GPL(musb_readb);
 369
 370void (*musb_writeb)(void __iomem *addr, unsigned offset, u8 data);
 371EXPORT_SYMBOL_GPL(musb_writeb);
 372
 373u16 (*musb_readw)(const void __iomem *addr, unsigned offset);
 374EXPORT_SYMBOL_GPL(musb_readw);
 375
 376void (*musb_writew)(void __iomem *addr, unsigned offset, u16 data);
 377EXPORT_SYMBOL_GPL(musb_writew);
 378
 379u32 musb_readl(const void __iomem *addr, unsigned offset)
 380{
 381        u32 data = __raw_readl(addr + offset);
 382
 383        trace_musb_readl(__builtin_return_address(0), addr, offset, data);
 384        return data;
 385}
 386EXPORT_SYMBOL_GPL(musb_readl);
 387
 388void musb_writel(void __iomem *addr, unsigned offset, u32 data)
 389{
 390        trace_musb_writel(__builtin_return_address(0), addr, offset, data);
 391        __raw_writel(data, addr + offset);
 392}
 393EXPORT_SYMBOL_GPL(musb_writel);
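
/*
 * Note (summary, not from this file): the byte/word accessors above are
 * function pointers so that glue layers with unusual bus interfaces can
 * substitute their own implementations; controller setup is expected to
 * point them either at the musb_default_*() helpers or at platform-specific
 * versions before any register traffic happens.
 */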
 394
 395#ifndef CONFIG_MUSB_PIO_ONLY
 396struct dma_controller *
 397(*musb_dma_controller_create)(struct musb *musb, void __iomem *base);
 398EXPORT_SYMBOL(musb_dma_controller_create);
 399
 400void (*musb_dma_controller_destroy)(struct dma_controller *c);
 401EXPORT_SYMBOL(musb_dma_controller_destroy);
 402#endif
 403
 404/*
 405 * New style IO functions
 406 */
 407void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
 408{
 409        return hw_ep->musb->io.read_fifo(hw_ep, len, dst);
 410}
 411
 412void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
 413{
 414        return hw_ep->musb->io.write_fifo(hw_ep, len, src);
 415}
 416
 417/*-------------------------------------------------------------------------*/
 418
 419/* for high speed test mode; see USB 2.0 spec 7.1.20 */
 420static const u8 musb_test_packet[53] = {
 421        /* implicit SYNC then DATA0 to start */
 422
 423        /* JKJKJKJK x9 */
 424        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 425        /* JJKKJJKK x8 */
 426        0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
 427        /* JJJJKKKK x8 */
 428        0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
 429        /* JJJJJJJKKKKKKK x8 */
 430        0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 431        /* JJJJJJJK x8 */
 432        0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
 433        /* JKKKKKKK x10, JK */
 434        0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e
 435
 436        /* implicit CRC16 then EOP to end */
 437};
 438
 439void musb_load_testpacket(struct musb *musb)
 440{
 441        void __iomem    *regs = musb->endpoints[0].regs;
 442
 443        musb_ep_select(musb->mregs, 0);
 444        musb_write_fifo(musb->control_ep,
 445                        sizeof(musb_test_packet), musb_test_packet);
 446        musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY);
 447}
 448
 449/*-------------------------------------------------------------------------*/
 450
 451/*
 452 * Handles OTG hnp timeouts, such as b_ase0_brst
 453 */
 454static void musb_otg_timer_func(struct timer_list *t)
 455{
 456        struct musb     *musb = from_timer(musb, t, otg_timer);
 457        unsigned long   flags;
 458
 459        spin_lock_irqsave(&musb->lock, flags);
 460        switch (musb->xceiv->otg->state) {
 461        case OTG_STATE_B_WAIT_ACON:
 462                musb_dbg(musb,
 463                        "HNP: b_wait_acon timeout; back to b_peripheral");
 464                musb_g_disconnect(musb);
 465                musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
 466                musb->is_active = 0;
 467                break;
 468        case OTG_STATE_A_SUSPEND:
 469        case OTG_STATE_A_WAIT_BCON:
 470                musb_dbg(musb, "HNP: %s timeout",
 471                        usb_otg_state_string(musb->xceiv->otg->state));
 472                musb_platform_set_vbus(musb, 0);
 473                musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
 474                break;
 475        default:
 476                musb_dbg(musb, "HNP: Unhandled mode %s",
 477                        usb_otg_state_string(musb->xceiv->otg->state));
 478        }
 479        spin_unlock_irqrestore(&musb->lock, flags);
 480}
 481
 482/*
 483 * Stops the HNP transition. Caller must take care of locking.
 484 */
 485void musb_hnp_stop(struct musb *musb)
 486{
 487        struct usb_hcd  *hcd = musb->hcd;
 488        void __iomem    *mbase = musb->mregs;
 489        u8      reg;
 490
 491        musb_dbg(musb, "HNP: stop from %s",
 492                        usb_otg_state_string(musb->xceiv->otg->state));
 493
 494        switch (musb->xceiv->otg->state) {
 495        case OTG_STATE_A_PERIPHERAL:
 496                musb_g_disconnect(musb);
 497                musb_dbg(musb, "HNP: back to %s",
 498                        usb_otg_state_string(musb->xceiv->otg->state));
 499                break;
 500        case OTG_STATE_B_HOST:
 501                musb_dbg(musb, "HNP: Disabling HR");
 502                if (hcd)
 503                        hcd->self.is_b_host = 0;
 504                musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
 505                MUSB_DEV_MODE(musb);
 506                reg = musb_readb(mbase, MUSB_POWER);
 507                reg |= MUSB_POWER_SUSPENDM;
 508                musb_writeb(mbase, MUSB_POWER, reg);
 509                /* REVISIT: Start SESSION_REQUEST here? */
 510                break;
 511        default:
 512                musb_dbg(musb, "HNP: Stopping in unknown state %s",
 513                        usb_otg_state_string(musb->xceiv->otg->state));
 514        }
 515
 516        /*
 517         * When returning to A state after HNP, avoid hub_port_rebounce(),
  518         * which causes occasional OPT A "Did not receive reset after connect"
 519         * errors.
 520         */
 521        musb->port1_status &= ~(USB_PORT_STAT_C_CONNECTION << 16);
 522}
 523
 524static void musb_recover_from_babble(struct musb *musb);
 525
 526static void musb_handle_intr_resume(struct musb *musb, u8 devctl)
 527{
 528        musb_dbg(musb, "RESUME (%s)",
 529                        usb_otg_state_string(musb->xceiv->otg->state));
 530
 531        if (devctl & MUSB_DEVCTL_HM) {
 532                switch (musb->xceiv->otg->state) {
 533                case OTG_STATE_A_SUSPEND:
 534                        /* remote wakeup? */
 535                        musb->port1_status |=
 536                                        (USB_PORT_STAT_C_SUSPEND << 16)
 537                                        | MUSB_PORT_STAT_RESUME;
 538                        musb->rh_timer = jiffies
 539                                + msecs_to_jiffies(USB_RESUME_TIMEOUT);
 540                        musb->xceiv->otg->state = OTG_STATE_A_HOST;
 541                        musb->is_active = 1;
 542                        musb_host_resume_root_hub(musb);
 543                        schedule_delayed_work(&musb->finish_resume_work,
 544                                msecs_to_jiffies(USB_RESUME_TIMEOUT));
 545                        break;
 546                case OTG_STATE_B_WAIT_ACON:
 547                        musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
 548                        musb->is_active = 1;
 549                        MUSB_DEV_MODE(musb);
 550                        break;
 551                default:
 552                        WARNING("bogus %s RESUME (%s)\n",
 553                                "host",
 554                                usb_otg_state_string(musb->xceiv->otg->state));
 555                }
 556        } else {
 557                switch (musb->xceiv->otg->state) {
 558                case OTG_STATE_A_SUSPEND:
 559                        /* possibly DISCONNECT is upcoming */
 560                        musb->xceiv->otg->state = OTG_STATE_A_HOST;
 561                        musb_host_resume_root_hub(musb);
 562                        break;
 563                case OTG_STATE_B_WAIT_ACON:
 564                case OTG_STATE_B_PERIPHERAL:
 565                        /* disconnect while suspended?  we may
 566                         * not get a disconnect irq...
 567                         */
 568                        if ((devctl & MUSB_DEVCTL_VBUS)
 569                                        != (3 << MUSB_DEVCTL_VBUS_SHIFT)
 570                                        ) {
 571                                musb->int_usb |= MUSB_INTR_DISCONNECT;
 572                                musb->int_usb &= ~MUSB_INTR_SUSPEND;
 573                                break;
 574                        }
 575                        musb_g_resume(musb);
 576                        break;
 577                case OTG_STATE_B_IDLE:
 578                        musb->int_usb &= ~MUSB_INTR_SUSPEND;
 579                        break;
 580                default:
 581                        WARNING("bogus %s RESUME (%s)\n",
 582                                "peripheral",
 583                                usb_otg_state_string(musb->xceiv->otg->state));
 584                }
 585        }
 586}
 587
 588/* return IRQ_HANDLED to tell the caller to return immediately */
 589static irqreturn_t musb_handle_intr_sessreq(struct musb *musb, u8 devctl)
 590{
 591        void __iomem *mbase = musb->mregs;
 592
 593        if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
 594                        && (devctl & MUSB_DEVCTL_BDEVICE)) {
 595                musb_dbg(musb, "SessReq while on B state");
 596                return IRQ_HANDLED;
 597        }
 598
 599        musb_dbg(musb, "SESSION_REQUEST (%s)",
 600                usb_otg_state_string(musb->xceiv->otg->state));
 601
 602        /* IRQ arrives from ID pin sense or (later, if VBUS power
  603         * is removed) SRP.  Responses are time critical:
 604         *  - turn on VBUS (with silicon-specific mechanism)
 605         *  - go through A_WAIT_VRISE
 606         *  - ... to A_WAIT_BCON.
 607         * a_wait_vrise_tmout triggers VBUS_ERROR transitions
 608         */
 609        musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
 610        musb->ep0_stage = MUSB_EP0_START;
 611        musb->xceiv->otg->state = OTG_STATE_A_IDLE;
 612        MUSB_HST_MODE(musb);
 613        musb_platform_set_vbus(musb, 1);
 614
 615        return IRQ_NONE;
 616}
 617
 618static void musb_handle_intr_vbuserr(struct musb *musb, u8 devctl)
 619{
 620        int     ignore = 0;
 621
  622        /* During connection as an A-Device, we may see short
  623         * current spikes causing voltage drops, because of cable
 624         * and peripheral capacitance combined with vbus draw.
 625         * (So: less common with truly self-powered devices, where
 626         * vbus doesn't act like a power supply.)
 627         *
 628         * Such spikes are short; usually less than ~500 usec, max
 629         * of ~2 msec.  That is, they're not sustained overcurrent
 630         * errors, though they're reported using VBUSERROR irqs.
 631         *
 632         * Workarounds:  (a) hardware: use self powered devices.
 633         * (b) software:  ignore non-repeated VBUS errors.
 634         *
 635         * REVISIT:  do delays from lots of DEBUG_KERNEL checks
 636         * make trouble here, keeping VBUS < 4.4V ?
 637         */
 638        switch (musb->xceiv->otg->state) {
 639        case OTG_STATE_A_HOST:
 640                /* recovery is dicey once we've gotten past the
 641                 * initial stages of enumeration, but if VBUS
 642                 * stayed ok at the other end of the link, and
 643                 * another reset is due (at least for high speed,
 644                 * to redo the chirp etc), it might work OK...
 645                 */
 646        case OTG_STATE_A_WAIT_BCON:
 647        case OTG_STATE_A_WAIT_VRISE:
 648                if (musb->vbuserr_retry) {
 649                        void __iomem *mbase = musb->mregs;
 650
 651                        musb->vbuserr_retry--;
 652                        ignore = 1;
 653                        devctl |= MUSB_DEVCTL_SESSION;
 654                        musb_writeb(mbase, MUSB_DEVCTL, devctl);
 655                } else {
 656                        musb->port1_status |=
 657                                  USB_PORT_STAT_OVERCURRENT
 658                                | (USB_PORT_STAT_C_OVERCURRENT << 16);
 659                }
 660                break;
 661        default:
 662                break;
 663        }
 664
 665        dev_printk(ignore ? KERN_DEBUG : KERN_ERR, musb->controller,
 666                        "VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
 667                        usb_otg_state_string(musb->xceiv->otg->state),
 668                        devctl,
 669                        ({ char *s;
 670                        switch (devctl & MUSB_DEVCTL_VBUS) {
 671                        case 0 << MUSB_DEVCTL_VBUS_SHIFT:
 672                                s = "<SessEnd"; break;
 673                        case 1 << MUSB_DEVCTL_VBUS_SHIFT:
 674                                s = "<AValid"; break;
 675                        case 2 << MUSB_DEVCTL_VBUS_SHIFT:
 676                                s = "<VBusValid"; break;
 677                        /* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */
 678                        default:
 679                                s = "VALID"; break;
 680                        } s; }),
 681                        VBUSERR_RETRY_COUNT - musb->vbuserr_retry,
 682                        musb->port1_status);
 683
 684        /* go through A_WAIT_VFALL then start a new session */
 685        if (!ignore)
 686                musb_platform_set_vbus(musb, 0);
 687}
 688
 689static void musb_handle_intr_suspend(struct musb *musb, u8 devctl)
 690{
 691        musb_dbg(musb, "SUSPEND (%s) devctl %02x",
 692                usb_otg_state_string(musb->xceiv->otg->state), devctl);
 693
 694        switch (musb->xceiv->otg->state) {
 695        case OTG_STATE_A_PERIPHERAL:
 696                /* We also come here if the cable is removed, since
 697                 * this silicon doesn't report ID-no-longer-grounded.
 698                 *
 699                 * We depend on T(a_wait_bcon) to shut us down, and
 700                 * hope users don't do anything dicey during this
 701                 * undesired detour through A_WAIT_BCON.
 702                 */
 703                musb_hnp_stop(musb);
 704                musb_host_resume_root_hub(musb);
 705                musb_root_disconnect(musb);
 706                musb_platform_try_idle(musb, jiffies
 707                                + msecs_to_jiffies(musb->a_wait_bcon
 708                                        ? : OTG_TIME_A_WAIT_BCON));
 709
 710                break;
 711        case OTG_STATE_B_IDLE:
 712                if (!musb->is_active)
 713                        break;
 714                /* fall through */
 715        case OTG_STATE_B_PERIPHERAL:
 716                musb_g_suspend(musb);
 717                musb->is_active = musb->g.b_hnp_enable;
 718                if (musb->is_active) {
 719                        musb->xceiv->otg->state = OTG_STATE_B_WAIT_ACON;
 720                        musb_dbg(musb, "HNP: Setting timer for b_ase0_brst");
 721                        mod_timer(&musb->otg_timer, jiffies
 722                                + msecs_to_jiffies(
 723                                                OTG_TIME_B_ASE0_BRST));
 724                }
 725                break;
 726        case OTG_STATE_A_WAIT_BCON:
 727                if (musb->a_wait_bcon != 0)
 728                        musb_platform_try_idle(musb, jiffies
 729                                + msecs_to_jiffies(musb->a_wait_bcon));
 730                break;
 731        case OTG_STATE_A_HOST:
 732                musb->xceiv->otg->state = OTG_STATE_A_SUSPEND;
 733                musb->is_active = musb->hcd->self.b_hnp_enable;
 734                break;
 735        case OTG_STATE_B_HOST:
 736                /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
 737                musb_dbg(musb, "REVISIT: SUSPEND as B_HOST");
 738                break;
 739        default:
 740                /* "should not happen" */
 741                musb->is_active = 0;
 742                break;
 743        }
 744}
 745
 746static void musb_handle_intr_connect(struct musb *musb, u8 devctl, u8 int_usb)
 747{
 748        struct usb_hcd *hcd = musb->hcd;
 749
 750        musb->is_active = 1;
 751        musb->ep0_stage = MUSB_EP0_START;
 752
 753        musb->intrtxe = musb->epmask;
 754        musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
 755        musb->intrrxe = musb->epmask & 0xfffe;
 756        musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
 757        musb_writeb(musb->mregs, MUSB_INTRUSBE, 0xf7);
 758        musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
 759                                |USB_PORT_STAT_HIGH_SPEED
 760                                |USB_PORT_STAT_ENABLE
 761                                );
 762        musb->port1_status |= USB_PORT_STAT_CONNECTION
 763                                |(USB_PORT_STAT_C_CONNECTION << 16);
 764
 765        /* high vs full speed is just a guess until after reset */
 766        if (devctl & MUSB_DEVCTL_LSDEV)
 767                musb->port1_status |= USB_PORT_STAT_LOW_SPEED;
 768
 769        /* indicate new connection to OTG machine */
 770        switch (musb->xceiv->otg->state) {
 771        case OTG_STATE_B_PERIPHERAL:
 772                if (int_usb & MUSB_INTR_SUSPEND) {
 773                        musb_dbg(musb, "HNP: SUSPEND+CONNECT, now b_host");
 774                        int_usb &= ~MUSB_INTR_SUSPEND;
 775                        goto b_host;
 776                } else
 777                        musb_dbg(musb, "CONNECT as b_peripheral???");
 778                break;
 779        case OTG_STATE_B_WAIT_ACON:
 780                musb_dbg(musb, "HNP: CONNECT, now b_host");
 781b_host:
 782                musb->xceiv->otg->state = OTG_STATE_B_HOST;
 783                if (musb->hcd)
 784                        musb->hcd->self.is_b_host = 1;
 785                del_timer(&musb->otg_timer);
 786                break;
 787        default:
 788                if ((devctl & MUSB_DEVCTL_VBUS)
 789                                == (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
 790                        musb->xceiv->otg->state = OTG_STATE_A_HOST;
 791                        if (hcd)
 792                                hcd->self.is_b_host = 0;
 793                }
 794                break;
 795        }
 796
 797        musb_host_poke_root_hub(musb);
 798
 799        musb_dbg(musb, "CONNECT (%s) devctl %02x",
 800                        usb_otg_state_string(musb->xceiv->otg->state), devctl);
 801}
 802
 803static void musb_handle_intr_disconnect(struct musb *musb, u8 devctl)
 804{
 805        musb_dbg(musb, "DISCONNECT (%s) as %s, devctl %02x",
 806                        usb_otg_state_string(musb->xceiv->otg->state),
 807                        MUSB_MODE(musb), devctl);
 808
 809        switch (musb->xceiv->otg->state) {
 810        case OTG_STATE_A_HOST:
 811        case OTG_STATE_A_SUSPEND:
 812                musb_host_resume_root_hub(musb);
 813                musb_root_disconnect(musb);
 814                if (musb->a_wait_bcon != 0)
 815                        musb_platform_try_idle(musb, jiffies
 816                                + msecs_to_jiffies(musb->a_wait_bcon));
 817                break;
 818        case OTG_STATE_B_HOST:
 819                /* REVISIT this behaves for "real disconnect"
 820                 * cases; make sure the other transitions from
  821                 * B_HOST act right too.  The B_HOST code
 822                 * in hnp_stop() is currently not used...
 823                 */
 824                musb_root_disconnect(musb);
 825                if (musb->hcd)
 826                        musb->hcd->self.is_b_host = 0;
 827                musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
 828                MUSB_DEV_MODE(musb);
 829                musb_g_disconnect(musb);
 830                break;
 831        case OTG_STATE_A_PERIPHERAL:
 832                musb_hnp_stop(musb);
 833                musb_root_disconnect(musb);
 834                /* FALLTHROUGH */
 835        case OTG_STATE_B_WAIT_ACON:
 836                /* FALLTHROUGH */
 837        case OTG_STATE_B_PERIPHERAL:
 838        case OTG_STATE_B_IDLE:
 839                musb_g_disconnect(musb);
 840                break;
 841        default:
 842                WARNING("unhandled DISCONNECT transition (%s)\n",
 843                        usb_otg_state_string(musb->xceiv->otg->state));
 844                break;
 845        }
 846}
 847
 848/*
  849 * Mentor saves a bit: bus reset and babble share the same irq.
  850 * Only the host sees babble; only the peripheral sees bus reset.
 851 */
 852static void musb_handle_intr_reset(struct musb *musb)
 853{
 854        if (is_host_active(musb)) {
 855                /*
  856                 * When BABBLE happens, what we can do depends on which
  857                 * platform MUSB is running on, because some platforms
  858                 * implement proprietary means of recovering from babble
  859                 * conditions. One such platform is AM335x. In
 860                 * most cases, however, the only thing we can do is
 861                 * drop the session.
 862                 */
 863                dev_err(musb->controller, "Babble\n");
 864                musb_recover_from_babble(musb);
 865        } else {
 866                musb_dbg(musb, "BUS RESET as %s",
 867                        usb_otg_state_string(musb->xceiv->otg->state));
 868                switch (musb->xceiv->otg->state) {
 869                case OTG_STATE_A_SUSPEND:
 870                        musb_g_reset(musb);
 871                        /* FALLTHROUGH */
 872                case OTG_STATE_A_WAIT_BCON:     /* OPT TD.4.7-900ms */
 873                        /* never use invalid T(a_wait_bcon) */
 874                        musb_dbg(musb, "HNP: in %s, %d msec timeout",
 875                                usb_otg_state_string(musb->xceiv->otg->state),
 876                                TA_WAIT_BCON(musb));
 877                        mod_timer(&musb->otg_timer, jiffies
 878                                + msecs_to_jiffies(TA_WAIT_BCON(musb)));
 879                        break;
 880                case OTG_STATE_A_PERIPHERAL:
 881                        del_timer(&musb->otg_timer);
 882                        musb_g_reset(musb);
 883                        break;
 884                case OTG_STATE_B_WAIT_ACON:
 885                        musb_dbg(musb, "HNP: RESET (%s), to b_peripheral",
 886                                usb_otg_state_string(musb->xceiv->otg->state));
 887                        musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
 888                        musb_g_reset(musb);
 889                        break;
 890                case OTG_STATE_B_IDLE:
 891                        musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
 892                        /* FALLTHROUGH */
 893                case OTG_STATE_B_PERIPHERAL:
 894                        musb_g_reset(musb);
 895                        break;
 896                default:
 897                        musb_dbg(musb, "Unhandled BUS RESET as %s",
 898                                usb_otg_state_string(musb->xceiv->otg->state));
 899                }
 900        }
 901}
 902
 903/*
 904 * Interrupt Service Routine to record USB "global" interrupts.
 905 * Since these do not happen often and signify things of
 906 * paramount importance, it seems OK to check them individually;
  908 * the order of the tests is specified in the manual.
  909 *
  910 * @param musb instance pointer
  911 * @param int_usb register contents
  912 * @param devctl DEVCTL register contents
 913 */
 914
 915static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
 916                                u8 devctl)
 917{
 918        irqreturn_t handled = IRQ_NONE;
 919
 920        musb_dbg(musb, "<== DevCtl=%02x, int_usb=0x%x", devctl, int_usb);
 921
 922        /* in host mode, the peripheral may issue remote wakeup.
 923         * in peripheral mode, the host may resume the link.
 924         * spurious RESUME irqs happen too, paired with SUSPEND.
 925         */
 926        if (int_usb & MUSB_INTR_RESUME) {
 927                musb_handle_intr_resume(musb, devctl);
 928                handled = IRQ_HANDLED;
 929        }
 930
 931        /* see manual for the order of the tests */
 932        if (int_usb & MUSB_INTR_SESSREQ) {
 933                if (musb_handle_intr_sessreq(musb, devctl))
 934                        return IRQ_HANDLED;
 935                handled = IRQ_HANDLED;
 936        }
 937
 938        if (int_usb & MUSB_INTR_VBUSERROR) {
 939                musb_handle_intr_vbuserr(musb, devctl);
 940                handled = IRQ_HANDLED;
 941        }
 942
 943        if (int_usb & MUSB_INTR_SUSPEND) {
 944                musb_handle_intr_suspend(musb, devctl);
 945                handled = IRQ_HANDLED;
 946        }
 947
 948        if (int_usb & MUSB_INTR_CONNECT) {
 949                musb_handle_intr_connect(musb, devctl, int_usb);
 950                handled = IRQ_HANDLED;
 951        }
 952
 953        if (int_usb & MUSB_INTR_DISCONNECT) {
 954                musb_handle_intr_disconnect(musb, devctl);
 955                handled = IRQ_HANDLED;
 956        }
 957
 958        if (int_usb & MUSB_INTR_RESET) {
 959                musb_handle_intr_reset(musb);
 960                handled = IRQ_HANDLED;
 961        }
 962
 963#if 0
 964/* REVISIT ... this would be for multiplexing periodic endpoints, or
 965 * supporting transfer phasing to prevent exceeding ISO bandwidth
 966 * limits of a given frame or microframe.
 967 *
 968 * It's not needed for peripheral side, which dedicates endpoints;
 969 * though it _might_ use SOF irqs for other purposes.
 970 *
 971 * And it's not currently needed for host side, which also dedicates
 972 * endpoints, relies on TX/RX interval registers, and isn't claimed
 973 * to support ISO transfers yet.
 974 */
 975        if (int_usb & MUSB_INTR_SOF) {
 976                void __iomem *mbase = musb->mregs;
 977                struct musb_hw_ep       *ep;
 978                u8 epnum;
 979                u16 frame;
 980
 981                dev_dbg(musb->controller, "START_OF_FRAME\n");
 982                handled = IRQ_HANDLED;
 983
 984                /* start any periodic Tx transfers waiting for current frame */
 985                frame = musb_readw(mbase, MUSB_FRAME);
 986                ep = musb->endpoints;
 987                for (epnum = 1; (epnum < musb->nr_endpoints)
 988                                        && (musb->epmask >= (1 << epnum));
 989                                epnum++, ep++) {
 990                        /*
 991                         * FIXME handle framecounter wraps (12 bits)
 992                         * eliminate duplicated StartUrb logic
 993                         */
 994                        if (ep->dwWaitFrame >= frame) {
 995                                ep->dwWaitFrame = 0;
 996                                pr_debug("SOF --> periodic TX%s on %d\n",
 997                                        ep->tx_channel ? " DMA" : "",
 998                                        epnum);
 999                                if (!ep->tx_channel)
1000                                        musb_h_tx_start(musb, epnum);
1001                                else
1002                                        cppi_hostdma_start(musb, epnum);
1003                        }
1004                }               /* end of for loop */
1005        }
1006#endif
1007
1008        schedule_delayed_work(&musb->irq_work, 0);
1009
1010        return handled;
1011}
1012
1013/*-------------------------------------------------------------------------*/
1014
1015static void musb_disable_interrupts(struct musb *musb)
1016{
1017        void __iomem    *mbase = musb->mregs;
1018        u16     temp;
1019
1020        /* disable interrupts */
1021        musb_writeb(mbase, MUSB_INTRUSBE, 0);
1022        musb->intrtxe = 0;
1023        musb_writew(mbase, MUSB_INTRTXE, 0);
1024        musb->intrrxe = 0;
1025        musb_writew(mbase, MUSB_INTRRXE, 0);
1026
1027        /*  flush pending interrupts */
1028        temp = musb_readb(mbase, MUSB_INTRUSB);
1029        temp = musb_readw(mbase, MUSB_INTRTX);
1030        temp = musb_readw(mbase, MUSB_INTRRX);
1031}
1032
1033static void musb_enable_interrupts(struct musb *musb)
1034{
1035        void __iomem    *regs = musb->mregs;
1036
1037        /*  Set INT enable registers, enable interrupts */
1038        musb->intrtxe = musb->epmask;
1039        musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
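        /*
         * Note: bit 0 of INTRRXE is left clear because endpoint 0 is the
         * shared control endpoint; its interrupt is reported via INTRTX
         * bit 0, and there is no separate RX interrupt for it.
         */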
1040        musb->intrrxe = musb->epmask & 0xfffe;
1041        musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
1042        musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
1043
1044}
1045
1046/*
1047 * Program the HDRC to start (enable interrupts, dma, etc.).
1048 */
1049void musb_start(struct musb *musb)
1050{
1051        void __iomem    *regs = musb->mregs;
1052        u8              devctl = musb_readb(regs, MUSB_DEVCTL);
1053        u8              power;
1054
1055        musb_dbg(musb, "<== devctl %02x", devctl);
1056
1057        musb_enable_interrupts(musb);
1058        musb_writeb(regs, MUSB_TESTMODE, 0);
1059
1060        power = MUSB_POWER_ISOUPDATE;
1061        /*
1062         * treating UNKNOWN as unspecified maximum speed, in which case
1063         * we will default to high-speed.
1064         */
1065        if (musb->config->maximum_speed == USB_SPEED_HIGH ||
1066                        musb->config->maximum_speed == USB_SPEED_UNKNOWN)
1067                power |= MUSB_POWER_HSENAB;
1068        musb_writeb(regs, MUSB_POWER, power);
1069
1070        musb->is_active = 0;
1071        devctl = musb_readb(regs, MUSB_DEVCTL);
1072        devctl &= ~MUSB_DEVCTL_SESSION;
1073
1074        /* session started after:
1075         * (a) ID-grounded irq, host mode;
1076         * (b) vbus present/connect IRQ, peripheral mode;
1077         * (c) peripheral initiates, using SRP
1078         */
1079        if (musb->port_mode != MUSB_HOST &&
1080                        musb->xceiv->otg->state != OTG_STATE_A_WAIT_BCON &&
1081                        (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
1082                musb->is_active = 1;
1083        } else {
1084                devctl |= MUSB_DEVCTL_SESSION;
1085        }
1086
1087        musb_platform_enable(musb);
1088        musb_writeb(regs, MUSB_DEVCTL, devctl);
1089}
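
/*
 * Reading the session logic above: when the port is not hard-wired to host
 * mode, the OTG state machine is not in A_WAIT_BCON, and VBUS is already
 * reported valid, the core is simply marked active (a session is effectively
 * in progress); otherwise DEVCTL.SESSION is set so the core can start one,
 * per the (a)/(b)/(c) cases listed in the comment above.
 */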
1090
1091/*
1092 * Make the HDRC stop (disable interrupts, etc.);
1093 * reversible by musb_start
1094 * called on gadget driver unregister
1095 * with controller locked, irqs blocked
1096 * acts as a NOP unless some role activated the hardware
1097 */
1098void musb_stop(struct musb *musb)
1099{
1100        /* stop IRQs, timers, ... */
1101        musb_platform_disable(musb);
1102        musb_disable_interrupts(musb);
1103        musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
1104
1105        /* FIXME
1106         *  - mark host and/or peripheral drivers unusable/inactive
1107         *  - disable DMA (and enable it in HdrcStart)
 1108         *  - make sure we can musb_start() after musb_stop(); OTG-mode
 1109         *    gadget driver module rmmod/modprobe cycles need that
1110         *  - ...
1111         */
1112        musb_platform_try_idle(musb, 0);
1113}
1114
1115/*-------------------------------------------------------------------------*/
1116
1117/*
1118 * The silicon either has hard-wired endpoint configurations, or else
1119 * "dynamic fifo" sizing.  The driver has support for both, though at this
1120 * writing only the dynamic sizing is very well tested.   Since we switched
1121 * away from compile-time hardware parameters, we can no longer rely on
1122 * dead code elimination to leave only the relevant one in the object file.
1123 *
1124 * We don't currently use dynamic fifo setup capability to do anything
1125 * more than selecting one of a bunch of predefined configurations.
1126 */
1127static ushort fifo_mode;
1128
1129/* "modprobe ... fifo_mode=1" etc */
1130module_param(fifo_mode, ushort, 0);
1131MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");
1132
1133/*
1134 * tables defining fifo_mode values.  define more if you like.
1135 * for host side, make sure both halves of ep1 are set up.
1136 */
1137
1138/* mode 0 - fits in 2KB */
1139static struct musb_fifo_cfg mode_0_cfg[] = {
1140{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
1141{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
1142{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
1143{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
1144{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
1145};
1146
1147/* mode 1 - fits in 4KB */
1148static struct musb_fifo_cfg mode_1_cfg[] = {
1149{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
1150{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
1151{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
1152{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
1153{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
1154};
1155
1156/* mode 2 - fits in 4KB */
1157static struct musb_fifo_cfg mode_2_cfg[] = {
1158{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
1159{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
1160{ .hw_ep_num = 2, .style = FIFO_TX,   .maxpacket = 512, },
1161{ .hw_ep_num = 2, .style = FIFO_RX,   .maxpacket = 512, },
1162{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 960, },
1163{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 1024, },
1164};
1165
1166/* mode 3 - fits in 4KB */
1167static struct musb_fifo_cfg mode_3_cfg[] = {
1168{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
1169{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
1170{ .hw_ep_num = 2, .style = FIFO_TX,   .maxpacket = 512, },
1171{ .hw_ep_num = 2, .style = FIFO_RX,   .maxpacket = 512, },
1172{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
1173{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
1174};
1175
1176/* mode 4 - fits in 16KB */
1177static struct musb_fifo_cfg mode_4_cfg[] = {
1178{ .hw_ep_num =  1, .style = FIFO_TX,   .maxpacket = 512, },
1179{ .hw_ep_num =  1, .style = FIFO_RX,   .maxpacket = 512, },
1180{ .hw_ep_num =  2, .style = FIFO_TX,   .maxpacket = 512, },
1181{ .hw_ep_num =  2, .style = FIFO_RX,   .maxpacket = 512, },
1182{ .hw_ep_num =  3, .style = FIFO_TX,   .maxpacket = 512, },
1183{ .hw_ep_num =  3, .style = FIFO_RX,   .maxpacket = 512, },
1184{ .hw_ep_num =  4, .style = FIFO_TX,   .maxpacket = 512, },
1185{ .hw_ep_num =  4, .style = FIFO_RX,   .maxpacket = 512, },
1186{ .hw_ep_num =  5, .style = FIFO_TX,   .maxpacket = 512, },
1187{ .hw_ep_num =  5, .style = FIFO_RX,   .maxpacket = 512, },
1188{ .hw_ep_num =  6, .style = FIFO_TX,   .maxpacket = 512, },
1189{ .hw_ep_num =  6, .style = FIFO_RX,   .maxpacket = 512, },
1190{ .hw_ep_num =  7, .style = FIFO_TX,   .maxpacket = 512, },
1191{ .hw_ep_num =  7, .style = FIFO_RX,   .maxpacket = 512, },
1192{ .hw_ep_num =  8, .style = FIFO_TX,   .maxpacket = 512, },
1193{ .hw_ep_num =  8, .style = FIFO_RX,   .maxpacket = 512, },
1194{ .hw_ep_num =  9, .style = FIFO_TX,   .maxpacket = 512, },
1195{ .hw_ep_num =  9, .style = FIFO_RX,   .maxpacket = 512, },
1196{ .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 256, },
1197{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 64, },
1198{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 256, },
1199{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 64, },
1200{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 256, },
1201{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 64, },
1202{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 4096, },
1203{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
1204{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
1205};
1206
1207/* mode 5 - fits in 8KB */
1208static struct musb_fifo_cfg mode_5_cfg[] = {
1209{ .hw_ep_num =  1, .style = FIFO_TX,   .maxpacket = 512, },
1210{ .hw_ep_num =  1, .style = FIFO_RX,   .maxpacket = 512, },
1211{ .hw_ep_num =  2, .style = FIFO_TX,   .maxpacket = 512, },
1212{ .hw_ep_num =  2, .style = FIFO_RX,   .maxpacket = 512, },
1213{ .hw_ep_num =  3, .style = FIFO_TX,   .maxpacket = 512, },
1214{ .hw_ep_num =  3, .style = FIFO_RX,   .maxpacket = 512, },
1215{ .hw_ep_num =  4, .style = FIFO_TX,   .maxpacket = 512, },
1216{ .hw_ep_num =  4, .style = FIFO_RX,   .maxpacket = 512, },
1217{ .hw_ep_num =  5, .style = FIFO_TX,   .maxpacket = 512, },
1218{ .hw_ep_num =  5, .style = FIFO_RX,   .maxpacket = 512, },
1219{ .hw_ep_num =  6, .style = FIFO_TX,   .maxpacket = 32, },
1220{ .hw_ep_num =  6, .style = FIFO_RX,   .maxpacket = 32, },
1221{ .hw_ep_num =  7, .style = FIFO_TX,   .maxpacket = 32, },
1222{ .hw_ep_num =  7, .style = FIFO_RX,   .maxpacket = 32, },
1223{ .hw_ep_num =  8, .style = FIFO_TX,   .maxpacket = 32, },
1224{ .hw_ep_num =  8, .style = FIFO_RX,   .maxpacket = 32, },
1225{ .hw_ep_num =  9, .style = FIFO_TX,   .maxpacket = 32, },
1226{ .hw_ep_num =  9, .style = FIFO_RX,   .maxpacket = 32, },
1227{ .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 32, },
1228{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 32, },
1229{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 32, },
1230{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 32, },
1231{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 32, },
1232{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 32, },
1233{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 512, },
1234{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
1235{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
1236};
1237
1238/*
1239 * configure a fifo; for non-shared endpoints, this may be called
1240 * once for a tx fifo and once for an rx fifo.
1241 *
1242 * returns negative errno or offset for next fifo.
1243 */
1244static int
1245fifo_setup(struct musb *musb, struct musb_hw_ep  *hw_ep,
1246                const struct musb_fifo_cfg *cfg, u16 offset)
1247{
1248        void __iomem    *mbase = musb->mregs;
1249        int     size = 0;
1250        u16     maxpacket = cfg->maxpacket;
1251        u16     c_off = offset >> 3;
1252        u8      c_size;
1253
1254        /* expect hw_ep has already been zero-initialized */
1255
1256        size = ffs(max(maxpacket, (u16) 8)) - 1;
1257        maxpacket = 1 << size;
1258
1259        c_size = size - 3;
1260        if (cfg->mode == BUF_DOUBLE) {
1261                if ((offset + (maxpacket << 1)) >
1262                                (1 << (musb->config->ram_bits + 2)))
1263                        return -EMSGSIZE;
1264                c_size |= MUSB_FIFOSZ_DPB;
1265        } else {
1266                if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2)))
1267                        return -EMSGSIZE;
1268        }
1269
1270        /* configure the FIFO */
1271        musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum);
1272
1273        /* EP0 reserved endpoint for control, bidirectional;
1274         * EP1 reserved for bulk, two unidirectional halves.
1275         */
1276        if (hw_ep->epnum == 1)
1277                musb->bulk_ep = hw_ep;
1278        /* REVISIT error check:  be sure ep0 can both rx and tx ... */
1279        switch (cfg->style) {
1280        case FIFO_TX:
1281                musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
1282                musb_writew(mbase, MUSB_TXFIFOADD, c_off);
1283                hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
1284                hw_ep->max_packet_sz_tx = maxpacket;
1285                break;
1286        case FIFO_RX:
1287                musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
1288                musb_writew(mbase, MUSB_RXFIFOADD, c_off);
1289                hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
1290                hw_ep->max_packet_sz_rx = maxpacket;
1291                break;
1292        case FIFO_RXTX:
1293                musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
1294                musb_writew(mbase, MUSB_TXFIFOADD, c_off);
1295                hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
1296                hw_ep->max_packet_sz_rx = maxpacket;
1297
1298                musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
1299                musb_writew(mbase, MUSB_RXFIFOADD, c_off);
1300                hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
1301                hw_ep->max_packet_sz_tx = maxpacket;
1302
1303                hw_ep->is_shared_fifo = true;
1304                break;
1305        }
1306
1307        /* NOTE rx and tx endpoint irqs aren't managed separately,
1308         * which happens to be ok
1309         */
1310        musb->epmask |= (1 << hw_ep->epnum);
1311
1312        return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
1313}
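
/*
 * Worked example of the arithmetic above: a 512-byte single-buffered FIFO
 * placed at offset 0 gets c_size = ffs(512) - 1 - 3 = 6 and a FIFOADD value
 * of 0 >> 3 = 0 (the address registers count in 8-byte units), and the
 * function returns 512 as the offset for the next FIFO; with BUF_DOUBLE the
 * same FIFO consumes 1024 bytes of FIFO RAM.
 */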
1314
1315static struct musb_fifo_cfg ep0_cfg = {
1316        .style = FIFO_RXTX, .maxpacket = 64,
1317};
1318
1319static int ep_config_from_table(struct musb *musb)
1320{
1321        const struct musb_fifo_cfg      *cfg;
1322        unsigned                i, n;
1323        int                     offset;
1324        struct musb_hw_ep       *hw_ep = musb->endpoints;
1325
1326        if (musb->config->fifo_cfg) {
1327                cfg = musb->config->fifo_cfg;
1328                n = musb->config->fifo_cfg_size;
1329                goto done;
1330        }
1331
1332        switch (fifo_mode) {
1333        default:
1334                fifo_mode = 0;
1335                /* FALLTHROUGH */
1336        case 0:
1337                cfg = mode_0_cfg;
1338                n = ARRAY_SIZE(mode_0_cfg);
1339                break;
1340        case 1:
1341                cfg = mode_1_cfg;
1342                n = ARRAY_SIZE(mode_1_cfg);
1343                break;
1344        case 2:
1345                cfg = mode_2_cfg;
1346                n = ARRAY_SIZE(mode_2_cfg);
1347                break;
1348        case 3:
1349                cfg = mode_3_cfg;
1350                n = ARRAY_SIZE(mode_3_cfg);
1351                break;
1352        case 4:
1353                cfg = mode_4_cfg;
1354                n = ARRAY_SIZE(mode_4_cfg);
1355                break;
1356        case 5:
1357                cfg = mode_5_cfg;
1358                n = ARRAY_SIZE(mode_5_cfg);
1359                break;
1360        }
1361
1362        pr_debug("%s: setup fifo_mode %d\n", musb_driver_name, fifo_mode);
1363
1364
1365done:
1366        offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0);
1367        /* assert(offset > 0) */
1368
1369        /* NOTE:  for RTL versions >= 1.400 EPINFO and RAMINFO would
1370         * be better than static musb->config->num_eps and DYN_FIFO_SIZE...
1371         */
1372
1373        for (i = 0; i < n; i++) {
1374                u8      epn = cfg->hw_ep_num;
1375
1376                if (epn >= musb->config->num_eps) {
1377                        pr_debug("%s: invalid ep %d\n",
1378                                        musb_driver_name, epn);
1379                        return -EINVAL;
1380                }
1381                offset = fifo_setup(musb, hw_ep + epn, cfg++, offset);
1382                if (offset < 0) {
1383                        pr_debug("%s: mem overrun, ep %d\n",
1384                                        musb_driver_name, epn);
1385                        return offset;
1386                }
1387                epn++;
1388                musb->nr_endpoints = max(epn, musb->nr_endpoints);
1389        }
1390
1391        pr_debug("%s: %d/%d max ep, %d/%d memory\n",
1392                        musb_driver_name,
1393                        n + 1, musb->config->num_eps * 2 - 1,
1394                        offset, (1 << (musb->config->ram_bits + 2)));
1395
1396        if (!musb->bulk_ep) {
1397                pr_debug("%s: missing bulk\n", musb_driver_name);
1398                return -EINVAL;
1399        }
1400
1401        return 0;
1402}
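
/*
 * Hypothetical example (not taken from any real board file): instead of one
 * of the built-in fifo_mode tables, a glue/board driver may supply its own
 * layout through struct musb_hdrc_config, which ep_config_from_table()
 * picks up via musb->config->fifo_cfg above:
 *
 *	static struct musb_fifo_cfg board_fifo_cfg[] = {
 *		{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
 *		{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
 *		{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 64,  },
 *	};
 *
 * with .fifo_cfg = board_fifo_cfg and
 * .fifo_cfg_size = ARRAY_SIZE(board_fifo_cfg) in that config.
 */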
1403
1404
1405/*
1406 * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false
1407 * @musb: the controller
1408 */
1409static int ep_config_from_hw(struct musb *musb)
1410{
1411        u8 epnum = 0;
1412        struct musb_hw_ep *hw_ep;
1413        void __iomem *mbase = musb->mregs;
1414        int ret = 0;
1415
1416        musb_dbg(musb, "<== static silicon ep config");
1417
1418        /* FIXME pick up ep0 maxpacket size */
1419
1420        for (epnum = 1; epnum < musb->config->num_eps; epnum++) {
1421                musb_ep_select(mbase, epnum);
1422                hw_ep = musb->endpoints + epnum;
1423
1424                ret = musb_read_fifosize(musb, hw_ep, epnum);
1425                if (ret < 0)
1426                        break;
1427
1428                /* FIXME set up hw_ep->{rx,tx}_double_buffered */
1429
1430                /* pick an RX/TX endpoint for bulk */
1431                if (hw_ep->max_packet_sz_tx < 512
1432                                || hw_ep->max_packet_sz_rx < 512)
1433                        continue;
1434
1435                /* REVISIT:  this algorithm is lazy, we should at least
1436                 * try to pick a double buffered endpoint.
1437                 */
1438                if (musb->bulk_ep)
1439                        continue;
1440                musb->bulk_ep = hw_ep;
1441        }
1442
1443        if (!musb->bulk_ep) {
1444                pr_debug("%s: missing bulk\n", musb_driver_name);
1445                return -EINVAL;
1446        }
1447
1448        return 0;
1449}
1450
1451enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };
1452
1453/* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
1454 * configure endpoints, or take their config from silicon
1455 */
1456static int musb_core_init(u16 musb_type, struct musb *musb)
1457{
1458        u8 reg;
1459        char *type;
1460        char aInfo[90];
1461        void __iomem    *mbase = musb->mregs;
1462        int             status = 0;
1463        int             i;
1464
1465        /* log core options (read using indexed model) */
1466        reg = musb_read_configdata(mbase);
1467
1468        strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
1469        if (reg & MUSB_CONFIGDATA_DYNFIFO) {
1470                strcat(aInfo, ", dyn FIFOs");
1471                musb->dyn_fifo = true;
1472        }
1473        if (reg & MUSB_CONFIGDATA_MPRXE) {
1474                strcat(aInfo, ", bulk combine");
1475                musb->bulk_combine = true;
1476        }
1477        if (reg & MUSB_CONFIGDATA_MPTXE) {
1478                strcat(aInfo, ", bulk split");
1479                musb->bulk_split = true;
1480        }
1481        if (reg & MUSB_CONFIGDATA_HBRXE) {
1482                strcat(aInfo, ", HB-ISO Rx");
1483                musb->hb_iso_rx = true;
1484        }
1485        if (reg & MUSB_CONFIGDATA_HBTXE) {
1486                strcat(aInfo, ", HB-ISO Tx");
1487                musb->hb_iso_tx = true;
1488        }
1489        if (reg & MUSB_CONFIGDATA_SOFTCONE)
1490                strcat(aInfo, ", SoftConn");
1491
1492        pr_debug("%s: ConfigData=0x%02x (%s)\n", musb_driver_name, reg, aInfo);
1493
1494        if (musb_type == MUSB_CONTROLLER_MHDRC) {
1495                musb->is_multipoint = 1;
1496                type = "M";
1497        } else {
1498                musb->is_multipoint = 0;
1499                type = "";
1500#ifndef CONFIG_USB_OTG_BLACKLIST_HUB
1501                pr_err("%s: kernel must blacklist external hubs\n",
1502                       musb_driver_name);
1503#endif
1504        }
1505
1506        /* log release info */
1507        musb->hwvers = musb_readw(mbase, MUSB_HWVERS);
1508        pr_debug("%s: %sHDRC RTL version %d.%d%s\n",
1509                 musb_driver_name, type, MUSB_HWVERS_MAJOR(musb->hwvers),
1510                 MUSB_HWVERS_MINOR(musb->hwvers),
1511                 (musb->hwvers & MUSB_HWVERS_RC) ? "RC" : "");
1512
1513        /* configure ep0 */
1514        musb_configure_ep0(musb);
1515
1516        /* discover endpoint configuration */
1517        musb->nr_endpoints = 1;
1518        musb->epmask = 1;
1519
1520        if (musb->dyn_fifo)
1521                status = ep_config_from_table(musb);
1522        else
1523                status = ep_config_from_hw(musb);
1524
1525        if (status < 0)
1526                return status;
1527
1528        /* finish init, and print endpoint config */
1529        for (i = 0; i < musb->nr_endpoints; i++) {
1530                struct musb_hw_ep       *hw_ep = musb->endpoints + i;
1531
1532                hw_ep->fifo = musb->io.fifo_offset(i) + mbase;
1533#if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
1534                if (musb->ops->quirks & MUSB_IN_TUSB) {
1535                        hw_ep->fifo_async = musb->async + 0x400 +
1536                                musb->io.fifo_offset(i);
1537                        hw_ep->fifo_sync = musb->sync + 0x400 +
1538                                musb->io.fifo_offset(i);
1539                        hw_ep->fifo_sync_va =
1540                                musb->sync_va + 0x400 + musb->io.fifo_offset(i);
1541
1542                        if (i == 0)
1543                                hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF;
1544                        else
1545                                hw_ep->conf = mbase + 0x400 +
1546                                        (((i - 1) & 0xf) << 2);
1547                }
1548#endif
1549
1550                hw_ep->regs = musb->io.ep_offset(i, 0) + mbase;
1551                hw_ep->rx_reinit = 1;
1552                hw_ep->tx_reinit = 1;
1553
1554                if (hw_ep->max_packet_sz_tx) {
1555                        musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
1556                                musb_driver_name, i,
1557                                hw_ep->is_shared_fifo ? "shared" : "tx",
1558                                hw_ep->tx_double_buffered
1559                                        ? "doublebuffer, " : "",
1560                                hw_ep->max_packet_sz_tx);
1561                }
1562                if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
1563                        musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
1564                                musb_driver_name, i,
1565                                "rx",
1566                                hw_ep->rx_double_buffered
1567                                        ? "doublebuffer, " : "",
1568                                hw_ep->max_packet_sz_rx);
1569                }
1570                if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
1571                        musb_dbg(musb, "hw_ep %d not configured", i);
1572        }
1573
1574        return 0;
1575}
1576
1577/*-------------------------------------------------------------------------*/
1578
1579/*
1580 * Handle all the IRQs defined by the HDRC core. For now we expect other
1581 * IRQ sources (PHY, DMA, etc) to have been handled first, the musb->int_*
1582 * values to have been assigned, and the IRQ to have already been acked.
1583 *
1584 * Called in IRQ context with the spinlock held and IRQs blocked.
1585 */
1586irqreturn_t musb_interrupt(struct musb *musb)
1587{
1588        irqreturn_t     retval = IRQ_NONE;
1589        unsigned long   status;
1590        unsigned long   epnum;
1591        u8              devctl;
1592
1593        if (!musb->int_usb && !musb->int_tx && !musb->int_rx)
1594                return IRQ_NONE;
1595
1596        devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1597
1598        trace_musb_isr(musb);
1599
1600        /*
1601         * According to Mentor Graphics' documentation, flowchart on page 98,
1602         * IRQ should be handled as follows:
1603         *
1604         * . Resume IRQ
1605         * . Session Request IRQ
1606         * . VBUS Error IRQ
1607         * . Suspend IRQ
1608         * . Connect IRQ
1609         * . Disconnect IRQ
1610         * . Reset/Babble IRQ
1611         * . SOF IRQ (we're not using this one)
1612         * . Endpoint 0 IRQ
1613         * . TX Endpoints
1614         * . RX Endpoints
1615         *
1616         * We will be following that flowchart in order to avoid any problems
1617         * that might arise with the internal finite state machine.
1618         */
1619
1620        if (musb->int_usb)
1621                retval |= musb_stage0_irq(musb, musb->int_usb, devctl);
1622
1623        if (musb->int_tx & 1) {
1624                if (is_host_active(musb))
1625                        retval |= musb_h_ep0_irq(musb);
1626                else
1627                        retval |= musb_g_ep0_irq(musb);
1628
1629                /* we have just handled endpoint 0 IRQ, clear it */
1630                musb->int_tx &= ~BIT(0);
1631        }
1632
1633        status = musb->int_tx;
1634
1635        for_each_set_bit(epnum, &status, 16) {
1636                retval = IRQ_HANDLED;
1637                if (is_host_active(musb))
1638                        musb_host_tx(musb, epnum);
1639                else
1640                        musb_g_tx(musb, epnum);
1641        }
1642
1643        status = musb->int_rx;
1644
1645        for_each_set_bit(epnum, &status, 16) {
1646                retval = IRQ_HANDLED;
1647                if (is_host_active(musb))
1648                        musb_host_rx(musb, epnum);
1649                else
1650                        musb_g_rx(musb, epnum);
1651        }
1652
1653        return retval;
1654}
1655EXPORT_SYMBOL_GPL(musb_interrupt);
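
/*
 * Hedged sketch of the expected caller (hypothetical, modelled loosely on
 * the simpler glue layers): the glue ISR deals with any SoC-specific
 * interrupt sources first, reads the MUSB interrupt status registers into
 * musb->int_*, then calls musb_interrupt() with the lock held.
 * musb_interrupt() itself returns IRQ_NONE when all three are zero:
 *
 *	static irqreturn_t example_glue_interrupt(int irq, void *__hci)
 *	{
 *		struct musb *musb = __hci;
 *		unsigned long flags;
 *		irqreturn_t ret;
 *
 *		spin_lock_irqsave(&musb->lock, flags);
 *		musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
 *		musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
 *		musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
 *		ret = musb_interrupt(musb);
 *		spin_unlock_irqrestore(&musb->lock, flags);
 *
 *		return ret;
 *	}
 */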
1656
1657#ifndef CONFIG_MUSB_PIO_ONLY
1658static bool use_dma = true;
1659
1660/* "modprobe ... use_dma=0" etc */
1661module_param(use_dma, bool, 0644);
1662MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
1663
1664void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
1665{
1666        /* called with controller lock already held */
1667
1668        if (!epnum) {
1669                if (!is_cppi_enabled(musb)) {
1670                        /* endpoint 0 */
1671                        if (is_host_active(musb))
1672                                musb_h_ep0_irq(musb);
1673                        else
1674                                musb_g_ep0_irq(musb);
1675                }
1676        } else {
1677                /* endpoints 1..15 */
1678                if (transmit) {
1679                        if (is_host_active(musb))
1680                                musb_host_tx(musb, epnum);
1681                        else
1682                                musb_g_tx(musb, epnum);
1683                } else {
1684                        /* receive */
1685                        if (is_host_active(musb))
1686                                musb_host_rx(musb, epnum);
1687                        else
1688                                musb_g_rx(musb, epnum);
1689                }
1690        }
1691}
1692EXPORT_SYMBOL_GPL(musb_dma_completion);
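
/*
 * Hedged sketch (names from musb_dma.h, not a verbatim copy of any backend):
 * a MUSB DMA controller driver calls this from its own completion interrupt
 * once a channel has drained, so the usual endpoint handlers run as if the
 * endpoint interrupt had fired:
 *
 *	// in the DMA backend's completion handler, musb->lock held
 *	channel->status = MUSB_DMA_STATUS_FREE;
 *	musb_dma_completion(musb, hw_ep->epnum, transmit);
 */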
1693
1694#else
1695#define use_dma                 0
1696#endif
1697
1698static int (*musb_phy_callback)(enum musb_vbus_id_status status);
1699
1700/*
1701 * musb_mailbox - optional phy notifier function
1702 * @status: phy state change
1703 *
1704 * Optionally gets called from the USB PHY. Note that the USB PHY must be
1705 * disabled at the point the phy_callback is registered or unregistered.
1706 */
1707int musb_mailbox(enum musb_vbus_id_status status)
1708{
1709        if (musb_phy_callback)
1710                return musb_phy_callback(status);
1711
1712        return -ENODEV;
1713}
1714EXPORT_SYMBOL_GPL(musb_mailbox);
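
/*
 * Hedged usage sketch: a companion PHY or charger driver reports VBUS/ID
 * changes using the enum musb_vbus_id_status values, e.g.:
 *
 *	musb_mailbox(MUSB_ID_GROUND);	// ID pin grounded, host role
 *	musb_mailbox(MUSB_VBUS_VALID);	// VBUS present, peripheral role
 *	musb_mailbox(MUSB_VBUS_OFF);	// cable gone
 *
 * A -ENODEV return simply means no glue layer has registered
 * musb_phy_callback yet, so the notification is dropped.
 */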
1715
1716/*-------------------------------------------------------------------------*/
1717
1718static ssize_t
1719mode_show(struct device *dev, struct device_attribute *attr, char *buf)
1720{
1721        struct musb *musb = dev_to_musb(dev);
1722        unsigned long flags;
1723        int ret = -EINVAL;
1724
1725        spin_lock_irqsave(&musb->lock, flags);
1726        ret = sprintf(buf, "%s\n", usb_otg_state_string(musb->xceiv->otg->state));
1727        spin_unlock_irqrestore(&musb->lock, flags);
1728
1729        return ret;
1730}
1731
1732static ssize_t
1733mode_store(struct device *dev, struct device_attribute *attr,
1734                const char *buf, size_t n)
1735{
1736        struct musb     *musb = dev_to_musb(dev);
1737        unsigned long   flags;
1738        int             status;
1739
1740        spin_lock_irqsave(&musb->lock, flags);
1741        if (sysfs_streq(buf, "host"))
1742                status = musb_platform_set_mode(musb, MUSB_HOST);
1743        else if (sysfs_streq(buf, "peripheral"))
1744                status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
1745        else if (sysfs_streq(buf, "otg"))
1746                status = musb_platform_set_mode(musb, MUSB_OTG);
1747        else
1748                status = -EINVAL;
1749        spin_unlock_irqrestore(&musb->lock, flags);
1750
1751        return (status == 0) ? n : status;
1752}
1753static DEVICE_ATTR_RW(mode);
1754
1755static ssize_t
1756vbus_store(struct device *dev, struct device_attribute *attr,
1757                const char *buf, size_t n)
1758{
1759        struct musb     *musb = dev_to_musb(dev);
1760        unsigned long   flags;
1761        unsigned long   val;
1762
1763        if (sscanf(buf, "%lu", &val) < 1) {
1764                dev_err(dev, "Invalid VBUS timeout ms value\n");
1765                return -EINVAL;
1766        }
1767
1768        spin_lock_irqsave(&musb->lock, flags);
1769        /* force T(a_wait_bcon) to be zero/unlimited *OR* valid */
1770        musb->a_wait_bcon = val ? max_t(int, val, OTG_TIME_A_WAIT_BCON) : 0;
1771        if (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)
1772                musb->is_active = 0;
1773        musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
1774        spin_unlock_irqrestore(&musb->lock, flags);
1775
1776        return n;
1777}
1778
1779static ssize_t
1780vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
1781{
1782        struct musb     *musb = dev_to_musb(dev);
1783        unsigned long   flags;
1784        unsigned long   val;
1785        int             vbus;
1786        u8              devctl;
1787
1788        pm_runtime_get_sync(dev);
1789        spin_lock_irqsave(&musb->lock, flags);
1790        val = musb->a_wait_bcon;
1791        vbus = musb_platform_get_vbus_status(musb);
1792        if (vbus < 0) {
1793                /* Use default MUSB method by means of DEVCTL register */
1794                devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1795                if ((devctl & MUSB_DEVCTL_VBUS)
1796                                == (3 << MUSB_DEVCTL_VBUS_SHIFT))
1797                        vbus = 1;
1798                else
1799                        vbus = 0;
1800        }
1801        spin_unlock_irqrestore(&musb->lock, flags);
1802        pm_runtime_put_sync(dev);
1803
1804        return sprintf(buf, "Vbus %s, timeout %lu msec\n",
1805                        vbus ? "on" : "off", val);
1806}
1807static DEVICE_ATTR_RW(vbus);
1808
1809/* Gadget drivers can't know that a host is connected so they might want
1810 * to start SRP, but users can.  This allows userspace to trigger SRP.
1811 */
1812static ssize_t srp_store(struct device *dev, struct device_attribute *attr,
1813                const char *buf, size_t n)
1814{
1815        struct musb     *musb = dev_to_musb(dev);
1816        unsigned short  srp;
1817
1818        if (sscanf(buf, "%hu", &srp) != 1
1819                        || (srp != 1)) {
1820                dev_err(dev, "SRP: Value must be 1\n");
1821                return -EINVAL;
1822        }
1823
1824        musb_g_wakeup(musb);
1826
1827        return n;
1828}
1829static DEVICE_ATTR_WO(srp);
1830
1831static struct attribute *musb_attributes[] = {
1832        &dev_attr_mode.attr,
1833        &dev_attr_vbus.attr,
1834        &dev_attr_srp.attr,
1835        NULL
1836};
1837
1838static const struct attribute_group musb_attr_group = {
1839        .attrs = musb_attributes,
1840};
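
/*
 * Editorial note: these attributes appear in the controller device's sysfs
 * directory (the exact path is platform specific).  "mode" reads back the
 * current OTG state and accepts "host", "peripheral" or "otg"; "vbus"
 * reads/sets the a_wait_bcon timeout in msec (0 = unlimited) and reports
 * whether VBUS is on; "srp" accepts "1" to trigger SRP from the gadget side.
 */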
1841
1842#define MUSB_QUIRK_B_INVALID_VBUS_91    (MUSB_DEVCTL_BDEVICE | \
1843                                         (2 << MUSB_DEVCTL_VBUS_SHIFT) | \
1844                                         MUSB_DEVCTL_SESSION)
1845#define MUSB_QUIRK_A_DISCONNECT_19      ((3 << MUSB_DEVCTL_VBUS_SHIFT) | \
1846                                         MUSB_DEVCTL_SESSION)
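
/*
 * The _91/_19 suffixes are simply the raw DEVCTL values these masks encode,
 * assuming the usual DEVCTL bit layout from musb_regs.h (B-Device = bit 7,
 * VBUS = bits 4:3, SESSION = bit 0):
 *
 *	MUSB_QUIRK_B_INVALID_VBUS_91 = 0x80 | (2 << 3) | 0x01 = 0x91
 *	MUSB_QUIRK_A_DISCONNECT_19   =        (3 << 3) | 0x01 = 0x19
 */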
1847
1848/*
1849 * Check the musb devctl session bit to determine if we want to
1850 * allow PM runtime for the device. In general, we want to keep things
1851 * active when the session bit is set except after host disconnect.
1852 *
1853 * Only called from musb_irq_work. If this ever needs to get called
1854 * elsewhere, proper locking must be implemented for musb->session.
1855 */
1856static void musb_pm_runtime_check_session(struct musb *musb)
1857{
1858        u8 devctl, s;
1859        int error;
1860
1861        devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1862
1863        /* Handle session status quirks first */
1864        s = MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV |
1865                MUSB_DEVCTL_HR;
1866        switch (devctl & ~s) {
1867        case MUSB_QUIRK_B_INVALID_VBUS_91:
1868                if (musb->quirk_retries && !musb->flush_irq_work) {
1869                        musb_dbg(musb,
1870                                 "Poll devctl on invalid vbus, assume no session");
1871                        schedule_delayed_work(&musb->irq_work,
1872                                              msecs_to_jiffies(1000));
1873                        musb->quirk_retries--;
1874                        return;
1875                }
1876                /* fall through */
1877        case MUSB_QUIRK_A_DISCONNECT_19:
1878                if (musb->quirk_retries && !musb->flush_irq_work) {
1879                        musb_dbg(musb,
1880                                 "Poll devctl on possible host mode disconnect");
1881                        schedule_delayed_work(&musb->irq_work,
1882                                              msecs_to_jiffies(1000));
1883                        musb->quirk_retries--;
1884                        return;
1885                }
1886                if (!musb->session)
1887                        break;
1888                musb_dbg(musb, "Allow PM on possible host mode disconnect");
1889                pm_runtime_mark_last_busy(musb->controller);
1890                pm_runtime_put_autosuspend(musb->controller);
1891                musb->session = false;
1892                return;
1893        default:
1894                break;
1895        }
1896
1897        /* No need to do anything if session has not changed */
1898        s = devctl & MUSB_DEVCTL_SESSION;
1899        if (s == musb->session)
1900                return;
1901
1902        /* Block PM or allow PM? */
1903        if (s) {
1904                musb_dbg(musb, "Block PM on active session: %02x", devctl);
1905                error = pm_runtime_get_sync(musb->controller);
1906                if (error < 0)
1907                        dev_err(musb->controller, "Could not enable: %i\n",
1908                                error);
1909                musb->quirk_retries = 3;
1910        } else {
1911                musb_dbg(musb, "Allow PM with no session: %02x", devctl);
1912                pm_runtime_mark_last_busy(musb->controller);
1913                pm_runtime_put_autosuspend(musb->controller);
1914        }
1915
1916        musb->session = s;
1917}
1918
1919/* Only used to provide driver mode change events */
1920static void musb_irq_work(struct work_struct *data)
1921{
1922        struct musb *musb = container_of(data, struct musb, irq_work.work);
1923        int error;
1924
1925        error = pm_runtime_get_sync(musb->controller);
1926        if (error < 0) {
1927                dev_err(musb->controller, "Could not enable: %i\n", error);
1928
1929                return;
1930        }
1931
1932        musb_pm_runtime_check_session(musb);
1933
1934        if (musb->xceiv->otg->state != musb->xceiv_old_state) {
1935                musb->xceiv_old_state = musb->xceiv->otg->state;
1936                sysfs_notify(&musb->controller->kobj, NULL, "mode");
1937        }
1938
1939        pm_runtime_mark_last_busy(musb->controller);
1940        pm_runtime_put_autosuspend(musb->controller);
1941}
1942
1943static void musb_recover_from_babble(struct musb *musb)
1944{
1945        int ret;
1946        u8 devctl;
1947
1948        musb_disable_interrupts(musb);
1949
1950        /*
1951         * Wait at least 320 cycles of the 60 MHz clock. That's 5.3us; give
1952         * it some slack and wait for 10us.
1953         */
1954        udelay(10);
1955
1956        ret  = musb_platform_recover(musb);
1957        if (ret) {
1958                musb_enable_interrupts(musb);
1959                return;
1960        }
1961
1962        /* drop session bit */
1963        devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1964        devctl &= ~MUSB_DEVCTL_SESSION;
1965        musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
1966
1967        /* tell usbcore about it */
1968        musb_root_disconnect(musb);
1969
1970        /*
1971         * When a babble condition occurs, the musb controller
1972         * removes the session bit and the endpoint config is lost.
1973         */
1974        if (musb->dyn_fifo)
1975                ret = ep_config_from_table(musb);
1976        else
1977                ret = ep_config_from_hw(musb);
1978
1979        /* restart session */
1980        if (ret == 0)
1981                musb_start(musb);
1982}
1983
1984/* --------------------------------------------------------------------------
1985 * Init support
1986 */
1987
1988static struct musb *allocate_instance(struct device *dev,
1989                const struct musb_hdrc_config *config, void __iomem *mbase)
1990{
1991        struct musb             *musb;
1992        struct musb_hw_ep       *ep;
1993        int                     epnum;
1994        int                     ret;
1995
1996        musb = devm_kzalloc(dev, sizeof(*musb), GFP_KERNEL);
1997        if (!musb)
1998                return NULL;
1999
2000        INIT_LIST_HEAD(&musb->control);
2001        INIT_LIST_HEAD(&musb->in_bulk);
2002        INIT_LIST_HEAD(&musb->out_bulk);
2003        INIT_LIST_HEAD(&musb->pending_list);
2004
2005        musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
2006        musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
2007        musb->mregs = mbase;
2008        musb->ctrl_base = mbase;
2009        musb->nIrq = -ENODEV;
2010        musb->config = config;
2011        BUG_ON(musb->config->num_eps > MUSB_C_NUM_EPS);
2012        for (epnum = 0, ep = musb->endpoints;
2013                        epnum < musb->config->num_eps;
2014                        epnum++, ep++) {
2015                ep->musb = musb;
2016                ep->epnum = epnum;
2017        }
2018
2019        musb->controller = dev;
2020
2021        ret = musb_host_alloc(musb);
2022        if (ret < 0)
2023                goto err_free;
2024
2025        dev_set_drvdata(dev, musb);
2026
2027        return musb;
2028
2029err_free:
2030        return NULL;
2031}
2032
2033static void musb_free(struct musb *musb)
2034{
2035        /* this has multiple entry modes. it handles fault cleanup after
2036         * probe(), where things may be partially set up, as well as rmmod
2037         * cleanup after everything's been de-activated.
2038         */
2039
2040#ifdef CONFIG_SYSFS
2041        sysfs_remove_group(&musb->controller->kobj, &musb_attr_group);
2042#endif
2043
2044        if (musb->nIrq >= 0) {
2045                if (musb->irq_wake)
2046                        disable_irq_wake(musb->nIrq);
2047                free_irq(musb->nIrq, musb);
2048        }
2049
2050        musb_host_free(musb);
2051}
2052
2053struct musb_pending_work {
2054        int (*callback)(struct musb *musb, void *data);
2055        void *data;
2056        struct list_head node;
2057};
2058
2059#ifdef CONFIG_PM
2060/*
2061 * Called from musb_runtime_resume() and musb_resume() to run the work
2062 * queued by musb_queue_resume_work(). Callers must take musb->lock.
2063 */
2064static int musb_run_resume_work(struct musb *musb)
2065{
2066        struct musb_pending_work *w, *_w;
2067        unsigned long flags;
2068        int error = 0;
2069
2070        spin_lock_irqsave(&musb->list_lock, flags);
2071        list_for_each_entry_safe(w, _w, &musb->pending_list, node) {
2072                if (w->callback) {
2073                        error = w->callback(musb, w->data);
2074                        if (error < 0) {
2075                                dev_err(musb->controller,
2076                                        "resume callback %p failed: %i\n",
2077                                        w->callback, error);
2078                        }
2079                }
2080                list_del(&w->node);
2081                devm_kfree(musb->controller, w);
2082        }
2083        spin_unlock_irqrestore(&musb->list_lock, flags);
2084
2085        return error;
2086}
2087#endif
2088
2089/*
2090 * Called to run work if device is active or else queue the work to happen
2091 * on resume. Caller must take musb->lock and must hold an RPM reference.
2092 *
2093 * Note that we cowardly refuse to queue work after musb PM runtime
2094 * resume has finished calling musb_run_resume_work(), and return
2095 * -EINPROGRESS instead.
2096 */
2097int musb_queue_resume_work(struct musb *musb,
2098                           int (*callback)(struct musb *musb, void *data),
2099                           void *data)
2100{
2101        struct musb_pending_work *w;
2102        unsigned long flags;
2103        int error;
2104
2105        if (WARN_ON(!callback))
2106                return -EINVAL;
2107
2108        if (pm_runtime_active(musb->controller))
2109                return callback(musb, data);
2110
2111        w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
2112        if (!w)
2113                return -ENOMEM;
2114
2115        w->callback = callback;
2116        w->data = data;
2117        spin_lock_irqsave(&musb->list_lock, flags);
2118        if (musb->is_runtime_suspended) {
2119                list_add_tail(&w->node, &musb->pending_list);
2120                error = 0;
2121        } else {
2122                dev_err(musb->controller, "could not add resume work %p\n",
2123                        callback);
2124                devm_kfree(musb->controller, w);
2125                error = -EINPROGRESS;
2126        }
2127        spin_unlock_irqrestore(&musb->list_lock, flags);
2128
2129        return error;
2130}
2131EXPORT_SYMBOL_GPL(musb_queue_resume_work);
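
/*
 * Hypothetical usage sketch (not lifted from an existing caller): code that
 * must touch registers but may run while the controller is runtime
 * suspended can defer the access through this helper, e.g.:
 *
 *	static int example_set_session(struct musb *musb, void *data)
 *	{
 *		u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
 *
 *		musb_writeb(musb->mregs, MUSB_DEVCTL,
 *			    devctl | MUSB_DEVCTL_SESSION);
 *		return 0;
 *	}
 *
 *	// caller holds musb->lock and an RPM reference, as documented above
 *	error = musb_queue_resume_work(musb, example_set_session, NULL);
 *
 * The callback runs right away if the device is pm_runtime_active(),
 * otherwise musb_run_resume_work() runs it on the next resume.
 */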
2132
2133static void musb_deassert_reset(struct work_struct *work)
2134{
2135        struct musb *musb;
2136        unsigned long flags;
2137
2138        musb = container_of(work, struct musb, deassert_reset_work.work);
2139
2140        spin_lock_irqsave(&musb->lock, flags);
2141
2142        if (musb->port1_status & USB_PORT_STAT_RESET)
2143                musb_port_reset(musb, false);
2144
2145        spin_unlock_irqrestore(&musb->lock, flags);
2146}
2147
2148/*
2149 * Perform generic per-controller initialization.
2150 *
2151 * @dev: the controller (already clocked, etc)
2152 * @nIrq: IRQ number
2153 * @ctrl: virtual address of controller registers,
2154 *      not yet corrected for platform-specific offsets
2155 */
2156static int
2157musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2158{
2159        int                     status;
2160        struct musb             *musb;
2161        struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
2162
2163        /* The driver might handle more features than the board; OK.
2164         * Fail when the board needs a feature that's not enabled.
2165         */
2166        if (!plat) {
2167                dev_err(dev, "no platform_data?\n");
2168                status = -ENODEV;
2169                goto fail0;
2170        }
2171
2172        /* allocate */
2173        musb = allocate_instance(dev, plat->config, ctrl);
2174        if (!musb) {
2175                status = -ENOMEM;
2176                goto fail0;
2177        }
2178
2179        spin_lock_init(&musb->lock);
2180        spin_lock_init(&musb->list_lock);
2181        musb->board_set_power = plat->set_power;
2182        musb->min_power = plat->min_power;
2183        musb->ops = plat->platform_ops;
2184        musb->port_mode = plat->mode;
2185
2186        /*
2187         * Initialize the default IO functions. At least omap2430 needs
2188         * these early. We initialize the platform specific IO functions
2189         * later on.
2190         */
2191        musb_readb = musb_default_readb;
2192        musb_writeb = musb_default_writeb;
2193        musb_readw = musb_default_readw;
2194        musb_writew = musb_default_writew;
2195
2196        /* The musb_platform_init() call:
2197         *   - adjusts musb->mregs
2198         *   - sets the musb->isr
2199         *   - may initialize an integrated transceiver
2200         *   - initializes musb->xceiv, usually by otg_get_phy()
2201         *   - stops powering VBUS
2202         *
2203         * There are various transceiver configurations.
2204         * DaVinci, TUSB60x0, and others integrate them.  OMAP3 uses
2205         * external/discrete ones in various flavors (twl4030 family,
2206         * isp1504, non-OTG, etc) mostly hooking up through ULPI.
2207         */
2208        status = musb_platform_init(musb);
2209        if (status < 0)
2210                goto fail1;
2211
2212        if (!musb->isr) {
2213                status = -ENODEV;
2214                goto fail2;
2215        }
2216
2218        /* Most devices use indexed offset or flat offset */
2219        if (musb->ops->quirks & MUSB_INDEXED_EP) {
2220                musb->io.ep_offset = musb_indexed_ep_offset;
2221                musb->io.ep_select = musb_indexed_ep_select;
2222        } else {
2223                musb->io.ep_offset = musb_flat_ep_offset;
2224                musb->io.ep_select = musb_flat_ep_select;
2225        }
2226
2227        if (musb->ops->quirks & MUSB_G_NO_SKB_RESERVE)
2228                musb->g.quirk_avoids_skb_reserve = 1;
2229
2230        /* At least tusb6010 has its own offsets */
2231        if (musb->ops->ep_offset)
2232                musb->io.ep_offset = musb->ops->ep_offset;
2233        if (musb->ops->ep_select)
2234                musb->io.ep_select = musb->ops->ep_select;
2235
2236        if (musb->ops->fifo_mode)
2237                fifo_mode = musb->ops->fifo_mode;
2238        else
2239                fifo_mode = 4;
2240
2241        if (musb->ops->fifo_offset)
2242                musb->io.fifo_offset = musb->ops->fifo_offset;
2243        else
2244                musb->io.fifo_offset = musb_default_fifo_offset;
2245
2246        if (musb->ops->busctl_offset)
2247                musb->io.busctl_offset = musb->ops->busctl_offset;
2248        else
2249                musb->io.busctl_offset = musb_default_busctl_offset;
2250
2251        if (musb->ops->readb)
2252                musb_readb = musb->ops->readb;
2253        if (musb->ops->writeb)
2254                musb_writeb = musb->ops->writeb;
2255        if (musb->ops->readw)
2256                musb_readw = musb->ops->readw;
2257        if (musb->ops->writew)
2258                musb_writew = musb->ops->writew;
2259
2260#ifndef CONFIG_MUSB_PIO_ONLY
2261        if (!musb->ops->dma_init || !musb->ops->dma_exit) {
2262                dev_err(dev, "DMA controller not set\n");
2263                status = -ENODEV;
2264                goto fail2;
2265        }
2266        musb_dma_controller_create = musb->ops->dma_init;
2267        musb_dma_controller_destroy = musb->ops->dma_exit;
2268#endif
2269
2270        if (musb->ops->read_fifo)
2271                musb->io.read_fifo = musb->ops->read_fifo;
2272        else
2273                musb->io.read_fifo = musb_default_read_fifo;
2274
2275        if (musb->ops->write_fifo)
2276                musb->io.write_fifo = musb->ops->write_fifo;
2277        else
2278                musb->io.write_fifo = musb_default_write_fifo;
2279
2280        if (!musb->xceiv->io_ops) {
2281                musb->xceiv->io_dev = musb->controller;
2282                musb->xceiv->io_priv = musb->mregs;
2283                musb->xceiv->io_ops = &musb_ulpi_access;
2284        }
2285
2286        if (musb->ops->phy_callback)
2287                musb_phy_callback = musb->ops->phy_callback;
2288
2289        /*
2290         * We need musb_read/write functions initialized for PM.
2291         * Note that at least 2430 glue needs autosuspend delay
2292         * somewhere above 300 ms for the hardware to idle properly
2293         * after disconnecting the cable in host mode. Let's use
2294         * 500 ms for some margin.
2295         */
2296        pm_runtime_use_autosuspend(musb->controller);
2297        pm_runtime_set_autosuspend_delay(musb->controller, 500);
2298        pm_runtime_enable(musb->controller);
2299        pm_runtime_get_sync(musb->controller);
2300
2301        status = usb_phy_init(musb->xceiv);
2302        if (status < 0)
2303                goto err_usb_phy_init;
2304
2305        if (use_dma && dev->dma_mask) {
2306                musb->dma_controller =
2307                        musb_dma_controller_create(musb, musb->mregs);
2308                if (IS_ERR(musb->dma_controller)) {
2309                        status = PTR_ERR(musb->dma_controller);
2310                        goto fail2_5;
2311                }
2312        }
2313
2314        /* be sure interrupts are disabled before connecting ISR */
2315        musb_platform_disable(musb);
2316        musb_disable_interrupts(musb);
2317        musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2318
2319        /* Init IRQ workqueue before request_irq */
2320        INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work);
2321        INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
2322        INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume);
2323
2324        /* setup musb parts of the core (especially endpoints) */
2325        status = musb_core_init(plat->config->multipoint
2326                        ? MUSB_CONTROLLER_MHDRC
2327                        : MUSB_CONTROLLER_HDRC, musb);
2328        if (status < 0)
2329                goto fail3;
2330
2331        timer_setup(&musb->otg_timer, musb_otg_timer_func, 0);
2332
2333        /* attach to the IRQ */
2334        if (request_irq(nIrq, musb->isr, IRQF_SHARED, dev_name(dev), musb)) {
2335                dev_err(dev, "request_irq %d failed!\n", nIrq);
2336                status = -ENODEV;
2337                goto fail3;
2338        }
2339        musb->nIrq = nIrq;
2340        /* FIXME this handles wakeup irqs wrong */
2341        if (enable_irq_wake(nIrq) == 0) {
2342                musb->irq_wake = 1;
2343                device_init_wakeup(dev, 1);
2344        } else {
2345                musb->irq_wake = 0;
2346        }
2347
2348        /* program PHY to use external vBus if required */
2349        if (plat->extvbus) {
2350                u8 busctl = musb_readb(musb->mregs, MUSB_ULPI_BUSCONTROL);
2351                busctl |= MUSB_ULPI_USE_EXTVBUS;
2352                musb_writeb(musb->mregs, MUSB_ULPI_BUSCONTROL, busctl);
2353        }
2354
2355        MUSB_DEV_MODE(musb);
2356        musb->xceiv->otg->state = OTG_STATE_B_IDLE;
2357
2358        switch (musb->port_mode) {
2359        case MUSB_HOST:
2360                status = musb_host_setup(musb, plat->power);
2361                if (status < 0)
2362                        goto fail3;
2363                status = musb_platform_set_mode(musb, MUSB_HOST);
2364                break;
2365        case MUSB_PERIPHERAL:
2366                status = musb_gadget_setup(musb);
2367                if (status < 0)
2368                        goto fail3;
2369                status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
2370                break;
2371        case MUSB_OTG:
2372                status = musb_host_setup(musb, plat->power);
2373                if (status < 0)
2374                        goto fail3;
2375                status = musb_gadget_setup(musb);
2376                if (status) {
2377                        musb_host_cleanup(musb);
2378                        goto fail3;
2379                }
2380                status = musb_platform_set_mode(musb, MUSB_OTG);
2381                break;
2382        default:
2383                dev_err(dev, "unsupported port mode %d\n", musb->port_mode);
2384                break;
2385        }
2386
2387        if (status < 0)
2388                goto fail3;
2389
2390        musb_init_debugfs(musb);
2391
2392        status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group);
2393        if (status)
2394                goto fail5;
2395
2396        musb->is_initialized = 1;
2397        pm_runtime_mark_last_busy(musb->controller);
2398        pm_runtime_put_autosuspend(musb->controller);
2399
2400        return 0;
2401
2402fail5:
2403        musb_exit_debugfs(musb);
2404
2405        musb_gadget_cleanup(musb);
2406        musb_host_cleanup(musb);
2407
2408fail3:
2409        cancel_delayed_work_sync(&musb->irq_work);
2410        cancel_delayed_work_sync(&musb->finish_resume_work);
2411        cancel_delayed_work_sync(&musb->deassert_reset_work);
2412        if (musb->dma_controller)
2413                musb_dma_controller_destroy(musb->dma_controller);
2414
2415fail2_5:
2416        usb_phy_shutdown(musb->xceiv);
2417
2418err_usb_phy_init:
2419        pm_runtime_dont_use_autosuspend(musb->controller);
2420        pm_runtime_put_sync(musb->controller);
2421        pm_runtime_disable(musb->controller);
2422
2423fail2:
2424        if (musb->irq_wake)
2425                device_init_wakeup(dev, 0);
2426        musb_platform_exit(musb);
2427
2428fail1:
2429        if (status != -EPROBE_DEFER)
2430                dev_err(musb->controller,
2431                        "%s failed with status %d\n", __func__, status);
2432
2433        musb_free(musb);
2434
2435fail0:
2436
2437        return status;
2438
2439}
2440
2441/*-------------------------------------------------------------------------*/
2442
2443/* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just
2444 * bridge to a platform device; this driver then suffices.
2445 */
2446static int musb_probe(struct platform_device *pdev)
2447{
2448        struct device   *dev = &pdev->dev;
2449        int             irq = platform_get_irq_byname(pdev, "mc");
2450        struct resource *iomem;
2451        void __iomem    *base;
2452
2453        if (irq <= 0)
2454                return -ENODEV;
2455
2456        iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2457        base = devm_ioremap_resource(dev, iomem);
2458        if (IS_ERR(base))
2459                return PTR_ERR(base);
2460
2461        return musb_init_controller(dev, irq, base);
2462}
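
/*
 * Rough sketch of the glue side (hypothetical and heavily simplified, error
 * handling omitted; addresses, sizes and the pdata contents are
 * placeholders): each glue layer registers a "musb-hdrc" child device whose
 * memory resource and "mc" IRQ are what musb_probe() above picks up:
 *
 *	static struct resource example_res[] = {
 *		DEFINE_RES_MEM(MUSB_CTRL_BASE, SZ_2K),
 *		DEFINE_RES_IRQ_NAMED(MUSB_MC_IRQ, "mc"),
 *	};
 *
 *	musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
 *	platform_device_add_resources(musb, example_res,
 *				      ARRAY_SIZE(example_res));
 *	platform_device_add_data(musb, &pdata, sizeof(pdata));
 *	platform_device_add(musb);
 */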
2463
2464static int musb_remove(struct platform_device *pdev)
2465{
2466        struct device   *dev = &pdev->dev;
2467        struct musb     *musb = dev_to_musb(dev);
2468        unsigned long   flags;
2469
2470        /* this gets called on rmmod.
2471         *  - Host mode: host may still be active
2472         *  - Peripheral mode: peripheral is deactivated (or never-activated)
2473         *  - OTG mode: both roles are deactivated (or never-activated)
2474         */
2475        musb_exit_debugfs(musb);
2476
2477        cancel_delayed_work_sync(&musb->irq_work);
2478        cancel_delayed_work_sync(&musb->finish_resume_work);
2479        cancel_delayed_work_sync(&musb->deassert_reset_work);
2480        pm_runtime_get_sync(musb->controller);
2481        musb_host_cleanup(musb);
2482        musb_gadget_cleanup(musb);
2483
2484        musb_platform_disable(musb);
2485        spin_lock_irqsave(&musb->lock, flags);
2486        musb_disable_interrupts(musb);
2487        musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2488        spin_unlock_irqrestore(&musb->lock, flags);
2489        musb_platform_exit(musb);
2490
2491        pm_runtime_dont_use_autosuspend(musb->controller);
2492        pm_runtime_put_sync(musb->controller);
2493        pm_runtime_disable(musb->controller);
2494        musb_phy_callback = NULL;
2495        if (musb->dma_controller)
2496                musb_dma_controller_destroy(musb->dma_controller);
2497        usb_phy_shutdown(musb->xceiv);
2498        musb_free(musb);
2499        device_init_wakeup(dev, 0);
2500        return 0;
2501}
2502
2503#ifdef  CONFIG_PM
2504
2505static void musb_save_context(struct musb *musb)
2506{
2507        int i;
2508        void __iomem *musb_base = musb->mregs;
2509        void __iomem *epio;
2510
2511        musb->context.frame = musb_readw(musb_base, MUSB_FRAME);
2512        musb->context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
2513        musb->context.busctl = musb_readb(musb_base, MUSB_ULPI_BUSCONTROL);
2514        musb->context.power = musb_readb(musb_base, MUSB_POWER);
2515        musb->context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE);
2516        musb->context.index = musb_readb(musb_base, MUSB_INDEX);
2517        musb->context.devctl = musb_readb(musb_base, MUSB_DEVCTL);
2518
2519        for (i = 0; i < musb->config->num_eps; ++i) {
2520                struct musb_hw_ep       *hw_ep;
2521
2522                hw_ep = &musb->endpoints[i];
2523                if (!hw_ep)
2524                        continue;
2525
2526                epio = hw_ep->regs;
2527                if (!epio)
2528                        continue;
2529
2530                musb_writeb(musb_base, MUSB_INDEX, i);
2531                musb->context.index_regs[i].txmaxp =
2532                        musb_readw(epio, MUSB_TXMAXP);
2533                musb->context.index_regs[i].txcsr =
2534                        musb_readw(epio, MUSB_TXCSR);
2535                musb->context.index_regs[i].rxmaxp =
2536                        musb_readw(epio, MUSB_RXMAXP);
2537                musb->context.index_regs[i].rxcsr =
2538                        musb_readw(epio, MUSB_RXCSR);
2539
2540                if (musb->dyn_fifo) {
2541                        musb->context.index_regs[i].txfifoadd =
2542                                        musb_readw(musb_base, MUSB_TXFIFOADD);
2543                        musb->context.index_regs[i].rxfifoadd =
2544                                        musb_readw(musb_base, MUSB_RXFIFOADD);
2545                        musb->context.index_regs[i].txfifosz =
2546                                        musb_readb(musb_base, MUSB_TXFIFOSZ);
2547                        musb->context.index_regs[i].rxfifosz =
2548                                        musb_readb(musb_base, MUSB_RXFIFOSZ);
2549                }
2550
2551                musb->context.index_regs[i].txtype =
2552                        musb_readb(epio, MUSB_TXTYPE);
2553                musb->context.index_regs[i].txinterval =
2554                        musb_readb(epio, MUSB_TXINTERVAL);
2555                musb->context.index_regs[i].rxtype =
2556                        musb_readb(epio, MUSB_RXTYPE);
2557                musb->context.index_regs[i].rxinterval =
2558                        musb_readb(epio, MUSB_RXINTERVAL);
2559
2560                musb->context.index_regs[i].txfunaddr =
2561                        musb_read_txfunaddr(musb, i);
2562                musb->context.index_regs[i].txhubaddr =
2563                        musb_read_txhubaddr(musb, i);
2564                musb->context.index_regs[i].txhubport =
2565                        musb_read_txhubport(musb, i);
2566
2567                musb->context.index_regs[i].rxfunaddr =
2568                        musb_read_rxfunaddr(musb, i);
2569                musb->context.index_regs[i].rxhubaddr =
2570                        musb_read_rxhubaddr(musb, i);
2571                musb->context.index_regs[i].rxhubport =
2572                        musb_read_rxhubport(musb, i);
2573        }
2574}
2575
2576static void musb_restore_context(struct musb *musb)
2577{
2578        int i;
2579        void __iomem *musb_base = musb->mregs;
2580        void __iomem *epio;
2581        u8 power;
2582
2583        musb_writew(musb_base, MUSB_FRAME, musb->context.frame);
2584        musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode);
2585        musb_writeb(musb_base, MUSB_ULPI_BUSCONTROL, musb->context.busctl);
2586
2587        /* Don't affect SUSPENDM/RESUME bits in POWER reg */
2588        power = musb_readb(musb_base, MUSB_POWER);
2589        power &= MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME;
2590        musb->context.power &= ~(MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME);
2591        power |= musb->context.power;
2592        musb_writeb(musb_base, MUSB_POWER, power);
2593
2594        musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
2595        musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
2596        musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
2597        if (musb->context.devctl & MUSB_DEVCTL_SESSION)
2598                musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
2599
2600        for (i = 0; i < musb->config->num_eps; ++i) {
2601                struct musb_hw_ep       *hw_ep;
2602
2603                hw_ep = &musb->endpoints[i];
2604                if (!hw_ep)
2605                        continue;
2606
2607                epio = hw_ep->regs;
2608                if (!epio)
2609                        continue;
2610
2611                musb_writeb(musb_base, MUSB_INDEX, i);
2612                musb_writew(epio, MUSB_TXMAXP,
2613                        musb->context.index_regs[i].txmaxp);
2614                musb_writew(epio, MUSB_TXCSR,
2615                        musb->context.index_regs[i].txcsr);
2616                musb_writew(epio, MUSB_RXMAXP,
2617                        musb->context.index_regs[i].rxmaxp);
2618                musb_writew(epio, MUSB_RXCSR,
2619                        musb->context.index_regs[i].rxcsr);
2620
2621                if (musb->dyn_fifo) {
2622                        musb_writeb(musb_base, MUSB_TXFIFOSZ,
2623                                musb->context.index_regs[i].txfifosz);
2624                        musb_writeb(musb_base, MUSB_RXFIFOSZ,
2625                                musb->context.index_regs[i].rxfifosz);
2626                        musb_writew(musb_base, MUSB_TXFIFOADD,
2627                                musb->context.index_regs[i].txfifoadd);
2628                        musb_writew(musb_base, MUSB_RXFIFOADD,
2629                                musb->context.index_regs[i].rxfifoadd);
2630                }
2631
2632                musb_writeb(epio, MUSB_TXTYPE,
2633                                musb->context.index_regs[i].txtype);
2634                musb_writeb(epio, MUSB_TXINTERVAL,
2635                                musb->context.index_regs[i].txinterval);
2636                musb_writeb(epio, MUSB_RXTYPE,
2637                                musb->context.index_regs[i].rxtype);
2638                musb_writeb(epio, MUSB_RXINTERVAL,
2640                                musb->context.index_regs[i].rxinterval);
2641                musb_write_txfunaddr(musb, i,
2642                                musb->context.index_regs[i].txfunaddr);
2643                musb_write_txhubaddr(musb, i,
2644                                musb->context.index_regs[i].txhubaddr);
2645                musb_write_txhubport(musb, i,
2646                                musb->context.index_regs[i].txhubport);
2647
2648                musb_write_rxfunaddr(musb, i,
2649                                musb->context.index_regs[i].rxfunaddr);
2650                musb_write_rxhubaddr(musb, i,
2651                                musb->context.index_regs[i].rxhubaddr);
2652                musb_write_rxhubport(musb, i,
2653                                musb->context.index_regs[i].rxhubport);
2654        }
2655        musb_writeb(musb_base, MUSB_INDEX, musb->context.index);
2656}
2657
2658static int musb_suspend(struct device *dev)
2659{
2660        struct musb     *musb = dev_to_musb(dev);
2661        unsigned long   flags;
2662        int ret;
2663
2664        ret = pm_runtime_get_sync(dev);
2665        if (ret < 0) {
2666                pm_runtime_put_noidle(dev);
2667                return ret;
2668        }
2669
2670        musb_platform_disable(musb);
2671        musb_disable_interrupts(musb);
2672
2673        musb->flush_irq_work = true;
2674        while (flush_delayed_work(&musb->irq_work))
2675                ;
2676        musb->flush_irq_work = false;
2677
2678        if (!(musb->ops->quirks & MUSB_PRESERVE_SESSION))
2679                musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2680
2681        WARN_ON(!list_empty(&musb->pending_list));
2682
2683        spin_lock_irqsave(&musb->lock, flags);
2684
2685        if (is_peripheral_active(musb)) {
2686                /* FIXME force disconnect unless we know USB will wake
2687                 * the system up quickly enough to respond ...
2688                 */
2689        } else if (is_host_active(musb)) {
2690                /* we know all the children are suspended; sometimes
2691                 * they will even be wakeup-enabled.
2692                 */
2693        }
2694
2695        musb_save_context(musb);
2696
2697        spin_unlock_irqrestore(&musb->lock, flags);
2698        return 0;
2699}
2700
2701static int musb_resume(struct device *dev)
2702{
2703        struct musb *musb = dev_to_musb(dev);
2704        unsigned long flags;
2705        int error;
2706        u8 devctl;
2707        u8 mask;
2708
2709        /*
2710         * For static CMOS like DaVinci, register values were preserved
2711         * unless for some reason the whole SoC powered down or the USB
2712         * module got reset through the PSC (vs just being disabled).
2713         *
2714         * For the DSPS glue layer though, a full register restore has to
2715         * be done. As it shouldn't harm other platforms, we do it
2716         * unconditionally.
2717         */
2718
2719        musb_restore_context(musb);
2720
2721        devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2722        mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
2723        if ((devctl & mask) != (musb->context.devctl & mask))
2724                musb->port1_status = 0;
2725
2726        musb_enable_interrupts(musb);
2727        musb_platform_enable(musb);
2728
2729        spin_lock_irqsave(&musb->lock, flags);
2730        error = musb_run_resume_work(musb);
2731        if (error)
2732                dev_err(musb->controller, "resume work failed with %i\n",
2733                        error);
2734        spin_unlock_irqrestore(&musb->lock, flags);
2735
2736        pm_runtime_mark_last_busy(dev);
2737        pm_runtime_put_autosuspend(dev);
2738
2739        return 0;
2740}
2741
2742static int musb_runtime_suspend(struct device *dev)
2743{
2744        struct musb     *musb = dev_to_musb(dev);
2745
2746        musb_save_context(musb);
2747        musb->is_runtime_suspended = 1;
2748
2749        return 0;
2750}
2751
2752static int musb_runtime_resume(struct device *dev)
2753{
2754        struct musb *musb = dev_to_musb(dev);
2755        unsigned long flags;
2756        int error;
2757
2758        /*
2759         * When pm_runtime_get_sync() is called for the first time during
2760         * driver init, some of the structures used by the restore function
2761         * are not yet initialized. The clock still needs to be
2762         * enabled before any register access, though, so
2763         * pm_runtime_get_sync() has to be called anyway.
2764         * Also, a context restore without a prior save does not make
2765         * any sense.
2766         */
2767        if (!musb->is_initialized)
2768                return 0;
2769
2770        musb_restore_context(musb);
2771
2772        spin_lock_irqsave(&musb->lock, flags);
2773        error = musb_run_resume_work(musb);
2774        if (error)
2775                dev_err(musb->controller, "resume work failed with %i\n",
2776                        error);
2777        musb->is_runtime_suspended = 0;
2778        spin_unlock_irqrestore(&musb->lock, flags);
2779
2780        return 0;
2781}
2782
2783static const struct dev_pm_ops musb_dev_pm_ops = {
2784        .suspend        = musb_suspend,
2785        .resume         = musb_resume,
2786        .runtime_suspend = musb_runtime_suspend,
2787        .runtime_resume = musb_runtime_resume,
2788};
2789
2790#define MUSB_DEV_PM_OPS (&musb_dev_pm_ops)
2791#else
2792#define MUSB_DEV_PM_OPS NULL
2793#endif
2794
2795static struct platform_driver musb_driver = {
2796        .driver = {
2797                .name           = (char *)musb_driver_name,
2798                .bus            = &platform_bus_type,
2799                .pm             = MUSB_DEV_PM_OPS,
2800        },
2801        .probe          = musb_probe,
2802        .remove         = musb_remove,
2803};
2804
2805module_platform_driver(musb_driver);
2806