/*
 * USB Host Controller Driver for IMX21
 *
 * Copyright (C) 2006 Loping Dog Embedded Systems
 * Copyright (C) 2009 Martin Fuzzey
 * Originally written by Jay Monkman <jtm@lopingdog.com>
 * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */


/*
 * The i.MX21 USB hardware contains
 *    * 32 transfer descriptors (called ETDs)
 *    * 4KB of data memory
 *
 * The data memory is shared between the host and function controllers
 * (but this driver only supports the host controller)
 *
 * So setting up a transfer involves:
 *    * Allocating an ETD
 *    * Filling in the ETD with appropriate information
 *    * Allocating data memory (and putting the offset in the ETD)
 *    * Activating the ETD
 *    * Getting an interrupt when done
 *
 * An ETD is assigned to each active endpoint.
 *
 * Low resource (ETD and data memory) situations are handled differently for
 * isochronous and non-isochronous transactions:
 *
 * Non-ISOC transfers are queued if either ETDs or data memory are unavailable.
 *
 * ISOC transfers use 2 ETDs per endpoint to achieve double buffering.
 * They allocate both ETDs and data memory during URB submission
 * (and fail if either is unavailable).
 */
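
/*
 * Illustrative sketch (commentary only, not authoritative): the steps above
 * map roughly onto this driver's helpers as follows.
 *
 *    alloc_etd()                 - reserve one of the 32 ETDs
 *    setup_etd_dword0() et al.   - fill in the ETD via etd_writel()
 *    alloc_dmem()                - carve a chunk out of the 4KB data memory
 *    activate_etd()              - arm the ETD so the hardware runs it
 *    imx21_irq()/process_etds()  - on completion, isoc_etd_done() or
 *                                  nonisoc_etd_done() finishes the URB
 */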

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>

#include "imx21-hcd.h"

#ifdef CONFIG_DYNAMIC_DEBUG
#define DEBUG
#endif

#ifdef DEBUG
#define DEBUG_LOG_FRAME(imx21, etd, event) \
        (etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
#else
#define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
#endif

static const char hcd_name[] = "imx21-hcd";

static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
{
        return (struct imx21 *)hcd->hcd_priv;
}


/* =========================================== */
/* Hardware access helpers                     */
/* =========================================== */

static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask)
{
        void __iomem *reg = imx21->regs + offset;
        writel(readl(reg) | mask, reg);
}

static inline void clear_register_bits(struct imx21 *imx21,
        u32 offset, u32 mask)
{
        void __iomem *reg = imx21->regs + offset;
        writel(readl(reg) & ~mask, reg);
}

static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
        void __iomem *reg = imx21->regs + offset;

        if (readl(reg) & mask)
                writel(mask, reg);
}

static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
        void __iomem *reg = imx21->regs + offset;

        if (!(readl(reg) & mask))
                writel(mask, reg);
}

static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
{
        writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword));
}

static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
{
        return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword));
}

static inline int wrap_frame(int counter)
{
        return counter & 0xFFFF;
}

static inline int frame_after(int frame, int after)
{
        /* handle wrapping like jiffies time_after() */
        return (s16)((s16)after - (s16)frame) < 0;
}
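
/*
 * Worked example (commentary only): frame numbers are 16 bits, so the
 * comparison must survive wrap-around. frame_after(2, 0xFFFE) evaluates
 * (s16)(0xFFFE - 2) = -4 < 0, i.e. frame 2 is "after" frame 0xFFFE across
 * the wrap, exactly like the jiffies time_after() idiom.
 */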

static int imx21_hc_get_frame(struct usb_hcd *hcd)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);

        return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
}

static inline bool unsuitable_for_dma(dma_addr_t addr)
{
        return (addr & 3) != 0;
}
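
/*
 * Example (commentary only): the ETD DMA engine needs 32-bit aligned
 * buffers, so e.g. a transfer_dma ending in 0x2 is redirected to the PIO
 * or bounce-buffer fallbacks in activate_etd() below.
 */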

#include "imx21-dbg.c"

static void nonisoc_urb_completed_for_etd(
        struct imx21 *imx21, struct etd_priv *etd, int status);
static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
static void free_dmem(struct imx21 *imx21, struct etd_priv *etd);

/* =========================================== */
/* ETD management                              */
/* =========================================== */

static int alloc_etd(struct imx21 *imx21)
{
        int i;
        struct etd_priv *etd = imx21->etd;

        for (i = 0; i < USB_NUM_ETD; i++, etd++) {
                if (etd->alloc == 0) {
                        memset(etd, 0, sizeof(imx21->etd[0]));
                        etd->alloc = 1;
                        debug_etd_allocated(imx21);
                        return i;
                }
        }
        return -1;
}
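
/*
 * Note (commentary only): alloc_etd() returns -1 when all USB_NUM_ETD ETDs
 * are in use. For non-isoc URBs, imx21_hc_urb_enqueue() then parks the
 * endpoint on imx21->queue_for_etd and ep_idle() later hands a freed ETD to
 * the first queued endpoint; isoc submission instead fails with -ENOMEM,
 * matching the policy described at the top of this file.
 */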

static void disactivate_etd(struct imx21 *imx21, int num)
{
        int etd_mask = (1 << num);
        struct etd_priv *etd = &imx21->etd[num];

        writel(etd_mask, imx21->regs + USBH_ETDENCLR);
        clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
        writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR);
        clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);

        etd->active_count = 0;

        DEBUG_LOG_FRAME(imx21, etd, disactivated);
}

static void reset_etd(struct imx21 *imx21, int num)
{
        struct etd_priv *etd = imx21->etd + num;
        int i;

        disactivate_etd(imx21, num);

        for (i = 0; i < 4; i++)
                etd_writel(imx21, num, i, 0);
        etd->urb = NULL;
        etd->ep = NULL;
        etd->td = NULL;
        etd->bounce_buffer = NULL;
}

static void free_etd(struct imx21 *imx21, int num)
{
        if (num < 0)
                return;

        if (num >= USB_NUM_ETD) {
                dev_err(imx21->dev, "BAD etd=%d!\n", num);
                return;
        }
        if (imx21->etd[num].alloc == 0) {
                dev_err(imx21->dev, "ETD %d already free!\n", num);
                return;
        }

        debug_etd_freed(imx21);
        reset_etd(imx21, num);
        memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
}


static void setup_etd_dword0(struct imx21 *imx21,
        int etd_num, struct urb *urb, u8 dir, u16 maxpacket)
{
        etd_writel(imx21, etd_num, 0,
                ((u32) usb_pipedevice(urb->pipe) << DW0_ADDRESS) |
                ((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) |
                ((u32) dir << DW0_DIRECT) |
                ((u32) ((urb->dev->speed == USB_SPEED_LOW) ?
                        1 : 0) << DW0_SPEED) |
                ((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) |
                ((u32) maxpacket << DW0_MAXPKTSIZ));
}

/*
 * Copy a buffer to the controller's data memory.
 * We cannot use memcpy_toio() because the hardware requires 32-bit writes.
 */
static void copy_to_dmem(
        struct imx21 *imx21, int dmem_offset, void *src, int count)
{
        void __iomem *dmem = imx21->regs + USBOTG_DMEM + dmem_offset;
        u32 word = 0;
        u8 *p = src;
        int byte = 0;
        int i;

        for (i = 0; i < count; i++) {
                byte = i % 4;
                word += (*p++ << (byte * 8));
                if (byte == 3) {
                        writel(word, dmem);
                        dmem += 4;
                        word = 0;
                }
        }

        if (count && byte != 3)
                writel(word, dmem);
}
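
/*
 * Worked example (commentary only): copying 6 bytes issues two 32-bit
 * writes. Bytes 0-3 are packed little-endian into the first word and
 * written when byte == 3; the loop then exits with byte == 1, and the
 * trailing "count && byte != 3" test flushes the partial word holding
 * bytes 4-5 (upper bytes zero).
 */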

static void activate_etd(struct imx21 *imx21, int etd_num, u8 dir)
{
        u32 etd_mask = 1 << etd_num;
        struct etd_priv *etd = &imx21->etd[etd_num];

        if (etd->dma_handle && unsuitable_for_dma(etd->dma_handle)) {
                /* For non-aligned isoc the condition below is always true */
                if (etd->len <= etd->dmem_size) {
                        /* Fits into data memory, use PIO */
                        if (dir != TD_DIR_IN) {
                                copy_to_dmem(imx21,
                                                etd->dmem_offset,
                                                etd->cpu_buffer, etd->len);
                        }
                        etd->dma_handle = 0;

                } else {
                        /* Too big for data memory, use bounce buffer */
                        enum dma_data_direction dmadir;

                        if (dir == TD_DIR_IN) {
                                dmadir = DMA_FROM_DEVICE;
                                etd->bounce_buffer = kmalloc(etd->len,
                                                                GFP_ATOMIC);
                        } else {
                                dmadir = DMA_TO_DEVICE;
                                etd->bounce_buffer = kmemdup(etd->cpu_buffer,
                                                                etd->len,
                                                                GFP_ATOMIC);
                        }
                        if (!etd->bounce_buffer) {
                                dev_err(imx21->dev, "failed bounce alloc\n");
                                goto err_bounce_alloc;
                        }

                        etd->dma_handle =
                                dma_map_single(imx21->dev,
                                                etd->bounce_buffer,
                                                etd->len,
                                                dmadir);
                        if (dma_mapping_error(imx21->dev, etd->dma_handle)) {
                                dev_err(imx21->dev, "failed bounce map\n");
                                goto err_bounce_map;
                        }
                }
        }

        clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
        set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
        clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
        clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);

        if (etd->dma_handle) {
                set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
                clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
                clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
                writel(etd->dma_handle, imx21->regs + USB_ETDSMSA(etd_num));
                set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
        } else {
                if (dir != TD_DIR_IN) {
                        /* need to set for ZLP and PIO */
                        set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
                        set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
                }
        }

        DEBUG_LOG_FRAME(imx21, etd, activated);

#ifdef DEBUG
        if (!etd->active_count) {
                int i;

                etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
                etd->disactivated_frame = -1;
                etd->last_int_frame = -1;
                etd->last_req_frame = -1;

                for (i = 0; i < 4; i++)
                        etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
        }
#endif

        etd->active_count = 1;
        writel(etd_mask, imx21->regs + USBH_ETDENSET);
        return;

err_bounce_map:
        kfree(etd->bounce_buffer);

err_bounce_alloc:
        free_dmem(imx21, etd);
        nonisoc_urb_completed_for_etd(imx21, etd, -ENOMEM);
}
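
/*
 * Sketch of the fallback above (commentary only): for a DMA-unsuitable
 * buffer, activate_etd() uses PIO when the whole transfer fits in its DMEM
 * allocation (etd->len <= etd->dmem_size), and otherwise a kmalloc'd bounce
 * buffer that stays DMA-mapped for the duration of the transfer; for
 * non-isoc IN transfers the data is copied back and the buffer freed in
 * nonisoc_etd_done().
 */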

/* =========================================== */
/* Data memory management                      */
/* =========================================== */

static int alloc_dmem(struct imx21 *imx21, unsigned int size,
                      struct usb_host_endpoint *ep)
{
        unsigned int offset = 0;
        struct imx21_dmem_area *area;
        struct imx21_dmem_area *tmp;

        size += (~size + 1) & 0x3; /* Round to 4 byte multiple */

        if (size > DMEM_SIZE) {
                dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
                        size, DMEM_SIZE);
                return -EINVAL;
        }

        list_for_each_entry(tmp, &imx21->dmem_list, list) {
                if ((size + offset) < offset)
                        goto fail;
                if ((size + offset) <= tmp->offset)
                        break;
                offset = tmp->size + tmp->offset;
                if ((offset + size) > DMEM_SIZE)
                        goto fail;
        }

        area = kmalloc(sizeof(struct imx21_dmem_area), GFP_ATOMIC);
        if (area == NULL)
                return -ENOMEM;

        area->ep = ep;
        area->offset = offset;
        area->size = size;
        list_add_tail(&area->list, &tmp->list);
        debug_dmem_allocated(imx21, size);
        return offset;

fail:
        return -ENOMEM;
}
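
/*
 * Example (commentary only): dmem_list is kept sorted by offset, so the
 * allocation above is first-fit. With existing areas at [0,64) and
 * [96,160), a request for 30 bytes is first rounded up to 32 and then
 * placed at offset 64, in the gap before the area starting at 96.
 */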

/* Memory now available for a queued ETD - activate it */
static void activate_queued_etd(struct imx21 *imx21,
        struct etd_priv *etd, u32 dmem_offset)
{
        struct urb_priv *urb_priv = etd->urb->hcpriv;
        int etd_num = etd - &imx21->etd[0];
        u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD;
        u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;

        dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n",
                etd_num);
        etd_writel(imx21, etd_num, 1,
            ((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset);

        etd->dmem_offset = dmem_offset;
        urb_priv->active = 1;
        activate_etd(imx21, etd_num, dir);
}

static void free_dmem(struct imx21 *imx21, struct etd_priv *etd)
{
        struct imx21_dmem_area *area;
        struct etd_priv *tmp;
        int found = 0;
        int offset;

        if (!etd->dmem_size)
                return;
        etd->dmem_size = 0;

        offset = etd->dmem_offset;
        list_for_each_entry(area, &imx21->dmem_list, list) {
                if (area->offset == offset) {
                        debug_dmem_freed(imx21, area->size);
                        list_del(&area->list);
                        kfree(area);
                        found = 1;
                        break;
                }
        }

        if (!found) {
                dev_err(imx21->dev,
                        "Trying to free unallocated DMEM %d\n", offset);
                return;
        }

        /* Try again to allocate memory for anything we've queued */
        list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
                offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
                if (offset >= 0) {
                        list_del(&etd->queue);
                        activate_queued_etd(imx21, etd, (u32)offset);
                }
        }
}

static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
{
        struct imx21_dmem_area *area, *tmp;

        list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
                if (area->ep == ep) {
                        dev_err(imx21->dev,
                                "Active DMEM %d for disabled ep=%p\n",
                                area->offset, ep);
                        list_del(&area->list);
                        kfree(area);
                }
        }
}


/* =========================================== */
/* Endpoint handling                           */
/* =========================================== */

/* Endpoint now idle - release its ETD(s) or assign to queued request */
static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
{
        int i;

        for (i = 0; i < NUM_ISO_ETDS; i++) {
                int etd_num = ep_priv->etd[i];
                struct etd_priv *etd;

                if (etd_num < 0)
                        continue;

                etd = &imx21->etd[etd_num];
                ep_priv->etd[i] = -1;

                free_dmem(imx21, etd); /* for isoc */

                if (list_empty(&imx21->queue_for_etd)) {
                        free_etd(imx21, etd_num);
                        continue;
                }

                dev_dbg(imx21->dev,
                        "assigning idle etd %d for queued request\n", etd_num);
                ep_priv = list_first_entry(&imx21->queue_for_etd,
                        struct ep_priv, queue);
                list_del(&ep_priv->queue);
                reset_etd(imx21, etd_num);
                ep_priv->waiting_etd = 0;
                ep_priv->etd[i] = etd_num;

                if (list_empty(&ep_priv->ep->urb_list)) {
                        dev_err(imx21->dev, "No urb for queued ep!\n");
                        continue;
                }
                schedule_nonisoc_etd(imx21, list_first_entry(
                        &ep_priv->ep->urb_list, struct urb, urb_list));
        }
}

static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status)
__releases(imx21->lock)
__acquires(imx21->lock)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        struct ep_priv *ep_priv = urb->ep->hcpriv;
        struct urb_priv *urb_priv = urb->hcpriv;

        debug_urb_completed(imx21, urb, status);
        dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);

        kfree(urb_priv->isoc_td);
        kfree(urb->hcpriv);
        urb->hcpriv = NULL;
        usb_hcd_unlink_urb_from_ep(hcd, urb);
        spin_unlock(&imx21->lock);
        usb_hcd_giveback_urb(hcd, urb, status);
        spin_lock(&imx21->lock);
        if (list_empty(&ep_priv->ep->urb_list))
                ep_idle(imx21, ep_priv);
}

static void nonisoc_urb_completed_for_etd(
        struct imx21 *imx21, struct etd_priv *etd, int status)
{
        struct usb_host_endpoint *ep = etd->ep;

        urb_done(imx21->hcd, etd->urb, status);
        etd->urb = NULL;

        if (!list_empty(&ep->urb_list)) {
                struct urb *urb = list_first_entry(
                                        &ep->urb_list, struct urb, urb_list);

                dev_vdbg(imx21->dev, "next URB %p\n", urb);
                schedule_nonisoc_etd(imx21, urb);
        }
}


/* =========================================== */
/* ISOC Handling ...                           */
/* =========================================== */

static void schedule_isoc_etds(struct usb_hcd *hcd,
        struct usb_host_endpoint *ep)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        struct ep_priv *ep_priv = ep->hcpriv;
        struct etd_priv *etd;
        struct urb_priv *urb_priv;
        struct td *td;
        int etd_num;
        int i;
        int cur_frame;
        u8 dir;

        for (i = 0; i < NUM_ISO_ETDS; i++) {
too_late:
                if (list_empty(&ep_priv->td_list))
                        break;

                etd_num = ep_priv->etd[i];
                if (etd_num < 0)
                        break;

                etd = &imx21->etd[etd_num];
                if (etd->urb)
                        continue;

                td = list_entry(ep_priv->td_list.next, struct td, list);
                list_del(&td->list);
                urb_priv = td->urb->hcpriv;

                cur_frame = imx21_hc_get_frame(hcd);
                if (frame_after(cur_frame, td->frame)) {
                        dev_dbg(imx21->dev, "isoc too late frame %d > %d\n",
                                cur_frame, td->frame);
                        urb_priv->isoc_status = -EXDEV;
                        td->urb->iso_frame_desc[
                                td->isoc_index].actual_length = 0;
                        td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV;
                        if (--urb_priv->isoc_remaining == 0)
                                urb_done(hcd, td->urb, urb_priv->isoc_status);
                        goto too_late;
                }

                urb_priv->active = 1;
                etd->td = td;
                etd->ep = td->ep;
                etd->urb = td->urb;
                etd->len = td->len;
                etd->dma_handle = td->dma_handle;
                etd->cpu_buffer = td->cpu_buffer;

                debug_isoc_submitted(imx21, cur_frame, td);

                dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN;
                setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size);
                etd_writel(imx21, etd_num, 1, etd->dmem_offset);
                etd_writel(imx21, etd_num, 2,
                        (TD_NOTACCESSED << DW2_COMPCODE) |
                        ((td->frame & 0xFFFF) << DW2_STARTFRM));
                etd_writel(imx21, etd_num, 3,
                        (TD_NOTACCESSED << DW3_COMPCODE0) |
                        (td->len << DW3_PKTLEN0));

                activate_etd(imx21, etd_num, dir);
        }
}

static void isoc_etd_done(struct usb_hcd *hcd, int etd_num)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        int etd_mask = 1 << etd_num;
        struct etd_priv *etd = imx21->etd + etd_num;
        struct urb *urb = etd->urb;
        struct urb_priv *urb_priv = urb->hcpriv;
        struct td *td = etd->td;
        struct usb_host_endpoint *ep = etd->ep;
        int isoc_index = td->isoc_index;
        unsigned int pipe = urb->pipe;
        int dir_in = usb_pipein(pipe);
        int cc;
        int bytes_xfrd;

        disactivate_etd(imx21, etd_num);

        cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf;
        bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff;

        /*
         * Input doesn't always fill the buffer, so don't generate an error
         * when this happens.
         */
        if (dir_in && (cc == TD_DATAUNDERRUN))
                cc = TD_CC_NOERROR;

        if (cc == TD_NOTACCESSED)
                bytes_xfrd = 0;

        debug_isoc_completed(imx21,
                imx21_hc_get_frame(hcd), td, cc, bytes_xfrd);
        if (cc) {
                urb_priv->isoc_status = -EXDEV;
                dev_dbg(imx21->dev,
                        "bad iso cc=0x%X frame=%d sched frame=%d "
                        "cnt=%d len=%d urb=%p etd=%d index=%d\n",
                        cc, imx21_hc_get_frame(hcd), td->frame,
                        bytes_xfrd, td->len, urb, etd_num, isoc_index);
        }

        if (dir_in) {
                clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
                if (!etd->dma_handle)
                        memcpy_fromio(etd->cpu_buffer,
                                imx21->regs + USBOTG_DMEM + etd->dmem_offset,
                                bytes_xfrd);
        }

        urb->actual_length += bytes_xfrd;
        urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
        urb->iso_frame_desc[isoc_index].status = cc_to_error[cc];

        etd->td = NULL;
        etd->urb = NULL;
        etd->ep = NULL;

        if (--urb_priv->isoc_remaining == 0)
                urb_done(hcd, urb, urb_priv->isoc_status);

        schedule_isoc_etds(hcd, ep);
}

static struct ep_priv *alloc_isoc_ep(
        struct imx21 *imx21, struct usb_host_endpoint *ep)
{
        struct ep_priv *ep_priv;
        int i;

        ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
        if (!ep_priv)
                return NULL;

        for (i = 0; i < NUM_ISO_ETDS; i++)
                ep_priv->etd[i] = -1;

        INIT_LIST_HEAD(&ep_priv->td_list);
        ep_priv->ep = ep;
        ep->hcpriv = ep_priv;
        return ep_priv;
}

static int alloc_isoc_etds(struct imx21 *imx21, struct ep_priv *ep_priv)
{
        int i, j;
        int etd_num;

        /* Allocate the ETDs if required */
        for (i = 0; i < NUM_ISO_ETDS; i++) {
                if (ep_priv->etd[i] < 0) {
                        etd_num = alloc_etd(imx21);
                        if (etd_num < 0)
                                goto alloc_etd_failed;

                        ep_priv->etd[i] = etd_num;
                        imx21->etd[etd_num].ep = ep_priv->ep;
                }
        }
        return 0;

alloc_etd_failed:
        dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
        for (j = 0; j < i; j++) {
                free_etd(imx21, ep_priv->etd[j]);
                ep_priv->etd[j] = -1;
        }
        return -ENOMEM;
}

static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
                                     struct usb_host_endpoint *ep,
                                     struct urb *urb, gfp_t mem_flags)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        struct urb_priv *urb_priv;
        unsigned long flags;
        struct ep_priv *ep_priv;
        struct td *td = NULL;
        int i;
        int ret;
        int cur_frame;
        u16 maxpacket;

        urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
        if (urb_priv == NULL)
                return -ENOMEM;

        urb_priv->isoc_td = kcalloc(urb->number_of_packets,
                sizeof(struct td), mem_flags);
        if (urb_priv->isoc_td == NULL) {
                ret = -ENOMEM;
                goto alloc_td_failed;
        }

        spin_lock_irqsave(&imx21->lock, flags);

        if (ep->hcpriv == NULL) {
                ep_priv = alloc_isoc_ep(imx21, ep);
                if (ep_priv == NULL) {
                        ret = -ENOMEM;
                        goto alloc_ep_failed;
                }
        } else {
                ep_priv = ep->hcpriv;
        }

        ret = alloc_isoc_etds(imx21, ep_priv);
        if (ret)
                goto alloc_etd_failed;

        ret = usb_hcd_link_urb_to_ep(hcd, urb);
        if (ret)
                goto link_failed;

        urb->status = -EINPROGRESS;
        urb->actual_length = 0;
        urb->error_count = 0;
        urb->hcpriv = urb_priv;
        urb_priv->ep = ep;

        /* allocate data memory for largest packets if not already done */
        maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
        for (i = 0; i < NUM_ISO_ETDS; i++) {
                struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];

                if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) {
                        /* not sure if this can really occur.... */
                        dev_err(imx21->dev, "increasing isoc buffer %d->%d\n",
                                etd->dmem_size, maxpacket);
                        ret = -EMSGSIZE;
                        goto alloc_dmem_failed;
                }

                if (etd->dmem_size == 0) {
                        etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
                        if (etd->dmem_offset < 0) {
                                dev_dbg(imx21->dev, "failed alloc isoc dmem\n");
                                ret = -EAGAIN;
                                goto alloc_dmem_failed;
                        }
                        etd->dmem_size = maxpacket;
                }
        }

        /* calculate frame */
        cur_frame = imx21_hc_get_frame(hcd);
        i = 0;
        if (list_empty(&ep_priv->td_list)) {
                urb->start_frame = wrap_frame(cur_frame + 5);
        } else {
                urb->start_frame = wrap_frame(list_entry(ep_priv->td_list.prev,
                                struct td, list)->frame + urb->interval);

                if (frame_after(cur_frame, urb->start_frame)) {
                        dev_dbg(imx21->dev,
                                "enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
                                urb->start_frame, cur_frame,
                                (urb->transfer_flags & URB_ISO_ASAP) != 0);
                        i = DIV_ROUND_UP(wrap_frame(
                                        cur_frame - urb->start_frame),
                                        urb->interval);

                        /* Treat underruns as if URB_ISO_ASAP was set */
                        if ((urb->transfer_flags & URB_ISO_ASAP) ||
                                        i >= urb->number_of_packets) {
                                urb->start_frame = wrap_frame(urb->start_frame
                                                + i * urb->interval);
                                i = 0;
                        }
                }
        }

        /* set up transfers */
        urb_priv->isoc_remaining = urb->number_of_packets - i;
        td = urb_priv->isoc_td;
        for (; i < urb->number_of_packets; i++, td++) {
                unsigned int offset = urb->iso_frame_desc[i].offset;

                td->ep = ep;
                td->urb = urb;
                td->len = urb->iso_frame_desc[i].length;
                td->isoc_index = i;
                td->frame = wrap_frame(urb->start_frame + urb->interval * i);
                td->dma_handle = urb->transfer_dma + offset;
                td->cpu_buffer = urb->transfer_buffer + offset;
                list_add_tail(&td->list, &ep_priv->td_list);
        }

        dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
                urb->number_of_packets, urb->start_frame, (td - 1)->frame);

        debug_urb_submitted(imx21, urb);
        schedule_isoc_etds(hcd, ep);

        spin_unlock_irqrestore(&imx21->lock, flags);
        return 0;

alloc_dmem_failed:
        usb_hcd_unlink_urb_from_ep(hcd, urb);

link_failed:
alloc_etd_failed:
alloc_ep_failed:
        spin_unlock_irqrestore(&imx21->lock, flags);
        kfree(urb_priv->isoc_td);

alloc_td_failed:
        kfree(urb_priv);
        return ret;
}

static void dequeue_isoc_urb(struct imx21 *imx21,
        struct urb *urb, struct ep_priv *ep_priv)
{
        struct urb_priv *urb_priv = urb->hcpriv;
        struct td *td, *tmp;
        int i;

        if (urb_priv->active) {
                for (i = 0; i < NUM_ISO_ETDS; i++) {
                        int etd_num = ep_priv->etd[i];

                        if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
                                struct etd_priv *etd = imx21->etd + etd_num;

                                reset_etd(imx21, etd_num);
                                free_dmem(imx21, etd);
                        }
                }
        }

        list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) {
                if (td->urb == urb) {
                        dev_vdbg(imx21->dev, "removing td %p\n", td);
                        list_del(&td->list);
                }
        }
}

/* =========================================== */
/* NON ISOC Handling ...                       */
/* =========================================== */

static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
{
        unsigned int pipe = urb->pipe;
        struct urb_priv *urb_priv = urb->hcpriv;
        struct ep_priv *ep_priv = urb_priv->ep->hcpriv;
        int state = urb_priv->state;
        int etd_num = ep_priv->etd[0];
        struct etd_priv *etd;
        u32 count;
        u16 etd_buf_size;
        u16 maxpacket;
        u8 dir;
        u8 bufround;
        u8 datatoggle;
        u8 interval = 0;
        u8 relpolpos = 0;

        if (etd_num < 0) {
                dev_err(imx21->dev, "No valid ETD\n");
                return;
        }
        if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num))
                dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num);

        etd = &imx21->etd[etd_num];
        maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
        if (!maxpacket)
                maxpacket = 8;

        if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
                if (state == US_CTRL_SETUP) {
                        dir = TD_DIR_SETUP;
                        if (unsuitable_for_dma(urb->setup_dma))
                                usb_hcd_unmap_urb_setup_for_dma(imx21->hcd,
                                        urb);
                        etd->dma_handle = urb->setup_dma;
                        etd->cpu_buffer = urb->setup_packet;
                        bufround = 0;
                        count = 8;
                        datatoggle = TD_TOGGLE_DATA0;
                } else {        /* US_CTRL_ACK */
                        dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
                        bufround = 0;
                        count = 0;
                        datatoggle = TD_TOGGLE_DATA1;
                }
        } else {
                dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
                bufround = (dir == TD_DIR_IN) ? 1 : 0;
                if (unsuitable_for_dma(urb->transfer_dma))
                        usb_hcd_unmap_urb_for_dma(imx21->hcd, urb);

                etd->dma_handle = urb->transfer_dma;
                etd->cpu_buffer = urb->transfer_buffer;
                if (usb_pipebulk(pipe) && (state == US_BULK0))
                        count = 0;
                else
                        count = urb->transfer_buffer_length;

                if (usb_pipecontrol(pipe)) {
                        datatoggle = TD_TOGGLE_DATA1;
                } else {
                        if (usb_gettoggle(
                                        urb->dev,
                                        usb_pipeendpoint(urb->pipe),
                                        usb_pipeout(urb->pipe)))
                                datatoggle = TD_TOGGLE_DATA1;
                        else
                                datatoggle = TD_TOGGLE_DATA0;
                }
        }

        etd->urb = urb;
        etd->ep = urb_priv->ep;
        etd->len = count;

        if (usb_pipeint(pipe)) {
                interval = urb->interval;
                relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff;
        }

        /* Write ETD to device memory */
        setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);

        etd_writel(imx21, etd_num, 2,
                ((u32) interval << DW2_POLINTERV) |
                ((u32) relpolpos << DW2_RELPOLPOS) |
                ((u32) dir << DW2_DIRPID) |
                ((u32) bufround << DW2_BUFROUND) |
                ((u32) datatoggle << DW2_DATATOG) |
                ((u32) TD_NOTACCESSED << DW2_COMPCODE));

        /*
         * DMA will always transfer buffer size even if TOBYCNT in DWORD3
         * is smaller. Make sure we don't overrun the buffer!
         */
        if (count && count < maxpacket)
                etd_buf_size = count;
        else
                etd_buf_size = maxpacket;

        etd_writel(imx21, etd_num, 3,
                ((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count);

        if (!count)
                etd->dma_handle = 0;

        /* allocate x and y buffer space at once */
        etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
        etd->dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
        if (etd->dmem_offset < 0) {
                /* Setup everything we can in HW and update when we get DMEM */
                etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16);

                dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num);
                debug_urb_queued_for_dmem(imx21, urb);
                list_add_tail(&etd->queue, &imx21->queue_for_dmem);
                return;
        }

        etd_writel(imx21, etd_num, 1,
                (((u32) etd->dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
                (u32) etd->dmem_offset);

        urb_priv->active = 1;

        /* enable the ETD to kick off transfer */
        dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
                etd_num, count, dir != TD_DIR_IN ? "out" : "in");
        activate_etd(imx21, etd_num, dir);
}
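
/*
 * Example (commentary only): the "x and y buffer" sizing above gives
 * double buffering for multi-packet transfers. A 256 byte bulk transfer
 * with maxpacket 64 reserves 128 bytes of DMEM (two 64 byte buffers at
 * dmem_offset and dmem_offset + maxpacket, see DWORD 1), while a 32 byte
 * transfer reserves a single 64 byte buffer.
 */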

static void nonisoc_etd_done(struct usb_hcd *hcd, int etd_num)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        struct etd_priv *etd = &imx21->etd[etd_num];
        struct urb *urb = etd->urb;
        u32 etd_mask = 1 << etd_num;
        struct urb_priv *urb_priv = urb->hcpriv;
        int dir;
        int cc;
        u32 bytes_xfrd;
        int etd_done;

        disactivate_etd(imx21, etd_num);

        dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
        cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
        bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);

        /* save toggle carry */
        usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                      usb_pipeout(urb->pipe),
                      (etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1);

        if (dir == TD_DIR_IN) {
                clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
                clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);

                if (etd->bounce_buffer) {
                        memcpy(etd->cpu_buffer, etd->bounce_buffer, bytes_xfrd);
                        dma_unmap_single(imx21->dev,
                                etd->dma_handle, etd->len, DMA_FROM_DEVICE);
                } else if (!etd->dma_handle && bytes_xfrd) { /* PIO */
                        memcpy_fromio(etd->cpu_buffer,
                                imx21->regs + USBOTG_DMEM + etd->dmem_offset,
                                bytes_xfrd);
                }
        }

        kfree(etd->bounce_buffer);
        etd->bounce_buffer = NULL;
        free_dmem(imx21, etd);

        urb->error_count = 0;
        if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
                        && (cc == TD_DATAUNDERRUN))
                cc = TD_CC_NOERROR;

        if (cc != 0)
                dev_vdbg(imx21->dev, "cc is 0x%x\n", cc);

        etd_done = (cc_to_error[cc] != 0);      /* stop if error */

        switch (usb_pipetype(urb->pipe)) {
        case PIPE_CONTROL:
                switch (urb_priv->state) {
                case US_CTRL_SETUP:
                        if (urb->transfer_buffer_length > 0)
                                urb_priv->state = US_CTRL_DATA;
                        else
                                urb_priv->state = US_CTRL_ACK;
                        break;
                case US_CTRL_DATA:
                        urb->actual_length += bytes_xfrd;
                        urb_priv->state = US_CTRL_ACK;
                        break;
                case US_CTRL_ACK:
                        etd_done = 1;
                        break;
                default:
                        dev_err(imx21->dev,
                                "Invalid pipe state %d\n", urb_priv->state);
                        etd_done = 1;
                        break;
                }
                break;

        case PIPE_BULK:
                urb->actual_length += bytes_xfrd;
                if ((urb_priv->state == US_BULK)
                    && (urb->transfer_flags & URB_ZERO_PACKET)
                    && urb->transfer_buffer_length > 0
                    && ((urb->transfer_buffer_length %
                         usb_maxpacket(urb->dev, urb->pipe,
                                       usb_pipeout(urb->pipe))) == 0)) {
                        /* need a 0-packet */
                        urb_priv->state = US_BULK0;
                } else {
                        etd_done = 1;
                }
                break;

        case PIPE_INTERRUPT:
                urb->actual_length += bytes_xfrd;
                etd_done = 1;
                break;
        }

        if (etd_done) {
                nonisoc_urb_completed_for_etd(imx21, etd, cc_to_error[cc]);
        } else {
                dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
                schedule_nonisoc_etd(imx21, urb);
        }
}
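
/*
 * Sketch (commentary only): for control pipes, schedule_nonisoc_etd() and
 * nonisoc_etd_done() together implement a small per-URB state machine:
 *
 *    US_CTRL_SETUP - 8 byte SETUP packet, DATA0
 *    US_CTRL_DATA  - data stage (skipped if transfer_buffer_length == 0),
 *                    always DATA1 for control
 *    US_CTRL_ACK   - zero-length handshake in the opposite direction, DATA1
 *
 * Bulk URBs use US_BULK, plus US_BULK0 when URB_ZERO_PACKET requires a
 * terminating zero-length packet.
 */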


static struct ep_priv *alloc_ep(void)
{
        int i;
        struct ep_priv *ep_priv;

        ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
        if (!ep_priv)
                return NULL;

        for (i = 0; i < NUM_ISO_ETDS; ++i)
                ep_priv->etd[i] = -1;

        return ep_priv;
}

static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
                                struct urb *urb, gfp_t mem_flags)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        struct usb_host_endpoint *ep = urb->ep;
        struct urb_priv *urb_priv;
        struct ep_priv *ep_priv;
        struct etd_priv *etd;
        int ret;
        unsigned long flags;

        dev_vdbg(imx21->dev,
                "enqueue urb=%p ep=%p len=%d "
                "buffer=%p dma=%08X setupBuf=%p setupDma=%08X\n",
                urb, ep,
                urb->transfer_buffer_length,
                urb->transfer_buffer, urb->transfer_dma,
                urb->setup_packet, urb->setup_dma);

        if (usb_pipeisoc(urb->pipe))
                return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags);

        urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
        if (!urb_priv)
                return -ENOMEM;

        spin_lock_irqsave(&imx21->lock, flags);

        ep_priv = ep->hcpriv;
        if (ep_priv == NULL) {
                ep_priv = alloc_ep();
                if (!ep_priv) {
                        ret = -ENOMEM;
                        goto failed_alloc_ep;
                }
                ep->hcpriv = ep_priv;
                ep_priv->ep = ep;
        }

        ret = usb_hcd_link_urb_to_ep(hcd, urb);
        if (ret)
                goto failed_link;

        urb->status = -EINPROGRESS;
        urb->actual_length = 0;
        urb->error_count = 0;
        urb->hcpriv = urb_priv;
        urb_priv->ep = ep;

        switch (usb_pipetype(urb->pipe)) {
        case PIPE_CONTROL:
                urb_priv->state = US_CTRL_SETUP;
                break;
        case PIPE_BULK:
                urb_priv->state = US_BULK;
                break;
        }

        debug_urb_submitted(imx21, urb);
        if (ep_priv->etd[0] < 0) {
                if (ep_priv->waiting_etd) {
                        dev_dbg(imx21->dev,
                                "no ETD available already queued %p\n",
                                ep_priv);
                        debug_urb_queued_for_etd(imx21, urb);
                        goto out;
                }
                ep_priv->etd[0] = alloc_etd(imx21);
                if (ep_priv->etd[0] < 0) {
                        dev_dbg(imx21->dev,
                                "no ETD available queueing %p\n", ep_priv);
                        debug_urb_queued_for_etd(imx21, urb);
                        list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
                        ep_priv->waiting_etd = 1;
                        goto out;
                }
        }

        /* Schedule if no URB already active for this endpoint */
        etd = &imx21->etd[ep_priv->etd[0]];
        if (etd->urb == NULL) {
                DEBUG_LOG_FRAME(imx21, etd, last_req);
                schedule_nonisoc_etd(imx21, urb);
        }

out:
        spin_unlock_irqrestore(&imx21->lock, flags);
        return 0;

failed_link:
failed_alloc_ep:
        spin_unlock_irqrestore(&imx21->lock, flags);
        kfree(urb_priv);
        return ret;
}

static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
                                int status)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        unsigned long flags;
        struct usb_host_endpoint *ep;
        struct ep_priv *ep_priv;
        struct urb_priv *urb_priv = urb->hcpriv;
        int ret = -EINVAL;

        dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n",
                urb, usb_pipeisoc(urb->pipe), status);

        spin_lock_irqsave(&imx21->lock, flags);

        ret = usb_hcd_check_unlink_urb(hcd, urb, status);
        if (ret)
                goto fail;
        ep = urb_priv->ep;
        ep_priv = ep->hcpriv;

        debug_urb_unlinked(imx21, urb);

        if (usb_pipeisoc(urb->pipe)) {
                dequeue_isoc_urb(imx21, urb, ep_priv);
                schedule_isoc_etds(hcd, ep);
        } else if (urb_priv->active) {
                int etd_num = ep_priv->etd[0];

                if (etd_num != -1) {
                        struct etd_priv *etd = &imx21->etd[etd_num];

                        disactivate_etd(imx21, etd_num);
                        free_dmem(imx21, etd);
                        etd->urb = NULL;
                        kfree(etd->bounce_buffer);
                        etd->bounce_buffer = NULL;
                }
        }

        urb_done(hcd, urb, status);

        spin_unlock_irqrestore(&imx21->lock, flags);
        return 0;

fail:
        spin_unlock_irqrestore(&imx21->lock, flags);
        return ret;
}

/* =========================================== */
/* Interrupt dispatch                          */
/* =========================================== */

static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
{
        int etd_num;
        int enable_sof_int = 0;
        unsigned long flags;

        spin_lock_irqsave(&imx21->lock, flags);

        for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) {
                u32 etd_mask = 1 << etd_num;
                u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask;
                u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask;
                struct etd_priv *etd = &imx21->etd[etd_num];

                if (done) {
                        DEBUG_LOG_FRAME(imx21, etd, last_int);
                } else {
/*
 * Kludge warning!
 *
 * When multiple transfers are using the bus we sometimes get into a state
 * where the transfer has completed (the CC field of the ETD is != 0x0F),
 * the ETD has self disabled but the ETDDONESTAT flag is not set
 * (and hence no interrupt occurs).
 * This causes the transfer in question to hang.
 * The kludge below checks for this condition at each SOF and processes any
 * blocked ETDs (after an arbitrary 10 frame wait).
 *
 * With a single active transfer the usbtest test suite will run for days
 * without the kludge.
 * With other bus activity (e.g. mass storage) even just test1 will hang
 * without the kludge.
 */
                        u32 dword0;
                        int cc;

                        if (etd->active_count && !enabled) /* suspicious... */
                                enable_sof_int = 1;

                        if (!sof || enabled || !etd->active_count)
                                continue;

                        cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE;
                        if (cc == TD_NOTACCESSED)
                                continue;

                        if (++etd->active_count < 10)
                                continue;

                        dword0 = etd_readl(imx21, etd_num, 0);
                        dev_dbg(imx21->dev,
                                "unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n",
                                etd_num, dword0 & 0x7F,
                                (dword0 >> DW0_ENDPNT) & 0x0F,
                                cc);

#ifdef DEBUG
                        dev_dbg(imx21->dev,
                                "frame: act=%d disact=%d"
                                " int=%d req=%d cur=%d\n",
                                etd->activated_frame,
                                etd->disactivated_frame,
                                etd->last_int_frame,
                                etd->last_req_frame,
                                readl(imx21->regs + USBH_FRMNUB));
                        imx21->debug_unblocks++;
#endif
                        etd->active_count = 0;
/* End of kludge */
                }

                if (etd->ep == NULL || etd->urb == NULL) {
                        dev_dbg(imx21->dev,
                                "Interrupt for unexpected etd %d"
                                " ep=%p urb=%p\n",
                                etd_num, etd->ep, etd->urb);
                        disactivate_etd(imx21, etd_num);
                        continue;
                }

                if (usb_pipeisoc(etd->urb->pipe))
                        isoc_etd_done(hcd, etd_num);
                else
                        nonisoc_etd_done(hcd, etd_num);
        }

        /* only enable SOF interrupt if it may be needed for the kludge */
        if (enable_sof_int)
                set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
        else
                clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);

        spin_unlock_irqrestore(&imx21->lock, flags);
}

static irqreturn_t imx21_irq(struct usb_hcd *hcd)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        u32 ints = readl(imx21->regs + USBH_SYSISR);

        if (ints & USBH_SYSIEN_HERRINT)
                dev_dbg(imx21->dev, "Scheduling error\n");

        if (ints & USBH_SYSIEN_SORINT)
                dev_dbg(imx21->dev, "Scheduling overrun\n");

        if (ints & (USBH_SYSISR_DONEINT | USBH_SYSISR_SOFINT))
                process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);

        writel(ints, imx21->regs + USBH_SYSISR);
        return IRQ_HANDLED;
}

static void imx21_hc_endpoint_disable(struct usb_hcd *hcd,
                                      struct usb_host_endpoint *ep)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        unsigned long flags;
        struct ep_priv *ep_priv;
        int i;

        if (ep == NULL)
                return;

        spin_lock_irqsave(&imx21->lock, flags);
        ep_priv = ep->hcpriv;
        dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);

        if (!list_empty(&ep->urb_list))
                dev_dbg(imx21->dev, "ep's URB list is not empty\n");

        if (ep_priv != NULL) {
                for (i = 0; i < NUM_ISO_ETDS; i++) {
                        if (ep_priv->etd[i] > -1)
                                dev_dbg(imx21->dev, "free etd %d for disable\n",
                                        ep_priv->etd[i]);

                        free_etd(imx21, ep_priv->etd[i]);
                }
                kfree(ep_priv);
                ep->hcpriv = NULL;
        }

        for (i = 0; i < USB_NUM_ETD; i++) {
                if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
                        dev_err(imx21->dev,
                                "Active etd %d for disabled ep=%p!\n", i, ep);
                        free_etd(imx21, i);
                }
        }
        free_epdmem(imx21, ep);
        spin_unlock_irqrestore(&imx21->lock, flags);
}
1468
/* =========================================== */
/* Hub handling                                 */
/* =========================================== */

static int get_hub_descriptor(struct usb_hcd *hcd,
			      struct usb_hub_descriptor *desc)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	desc->bDescriptorType = 0x29;	/* HUB descriptor */
	desc->bHubContrCurrent = 0;

	desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA)
		& USBH_ROOTHUBA_NDNSTMPRT_MASK;
	desc->bDescLength = 9;
	desc->bPwrOn2PwrGood = 0;
	desc->wHubCharacteristics = (__force __u16) cpu_to_le16(
		0x0002 |	/* No power switching */
		0x0010 |	/* No over-current protection */
		0);

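	/*
	 * DeviceRemovable: a set bit marks the corresponding port's device
	 * as non-removable (bit 0 is reserved, ports start at bit 1).
	 * The following byte is the legacy PortPwrCtrlMask, all ones.
	 */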
	desc->u.hs.DeviceRemovable[0] = 1 << 1;
	desc->u.hs.DeviceRemovable[1] = ~0;
	return 0;
}

static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	int ports;
	int changed = 0;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);
	ports = readl(imx21->regs + USBH_ROOTHUBA)
		& USBH_ROOTHUBA_NDNSTMPRT_MASK;
	if (ports > 7) {
		dev_err(imx21->dev, "ports %d > 7\n", ports);
		ports = 7;
	}
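	/*
	 * Hub status-change bitmap: bit 0 is reserved for the hub itself,
	 * port n reports its change on bit n (hence the i + 1 below).
	 */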
	for (i = 0; i < ports; i++) {
		if (readl(imx21->regs + USBH_PORTSTAT(i)) &
			(USBH_PORTSTAT_CONNECTSC |
			USBH_PORTSTAT_PRTENBLSC |
			USBH_PORTSTAT_PRTSTATSC |
			USBH_PORTSTAT_OVRCURIC |
			USBH_PORTSTAT_PRTRSTSC)) {
			changed = 1;
			buf[0] |= 1 << (i + 1);
		}
	}
	spin_unlock_irqrestore(&imx21->lock, flags);

	if (changed)
		dev_info(imx21->dev, "Hub status changed\n");
	return changed;
}

static int imx21_hc_hub_control(struct usb_hcd *hcd,
				u16 typeReq,
				u16 wValue, u16 wIndex, char *buf, u16 wLength)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	int rc = 0;
	u32 status_write = 0;

	switch (typeReq) {
	case ClearHubFeature:
		dev_dbg(imx21->dev, "ClearHubFeature\n");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			dev_dbg(imx21->dev, "    OVER_CURRENT\n");
			break;
		case C_HUB_LOCAL_POWER:
			dev_dbg(imx21->dev, "    LOCAL_POWER\n");
			break;
		default:
			dev_dbg(imx21->dev, "    unknown\n");
			rc = -EINVAL;
			break;
		}
		break;

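	/*
	 * The port status register appears to follow the OHCI root-hub
	 * convention: writing a 1 to selected status bits performs an
	 * operation rather than setting the bit (e.g. writing the "current
	 * connect status" bit clears the port enable).  That is why the
	 * feature-to-bit mapping below looks unrelated at first glance.
	 */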
	case ClearPortFeature:
		dev_dbg(imx21->dev, "ClearPortFeature\n");
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			dev_dbg(imx21->dev, "    ENABLE\n");
			status_write = USBH_PORTSTAT_CURCONST;
			break;
		case USB_PORT_FEAT_SUSPEND:
			dev_dbg(imx21->dev, "    SUSPEND\n");
			status_write = USBH_PORTSTAT_PRTOVRCURI;
			break;
		case USB_PORT_FEAT_POWER:
			dev_dbg(imx21->dev, "    POWER\n");
			status_write = USBH_PORTSTAT_LSDEVCON;
			break;
		case USB_PORT_FEAT_C_ENABLE:
			dev_dbg(imx21->dev, "    C_ENABLE\n");
			status_write = USBH_PORTSTAT_PRTENBLSC;
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			dev_dbg(imx21->dev, "    C_SUSPEND\n");
			status_write = USBH_PORTSTAT_PRTSTATSC;
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			dev_dbg(imx21->dev, "    C_CONNECTION\n");
			status_write = USBH_PORTSTAT_CONNECTSC;
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			dev_dbg(imx21->dev, "    C_OVER_CURRENT\n");
			status_write = USBH_PORTSTAT_OVRCURIC;
			break;
		case USB_PORT_FEAT_C_RESET:
			dev_dbg(imx21->dev, "    C_RESET\n");
			status_write = USBH_PORTSTAT_PRTRSTSC;
			break;
		default:
			dev_dbg(imx21->dev, "    unknown\n");
			rc = -EINVAL;
			break;
		}
		break;

	case GetHubDescriptor:
		dev_dbg(imx21->dev, "GetHubDescriptor\n");
		rc = get_hub_descriptor(hcd, (void *)buf);
		break;

	case GetHubStatus:
		dev_dbg(imx21->dev, "GetHubStatus\n");
		*(__le32 *) buf = 0;
		break;

	case GetPortStatus:
		dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n",
		    wIndex, USBH_PORTSTAT(wIndex - 1));
		*(__le32 *) buf = readl(imx21->regs +
			USBH_PORTSTAT(wIndex - 1));
		break;

	case SetHubFeature:
		dev_dbg(imx21->dev, "SetHubFeature\n");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			dev_dbg(imx21->dev, "    OVER_CURRENT\n");
			break;
		case C_HUB_LOCAL_POWER:
			dev_dbg(imx21->dev, "    LOCAL_POWER\n");
			break;
		default:
			dev_dbg(imx21->dev, "    unknown\n");
			rc = -EINVAL;
			break;
		}
		break;

	case SetPortFeature:
		dev_dbg(imx21->dev, "SetPortFeature\n");
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			dev_dbg(imx21->dev, "    SUSPEND\n");
			status_write = USBH_PORTSTAT_PRTSUSPST;
			break;
		case USB_PORT_FEAT_POWER:
			dev_dbg(imx21->dev, "    POWER\n");
			status_write = USBH_PORTSTAT_PRTPWRST;
			break;
		case USB_PORT_FEAT_RESET:
			dev_dbg(imx21->dev, "    RESET\n");
			status_write = USBH_PORTSTAT_PRTRSTST;
			break;
		default:
			dev_dbg(imx21->dev, "    unknown\n");
			rc = -EINVAL;
			break;
		}
		break;

	default:
		dev_dbg(imx21->dev, "unknown request type 0x%04x\n", typeReq);
		rc = -EINVAL;
		break;
	}

	if (status_write)
		writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1));
	return rc;
}

/* =========================================== */
/* Host controller management                   */
/* =========================================== */

static int imx21_hc_reset(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long timeout;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	/* Reset the Host controller modules */
	writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
		USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
		imx21->regs + USBOTG_RST_CTRL);

	/* Wait for reset to finish */
	timeout = jiffies + HZ;
	while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) {
		if (time_after(jiffies, timeout)) {
			spin_unlock_irqrestore(&imx21->lock, flags);
			dev_err(imx21->dev, "timeout waiting for reset\n");
			return -ETIMEDOUT;
		}
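		/*
		 * Drop the lock while sleeping; reset only runs at probe or
		 * resume time, so plain spin_unlock_irq() is assumed to be
		 * sufficient here.
		 */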
		spin_unlock_irq(&imx21->lock);
		schedule_timeout_uninterruptible(1);
		spin_lock_irq(&imx21->lock);
	}
	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;
}

static int imx21_hc_start(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;
	int i, j;
	u32 hw_mode = USBOTG_HWMODE_CRECFG_HOST;
	u32 usb_control = 0;

	hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) &
			USBOTG_HWMODE_HOSTXCVR_MASK);
	hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) &
			USBOTG_HWMODE_OTGXCVR_MASK);

	if (imx21->pdata->host1_txenoe)
		usb_control |= USBCTRL_HOST1_TXEN_OE;

	if (!imx21->pdata->host1_xcverless)
		usb_control |= USBCTRL_HOST1_BYP_TLL;

	if (imx21->pdata->otg_ext_xcvr)
		usb_control |= USBCTRL_OTC_RCV_RXDP;

	spin_lock_irqsave(&imx21->lock, flags);

	writel((USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN),
		imx21->regs + USBOTG_CLK_CTRL);
	writel(hw_mode, imx21->regs + USBOTG_HWMODE);
	writel(usb_control, imx21->regs + USBCTRL);
	writel(USB_MISCCONTROL_SKPRTRY | USB_MISCCONTROL_ARBMODE,
		imx21->regs + USB_MISCCONTROL);

	/* Clear the ETDs (each ETD is four 32-bit words) */
	for (i = 0; i < USB_NUM_ETD; i++)
		for (j = 0; j < 4; j++)
			etd_writel(imx21, i, j, 0);

	/* Take the HC out of reset */
	writel(USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL | USBH_HOST_CTRL_CTLBLKSR_1,
		imx21->regs + USBH_HOST_CTRL);

	/* Enable ports */
	if (imx21->pdata->enable_otg_host)
		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
			imx21->regs + USBH_PORTSTAT(0));

	if (imx21->pdata->enable_host1)
		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
			imx21->regs + USBH_PORTSTAT(1));

	if (imx21->pdata->enable_host2)
		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
			imx21->regs + USBH_PORTSTAT(2));

	hcd->state = HC_STATE_RUNNING;

	/* Enable host controller interrupts */
	set_register_bits(imx21, USBH_SYSIEN,
		USBH_SYSIEN_HERRINT |
		USBH_SYSIEN_DONEINT | USBH_SYSIEN_SORINT);
	set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);

	spin_unlock_irqrestore(&imx21->lock, flags);

	return 0;
}

static void imx21_hc_stop(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

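	/* Mask all host interrupts, then gate the HC clocks. */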
	writel(0, imx21->regs + USBH_SYSIEN);
	clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
	clear_register_bits(imx21, USBOTG_CLK_CTRL,
			USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN);
	spin_unlock_irqrestore(&imx21->lock, flags);
}

/* =========================================== */
/* Driver glue                                  */
/* =========================================== */

static struct hc_driver imx21_hc_driver = {
	.description = hcd_name,
	.product_desc = "IMX21 USB Host Controller",
	.hcd_priv_size = sizeof(struct imx21),

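	/* Full- and low-speed only: the i.MX21 host is a USB 1.1 controller */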
	.flags = HCD_USB11,
	.irq = imx21_irq,

	.reset = imx21_hc_reset,
	.start = imx21_hc_start,
	.stop = imx21_hc_stop,

	/* I/O requests */
	.urb_enqueue = imx21_hc_urb_enqueue,
	.urb_dequeue = imx21_hc_urb_dequeue,
	.endpoint_disable = imx21_hc_endpoint_disable,

	/* scheduling support */
	.get_frame_number = imx21_hc_get_frame,

	/* Root hub support */
	.hub_status_data = imx21_hc_hub_status_data,
	.hub_control = imx21_hc_hub_control,
};

static struct mx21_usbh_platform_data default_pdata = {
	.host_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
	.otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
	.enable_host1 = 1,
	.enable_host2 = 1,
	.enable_otg_host = 1,
};

static int imx21_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	remove_debug_files(imx21);
	usb_remove_hcd(hcd);

	if (res != NULL) {
		clk_disable_unprepare(imx21->clk);
		clk_put(imx21->clk);
		iounmap(imx21->regs);
		release_mem_region(res->start, resource_size(res));
	}

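	/*
	 * The hcd returned by usb_create_hcd() is refcounted; drop the
	 * reference rather than freeing it directly, as a bare kfree()
	 * would bypass the USB core's lifetime handling.
	 */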
	usb_put_hcd(hcd);
	return 0;
}

static int imx21_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct imx21 *imx21;
	struct resource *res;
	int ret;
	int irq;

	printk(KERN_INFO "%s\n", imx21_hc_driver.product_desc);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENXIO;

	hcd = usb_create_hcd(&imx21_hc_driver,
		&pdev->dev, dev_name(&pdev->dev));
	if (hcd == NULL) {
		dev_err(&pdev->dev, "Cannot create hcd (%s)\n",
		    dev_name(&pdev->dev));
		return -ENOMEM;
	}

	imx21 = hcd_to_imx21(hcd);
	imx21->hcd = hcd;
	imx21->dev = &pdev->dev;
	imx21->pdata = pdev->dev.platform_data;
	if (!imx21->pdata)
		imx21->pdata = &default_pdata;

	spin_lock_init(&imx21->lock);
	INIT_LIST_HEAD(&imx21->dmem_list);
	INIT_LIST_HEAD(&imx21->queue_for_etd);
	INIT_LIST_HEAD(&imx21->queue_for_dmem);
	create_debug_files(imx21);

	res = request_mem_region(res->start, resource_size(res), hcd_name);
	if (!res) {
		ret = -EBUSY;
		goto failed_request_mem;
	}

	imx21->regs = ioremap(res->start, resource_size(res));
	if (imx21->regs == NULL) {
		dev_err(imx21->dev, "Cannot map registers\n");
		ret = -ENOMEM;
		goto failed_ioremap;
	}

	/* Enable the clock source */
	imx21->clk = clk_get(imx21->dev, NULL);
	if (IS_ERR(imx21->clk)) {
		dev_err(imx21->dev, "no clock found\n");
		ret = PTR_ERR(imx21->clk);
		goto failed_clock_get;
	}

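	/* The USB host core requires a 48 MHz reference clock. */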
	ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000));
	if (ret)
		goto failed_clock_set;
	ret = clk_prepare_enable(imx21->clk);
	if (ret)
		goto failed_clock_enable;

	dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
		(readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF);

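	/* Register with the USB core; this also requests the interrupt line. */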
	ret = usb_add_hcd(hcd, irq, 0);
	if (ret != 0) {
		dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
		goto failed_add_hcd;
	}

	return 0;

failed_add_hcd:
	clk_disable_unprepare(imx21->clk);
failed_clock_enable:
failed_clock_set:
	clk_put(imx21->clk);
failed_clock_get:
	iounmap(imx21->regs);
failed_ioremap:
	release_mem_region(res->start, resource_size(res));
failed_request_mem:
	remove_debug_files(imx21);
	usb_put_hcd(hcd);
	return ret;
}

static struct platform_driver imx21_hcd_driver = {
	.driver = {
		.name = hcd_name,
	},
	.probe = imx21_probe,
	.remove = imx21_remove,
	.suspend = NULL,
	.resume = NULL,
};

module_platform_driver(imx21_hcd_driver);

MODULE_DESCRIPTION("i.MX21 USB Host controller");
MODULE_AUTHOR("Martin Fuzzey");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx21-hcd");