uboot/drivers/usb/gadget/ci_udc.c
/*
 * Copyright 2011, Marvell Semiconductor Inc.
 * Lei Wen <leiwen@marvell.com>
 *
 * SPDX-License-Identifier:     GPL-2.0+
 *
 * Back ported to the 8xx platform (from the 8260 platform) by
 * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
 */

#include <common.h>
#include <command.h>
#include <config.h>
#include <net.h>
#include <malloc.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/unaligned.h>
#include <linux/types.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <usb/ci_udc.h>
#include "../host/ehci.h"
#include "ci_udc.h"

/*
 * Check that the system does not have overly long cachelines. If a
 * cacheline is longer than 128 bytes, the driver cannot flush or
 * invalidate the data cache over separate QH entries. We use 128 bytes
 * because one QH entry is 64 bytes long and there are always two QH
 * list entries for each endpoint.
 */
#if ARCH_DMA_MINALIGN > 128
#error This driver cannot work on systems with cachelines longer than 128 bytes
#endif
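
/*
 * Worked example (illustrative): each endpoint owns two 64-byte QHs, one
 * for OUT and one for IN, i.e. 128 bytes in total. As long as
 * ARCH_DMA_MINALIGN (the cacheline-derived DMA alignment) is at most 128,
 * a flush/invalidate of one endpoint's QH pair can never touch another
 * endpoint's QHs.
 */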

/*
 * Every QTD must be individually aligned, since we can program any
 * QTD's address into HW. Cache flushing requires ARCH_DMA_MINALIGN,
 * and the USB HW requires 32-byte alignment. Align to both:
 */
#define ILIST_ALIGN             roundup(ARCH_DMA_MINALIGN, 32)
/* Each QTD is this size */
#define ILIST_ENT_RAW_SZ        sizeof(struct ept_queue_item)
/*
 * Align the size of the QTD too, so we can add this value to each
 * QTD's address to get another aligned address.
 */
#define ILIST_ENT_SZ            roundup(ILIST_ENT_RAW_SZ, ILIST_ALIGN)
/* For each endpoint, we need 2 QTDs, one for each of IN and OUT */
#define ILIST_SZ                (NUM_ENDPOINTS * 2 * ILIST_ENT_SZ)
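
/*
 * Sizing sketch (assuming, for illustration, ARCH_DMA_MINALIGN == 64 and
 * sizeof(struct ept_queue_item) == 32): ILIST_ALIGN = roundup(64, 32) = 64,
 * ILIST_ENT_SZ = roundup(32, 64) = 64, so ILIST_SZ = NUM_ENDPOINTS * 128.
 * The actual values depend on the platform's ARCH_DMA_MINALIGN and on the
 * layout of struct ept_queue_item.
 */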

#define EP_MAX_LENGTH_TRANSFER  0x4000

#ifndef DEBUG
#define DBG(x...) do {} while (0)
#else
#define DBG(x...) printf(x)
static const char *reqname(unsigned r)
{
        switch (r) {
        case USB_REQ_GET_STATUS: return "GET_STATUS";
        case USB_REQ_CLEAR_FEATURE: return "CLEAR_FEATURE";
        case USB_REQ_SET_FEATURE: return "SET_FEATURE";
        case USB_REQ_SET_ADDRESS: return "SET_ADDRESS";
        case USB_REQ_GET_DESCRIPTOR: return "GET_DESCRIPTOR";
        case USB_REQ_SET_DESCRIPTOR: return "SET_DESCRIPTOR";
        case USB_REQ_GET_CONFIGURATION: return "GET_CONFIGURATION";
        case USB_REQ_SET_CONFIGURATION: return "SET_CONFIGURATION";
        case USB_REQ_GET_INTERFACE: return "GET_INTERFACE";
        case USB_REQ_SET_INTERFACE: return "SET_INTERFACE";
        default: return "*UNKNOWN*";
        }
}
#endif

static struct usb_endpoint_descriptor ep0_desc = {
        .bLength = sizeof(struct usb_endpoint_descriptor),
        .bDescriptorType = USB_DT_ENDPOINT,
        .bEndpointAddress = USB_DIR_IN,
        .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
};

static int ci_pullup(struct usb_gadget *gadget, int is_on);
static int ci_ep_enable(struct usb_ep *ep,
                const struct usb_endpoint_descriptor *desc);
static int ci_ep_disable(struct usb_ep *ep);
static int ci_ep_queue(struct usb_ep *ep,
                struct usb_request *req, gfp_t gfp_flags);
static int ci_ep_dequeue(struct usb_ep *ep, struct usb_request *req);
static struct usb_request *
ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags);
static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *_req);

static struct usb_gadget_ops ci_udc_ops = {
        .pullup = ci_pullup,
};

static struct usb_ep_ops ci_ep_ops = {
        .enable         = ci_ep_enable,
        .disable        = ci_ep_disable,
        .queue          = ci_ep_queue,
        .dequeue        = ci_ep_dequeue,
        .alloc_request  = ci_ep_alloc_request,
        .free_request   = ci_ep_free_request,
};

/* Init values for USB endpoints. */
static const struct usb_ep ci_ep_init[5] = {
        [0] = { /* EP 0 */
                .maxpacket      = 64,
                .name           = "ep0",
                .ops            = &ci_ep_ops,
        },
        [1] = {
                .maxpacket      = 512,
                .name           = "ep1in-bulk",
                .ops            = &ci_ep_ops,
        },
        [2] = {
                .maxpacket      = 512,
                .name           = "ep2out-bulk",
                .ops            = &ci_ep_ops,
        },
        [3] = {
                .maxpacket      = 512,
                .name           = "ep3in-int",
                .ops            = &ci_ep_ops,
        },
        [4] = {
                .maxpacket      = 512,
                .name           = "ep-",
                .ops            = &ci_ep_ops,
        },
};

static struct ci_drv controller = {
        .gadget = {
                .name   = "ci_udc",
                .ops    = &ci_udc_ops,
                .is_dualspeed = 1,
        },
};

/**
 * ci_get_qh() - return queue head for endpoint
 * @ep_num:     Endpoint number
 * @dir_in:     Direction of the endpoint (IN = 1, OUT = 0)
 *
 * This function returns the QH associated with a particular endpoint
 * and its direction.
 */
static struct ept_queue_head *ci_get_qh(int ep_num, int dir_in)
{
        return &controller.epts[(ep_num * 2) + dir_in];
}

/**
 * ci_get_qtd() - return queue item for endpoint
 * @ep_num:     Endpoint number
 * @dir_in:     Direction of the endpoint (IN = 1, OUT = 0)
 *
 * This function returns the qTD (queue item) associated with a particular
 * endpoint and its direction.
 */
static struct ept_queue_item *ci_get_qtd(int ep_num, int dir_in)
{
        int index = (ep_num * 2) + dir_in;
        uint8_t *imem = controller.items_mem + (index * ILIST_ENT_SZ);
        return (struct ept_queue_item *)imem;
}
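
/*
 * Index layout sketch: both the QH array and the qTD memory are indexed by
 * (ep_num * 2) + dir_in, so e.g. EP1 OUT uses epts[2] and the qTD at
 * items_mem + 2 * ILIST_ENT_SZ, while EP1 IN uses epts[3] and the qTD at
 * items_mem + 3 * ILIST_ENT_SZ.
 */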

/**
 * ci_flush_qh - flush cache over queue head
 * @ep_num:     Endpoint number
 *
 * This function flushes the cache over the QH pair (OUT and IN) for a
 * particular endpoint.
 */
static void ci_flush_qh(int ep_num)
{
        struct ept_queue_head *head = ci_get_qh(ep_num, 0);
        const unsigned long start = (unsigned long)head;
        const unsigned long end = start + 2 * sizeof(*head);

        flush_dcache_range(start, end);
}

/**
 * ci_invalidate_qh - invalidate cache over queue head
 * @ep_num:     Endpoint number
 *
 * This function invalidates the cache over the QH pair (OUT and IN) for a
 * particular endpoint.
 */
static void ci_invalidate_qh(int ep_num)
{
        struct ept_queue_head *head = ci_get_qh(ep_num, 0);
        unsigned long start = (unsigned long)head;
        unsigned long end = start + 2 * sizeof(*head);

        invalidate_dcache_range(start, end);
}

/**
 * ci_flush_qtd - flush cache over queue item
 * @ep_num:     Endpoint number
 *
 * This function flushes the cache over the qTD pair (OUT and IN) for a
 * particular endpoint.
 */
static void ci_flush_qtd(int ep_num)
{
        struct ept_queue_item *item = ci_get_qtd(ep_num, 0);
        const unsigned long start = (unsigned long)item;
        const unsigned long end = start + 2 * ILIST_ENT_SZ;

        flush_dcache_range(start, end);
}

/**
 * ci_flush_td - flush cache over a single transfer descriptor
 * @td: td pointer
 *
 * This function flushes the cache for a particular transfer descriptor.
 */
static void ci_flush_td(struct ept_queue_item *td)
{
        const unsigned long start = (unsigned long)td;
        const unsigned long end = (unsigned long)td + ILIST_ENT_SZ;
        flush_dcache_range(start, end);
}

/**
 * ci_invalidate_qtd - invalidate cache over queue item
 * @ep_num:     Endpoint number
 *
 * This function invalidates the cache over the qTD pair (OUT and IN) for a
 * particular endpoint.
 */
static void ci_invalidate_qtd(int ep_num)
{
        struct ept_queue_item *item = ci_get_qtd(ep_num, 0);
        const unsigned long start = (unsigned long)item;
        const unsigned long end = start + 2 * ILIST_ENT_SZ;

        invalidate_dcache_range(start, end);
}

/**
 * ci_invalidate_td - invalidate cache over a single transfer descriptor
 * @td: td pointer
 *
 * This function invalidates the cache for a particular transfer descriptor.
 */
static void ci_invalidate_td(struct ept_queue_item *td)
{
        const unsigned long start = (unsigned long)td;
        const unsigned long end = start + ILIST_ENT_SZ;
        invalidate_dcache_range(start, end);
}

static struct usb_request *
ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags)
{
        struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
        int num = -1;
        struct ci_req *ci_req;

        if (ci_ep->desc)
                num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;

        if (num == 0 && controller.ep0_req)
                return &controller.ep0_req->req;

        ci_req = calloc(1, sizeof(*ci_req));
        if (!ci_req)
                return NULL;

        INIT_LIST_HEAD(&ci_req->queue);

        if (num == 0)
                controller.ep0_req = ci_req;

        return &ci_req->req;
}

static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *req)
{
        struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
        struct ci_req *ci_req = container_of(req, struct ci_req, req);
        int num = -1;

        if (ci_ep->desc)
                num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;

        if (num == 0) {
                if (!controller.ep0_req)
                        return;
                controller.ep0_req = NULL;
        }

        if (ci_req->b_buf)
                free(ci_req->b_buf);
        free(ci_req);
}

static void ep_enable(int num, int in, int maxpacket)
{
        struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
        unsigned n;

        n = readl(&udc->epctrl[num]);
        if (in)
                n |= (CTRL_TXE | CTRL_TXR | CTRL_TXT_BULK);
        else
                n |= (CTRL_RXE | CTRL_RXR | CTRL_RXT_BULK);

        if (num != 0) {
                struct ept_queue_head *head = ci_get_qh(num, in);

                head->config = CONFIG_MAX_PKT(maxpacket) | CONFIG_ZLT;
                ci_flush_qh(num);
        }
        writel(n, &udc->epctrl[num]);
}

static int ci_ep_enable(struct usb_ep *ep,
                const struct usb_endpoint_descriptor *desc)
{
        struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
        int num, in;
        num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
        in = (desc->bEndpointAddress & USB_DIR_IN) != 0;
        ci_ep->desc = desc;

        if (num) {
                int max = get_unaligned_le16(&desc->wMaxPacketSize);

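                /*
                 * Example: a bulk endpoint declared with a wMaxPacketSize of
                 * 512 is clamped to 64 below when the link came up at full
                 * speed, since full-speed bulk packets cannot exceed 64
                 * bytes.
                 */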
                if ((max > 64) && (controller.gadget.speed == USB_SPEED_FULL))
                        max = 64;
                if (ep->maxpacket != max) {
                        DBG("%s: from %d to %d\n", __func__,
                            ep->maxpacket, max);
                        ep->maxpacket = max;
                }
        }
        ep_enable(num, in, ep->maxpacket);
        DBG("%s: num=%d maxpacket=%d\n", __func__, num, ep->maxpacket);
        return 0;
}

static int ci_ep_disable(struct usb_ep *ep)
{
        struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);

        ci_ep->desc = NULL;
        return 0;
}

static int ci_bounce(struct ci_req *ci_req, int in)
{
        struct usb_request *req = &ci_req->req;
        unsigned long addr = (unsigned long)req->buf;
        unsigned long hwaddr;
        uint32_t aligned_used_len;

        /* Input buffer address is not aligned. */
        if (addr & (ARCH_DMA_MINALIGN - 1))
                goto align;

        /* Input buffer length is not aligned. */
        if (req->length & (ARCH_DMA_MINALIGN - 1))
                goto align;

        /* The buffer is well aligned, only flush cache. */
        ci_req->hw_len = req->length;
        ci_req->hw_buf = req->buf;
        goto flush;

align:
        if (ci_req->b_buf && req->length > ci_req->b_len) {
                free(ci_req->b_buf);
                ci_req->b_buf = NULL;
        }
        if (!ci_req->b_buf) {
                ci_req->b_len = roundup(req->length, ARCH_DMA_MINALIGN);
                ci_req->b_buf = memalign(ARCH_DMA_MINALIGN, ci_req->b_len);
                if (!ci_req->b_buf)
                        return -ENOMEM;
        }
        ci_req->hw_len = ci_req->b_len;
        ci_req->hw_buf = ci_req->b_buf;

        if (in)
                memcpy(ci_req->hw_buf, req->buf, req->length);

flush:
        hwaddr = (unsigned long)ci_req->hw_buf;
        aligned_used_len = roundup(req->length, ARCH_DMA_MINALIGN);
        flush_dcache_range(hwaddr, hwaddr + aligned_used_len);

        return 0;
}

static void ci_debounce(struct ci_req *ci_req, int in)
{
        struct usb_request *req = &ci_req->req;
        unsigned long addr = (unsigned long)req->buf;
        unsigned long hwaddr = (unsigned long)ci_req->hw_buf;
        uint32_t aligned_used_len;

        if (in)
                return;

        aligned_used_len = roundup(req->actual, ARCH_DMA_MINALIGN);
        invalidate_dcache_range(hwaddr, hwaddr + aligned_used_len);

        if (addr == hwaddr)
                return; /* not a bounce */

        memcpy(req->buf, ci_req->hw_buf, req->actual);
}
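
/*
 * Bounce-buffer sketch: ci_bounce() runs before a request is handed to the
 * hardware and ci_debounce() runs after it completes. For example, a request
 * whose buffer address or length is not a multiple of ARCH_DMA_MINALIGN is
 * copied into the cache-aligned b_buf (for IN) or read back out of it (for
 * OUT), so cache maintenance always operates on aligned regions that the
 * driver owns.
 */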

static void ci_ep_submit_next_request(struct ci_ep *ci_ep)
{
        struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
        struct ept_queue_item *item;
        struct ept_queue_head *head;
        int bit, num, len, in;
        struct ci_req *ci_req;
        u8 *buf;
        uint32_t len_left, len_this_dtd;
        struct ept_queue_item *dtd, *qtd;

        ci_ep->req_primed = true;

        num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
        in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;
        item = ci_get_qtd(num, in);
        head = ci_get_qh(num, in);

        ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue);
        len = ci_req->req.length;

        head->next = (unsigned long)item;
        head->info = 0;

        ci_req->dtd_count = 0;
        buf = ci_req->hw_buf;
        len_left = len;
        dtd = item;

        do {
                len_this_dtd = min(len_left, (unsigned)EP_MAX_LENGTH_TRANSFER);

                dtd->info = INFO_BYTES(len_this_dtd) | INFO_ACTIVE;
                dtd->page0 = (unsigned long)buf;
                dtd->page1 = ((unsigned long)buf & 0xfffff000) + 0x1000;
                dtd->page2 = ((unsigned long)buf & 0xfffff000) + 0x2000;
                dtd->page3 = ((unsigned long)buf & 0xfffff000) + 0x3000;
                dtd->page4 = ((unsigned long)buf & 0xfffff000) + 0x4000;

                len_left -= len_this_dtd;
                buf += len_this_dtd;

                if (len_left) {
                        qtd = (struct ept_queue_item *)
                               memalign(ILIST_ALIGN, ILIST_ENT_SZ);
                        dtd->next = (unsigned long)qtd;
                        dtd = qtd;
                        memset(dtd, 0, ILIST_ENT_SZ);
                }

                ci_req->dtd_count++;
        } while (len_left);

        item = dtd;
        /*
         * When sending the data for an IN transaction, the attached host
         * knows that all data for the IN is sent when one of the following
         * occurs:
         * a) A zero-length packet is transmitted.
         * b) A packet with length that isn't an exact multiple of the ep's
         *    maxpacket is transmitted.
         * c) Enough data is sent to exactly fill the host's maximum expected
         *    IN transaction size.
         *
         * One of these conditions MUST apply at the end of an IN transaction,
         * or the transaction will not be considered complete by the host. If
         * none of (a)..(c) already applies, then we must force (a) to apply
         * by explicitly sending an extra zero-length packet.
         */
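        /*
         * Example (illustrative): a 1024-byte IN transfer on a 512-byte
         * maxpacket bulk endpoint with req.zero set goes out as 512 + 512
         * followed by an extra zero-length packet, so case (a) terminates
         * the transaction.
         */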
        /*  IN    !a     !b                              !c */
        if (in && len && !(len % ci_ep->ep.maxpacket) && ci_req->req.zero) {
                /*
                 * Each endpoint has 2 items allocated, even though typically
                 * only 1 is used at a time since either an IN or an OUT but
                 * not both is queued. For an IN transaction, item currently
                 * points at the second of these items, so we know that we
                 * can use the other to transmit the extra zero-length packet.
                 */
                struct ept_queue_item *other_item = ci_get_qtd(num, 0);
                item->next = (unsigned long)other_item;
                item = other_item;
                item->info = INFO_ACTIVE;
        }

        item->next = TERMINATE;
        item->info |= INFO_IOC;

        ci_flush_qtd(num);

        item = (struct ept_queue_item *)(unsigned long)head->next;
        while (item->next != TERMINATE) {
                ci_flush_td((struct ept_queue_item *)(unsigned long)item->next);
                item = (struct ept_queue_item *)(unsigned long)item->next;
        }

        DBG("ept%d %s queue len %x, req %p, buffer %p\n",
            num, in ? "in" : "out", len, ci_req, ci_req->hw_buf);
        ci_flush_qh(num);

        if (in)
                bit = EPT_TX(num);
        else
                bit = EPT_RX(num);

        writel(bit, &udc->epprime);
}

static int ci_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
        struct ci_ep *ci_ep = container_of(_ep, struct ci_ep, ep);
        struct ci_req *ci_req;

        list_for_each_entry(ci_req, &ci_ep->queue, queue) {
                if (&ci_req->req == _req)
                        break;
        }

        if (&ci_req->req != _req)
                return -EINVAL;

        list_del_init(&ci_req->queue);

        if (ci_req->req.status == -EINPROGRESS) {
                ci_req->req.status = -ECONNRESET;
                if (ci_req->req.complete)
                        ci_req->req.complete(_ep, _req);
        }

        return 0;
}

static int ci_ep_queue(struct usb_ep *ep,
                struct usb_request *req, gfp_t gfp_flags)
{
        struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
        struct ci_req *ci_req = container_of(req, struct ci_req, req);
        int in, ret;
        int __maybe_unused num;

        num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
        in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;

        if (!num && ci_ep->req_primed) {
                /*
                 * The flipping of ep0 between IN and OUT relies on
                 * ci_ep_queue consuming the current IN/OUT setting
                 * immediately. If this is deferred to a later point when the
                 * req is pulled out of ci_req->queue, then the IN/OUT setting
                 * may have been changed since the req was queued, and state
                 * will get out of sync. This condition doesn't occur today,
                 * but could if bugs were introduced later, and this error
                 * check will save a lot of debugging time.
                 */
                printf("%s: ep0 transaction already in progress\n", __func__);
                return -EPROTO;
        }

        ret = ci_bounce(ci_req, in);
        if (ret)
                return ret;

        DBG("ept%d %s pre-queue req %p, buffer %p\n",
            num, in ? "in" : "out", ci_req, ci_req->hw_buf);
        list_add_tail(&ci_req->queue, &ci_ep->queue);

        if (!ci_ep->req_primed)
                ci_ep_submit_next_request(ci_ep);

        return 0;
}

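/*
 * Control-transfer sketch: a control transfer is a Setup Stage, an optional
 * Data Stage, and a zero-length Status Stage that moves in the opposite
 * direction. ep0_desc tracks the direction of the next ep0 transaction, so
 * the driver flips it after the Data Stage completes (or right away when
 * wLength is zero); see handle_setup() and handle_ep_complete().
 */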
static void flip_ep0_direction(void)
{
        if (ep0_desc.bEndpointAddress == USB_DIR_IN) {
                DBG("%s: Flipping ep0 to OUT\n", __func__);
                ep0_desc.bEndpointAddress = 0;
        } else {
                DBG("%s: Flipping ep0 to IN\n", __func__);
                ep0_desc.bEndpointAddress = USB_DIR_IN;
        }
}

static void handle_ep_complete(struct ci_ep *ci_ep)
{
        struct ept_queue_item *item, *next_td;
        int num, in, len, j;
        struct ci_req *ci_req;

        num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
        in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;
        item = ci_get_qtd(num, in);
        ci_invalidate_qtd(num);
        ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue);

        next_td = item;
        len = 0;
        for (j = 0; j < ci_req->dtd_count; j++) {
                ci_invalidate_td(next_td);
                item = next_td;
                len += (item->info >> 16) & 0x7fff;
                if (item->info & 0xff)
                        printf("EP%d/%s FAIL info=%x pg0=%x\n",
                               num, in ? "in" : "out", item->info, item->page0);
                if (j != ci_req->dtd_count - 1)
                        next_td = (struct ept_queue_item *)(unsigned long)
                                item->next;
                if (j != 0)
                        free(item);
        }

        list_del_init(&ci_req->queue);
        ci_ep->req_primed = false;

        if (!list_empty(&ci_ep->queue))
                ci_ep_submit_next_request(ci_ep);

        ci_req->req.actual = ci_req->req.length - len;
        ci_debounce(ci_req, in);

        DBG("ept%d %s req %p, complete %x\n",
            num, in ? "in" : "out", ci_req, len);
        if (num != 0 || controller.ep0_data_phase)
                ci_req->req.complete(&ci_ep->ep, &ci_req->req);
        if (num == 0 && controller.ep0_data_phase) {
                /*
                 * Data Stage is complete, so flip ep0 dir for Status Stage,
                 * which always transfers a packet in the opposite direction.
                 */
                DBG("%s: flip ep0 dir for Status Stage\n", __func__);
                flip_ep0_direction();
                controller.ep0_data_phase = false;
                ci_req->req.length = 0;
                usb_ep_queue(&ci_ep->ep, &ci_req->req, 0);
        }
}

#define SETUP(type, request) (((type) << 8) | (request))
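/*
 * For example, SETUP(USB_DIR_IN | USB_RECIP_DEVICE, USB_REQ_GET_STATUS)
 * evaluates to (0x80 << 8) | 0x00 == 0x8000, matching the bRequestType and
 * bRequest fields of a GET_STATUS setup packet.
 */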

static void handle_setup(void)
{
        struct ci_ep *ci_ep = &controller.ep[0];
        struct ci_req *ci_req;
        struct usb_request *req;
        struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
        struct ept_queue_head *head;
        struct usb_ctrlrequest r;
        int status = 0;
        int num, in, _num, _in, i;
        char *buf;

        ci_req = controller.ep0_req;
        req = &ci_req->req;
        head = ci_get_qh(0, 0); /* EP0 OUT */

        ci_invalidate_qh(0);
        memcpy(&r, head->setup_data, sizeof(struct usb_ctrlrequest));
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
        writel(EPT_RX(0), &udc->epsetupstat);
#else
        writel(EPT_RX(0), &udc->epstat);
#endif
        DBG("handle setup %s, %x, %x index %x value %x length %x\n",
            reqname(r.bRequest), r.bRequestType, r.bRequest, r.wIndex,
            r.wValue, r.wLength);

        /* Set EP0 dir for Data Stage based on Setup Stage data */
        if (r.bRequestType & USB_DIR_IN) {
                DBG("%s: Set ep0 to IN for Data Stage\n", __func__);
                ep0_desc.bEndpointAddress = USB_DIR_IN;
        } else {
                DBG("%s: Set ep0 to OUT for Data Stage\n", __func__);
                ep0_desc.bEndpointAddress = 0;
        }
        if (r.wLength) {
                controller.ep0_data_phase = true;
        } else {
                /* 0 length -> no Data Stage. Flip dir for Status Stage */
                DBG("%s: 0 length: flip ep0 dir for Status Stage\n", __func__);
                flip_ep0_direction();
                controller.ep0_data_phase = false;
        }

        list_del_init(&ci_req->queue);
        ci_ep->req_primed = false;

        switch (SETUP(r.bRequestType, r.bRequest)) {
        case SETUP(USB_RECIP_ENDPOINT, USB_REQ_CLEAR_FEATURE):
                _num = r.wIndex & 15;
                _in = !!(r.wIndex & 0x80);

                if ((r.wValue == 0) && (r.wLength == 0)) {
                        req->length = 0;
                        for (i = 0; i < NUM_ENDPOINTS; i++) {
                                struct ci_ep *ep = &controller.ep[i];

                                if (!ep->desc)
                                        continue;
                                num = ep->desc->bEndpointAddress
                                                & USB_ENDPOINT_NUMBER_MASK;
                                in = (ep->desc->bEndpointAddress
                                                & USB_DIR_IN) != 0;
                                if ((num == _num) && (in == _in)) {
                                        ep_enable(num, in, ep->ep.maxpacket);
                                        usb_ep_queue(controller.gadget.ep0,
                                                        req, 0);
                                        break;
                                }
                        }
                }
                return;

        case SETUP(USB_RECIP_DEVICE, USB_REQ_SET_ADDRESS):
                /*
                 * write address delayed (will take effect
                 * after the next IN txn)
                 */
                writel((r.wValue << 25) | (1 << 24), &udc->devaddr);
                req->length = 0;
                usb_ep_queue(controller.gadget.ep0, req, 0);
                return;

        case SETUP(USB_DIR_IN | USB_RECIP_DEVICE, USB_REQ_GET_STATUS):
                req->length = 2;
                buf = (char *)req->buf;
                buf[0] = 1 << USB_DEVICE_SELF_POWERED;
                buf[1] = 0;
                usb_ep_queue(controller.gadget.ep0, req, 0);
                return;
        }
        /* pass request up to the gadget driver */
        if (controller.driver)
                status = controller.driver->setup(&controller.gadget, &r);
        else
                status = -ENODEV;

        if (!status)
                return;
        DBG("STALL reqname %s type %x value %x, index %x\n",
            reqname(r.bRequest), r.bRequestType, r.wValue, r.wIndex);
        writel((1 << 16) | (1 << 0), &udc->epctrl[0]);
}

static void stop_activity(void)
{
        int i, num, in;
        struct ept_queue_head *head;
        struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
        writel(readl(&udc->epcomp), &udc->epcomp);
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
        writel(readl(&udc->epsetupstat), &udc->epsetupstat);
#endif
        writel(readl(&udc->epstat), &udc->epstat);
        writel(0xffffffff, &udc->epflush);

        /* error out any pending reqs */
        for (i = 0; i < NUM_ENDPOINTS; i++) {
                if (i != 0)
                        writel(0, &udc->epctrl[i]);
                if (controller.ep[i].desc) {
                        num = controller.ep[i].desc->bEndpointAddress
                                & USB_ENDPOINT_NUMBER_MASK;
                        in = (controller.ep[i].desc->bEndpointAddress
                                & USB_DIR_IN) != 0;
                        head = ci_get_qh(num, in);
                        head->info = INFO_ACTIVE;
                        ci_flush_qh(num);
                }
        }
}

void udc_irq(void)
{
        struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
        unsigned n = readl(&udc->usbsts);
        writel(n, &udc->usbsts);
        int bit, i, num, in;

        n &= (STS_SLI | STS_URI | STS_PCI | STS_UI | STS_UEI);
        if (n == 0)
                return;

        if (n & STS_URI) {
                DBG("-- reset --\n");
                stop_activity();
        }
        if (n & STS_SLI)
                DBG("-- suspend --\n");

        if (n & STS_PCI) {
                int max = 64;
                int speed = USB_SPEED_FULL;

#ifdef CONFIG_CI_UDC_HAS_HOSTPC
                bit = (readl(&udc->hostpc1_devlc) >> 25) & 3;
#else
                bit = (readl(&udc->portsc) >> 26) & 3;
#endif
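                /*
                 * The two-bit port-speed field read above is 2 when the link
                 * enumerated at high speed; any other value is treated as
                 * full speed here.
                 */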
                DBG("-- portchange %x %s\n", bit, (bit == 2) ? "High" : "Full");
                if (bit == 2) {
                        speed = USB_SPEED_HIGH;
                        max = 512;
                }
                controller.gadget.speed = speed;
                for (i = 1; i < NUM_ENDPOINTS; i++) {
                        if (controller.ep[i].ep.maxpacket > max)
                                controller.ep[i].ep.maxpacket = max;
                }
        }

        if (n & STS_UEI)
                printf("<UEI %x>\n", readl(&udc->epcomp));

        if ((n & STS_UI) || (n & STS_UEI)) {
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
                n = readl(&udc->epsetupstat);
#else
                n = readl(&udc->epstat);
#endif
                if (n & EPT_RX(0))
                        handle_setup();

                n = readl(&udc->epcomp);
                if (n != 0)
                        writel(n, &udc->epcomp);

                for (i = 0; i < NUM_ENDPOINTS && n; i++) {
                        if (controller.ep[i].desc) {
                                num = controller.ep[i].desc->bEndpointAddress
                                        & USB_ENDPOINT_NUMBER_MASK;
                                in = (controller.ep[i].desc->bEndpointAddress
                                                & USB_DIR_IN) != 0;
                                bit = (in) ? EPT_TX(num) : EPT_RX(num);
                                if (n & bit)
                                        handle_ep_complete(&controller.ep[i]);
                        }
                }
        }
}

int usb_gadget_handle_interrupts(int index)
{
        u32 value;
        struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;

        value = readl(&udc->usbsts);
        if (value)
                udc_irq();

        return value;
}

void udc_disconnect(void)
{
        struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
        /* disable pullup */
        stop_activity();
        writel(USBCMD_FS2, &udc->usbcmd);
        udelay(800);
        if (controller.driver)
                controller.driver->disconnect(&controller.gadget);
}

static int ci_pullup(struct usb_gadget *gadget, int is_on)
{
        struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
        if (is_on) {
                /* RESET */
                writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RST, &udc->usbcmd);
                udelay(200);

                writel((unsigned long)controller.epts, &udc->epinitaddr);

                /* select DEVICE mode */
                writel(USBMODE_DEVICE, &udc->usbmode);

#if !defined(CONFIG_USB_GADGET_DUALSPEED)
                /* Port force Full-Speed Connect */
                setbits_le32(&udc->portsc, PFSC);
#endif

                writel(0xffffffff, &udc->epflush);

                /* Turn on the USB connection by enabling the pullup resistor */
                writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RUN, &udc->usbcmd);
        } else {
                udc_disconnect();
        }

        return 0;
}

static int ci_udc_probe(void)
{
        struct ept_queue_head *head;
        int i;

        const int num = 2 * NUM_ENDPOINTS;

        const int eplist_min_align = 4096;
        const int eplist_align = roundup(eplist_min_align, ARCH_DMA_MINALIGN);
        const int eplist_raw_sz = num * sizeof(struct ept_queue_head);
        const int eplist_sz = roundup(eplist_raw_sz, ARCH_DMA_MINALIGN);

        /* The QH list must be aligned to 4096 bytes. */
        controller.epts = memalign(eplist_align, eplist_sz);
        if (!controller.epts)
                return -ENOMEM;
        memset(controller.epts, 0, eplist_sz);

        controller.items_mem = memalign(ILIST_ALIGN, ILIST_SZ);
        if (!controller.items_mem) {
                free(controller.epts);
                return -ENOMEM;
        }
        memset(controller.items_mem, 0, ILIST_SZ);

        for (i = 0; i < 2 * NUM_ENDPOINTS; i++) {
                /*
                 * Configure the QH for each endpoint. The QH list is laid out
                 * so that each pair of consecutive entries, N and N+1 where N
                 * is even, belongs to one endpoint: entry N holds the OUT
                 * configuration and entry N+1 holds the IN configuration.
                 */
                head = controller.epts + i;
                if (i < 2)
                        head->config = CONFIG_MAX_PKT(EP0_MAX_PACKET_SIZE)
                                | CONFIG_ZLT | CONFIG_IOS;
                else
                        head->config = CONFIG_MAX_PKT(EP_MAX_PACKET_SIZE)
                                | CONFIG_ZLT;
                head->next = TERMINATE;
                head->info = 0;

                if (i & 1) {
                        ci_flush_qh(i / 2);
                        ci_flush_qtd(i / 2);
                }
        }

        INIT_LIST_HEAD(&controller.gadget.ep_list);

        /* Init EP 0 */
        memcpy(&controller.ep[0].ep, &ci_ep_init[0], sizeof(*ci_ep_init));
        controller.ep[0].desc = &ep0_desc;
        INIT_LIST_HEAD(&controller.ep[0].queue);
        controller.ep[0].req_primed = false;
        controller.gadget.ep0 = &controller.ep[0].ep;
        INIT_LIST_HEAD(&controller.gadget.ep0->ep_list);

        /* Init EP 1..3 */
        for (i = 1; i < 4; i++) {
                memcpy(&controller.ep[i].ep, &ci_ep_init[i],
                       sizeof(*ci_ep_init));
                INIT_LIST_HEAD(&controller.ep[i].queue);
                controller.ep[i].req_primed = false;
                list_add_tail(&controller.ep[i].ep.ep_list,
                              &controller.gadget.ep_list);
        }

        /* Init EP 4..n */
        for (i = 4; i < NUM_ENDPOINTS; i++) {
                memcpy(&controller.ep[i].ep, &ci_ep_init[4],
                       sizeof(*ci_ep_init));
                INIT_LIST_HEAD(&controller.ep[i].queue);
                controller.ep[i].req_primed = false;
                list_add_tail(&controller.ep[i].ep.ep_list,
                              &controller.gadget.ep_list);
        }

        ci_ep_alloc_request(&controller.ep[0].ep, 0);
        if (!controller.ep0_req) {
                free(controller.items_mem);
                free(controller.epts);
                return -ENOMEM;
        }

        return 0;
}

int usb_gadget_register_driver(struct usb_gadget_driver *driver)
{
        int ret;

        if (!driver)
                return -EINVAL;
        if (!driver->bind || !driver->setup || !driver->disconnect)
                return -EINVAL;
        if (driver->speed != USB_SPEED_FULL && driver->speed != USB_SPEED_HIGH)
                return -EINVAL;

#ifdef CONFIG_DM_USB
        ret = usb_setup_ehci_gadget(&controller.ctrl);
#else
        ret = usb_lowlevel_init(0, USB_INIT_DEVICE, (void **)&controller.ctrl);
#endif
        if (ret)
                return ret;

        ret = ci_udc_probe();
        if (ret) {
                DBG("udc probe failed, returned %d\n", ret);
                return ret;
        }

        ret = driver->bind(&controller.gadget);
        if (ret) {
                DBG("driver->bind() returned %d\n", ret);
                return ret;
        }
        controller.driver = driver;

        return 0;
}
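
/*
 * Typical usage from a gadget driver (a sketch; exact call sites vary by
 * board and gadget): call usb_gadget_register_driver(&my_driver) to bind,
 * poll usb_gadget_handle_interrupts(0) in the command's main loop so setup
 * packets and completions are serviced, and finally call
 * usb_gadget_unregister_driver(&my_driver) to drop the pullup and free the
 * controller's QH/qTD memory. "my_driver" here is a placeholder name.
 */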

int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
        udc_disconnect();

        driver->unbind(&controller.gadget);
        controller.driver = NULL;

        ci_ep_free_request(&controller.ep[0].ep, &controller.ep0_req->req);
        free(controller.items_mem);
        free(controller.epts);

        return 0;
}

bool dfu_usb_get_reset(void)
{
        struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;

        return !!(readl(&udc->usbsts) & STS_URI);
}