uboot/drivers/usb/gadget/ci_udc.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2011, Marvell Semiconductor Inc.
 * Lei Wen <leiwen@marvell.com>
 *
 * Back ported to the 8xx platform (from the 8260 platform) by
 * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
 */

#include <common.h>
#include <command.h>
#include <config.h>
#include <net.h>
#include <malloc.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/unaligned.h>
#include <linux/types.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <usb/ci_udc.h>
#include "../host/ehci.h"
#include "ci_udc.h"

/*
 * Check if the system's cachelines are too long. If the cachelines are
 * longer than 128b, the driver will not be able to flush/invalidate data
 * cache over separate QH entries. We use 128b because one QH entry is
 * 64b long and there are always two QH list entries for each endpoint.
 */
#if ARCH_DMA_MINALIGN > 128
#error This driver can not work on systems with caches longer than 128b
#endif

/*
 * Every QTD must be individually aligned, since we can program any
 * QTD's address into HW. Cache flushing requires ARCH_DMA_MINALIGN,
 * and the USB HW requires 32-byte alignment. Align to both:
 */
#define ILIST_ALIGN             roundup(ARCH_DMA_MINALIGN, 32)
/* Each QTD is this size */
#define ILIST_ENT_RAW_SZ        sizeof(struct ept_queue_item)
/*
 * Align the size of the QTD too, so we can add this value to each
 * QTD's address to get another aligned address.
 */
#define ILIST_ENT_SZ            roundup(ILIST_ENT_RAW_SZ, ILIST_ALIGN)
/* For each endpoint, we need 2 QTDs, one for each of IN and OUT */
#define ILIST_SZ                (NUM_ENDPOINTS * 2 * ILIST_ENT_SZ)

#define EP_MAX_LENGTH_TRANSFER  0x4000

#ifndef DEBUG
#define DBG(x...) do {} while (0)
#else
#define DBG(x...) printf(x)
static const char *reqname(unsigned r)
{
        switch (r) {
        case USB_REQ_GET_STATUS: return "GET_STATUS";
        case USB_REQ_CLEAR_FEATURE: return "CLEAR_FEATURE";
        case USB_REQ_SET_FEATURE: return "SET_FEATURE";
        case USB_REQ_SET_ADDRESS: return "SET_ADDRESS";
        case USB_REQ_GET_DESCRIPTOR: return "GET_DESCRIPTOR";
        case USB_REQ_SET_DESCRIPTOR: return "SET_DESCRIPTOR";
        case USB_REQ_GET_CONFIGURATION: return "GET_CONFIGURATION";
        case USB_REQ_SET_CONFIGURATION: return "SET_CONFIGURATION";
        case USB_REQ_GET_INTERFACE: return "GET_INTERFACE";
        case USB_REQ_SET_INTERFACE: return "SET_INTERFACE";
        default: return "*UNKNOWN*";
        }
}
#endif

static struct usb_endpoint_descriptor ep0_desc = {
        .bLength = sizeof(struct usb_endpoint_descriptor),
        .bDescriptorType = USB_DT_ENDPOINT,
        .bEndpointAddress = USB_DIR_IN,
        .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
};

static int ci_pullup(struct usb_gadget *gadget, int is_on);
static int ci_ep_enable(struct usb_ep *ep,
                const struct usb_endpoint_descriptor *desc);
static int ci_ep_disable(struct usb_ep *ep);
static int ci_ep_queue(struct usb_ep *ep,
                struct usb_request *req, gfp_t gfp_flags);
static int ci_ep_dequeue(struct usb_ep *ep, struct usb_request *req);
static struct usb_request *
ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags);
static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *_req);

static struct usb_gadget_ops ci_udc_ops = {
        .pullup = ci_pullup,
};

static struct usb_ep_ops ci_ep_ops = {
        .enable         = ci_ep_enable,
        .disable        = ci_ep_disable,
        .queue          = ci_ep_queue,
        .dequeue        = ci_ep_dequeue,
        .alloc_request  = ci_ep_alloc_request,
        .free_request   = ci_ep_free_request,
};

__weak void ci_init_after_reset(struct ehci_ctrl *ctrl)
{
}

/* Init values for USB endpoints. */
static const struct usb_ep ci_ep_init[5] = {
        [0] = { /* EP 0 */
                .maxpacket      = 64,
                .name           = "ep0",
                .ops            = &ci_ep_ops,
        },
        [1] = {
                .maxpacket      = 512,
                .name           = "ep1in-bulk",
                .ops            = &ci_ep_ops,
        },
        [2] = {
                .maxpacket      = 512,
                .name           = "ep2out-bulk",
                .ops            = &ci_ep_ops,
        },
        [3] = {
                .maxpacket      = 512,
                .name           = "ep3in-int",
                .ops            = &ci_ep_ops,
        },
        [4] = {
                .maxpacket      = 512,
                .name           = "ep-",
                .ops            = &ci_ep_ops,
        },
};

static struct ci_drv controller = {
        .gadget = {
                .name   = "ci_udc",
                .ops    = &ci_udc_ops,
                .is_dualspeed = 1,
        },
};

/**
 * ci_get_qh() - return queue head for endpoint
 * @ep_num:     Endpoint number
 * @dir_in:     Direction of the endpoint (IN = 1, OUT = 0)
 *
 * This function returns the QH associated with a particular endpoint
 * and its direction.
 */
static struct ept_queue_head *ci_get_qh(int ep_num, int dir_in)
{
        return &controller.epts[(ep_num * 2) + dir_in];
}

/**
 * ci_get_qtd() - return queue item for endpoint
 * @ep_num:     Endpoint number
 * @dir_in:     Direction of the endpoint (IN = 1, OUT = 0)
 *
 * This function returns the queue item (qTD) associated with a particular
 * endpoint and its direction.
 */
static struct ept_queue_item *ci_get_qtd(int ep_num, int dir_in)
{
        int index = (ep_num * 2) + dir_in;
        uint8_t *imem = controller.items_mem + (index * ILIST_ENT_SZ);
        return (struct ept_queue_item *)imem;
}

/**
 * ci_flush_qh - flush cache over queue head
 * @ep_num:     Endpoint number
 *
 * This function flushes cache over QH for a particular endpoint.
 */
static void ci_flush_qh(int ep_num)
{
        struct ept_queue_head *head = ci_get_qh(ep_num, 0);
        const unsigned long start = (unsigned long)head;
        const unsigned long end = start + 2 * sizeof(*head);

        flush_dcache_range(start, end);
}

/**
 * ci_invalidate_qh - invalidate cache over queue head
 * @ep_num:     Endpoint number
 *
 * This function invalidates cache over QH for a particular endpoint.
 */
static void ci_invalidate_qh(int ep_num)
{
        struct ept_queue_head *head = ci_get_qh(ep_num, 0);
        unsigned long start = (unsigned long)head;
        unsigned long end = start + 2 * sizeof(*head);

        invalidate_dcache_range(start, end);
}

/**
 * ci_flush_qtd - flush cache over queue item
 * @ep_num:     Endpoint number
 *
 * This function flushes cache over qTD pair for a particular endpoint.
 */
static void ci_flush_qtd(int ep_num)
{
        struct ept_queue_item *item = ci_get_qtd(ep_num, 0);
        const unsigned long start = (unsigned long)item;
        const unsigned long end = start + 2 * ILIST_ENT_SZ;

        flush_dcache_range(start, end);
}

/**
 * ci_flush_td - flush cache over queue item
 * @td: td pointer
 *
 * This function flushes cache for a particular transfer descriptor.
 */
static void ci_flush_td(struct ept_queue_item *td)
{
        const unsigned long start = (unsigned long)td;
        const unsigned long end = (unsigned long)td + ILIST_ENT_SZ;
        flush_dcache_range(start, end);
}

/**
 * ci_invalidate_qtd - invalidate cache over queue item
 * @ep_num:     Endpoint number
 *
 * This function invalidates cache over qTD pair for a particular endpoint.
 */
static void ci_invalidate_qtd(int ep_num)
{
        struct ept_queue_item *item = ci_get_qtd(ep_num, 0);
        const unsigned long start = (unsigned long)item;
        const unsigned long end = start + 2 * ILIST_ENT_SZ;

        invalidate_dcache_range(start, end);
}

/**
 * ci_invalidate_td - invalidate cache over queue item
 * @td: td pointer
 *
 * This function invalidates cache for a particular transfer descriptor.
 */
static void ci_invalidate_td(struct ept_queue_item *td)
{
        const unsigned long start = (unsigned long)td;
        const unsigned long end = start + ILIST_ENT_SZ;
        invalidate_dcache_range(start, end);
}

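/**
 * ci_ep_alloc_request - allocate a usb_request for an endpoint
 * @ep:         Endpoint to allocate the request for
 * @gfp_flags:  Allocation flags (unused here)
 *
 * For ep0 a single request is allocated only once and cached in
 * controller.ep0_req, so later calls return that same request.
 */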
static struct usb_request *
ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags)
{
        struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
        int num = -1;
        struct ci_req *ci_req;

        if (ci_ep->desc)
                num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;

        if (num == 0 && controller.ep0_req)
                return &controller.ep0_req->req;

        ci_req = calloc(1, sizeof(*ci_req));
        if (!ci_req)
                return NULL;

        INIT_LIST_HEAD(&ci_req->queue);

        if (num == 0)
                controller.ep0_req = ci_req;

        return &ci_req->req;
}

static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *req)
{
        struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
        struct ci_req *ci_req = container_of(req, struct ci_req, req);
        int num = -1;

        if (ci_ep->desc)
                num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;

        if (num == 0) {
                if (!controller.ep0_req)
                        return;
                controller.ep0_req = 0;
        }

        if (ci_req->b_buf)
                free(ci_req->b_buf);
        free(ci_req);
}

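/**
 * ep_enable - program the hardware endpoint control register
 * @num:        Endpoint number
 * @in:         Direction of the endpoint (IN = 1, OUT = 0)
 * @maxpacket:  Maximum packet size to program into the QH
 *
 * Enables and resets the TX or RX side of the endpoint as a bulk endpoint.
 * For non-zero endpoints the max packet size and ZLT bit are also written
 * into the corresponding QH, which is then flushed to memory.
 */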
static void ep_enable(int num, int in, int maxpacket)
{
        struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
        unsigned n;

        n = readl(&udc->epctrl[num]);
        if (in)
                n |= (CTRL_TXE | CTRL_TXR | CTRL_TXT_BULK);
        else
                n |= (CTRL_RXE | CTRL_RXR | CTRL_RXT_BULK);

        if (num != 0) {
                struct ept_queue_head *head = ci_get_qh(num, in);

                head->config = CONFIG_MAX_PKT(maxpacket) | CONFIG_ZLT;
                ci_flush_qh(num);
        }
        writel(n, &udc->epctrl[num]);
}

static int ci_ep_enable(struct usb_ep *ep,
                const struct usb_endpoint_descriptor *desc)
{
        struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
        int num, in;
        num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
        in = (desc->bEndpointAddress & USB_DIR_IN) != 0;
        ci_ep->desc = desc;

        if (num) {
                int max = get_unaligned_le16(&desc->wMaxPacketSize);

                if ((max > 64) && (controller.gadget.speed == USB_SPEED_FULL))
                        max = 64;
                if (ep->maxpacket != max) {
                        DBG("%s: from %d to %d\n", __func__,
                            ep->maxpacket, max);
                        ep->maxpacket = max;
                }
        }
        ep_enable(num, in, ep->maxpacket);
        DBG("%s: num=%d maxpacket=%d\n", __func__, num, ep->maxpacket);
        return 0;
}

static int ci_ep_disable(struct usb_ep *ep)
{
        struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);

        ci_ep->desc = NULL;
        return 0;
}

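/**
 * ci_bounce - prepare a DMA-safe buffer for a request
 * @ci_req:     Request to prepare
 * @in:         Direction of the endpoint (IN = 1, OUT = 0)
 *
 * If the request's buffer address and length are already cache-aligned, the
 * buffer is used directly; otherwise the data is bounced through an aligned
 * buffer that is allocated on demand and reused. For IN transfers the payload
 * is copied into the bounce buffer. In either case the hardware buffer is
 * flushed from the data cache before the transfer.
 *
 * Return: 0 on success, -ENOMEM if the bounce buffer cannot be allocated.
 */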
static int ci_bounce(struct ci_req *ci_req, int in)
{
        struct usb_request *req = &ci_req->req;
        unsigned long addr = (unsigned long)req->buf;
        unsigned long hwaddr;
        uint32_t aligned_used_len;

        /* Input buffer address is not aligned. */
        if (addr & (ARCH_DMA_MINALIGN - 1))
                goto align;

        /* Input buffer length is not aligned. */
        if (req->length & (ARCH_DMA_MINALIGN - 1))
                goto align;

        /* The buffer is well aligned, only flush cache. */
        ci_req->hw_len = req->length;
        ci_req->hw_buf = req->buf;
        goto flush;

align:
        if (ci_req->b_buf && req->length > ci_req->b_len) {
                free(ci_req->b_buf);
                ci_req->b_buf = 0;
        }
        if (!ci_req->b_buf) {
                ci_req->b_len = roundup(req->length, ARCH_DMA_MINALIGN);
                ci_req->b_buf = memalign(ARCH_DMA_MINALIGN, ci_req->b_len);
                if (!ci_req->b_buf)
                        return -ENOMEM;
        }
        ci_req->hw_len = ci_req->b_len;
        ci_req->hw_buf = ci_req->b_buf;

        if (in)
                memcpy(ci_req->hw_buf, req->buf, req->length);

flush:
        hwaddr = (unsigned long)ci_req->hw_buf;
        aligned_used_len = roundup(req->length, ARCH_DMA_MINALIGN);
        flush_dcache_range(hwaddr, hwaddr + aligned_used_len);

        return 0;
}

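/**
 * ci_debounce - finish a possibly bounce-buffered transfer
 * @ci_req:     Request to complete
 * @in:         Direction of the endpoint (IN = 1, OUT = 0)
 *
 * For OUT transfers, invalidate the data cache over the hardware buffer and,
 * if a bounce buffer was used, copy the received data back into the caller's
 * buffer. IN transfers need no post-processing.
 */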
static void ci_debounce(struct ci_req *ci_req, int in)
{
        struct usb_request *req = &ci_req->req;
        unsigned long addr = (unsigned long)req->buf;
        unsigned long hwaddr = (unsigned long)ci_req->hw_buf;
        uint32_t aligned_used_len;

        if (in)
                return;

        aligned_used_len = roundup(req->actual, ARCH_DMA_MINALIGN);
        invalidate_dcache_range(hwaddr, hwaddr + aligned_used_len);

        if (addr == hwaddr)
                return; /* not a bounce */

        memcpy(req->buf, ci_req->hw_buf, req->actual);
}

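/**
 * ci_ep_submit_next_request - prime the next queued request on an endpoint
 * @ci_ep:      Endpoint whose first queued request should be started
 *
 * Builds a chain of transfer descriptors (dTDs) for the request at the head
 * of the endpoint's queue, splitting it into chunks of at most
 * EP_MAX_LENGTH_TRANSFER bytes, appends a zero-length packet when an IN
 * transfer with req->zero set ends on a maxpacket boundary, flushes the
 * descriptors and QH to memory, and primes the endpoint in the controller.
 */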
static void ci_ep_submit_next_request(struct ci_ep *ci_ep)
{
        struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
        struct ept_queue_item *item;
        struct ept_queue_head *head;
        int bit, num, len, in;
        struct ci_req *ci_req;
        u8 *buf;
        uint32_t len_left, len_this_dtd;
        struct ept_queue_item *dtd, *qtd;

        ci_ep->req_primed = true;

        num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
        in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;
        item = ci_get_qtd(num, in);
        head = ci_get_qh(num, in);

        ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue);
        len = ci_req->req.length;

        head->next = (unsigned long)item;
        head->info = 0;

        ci_req->dtd_count = 0;
        buf = ci_req->hw_buf;
        len_left = len;
        dtd = item;

        do {
                len_this_dtd = min(len_left, (unsigned)EP_MAX_LENGTH_TRANSFER);

                dtd->info = INFO_BYTES(len_this_dtd) | INFO_ACTIVE;
                dtd->page0 = (unsigned long)buf;
                dtd->page1 = ((unsigned long)buf & 0xfffff000) + 0x1000;
                dtd->page2 = ((unsigned long)buf & 0xfffff000) + 0x2000;
                dtd->page3 = ((unsigned long)buf & 0xfffff000) + 0x3000;
                dtd->page4 = ((unsigned long)buf & 0xfffff000) + 0x4000;

                len_left -= len_this_dtd;
                buf += len_this_dtd;

                if (len_left) {
                        qtd = (struct ept_queue_item *)
                               memalign(ILIST_ALIGN, ILIST_ENT_SZ);
                        dtd->next = (unsigned long)qtd;
                        dtd = qtd;
                        memset(dtd, 0, ILIST_ENT_SZ);
                }

                ci_req->dtd_count++;
        } while (len_left);

        item = dtd;
        /*
         * When sending the data for an IN transaction, the attached host
         * knows that all data for the IN is sent when one of the following
         * occurs:
         * a) A zero-length packet is transmitted.
         * b) A packet with length that isn't an exact multiple of the ep's
         *    maxpacket is transmitted.
         * c) Enough data is sent to exactly fill the host's maximum expected
         *    IN transaction size.
         *
         * One of these conditions MUST apply at the end of an IN transaction,
         * or the transaction will not be considered complete by the host. If
         * none of (a)..(c) already applies, then we must force (a) to apply
         * by explicitly sending an extra zero-length packet.
         */
        /*  IN    !a     !b                              !c */
        if (in && len && !(len % ci_ep->ep.maxpacket) && ci_req->req.zero) {
                /*
                 * Each endpoint has 2 items allocated, even though typically
                 * only 1 is used at a time since either an IN or an OUT but
                 * not both is queued. For an IN transaction, item currently
                 * points at the second of these items, so we know that we
                 * can use the other to transmit the extra zero-length packet.
                 */
                struct ept_queue_item *other_item = ci_get_qtd(num, 0);
                item->next = (unsigned long)other_item;
                item = other_item;
                item->info = INFO_ACTIVE;
        }

        item->next = TERMINATE;
        item->info |= INFO_IOC;

        ci_flush_qtd(num);

        item = (struct ept_queue_item *)(unsigned long)head->next;
        while (item->next != TERMINATE) {
                ci_flush_td((struct ept_queue_item *)(unsigned long)item->next);
                item = (struct ept_queue_item *)(unsigned long)item->next;
        }

        DBG("ept%d %s queue len %x, req %p, buffer %p\n",
            num, in ? "in" : "out", len, ci_req, ci_req->hw_buf);
        ci_flush_qh(num);

        if (in)
                bit = EPT_TX(num);
        else
                bit = EPT_RX(num);

        writel(bit, &udc->epprime);
}

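/**
 * ci_ep_dequeue - remove a request from an endpoint's queue
 * @_ep:        Endpoint the request was queued on
 * @_req:       Request to remove
 *
 * Returns -EINVAL if the request is not found on the endpoint's queue. A
 * request still marked in progress is completed with status -ECONNRESET.
 */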
static int ci_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
        struct ci_ep *ci_ep = container_of(_ep, struct ci_ep, ep);
        struct ci_req *ci_req;

        list_for_each_entry(ci_req, &ci_ep->queue, queue) {
                if (&ci_req->req == _req)
                        break;
        }

        if (&ci_req->req != _req)
                return -EINVAL;

        list_del_init(&ci_req->queue);

        if (ci_req->req.status == -EINPROGRESS) {
                ci_req->req.status = -ECONNRESET;
                if (ci_req->req.complete)
                        ci_req->req.complete(_ep, _req);
        }

        return 0;
}

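/**
 * ci_ep_queue - queue a request on an endpoint
 * @ep:         Endpoint to queue the request on
 * @req:        Request to queue
 * @gfp_flags:  Allocation flags (unused here)
 *
 * Prepares a DMA-safe buffer for the request, appends it to the endpoint's
 * queue, and submits it to the hardware immediately if no other request is
 * currently primed on that endpoint. A second concurrent ep0 transaction is
 * rejected with -EPROTO.
 */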
static int ci_ep_queue(struct usb_ep *ep,
                struct usb_request *req, gfp_t gfp_flags)
{
        struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
        struct ci_req *ci_req = container_of(req, struct ci_req, req);
        int in, ret;
        int __maybe_unused num;

        num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
        in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;

        if (!num && ci_ep->req_primed) {
                /*
                 * The flipping of ep0 between IN and OUT relies on
                 * ci_ep_queue consuming the current IN/OUT setting
                 * immediately. If this is deferred to a later point when the
                 * req is pulled out of ci_req->queue, then the IN/OUT setting
                 * may have been changed since the req was queued, and state
                 * will get out of sync. This condition doesn't occur today,
                 * but could if bugs were introduced later, and this error
                 * check will save a lot of debugging time.
                 */
                printf("%s: ep0 transaction already in progress\n", __func__);
                return -EPROTO;
        }

        ret = ci_bounce(ci_req, in);
        if (ret)
                return ret;

        DBG("ept%d %s pre-queue req %p, buffer %p\n",
            num, in ? "in" : "out", ci_req, ci_req->hw_buf);
        list_add_tail(&ci_req->queue, &ci_ep->queue);

        if (!ci_ep->req_primed)
                ci_ep_submit_next_request(ci_ep);

        return 0;
}

static void flip_ep0_direction(void)
{
        if (ep0_desc.bEndpointAddress == USB_DIR_IN) {
                DBG("%s: Flipping ep0 to OUT\n", __func__);
                ep0_desc.bEndpointAddress = 0;
        } else {
                DBG("%s: Flipping ep0 to IN\n", __func__);
                ep0_desc.bEndpointAddress = USB_DIR_IN;
        }
}

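/**
 * handle_ep_complete - finish a completed transfer on an endpoint
 * @ci_ep:      Endpoint that signalled completion
 *
 * Walks the chain of completed dTDs to work out how many bytes were actually
 * transferred, frees any extra dTDs allocated for a multi-dTD transfer,
 * submits the next queued request (if any), and calls the request's
 * completion handler. For ep0 it also queues the zero-length Status Stage
 * packet once the Data Stage completes.
 */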
static void handle_ep_complete(struct ci_ep *ci_ep)
{
        struct ept_queue_item *item, *next_td;
        int num, in, len, j;
        struct ci_req *ci_req;

        num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
        in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;
        item = ci_get_qtd(num, in);
        ci_invalidate_qtd(num);
        ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue);

        next_td = item;
        len = 0;
        for (j = 0; j < ci_req->dtd_count; j++) {
                ci_invalidate_td(next_td);
                item = next_td;
                len += (item->info >> 16) & 0x7fff;
                if (item->info & 0xff)
                        printf("EP%d/%s FAIL info=%x pg0=%x\n",
                               num, in ? "in" : "out", item->info, item->page0);
                if (j != ci_req->dtd_count - 1)
                        next_td = (struct ept_queue_item *)(unsigned long)
                                item->next;
                if (j != 0)
                        free(item);
        }

        list_del_init(&ci_req->queue);
        ci_ep->req_primed = false;

        if (!list_empty(&ci_ep->queue))
                ci_ep_submit_next_request(ci_ep);

        ci_req->req.actual = ci_req->req.length - len;
        ci_debounce(ci_req, in);

        DBG("ept%d %s req %p, complete %x\n",
            num, in ? "in" : "out", ci_req, len);
        if (num != 0 || controller.ep0_data_phase)
                ci_req->req.complete(&ci_ep->ep, &ci_req->req);
        if (num == 0 && controller.ep0_data_phase) {
                /*
                 * Data Stage is complete, so flip ep0 dir for Status Stage,
                 * which always transfers a packet in the opposite direction.
                 */
                DBG("%s: flip ep0 dir for Status Stage\n", __func__);
                flip_ep0_direction();
                controller.ep0_data_phase = false;
                ci_req->req.length = 0;
                usb_ep_queue(&ci_ep->ep, &ci_req->req, 0);
        }
}

#define SETUP(type, request) (((type) << 8) | (request))

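/**
 * handle_setup - process a SETUP packet received on ep0
 *
 * Reads the setup data out of the ep0 OUT QH, acknowledges it, and sets the
 * ep0 direction for the following Data/Status Stages. CLEAR_FEATURE,
 * SET_ADDRESS and GET_STATUS are handled directly here; everything else is
 * passed to the gadget driver's setup() callback. If no handler accepts the
 * request, ep0 is stalled.
 */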
static void handle_setup(void)
{
        struct ci_ep *ci_ep = &controller.ep[0];
        struct ci_req *ci_req;
        struct usb_request *req;
        struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
        struct ept_queue_head *head;
        struct usb_ctrlrequest r;
        int status = 0;
        int num, in, _num, _in, i;
        char *buf;

        ci_req = controller.ep0_req;
        req = &ci_req->req;
        head = ci_get_qh(0, 0); /* EP0 OUT */

        ci_invalidate_qh(0);
        memcpy(&r, head->setup_data, sizeof(struct usb_ctrlrequest));
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
        writel(EPT_RX(0), &udc->epsetupstat);
#else
        writel(EPT_RX(0), &udc->epstat);
#endif
        DBG("handle setup %s, %x, %x index %x value %x length %x\n",
            reqname(r.bRequest), r.bRequestType, r.bRequest, r.wIndex,
            r.wValue, r.wLength);

        /* Set EP0 dir for Data Stage based on Setup Stage data */
        if (r.bRequestType & USB_DIR_IN) {
                DBG("%s: Set ep0 to IN for Data Stage\n", __func__);
                ep0_desc.bEndpointAddress = USB_DIR_IN;
        } else {
                DBG("%s: Set ep0 to OUT for Data Stage\n", __func__);
                ep0_desc.bEndpointAddress = 0;
        }
        if (r.wLength) {
                controller.ep0_data_phase = true;
        } else {
                /* 0 length -> no Data Stage. Flip dir for Status Stage */
                DBG("%s: 0 length: flip ep0 dir for Status Stage\n", __func__);
                flip_ep0_direction();
                controller.ep0_data_phase = false;
        }

        list_del_init(&ci_req->queue);
        ci_ep->req_primed = false;

        switch (SETUP(r.bRequestType, r.bRequest)) {
        case SETUP(USB_RECIP_ENDPOINT, USB_REQ_CLEAR_FEATURE):
                _num = r.wIndex & 15;
                _in = !!(r.wIndex & 0x80);

                if ((r.wValue == 0) && (r.wLength == 0)) {
                        req->length = 0;
                        for (i = 0; i < NUM_ENDPOINTS; i++) {
                                struct ci_ep *ep = &controller.ep[i];

                                if (!ep->desc)
                                        continue;
                                num = ep->desc->bEndpointAddress
                                                & USB_ENDPOINT_NUMBER_MASK;
                                in = (ep->desc->bEndpointAddress
                                                & USB_DIR_IN) != 0;
                                if ((num == _num) && (in == _in)) {
                                        ep_enable(num, in, ep->ep.maxpacket);
                                        usb_ep_queue(controller.gadget.ep0,
                                                        req, 0);
                                        break;
                                }
                        }
                }
                return;

        case SETUP(USB_RECIP_DEVICE, USB_REQ_SET_ADDRESS):
                /*
                 * write address delayed (will take effect
                 * after the next IN txn)
                 */
                writel((r.wValue << 25) | (1 << 24), &udc->devaddr);
                req->length = 0;
                usb_ep_queue(controller.gadget.ep0, req, 0);
                return;

        case SETUP(USB_DIR_IN | USB_RECIP_DEVICE, USB_REQ_GET_STATUS):
                req->length = 2;
                buf = (char *)req->buf;
                buf[0] = 1 << USB_DEVICE_SELF_POWERED;
                buf[1] = 0;
                usb_ep_queue(controller.gadget.ep0, req, 0);
                return;
        }
        /* pass request up to the gadget driver */
        if (controller.driver)
                status = controller.driver->setup(&controller.gadget, &r);
        else
                status = -ENODEV;

        if (!status)
                return;
        DBG("STALL reqname %s type %x value %x, index %x\n",
            reqname(r.bRequest), r.bRequestType, r.wValue, r.wIndex);
        writel((1<<16) | (1 << 0), &udc->epctrl[0]);
}

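/**
 * stop_activity - quiesce the controller after a bus reset
 *
 * Acknowledges all pending completion and setup status bits, flushes the
 * endpoints, disables the non-control endpoint control registers and resets
 * the QH state of every configured endpoint so that pending requests are
 * errored out.
 */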
static void stop_activity(void)
{
        int i, num, in;
        struct ept_queue_head *head;
        struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
        writel(readl(&udc->epcomp), &udc->epcomp);
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
        writel(readl(&udc->epsetupstat), &udc->epsetupstat);
#endif
        writel(readl(&udc->epstat), &udc->epstat);
        writel(0xffffffff, &udc->epflush);

        /* error out any pending reqs */
        for (i = 0; i < NUM_ENDPOINTS; i++) {
                if (i != 0)
                        writel(0, &udc->epctrl[i]);
                if (controller.ep[i].desc) {
                        num = controller.ep[i].desc->bEndpointAddress
                                & USB_ENDPOINT_NUMBER_MASK;
                        in = (controller.ep[i].desc->bEndpointAddress
                                & USB_DIR_IN) != 0;
                        head = ci_get_qh(num, in);
                        head->info = INFO_ACTIVE;
                        ci_flush_qh(num);
                }
        }
}

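/**
 * udc_irq - handle pending controller interrupt status
 *
 * Reads and acknowledges USBSTS and then deals with bus reset, suspend,
 * port change (switching between full and high speed and capping endpoint
 * maxpacket accordingly), and transfer completion, dispatching setup packets
 * to handle_setup() and completed transfers to handle_ep_complete().
 */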
void udc_irq(void)
{
        struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
        unsigned n = readl(&udc->usbsts);
        writel(n, &udc->usbsts);
        int bit, i, num, in;

        n &= (STS_SLI | STS_URI | STS_PCI | STS_UI | STS_UEI);
        if (n == 0)
                return;

        if (n & STS_URI) {
                DBG("-- reset --\n");
                stop_activity();
        }
        if (n & STS_SLI)
                DBG("-- suspend --\n");

        if (n & STS_PCI) {
                int max = 64;
                int speed = USB_SPEED_FULL;

#ifdef CONFIG_CI_UDC_HAS_HOSTPC
                bit = (readl(&udc->hostpc1_devlc) >> 25) & 3;
#else
                bit = (readl(&udc->portsc) >> 26) & 3;
#endif
                DBG("-- portchange %x %s\n", bit, (bit == 2) ? "High" : "Full");
                if (bit == 2) {
                        speed = USB_SPEED_HIGH;
                        max = 512;
                }
                controller.gadget.speed = speed;
                for (i = 1; i < NUM_ENDPOINTS; i++) {
                        if (controller.ep[i].ep.maxpacket > max)
                                controller.ep[i].ep.maxpacket = max;
                }
        }

        if (n & STS_UEI)
                printf("<UEI %x>\n", readl(&udc->epcomp));

        if ((n & STS_UI) || (n & STS_UEI)) {
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
                n = readl(&udc->epsetupstat);
#else
                n = readl(&udc->epstat);
#endif
                if (n & EPT_RX(0))
                        handle_setup();

                n = readl(&udc->epcomp);
                if (n != 0)
                        writel(n, &udc->epcomp);

                for (i = 0; i < NUM_ENDPOINTS && n; i++) {
                        if (controller.ep[i].desc) {
                                num = controller.ep[i].desc->bEndpointAddress
                                        & USB_ENDPOINT_NUMBER_MASK;
                                in = (controller.ep[i].desc->bEndpointAddress
                                                & USB_DIR_IN) != 0;
                                bit = (in) ? EPT_TX(num) : EPT_RX(num);
                                if (n & bit)
                                        handle_ep_complete(&controller.ep[i]);
                        }
                }
        }
}

int usb_gadget_handle_interrupts(int index)
{
        u32 value;
        struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;

        value = readl(&udc->usbsts);
        if (value)
                udc_irq();

        return value;
}

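/**
 * udc_disconnect - drop off the USB bus
 *
 * Stops all endpoint activity and writes USBCMD with the Run/Stop bit
 * cleared, which (per the in-line comment) disables the pullup so the host
 * sees a disconnect. The gadget driver's disconnect() callback is invoked
 * if one is registered.
 */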
void udc_disconnect(void)
{
        struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
        /* disable pullup */
        stop_activity();
        writel(USBCMD_FS2, &udc->usbcmd);
        udelay(800);
        if (controller.driver)
                controller.driver->disconnect(&controller.gadget);
}

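/**
 * ci_pullup - connect to or disconnect from the USB bus
 * @gadget:     Gadget to (dis)connect
 * @is_on:      Non-zero to connect, zero to disconnect
 *
 * On connect the controller is reset, the QH list address is programmed,
 * device mode is selected and the Run bit is set to turn on the connection.
 * On disconnect udc_disconnect() is called.
 */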
static int ci_pullup(struct usb_gadget *gadget, int is_on)
{
        struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
        if (is_on) {
                /* RESET */
                writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RST, &udc->usbcmd);
                udelay(200);

                ci_init_after_reset(controller.ctrl);

                writel((unsigned long)controller.epts, &udc->epinitaddr);

                /* select DEVICE mode */
                writel(USBMODE_DEVICE, &udc->usbmode);

#if !defined(CONFIG_USB_GADGET_DUALSPEED)
                /* Port force Full-Speed Connect */
                setbits_le32(&udc->portsc, PFSC);
#endif

                writel(0xffffffff, &udc->epflush);

                /* Turn on the USB connection by enabling the pullup resistor */
                setbits_le32(&udc->usbcmd, USBCMD_ITC(MICRO_8FRAME) |
                             USBCMD_RUN);
        } else {
                udc_disconnect();
        }

        return 0;
}

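/**
 * ci_udc_probe - allocate and initialise the controller data structures
 *
 * Allocates the 4096-byte-aligned QH list and the qTD memory, configures a
 * QH pair (OUT and IN) for every endpoint, initialises the usb_ep structures
 * and endpoint queues, and allocates the cached ep0 request.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */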
static int ci_udc_probe(void)
{
        struct ept_queue_head *head;
        int i;

        const int num = 2 * NUM_ENDPOINTS;

        const int eplist_min_align = 4096;
        const int eplist_align = roundup(eplist_min_align, ARCH_DMA_MINALIGN);
        const int eplist_raw_sz = num * sizeof(struct ept_queue_head);
        const int eplist_sz = roundup(eplist_raw_sz, ARCH_DMA_MINALIGN);

        /* The QH list must be aligned to 4096 bytes. */
        controller.epts = memalign(eplist_align, eplist_sz);
        if (!controller.epts)
                return -ENOMEM;
        memset(controller.epts, 0, eplist_sz);

        controller.items_mem = memalign(ILIST_ALIGN, ILIST_SZ);
        if (!controller.items_mem) {
                free(controller.epts);
                return -ENOMEM;
        }
        memset(controller.items_mem, 0, ILIST_SZ);

        for (i = 0; i < 2 * NUM_ENDPOINTS; i++) {
                /*
                 * Configure the QH for each endpoint. The QH list is laid out
                 * as consecutive pairs: entries 2N and 2N+1 belong to endpoint
                 * N, with the even entry holding the OUT configuration and the
                 * odd entry holding the IN configuration.
                 */
                head = controller.epts + i;
                if (i < 2)
                        head->config = CONFIG_MAX_PKT(EP0_MAX_PACKET_SIZE)
                                | CONFIG_ZLT | CONFIG_IOS;
                else
                        head->config = CONFIG_MAX_PKT(EP_MAX_PACKET_SIZE)
                                | CONFIG_ZLT;
                head->next = TERMINATE;
                head->info = 0;

                if (i & 1) {
                        ci_flush_qh(i / 2);
                        ci_flush_qtd(i / 2);
                }
        }

        INIT_LIST_HEAD(&controller.gadget.ep_list);

        /* Init EP 0 */
        memcpy(&controller.ep[0].ep, &ci_ep_init[0], sizeof(*ci_ep_init));
        controller.ep[0].desc = &ep0_desc;
        INIT_LIST_HEAD(&controller.ep[0].queue);
        controller.ep[0].req_primed = false;
        controller.gadget.ep0 = &controller.ep[0].ep;
        INIT_LIST_HEAD(&controller.gadget.ep0->ep_list);

        /* Init EP 1..3 */
        for (i = 1; i < 4; i++) {
                memcpy(&controller.ep[i].ep, &ci_ep_init[i],
                       sizeof(*ci_ep_init));
                INIT_LIST_HEAD(&controller.ep[i].queue);
                controller.ep[i].req_primed = false;
                list_add_tail(&controller.ep[i].ep.ep_list,
                              &controller.gadget.ep_list);
        }

        /* Init EP 4..n */
        for (i = 4; i < NUM_ENDPOINTS; i++) {
                memcpy(&controller.ep[i].ep, &ci_ep_init[4],
                       sizeof(*ci_ep_init));
                INIT_LIST_HEAD(&controller.ep[i].queue);
                controller.ep[i].req_primed = false;
                list_add_tail(&controller.ep[i].ep.ep_list,
                              &controller.gadget.ep_list);
        }

        ci_ep_alloc_request(&controller.ep[0].ep, 0);
        if (!controller.ep0_req) {
                free(controller.items_mem);
                free(controller.epts);
                return -ENOMEM;
        }

        return 0;
}

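/**
 * usb_gadget_register_driver - register a gadget driver with this UDC
 * @driver:     Gadget driver to register
 *
 * Validates the driver, brings up the EHCI controller in device mode,
 * probes the UDC and binds the driver to the gadget.
 *
 * Return: 0 on success, a negative error code otherwise.
 */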
int usb_gadget_register_driver(struct usb_gadget_driver *driver)
{
        int ret;

        if (!driver)
                return -EINVAL;
        if (!driver->bind || !driver->setup || !driver->disconnect)
                return -EINVAL;
        if (driver->speed != USB_SPEED_FULL && driver->speed != USB_SPEED_HIGH)
                return -EINVAL;

#if CONFIG_IS_ENABLED(DM_USB)
        ret = usb_setup_ehci_gadget(&controller.ctrl);
#else
        ret = usb_lowlevel_init(0, USB_INIT_DEVICE, (void **)&controller.ctrl);
#endif
        if (ret)
                return ret;

        ret = ci_udc_probe();
        if (ret) {
                DBG("udc probe failed, returned %d\n", ret);
                return ret;
        }

        ret = driver->bind(&controller.gadget);
        if (ret) {
                DBG("driver->bind() returned %d\n", ret);
                return ret;
        }
        controller.driver = driver;

        return 0;
}

int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
        udc_disconnect();

        driver->unbind(&controller.gadget);
        controller.driver = NULL;

        ci_ep_free_request(&controller.ep[0].ep, &controller.ep0_req->req);
        free(controller.items_mem);
        free(controller.epts);

        return 0;
}

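/**
 * dfu_usb_get_reset - report whether a USB bus reset is pending
 *
 * Returns true if the reset interrupt status bit (STS_URI) is currently set
 * in the USBSTS register.
 */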
bool dfu_usb_get_reset(void)
{
        struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;

        return !!(readl(&udc->usbsts) & STS_URI);
}