linux/drivers/usb/chipidea/udc.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * udc.c - ChipIdea UDC driver
   4 *
   5 * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
   6 *
   7 * Author: David Lopo
   8 */
   9
  10#include <linux/delay.h>
  11#include <linux/device.h>
  12#include <linux/dmapool.h>
  13#include <linux/err.h>
  14#include <linux/irqreturn.h>
  15#include <linux/kernel.h>
  16#include <linux/slab.h>
  17#include <linux/pm_runtime.h>
  18#include <linux/usb/ch9.h>
  19#include <linux/usb/gadget.h>
  20#include <linux/usb/otg-fsm.h>
  21#include <linux/usb/chipidea.h>
  22
  23#include "ci.h"
  24#include "udc.h"
  25#include "bits.h"
  26#include "otg.h"
  27#include "otg_fsm.h"
  28
  29/* control endpoint description */
  30static const struct usb_endpoint_descriptor
  31ctrl_endpt_out_desc = {
  32        .bLength         = USB_DT_ENDPOINT_SIZE,
  33        .bDescriptorType = USB_DT_ENDPOINT,
  34
  35        .bEndpointAddress = USB_DIR_OUT,
  36        .bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
  37        .wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
  38};
  39
  40static const struct usb_endpoint_descriptor
  41ctrl_endpt_in_desc = {
  42        .bLength         = USB_DT_ENDPOINT_SIZE,
  43        .bDescriptorType = USB_DT_ENDPOINT,
  44
  45        .bEndpointAddress = USB_DIR_IN,
  46        .bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
  47        .wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
  48};
  49
  50/**
  51 * hw_ep_bit: calculates the bit number
  52 * @num: endpoint number
  53 * @dir: endpoint direction
  54 *
   55 * This function returns the bit number
  56 */
  57static inline int hw_ep_bit(int num, int dir)
  58{
  59        return num + ((dir == TX) ? 16 : 0);
  60}
  61
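/*
 * ep_to_bit: map a linear endpoint index into its ENDPT* register bit
 *
 * The first half of ci_hw_ep[] holds RX (OUT) endpoints, which map to
 * bits 0..15; the second half holds TX (IN) endpoints, which map to
 * bits 16..31, hence the "fill" offset below.
 */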
  62static inline int ep_to_bit(struct ci_hdrc *ci, int n)
  63{
  64        int fill = 16 - ci->hw_ep_max / 2;
  65
  66        if (n >= ci->hw_ep_max / 2)
  67                n += fill;
  68
  69        return n;
  70}
  71
  72/**
  73 * hw_device_state: enables/disables interrupts (execute without interruption)
  74 * @dma: 0 => disable, !0 => enable and set dma engine
  75 *
  76 * This function returns an error code
  77 */
  78static int hw_device_state(struct ci_hdrc *ci, u32 dma)
  79{
  80        if (dma) {
  81                hw_write(ci, OP_ENDPTLISTADDR, ~0, dma);
  82                /* interrupt, error, port change, reset, sleep/suspend */
  83                hw_write(ci, OP_USBINTR, ~0,
  84                             USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
  85        } else {
  86                hw_write(ci, OP_USBINTR, ~0, 0);
  87        }
  88        return 0;
  89}
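/*
 * Note: @dma is the dma address of the ep0-out queue head, which the
 * controller uses as the base of its endpoint list (ENDPTLISTADDR); see
 * the hw_device_state(ci, ci->ep0out->qh.dma) call in
 * ci_udc_vbus_session(). Passing 0 only masks the device interrupts.
 */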
  90
  91/**
  92 * hw_ep_flush: flush endpoint fifo (execute without interruption)
  93 * @num: endpoint number
  94 * @dir: endpoint direction
  95 *
  96 * This function returns an error code
  97 */
  98static int hw_ep_flush(struct ci_hdrc *ci, int num, int dir)
  99{
 100        int n = hw_ep_bit(num, dir);
 101
 102        do {
 103                /* flush any pending transfer */
 104                hw_write(ci, OP_ENDPTFLUSH, ~0, BIT(n));
 105                while (hw_read(ci, OP_ENDPTFLUSH, BIT(n)))
 106                        cpu_relax();
 107        } while (hw_read(ci, OP_ENDPTSTAT, BIT(n)));
 108
 109        return 0;
 110}
 111
 112/**
 113 * hw_ep_disable: disables endpoint (execute without interruption)
 114 * @num: endpoint number
 115 * @dir: endpoint direction
 116 *
 117 * This function returns an error code
 118 */
 119static int hw_ep_disable(struct ci_hdrc *ci, int num, int dir)
 120{
 121        hw_write(ci, OP_ENDPTCTRL + num,
 122                 (dir == TX) ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
 123        return 0;
 124}
 125
 126/**
 127 * hw_ep_enable: enables endpoint (execute without interruption)
 128 * @num:  endpoint number
 129 * @dir:  endpoint direction
 130 * @type: endpoint type
 131 *
 132 * This function returns an error code
 133 */
 134static int hw_ep_enable(struct ci_hdrc *ci, int num, int dir, int type)
 135{
 136        u32 mask, data;
 137
 138        if (dir == TX) {
 139                mask  = ENDPTCTRL_TXT;  /* type    */
 140                data  = type << __ffs(mask);
 141
 142                mask |= ENDPTCTRL_TXS;  /* unstall */
 143                mask |= ENDPTCTRL_TXR;  /* reset data toggle */
 144                data |= ENDPTCTRL_TXR;
 145                mask |= ENDPTCTRL_TXE;  /* enable  */
 146                data |= ENDPTCTRL_TXE;
 147        } else {
 148                mask  = ENDPTCTRL_RXT;  /* type    */
 149                data  = type << __ffs(mask);
 150
 151                mask |= ENDPTCTRL_RXS;  /* unstall */
 152                mask |= ENDPTCTRL_RXR;  /* reset data toggle */
 153                data |= ENDPTCTRL_RXR;
 154                mask |= ENDPTCTRL_RXE;  /* enable  */
 155                data |= ENDPTCTRL_RXE;
 156        }
 157        hw_write(ci, OP_ENDPTCTRL + num, mask, data);
 158        return 0;
 159}
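/*
 * Note: each ENDPTCTRLn register carries the RX (OUT) control fields in
 * its lower half and the TX (IN) control fields in its upper half, which
 * is why hw_ep_enable() and hw_ep_disable() differ only in the mask they
 * use.
 */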
 160
 161/**
 162 * hw_ep_get_halt: return endpoint halt status
 163 * @num: endpoint number
 164 * @dir: endpoint direction
 165 *
  166 * This function returns 1 if the endpoint is halted
 167 */
 168static int hw_ep_get_halt(struct ci_hdrc *ci, int num, int dir)
 169{
 170        u32 mask = (dir == TX) ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
 171
 172        return hw_read(ci, OP_ENDPTCTRL + num, mask) ? 1 : 0;
 173}
 174
 175/**
 176 * hw_ep_prime: primes endpoint (execute without interruption)
 177 * @num:     endpoint number
 178 * @dir:     endpoint direction
 179 * @is_ctrl: true if control endpoint
 180 *
 181 * This function returns an error code
 182 */
 183static int hw_ep_prime(struct ci_hdrc *ci, int num, int dir, int is_ctrl)
 184{
 185        int n = hw_ep_bit(num, dir);
 186
 187        /* Synchronize before ep prime */
 188        wmb();
 189
 190        if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
 191                return -EAGAIN;
 192
 193        hw_write(ci, OP_ENDPTPRIME, ~0, BIT(n));
 194
 195        while (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
 196                cpu_relax();
 197        if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
 198                return -EAGAIN;
 199
  200        /* status should be tested according to the manual, but it doesn't work */
 201        return 0;
 202}
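/*
 * Note: for the control OUT endpoint, ENDPTSETUPSTAT is sampled both
 * before and after priming; if a new setup packet arrived in between,
 * the prime is abandoned with -EAGAIN so the setup can be serviced first.
 */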
 203
 204/**
 205 * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
 206 *                 without interruption)
 207 * @num:   endpoint number
 208 * @dir:   endpoint direction
 209 * @value: true => stall, false => unstall
 210 *
 211 * This function returns an error code
 212 */
 213static int hw_ep_set_halt(struct ci_hdrc *ci, int num, int dir, int value)
 214{
 215        if (value != 0 && value != 1)
 216                return -EINVAL;
 217
 218        do {
 219                enum ci_hw_regs reg = OP_ENDPTCTRL + num;
 220                u32 mask_xs = (dir == TX) ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
 221                u32 mask_xr = (dir == TX) ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;
 222
 223                /* data toggle - reserved for EP0 but it's in ESS */
 224                hw_write(ci, reg, mask_xs|mask_xr,
 225                          value ? mask_xs : mask_xr);
 226        } while (value != hw_ep_get_halt(ci, num, dir));
 227
 228        return 0;
 229}
 230
 231/**
  232 * hw_port_is_high_speed: test if port is high speed
  233 *
  234 * This function returns true if the port is high speed
 235 */
 236static int hw_port_is_high_speed(struct ci_hdrc *ci)
 237{
 238        return ci->hw_bank.lpm ? hw_read(ci, OP_DEVLC, DEVLC_PSPD) :
 239                hw_read(ci, OP_PORTSC, PORTSC_HSP);
 240}
 241
 242/**
 243 * hw_test_and_clear_complete: test & clear complete status (execute without
 244 *                             interruption)
 245 * @n: endpoint number
 246 *
 247 * This function returns complete status
 248 */
 249static int hw_test_and_clear_complete(struct ci_hdrc *ci, int n)
 250{
 251        n = ep_to_bit(ci, n);
 252        return hw_test_and_clear(ci, OP_ENDPTCOMPLETE, BIT(n));
 253}
 254
 255/**
 256 * hw_test_and_clear_intr_active: test & clear active interrupts (execute
 257 *                                without interruption)
 258 *
  259 * This function returns the active interrupts
 260 */
 261static u32 hw_test_and_clear_intr_active(struct ci_hdrc *ci)
 262{
 263        u32 reg = hw_read_intr_status(ci) & hw_read_intr_enable(ci);
 264
 265        hw_write(ci, OP_USBSTS, ~0, reg);
 266        return reg;
 267}
 268
 269/**
 270 * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
 271 *                                interruption)
 272 *
 273 * This function returns guard value
 274 */
 275static int hw_test_and_clear_setup_guard(struct ci_hdrc *ci)
 276{
 277        return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, 0);
 278}
 279
 280/**
 281 * hw_test_and_set_setup_guard: test & set setup guard (execute without
 282 *                              interruption)
 283 *
 284 * This function returns guard value
 285 */
 286static int hw_test_and_set_setup_guard(struct ci_hdrc *ci)
 287{
 288        return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
 289}
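/*
 * Note: the two helpers above implement the SUTW ("setup tripwire")
 * protocol: isr_setup_packet_handler() sets the tripwire, copies the
 * 8-byte setup packet out of the queue head and repeats the copy if the
 * hardware cleared the tripwire in the meantime, i.e. if a new setup
 * packet overwrote the buffer while it was being read.
 */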
 290
 291/**
 292 * hw_usb_set_address: configures USB address (execute without interruption)
 293 * @value: new USB address
 294 *
 295 * This function explicitly sets the address, without the "USBADRA" (advance)
 296 * feature, which is not supported by older versions of the controller.
 297 */
 298static void hw_usb_set_address(struct ci_hdrc *ci, u8 value)
 299{
 300        hw_write(ci, OP_DEVICEADDR, DEVICEADDR_USBADR,
 301                 value << __ffs(DEVICEADDR_USBADR));
 302}
 303
 304/**
 305 * hw_usb_reset: restart device after a bus reset (execute without
 306 *               interruption)
 307 *
 308 * This function returns an error code
 309 */
 310static int hw_usb_reset(struct ci_hdrc *ci)
 311{
 312        hw_usb_set_address(ci, 0);
 313
 314        /* ESS flushes only at end?!? */
 315        hw_write(ci, OP_ENDPTFLUSH,    ~0, ~0);
 316
 317        /* clear setup token semaphores */
 318        hw_write(ci, OP_ENDPTSETUPSTAT, 0,  0);
 319
 320        /* clear complete status */
 321        hw_write(ci, OP_ENDPTCOMPLETE,  0,  0);
 322
 323        /* wait until all bits cleared */
 324        while (hw_read(ci, OP_ENDPTPRIME, ~0))
 325                udelay(10);             /* not RTOS friendly */
 326
 327        /* reset all endpoints ? */
 328
 329        /* reset internal status and wait for further instructions
 330           no need to verify the port reset status (ESS does it) */
 331
 332        return 0;
 333}
 334
 335/******************************************************************************
 336 * UTIL block
 337 *****************************************************************************/
 338
 339static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
 340                          unsigned length)
 341{
 342        int i;
 343        u32 temp;
 344        struct td_node *lastnode, *node = kzalloc(sizeof(struct td_node),
 345                                                  GFP_ATOMIC);
 346
 347        if (node == NULL)
 348                return -ENOMEM;
 349
 350        node->ptr = dma_pool_zalloc(hwep->td_pool, GFP_ATOMIC, &node->dma);
 351        if (node->ptr == NULL) {
 352                kfree(node);
 353                return -ENOMEM;
 354        }
 355
 356        node->ptr->token = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES));
 357        node->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES);
 358        node->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE);
 359        if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX) {
 360                u32 mul = hwreq->req.length / hwep->ep.maxpacket;
 361
 362                if (hwreq->req.length == 0
 363                                || hwreq->req.length % hwep->ep.maxpacket)
 364                        mul++;
 365                node->ptr->token |= cpu_to_le32(mul << __ffs(TD_MULTO));
 366        }
 367
 368        temp = (u32) (hwreq->req.dma + hwreq->req.actual);
 369        if (length) {
 370                node->ptr->page[0] = cpu_to_le32(temp);
 371                for (i = 1; i < TD_PAGE_COUNT; i++) {
 372                        u32 page = temp + i * CI_HDRC_PAGE_SIZE;
 373                        page &= ~TD_RESERVED_MASK;
 374                        node->ptr->page[i] = cpu_to_le32(page);
 375                }
 376        }
 377
 378        hwreq->req.actual += length;
 379
 380        if (!list_empty(&hwreq->tds)) {
 381                /* get the last entry */
 382                lastnode = list_entry(hwreq->tds.prev,
 383                                struct td_node, td);
 384                lastnode->ptr->next = cpu_to_le32(node->dma);
 385        }
 386
 387        INIT_LIST_HEAD(&node->td);
 388        list_add_tail(&node->td, &hwreq->tds);
 389
 390        return 0;
 391}
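/*
 * Note: a TD carries TD_PAGE_COUNT page pointers; page[0] points at the
 * current position in the request buffer and the remaining pointers at
 * the following page boundaries. Assuming the usual 4 KiB controller
 * page size and five pointers per TD, one TD can therefore cover up to
 * 5 * 4 KiB = 20 KiB of a page-aligned buffer (_hardware_enqueue()
 * below drops one page when the buffer is not aligned).
 */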
 392
 393/**
 394 * _usb_addr: calculates endpoint address from direction & number
 395 * @ep:  endpoint
 396 */
 397static inline u8 _usb_addr(struct ci_hw_ep *ep)
 398{
 399        return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
 400}
 401
 402/**
 403 * _hardware_enqueue: configures a request at hardware level
 404 * @hwep:   endpoint
 405 * @hwreq:  request
 406 *
 407 * This function returns an error code
 408 */
 409static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
 410{
 411        struct ci_hdrc *ci = hwep->ci;
 412        int ret = 0;
 413        unsigned rest = hwreq->req.length;
 414        int pages = TD_PAGE_COUNT;
 415        struct td_node *firstnode, *lastnode;
 416
 417        /* don't queue twice */
 418        if (hwreq->req.status == -EALREADY)
 419                return -EALREADY;
 420
 421        hwreq->req.status = -EALREADY;
 422
 423        ret = usb_gadget_map_request_by_dev(ci->dev->parent,
 424                                            &hwreq->req, hwep->dir);
 425        if (ret)
 426                return ret;
 427
 428        /*
  429         * The first buffer might not be page aligned.
 430         * In that case we have to span into one extra td.
 431         */
 432        if (hwreq->req.dma % PAGE_SIZE)
 433                pages--;
 434
 435        if (rest == 0) {
 436                ret = add_td_to_list(hwep, hwreq, 0);
 437                if (ret < 0)
 438                        goto done;
 439        }
 440
 441        while (rest > 0) {
 442                unsigned count = min(hwreq->req.length - hwreq->req.actual,
 443                                        (unsigned)(pages * CI_HDRC_PAGE_SIZE));
 444                ret = add_td_to_list(hwep, hwreq, count);
 445                if (ret < 0)
 446                        goto done;
 447
 448                rest -= count;
 449        }
 450
 451        if (hwreq->req.zero && hwreq->req.length && hwep->dir == TX
 452            && (hwreq->req.length % hwep->ep.maxpacket == 0)) {
 453                ret = add_td_to_list(hwep, hwreq, 0);
 454                if (ret < 0)
 455                        goto done;
 456        }
 457
 458        firstnode = list_first_entry(&hwreq->tds, struct td_node, td);
 459
 460        lastnode = list_entry(hwreq->tds.prev,
 461                struct td_node, td);
 462
 463        lastnode->ptr->next = cpu_to_le32(TD_TERMINATE);
 464        if (!hwreq->req.no_interrupt)
 465                lastnode->ptr->token |= cpu_to_le32(TD_IOC);
 466        wmb();
 467
 468        hwreq->req.actual = 0;
 469        if (!list_empty(&hwep->qh.queue)) {
 470                struct ci_hw_req *hwreqprev;
 471                int n = hw_ep_bit(hwep->num, hwep->dir);
 472                int tmp_stat;
 473                struct td_node *prevlastnode;
 474                u32 next = firstnode->dma & TD_ADDR_MASK;
 475
 476                hwreqprev = list_entry(hwep->qh.queue.prev,
 477                                struct ci_hw_req, queue);
 478                prevlastnode = list_entry(hwreqprev->tds.prev,
 479                                struct td_node, td);
 480
 481                prevlastnode->ptr->next = cpu_to_le32(next);
 482                wmb();
 483                if (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
 484                        goto done;
 485                do {
 486                        hw_write(ci, OP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
 487                        tmp_stat = hw_read(ci, OP_ENDPTSTAT, BIT(n));
 488                } while (!hw_read(ci, OP_USBCMD, USBCMD_ATDTW));
 489                hw_write(ci, OP_USBCMD, USBCMD_ATDTW, 0);
 490                if (tmp_stat)
 491                        goto done;
 492        }
 493
 494        /*  QH configuration */
 495        hwep->qh.ptr->td.next = cpu_to_le32(firstnode->dma);
 496        hwep->qh.ptr->td.token &=
 497                cpu_to_le32(~(TD_STATUS_HALTED|TD_STATUS_ACTIVE));
 498
 499        if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == RX) {
 500                u32 mul = hwreq->req.length / hwep->ep.maxpacket;
 501
 502                if (hwreq->req.length == 0
 503                                || hwreq->req.length % hwep->ep.maxpacket)
 504                        mul++;
 505                hwep->qh.ptr->cap |= cpu_to_le32(mul << __ffs(QH_MULT));
 506        }
 507
 508        ret = hw_ep_prime(ci, hwep->num, hwep->dir,
 509                           hwep->type == USB_ENDPOINT_XFER_CONTROL);
 510done:
 511        return ret;
 512}
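/*
 * Note on the queue-append path above: when TDs are added to a non-empty
 * queue, the new head is linked to the previous tail and the ATDTW
 * ("add dTD tripwire") semaphore is used to take a consistent snapshot
 * of ENDPTSTAT. If the endpoint is still primed or active (tmp_stat
 * set), the controller walks into the new TDs on its own and no
 * re-prime is needed; otherwise the queue head is reloaded and the
 * endpoint is primed again.
 */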
 513
 514/*
 515 * free_pending_td: remove a pending request for the endpoint
 516 * @hwep: endpoint
 517 */
 518static void free_pending_td(struct ci_hw_ep *hwep)
 519{
 520        struct td_node *pending = hwep->pending_td;
 521
 522        dma_pool_free(hwep->td_pool, pending->ptr, pending->dma);
 523        hwep->pending_td = NULL;
 524        kfree(pending);
 525}
 526
 527static int reprime_dtd(struct ci_hdrc *ci, struct ci_hw_ep *hwep,
 528                                           struct td_node *node)
 529{
 530        hwep->qh.ptr->td.next = cpu_to_le32(node->dma);
 531        hwep->qh.ptr->td.token &=
 532                cpu_to_le32(~(TD_STATUS_HALTED | TD_STATUS_ACTIVE));
 533
 534        return hw_ep_prime(ci, hwep->num, hwep->dir,
 535                                hwep->type == USB_ENDPOINT_XFER_CONTROL);
 536}
 537
 538/**
 539 * _hardware_dequeue: handles a request at hardware level
  540 * @hwep:   endpoint
  541 * @hwreq:  request
 542 *
 543 * This function returns an error code
 544 */
 545static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
 546{
 547        u32 tmptoken;
 548        struct td_node *node, *tmpnode;
 549        unsigned remaining_length;
 550        unsigned actual = hwreq->req.length;
 551        struct ci_hdrc *ci = hwep->ci;
 552
 553        if (hwreq->req.status != -EALREADY)
 554                return -EINVAL;
 555
 556        hwreq->req.status = 0;
 557
 558        list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
 559                tmptoken = le32_to_cpu(node->ptr->token);
 560                if ((TD_STATUS_ACTIVE & tmptoken) != 0) {
 561                        int n = hw_ep_bit(hwep->num, hwep->dir);
 562
 563                        if (ci->rev == CI_REVISION_24)
 564                                if (!hw_read(ci, OP_ENDPTSTAT, BIT(n)))
 565                                        reprime_dtd(ci, hwep, node);
 566                        hwreq->req.status = -EALREADY;
 567                        return -EBUSY;
 568                }
 569
 570                remaining_length = (tmptoken & TD_TOTAL_BYTES);
 571                remaining_length >>= __ffs(TD_TOTAL_BYTES);
 572                actual -= remaining_length;
 573
 574                hwreq->req.status = tmptoken & TD_STATUS;
 575                if ((TD_STATUS_HALTED & hwreq->req.status)) {
 576                        hwreq->req.status = -EPIPE;
 577                        break;
 578                } else if ((TD_STATUS_DT_ERR & hwreq->req.status)) {
 579                        hwreq->req.status = -EPROTO;
 580                        break;
 581                } else if ((TD_STATUS_TR_ERR & hwreq->req.status)) {
 582                        hwreq->req.status = -EILSEQ;
 583                        break;
 584                }
 585
 586                if (remaining_length) {
 587                        if (hwep->dir == TX) {
 588                                hwreq->req.status = -EPROTO;
 589                                break;
 590                        }
 591                }
 592                /*
  593                 * As the hardware could still access the freed td,
  594                 * which would render the udc unusable, the cleanup of
  595                 * the td has to be delayed by one.
 596                 */
 597                if (hwep->pending_td)
 598                        free_pending_td(hwep);
 599
 600                hwep->pending_td = node;
 601                list_del_init(&node->td);
 602        }
 603
 604        usb_gadget_unmap_request_by_dev(hwep->ci->dev->parent,
 605                                        &hwreq->req, hwep->dir);
 606
 607        hwreq->req.actual += actual;
 608
 609        if (hwreq->req.status)
 610                return hwreq->req.status;
 611
 612        return hwreq->req.actual;
 613}
 614
 615/**
 616 * _ep_nuke: dequeues all endpoint requests
 617 * @hwep: endpoint
 618 *
 619 * This function returns an error code
 620 * Caller must hold lock
 621 */
 622static int _ep_nuke(struct ci_hw_ep *hwep)
 623__releases(hwep->lock)
 624__acquires(hwep->lock)
 625{
 626        struct td_node *node, *tmpnode;
 627        if (hwep == NULL)
 628                return -EINVAL;
 629
 630        hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
 631
 632        while (!list_empty(&hwep->qh.queue)) {
 633
 634                /* pop oldest request */
 635                struct ci_hw_req *hwreq = list_entry(hwep->qh.queue.next,
 636                                                     struct ci_hw_req, queue);
 637
 638                list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
 639                        dma_pool_free(hwep->td_pool, node->ptr, node->dma);
 640                        list_del_init(&node->td);
 641                        node->ptr = NULL;
 642                        kfree(node);
 643                }
 644
 645                list_del_init(&hwreq->queue);
 646                hwreq->req.status = -ESHUTDOWN;
 647
 648                if (hwreq->req.complete != NULL) {
 649                        spin_unlock(hwep->lock);
 650                        usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
 651                        spin_lock(hwep->lock);
 652                }
 653        }
 654
 655        if (hwep->pending_td)
 656                free_pending_td(hwep);
 657
 658        return 0;
 659}
 660
 661static int _ep_set_halt(struct usb_ep *ep, int value, bool check_transfer)
 662{
 663        struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
 664        int direction, retval = 0;
 665        unsigned long flags;
 666
 667        if (ep == NULL || hwep->ep.desc == NULL)
 668                return -EINVAL;
 669
 670        if (usb_endpoint_xfer_isoc(hwep->ep.desc))
 671                return -EOPNOTSUPP;
 672
 673        spin_lock_irqsave(hwep->lock, flags);
 674
 675        if (value && hwep->dir == TX && check_transfer &&
 676                !list_empty(&hwep->qh.queue) &&
 677                        !usb_endpoint_xfer_control(hwep->ep.desc)) {
 678                spin_unlock_irqrestore(hwep->lock, flags);
 679                return -EAGAIN;
 680        }
 681
 682        direction = hwep->dir;
 683        do {
 684                retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
 685
 686                if (!value)
 687                        hwep->wedge = 0;
 688
 689                if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
 690                        hwep->dir = (hwep->dir == TX) ? RX : TX;
 691
 692        } while (hwep->dir != direction);
 693
 694        spin_unlock_irqrestore(hwep->lock, flags);
 695        return retval;
 696}
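/*
 * Note: for control endpoints the loop above stalls/unstalls both
 * directions (hwep->dir is toggled until it returns to its original
 * value). With check_transfer set, halting a non-control IN endpoint
 * that still has queued requests is refused with -EAGAIN.
 */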
 697
 698
 699/**
 700 * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
 701 * @gadget: gadget
 702 *
 703 * This function returns an error code
 704 */
 705static int _gadget_stop_activity(struct usb_gadget *gadget)
 706{
 707        struct usb_ep *ep;
 708        struct ci_hdrc    *ci = container_of(gadget, struct ci_hdrc, gadget);
 709        unsigned long flags;
 710
 711        spin_lock_irqsave(&ci->lock, flags);
 712        ci->gadget.speed = USB_SPEED_UNKNOWN;
 713        ci->remote_wakeup = 0;
 714        ci->suspended = 0;
 715        spin_unlock_irqrestore(&ci->lock, flags);
 716
 717        /* flush all endpoints */
 718        gadget_for_each_ep(ep, gadget) {
 719                usb_ep_fifo_flush(ep);
 720        }
 721        usb_ep_fifo_flush(&ci->ep0out->ep);
 722        usb_ep_fifo_flush(&ci->ep0in->ep);
 723
 724        /* make sure to disable all endpoints */
 725        gadget_for_each_ep(ep, gadget) {
 726                usb_ep_disable(ep);
 727        }
 728
 729        if (ci->status != NULL) {
 730                usb_ep_free_request(&ci->ep0in->ep, ci->status);
 731                ci->status = NULL;
 732        }
 733
 734        return 0;
 735}
 736
 737/******************************************************************************
 738 * ISR block
 739 *****************************************************************************/
 740/**
 741 * isr_reset_handler: USB reset interrupt handler
 742 * @ci: UDC device
 743 *
  744 * This function resets the USB engine after a bus reset has occurred
 745 */
 746static void isr_reset_handler(struct ci_hdrc *ci)
 747__releases(ci->lock)
 748__acquires(ci->lock)
 749{
 750        int retval;
 751
 752        spin_unlock(&ci->lock);
 753        if (ci->gadget.speed != USB_SPEED_UNKNOWN)
 754                usb_gadget_udc_reset(&ci->gadget, ci->driver);
 755
 756        retval = _gadget_stop_activity(&ci->gadget);
 757        if (retval)
 758                goto done;
 759
 760        retval = hw_usb_reset(ci);
 761        if (retval)
 762                goto done;
 763
 764        ci->status = usb_ep_alloc_request(&ci->ep0in->ep, GFP_ATOMIC);
 765        if (ci->status == NULL)
 766                retval = -ENOMEM;
 767
 768done:
 769        spin_lock(&ci->lock);
 770
 771        if (retval)
 772                dev_err(ci->dev, "error: %i\n", retval);
 773}
 774
 775/**
 776 * isr_get_status_complete: get_status request complete function
 777 * @ep:  endpoint
 778 * @req: request handled
 779 *
 780 * Caller must release lock
 781 */
 782static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
 783{
 784        if (ep == NULL || req == NULL)
 785                return;
 786
 787        kfree(req->buf);
 788        usb_ep_free_request(ep, req);
 789}
 790
 791/**
 792 * _ep_queue: queues (submits) an I/O request to an endpoint
 793 * @ep:        endpoint
 794 * @req:       request
 795 * @gfp_flags: GFP flags (not used)
 796 *
 797 * Caller must hold lock
 798 * This function returns an error code
 799 */
 800static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
 801                    gfp_t __maybe_unused gfp_flags)
 802{
 803        struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
 804        struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
 805        struct ci_hdrc *ci = hwep->ci;
 806        int retval = 0;
 807
 808        if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
 809                return -EINVAL;
 810
 811        if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
 812                if (req->length)
 813                        hwep = (ci->ep0_dir == RX) ?
 814                               ci->ep0out : ci->ep0in;
 815                if (!list_empty(&hwep->qh.queue)) {
 816                        _ep_nuke(hwep);
 817                        dev_warn(hwep->ci->dev, "endpoint ctrl %X nuked\n",
 818                                 _usb_addr(hwep));
 819                }
 820        }
 821
 822        if (usb_endpoint_xfer_isoc(hwep->ep.desc) &&
 823            hwreq->req.length > hwep->ep.mult * hwep->ep.maxpacket) {
 824                dev_err(hwep->ci->dev, "request length too big for isochronous\n");
 825                return -EMSGSIZE;
 826        }
 827
  828        /* first nuke then test link, e.g. previous status has not been sent */
 829        if (!list_empty(&hwreq->queue)) {
 830                dev_err(hwep->ci->dev, "request already in queue\n");
 831                return -EBUSY;
 832        }
 833
 834        /* push request */
 835        hwreq->req.status = -EINPROGRESS;
 836        hwreq->req.actual = 0;
 837
 838        retval = _hardware_enqueue(hwep, hwreq);
 839
 840        if (retval == -EALREADY)
 841                retval = 0;
 842        if (!retval)
 843                list_add_tail(&hwreq->queue, &hwep->qh.queue);
 844
 845        return retval;
 846}
 847
 848/**
 849 * isr_get_status_response: get_status request response
 850 * @ci: ci struct
 851 * @setup: setup request packet
 852 *
 853 * This function returns an error code
 854 */
 855static int isr_get_status_response(struct ci_hdrc *ci,
 856                                   struct usb_ctrlrequest *setup)
 857__releases(hwep->lock)
 858__acquires(hwep->lock)
 859{
 860        struct ci_hw_ep *hwep = ci->ep0in;
 861        struct usb_request *req = NULL;
 862        gfp_t gfp_flags = GFP_ATOMIC;
 863        int dir, num, retval;
 864
 865        if (hwep == NULL || setup == NULL)
 866                return -EINVAL;
 867
 868        spin_unlock(hwep->lock);
 869        req = usb_ep_alloc_request(&hwep->ep, gfp_flags);
 870        spin_lock(hwep->lock);
 871        if (req == NULL)
 872                return -ENOMEM;
 873
 874        req->complete = isr_get_status_complete;
 875        req->length   = 2;
 876        req->buf      = kzalloc(req->length, gfp_flags);
 877        if (req->buf == NULL) {
 878                retval = -ENOMEM;
 879                goto err_free_req;
 880        }
 881
 882        if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
 883                *(u16 *)req->buf = (ci->remote_wakeup << 1) |
 884                        ci->gadget.is_selfpowered;
 885        } else if ((setup->bRequestType & USB_RECIP_MASK) \
 886                   == USB_RECIP_ENDPOINT) {
 887                dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
 888                        TX : RX;
 889                num =  le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
 890                *(u16 *)req->buf = hw_ep_get_halt(ci, num, dir);
 891        }
 892        /* else do nothing; reserved for future use */
 893
 894        retval = _ep_queue(&hwep->ep, req, gfp_flags);
 895        if (retval)
 896                goto err_free_buf;
 897
 898        return 0;
 899
 900 err_free_buf:
 901        kfree(req->buf);
 902 err_free_req:
 903        spin_unlock(hwep->lock);
 904        usb_ep_free_request(&hwep->ep, req);
 905        spin_lock(hwep->lock);
 906        return retval;
 907}
 908
 909/**
 910 * isr_setup_status_complete: setup_status request complete function
 911 * @ep:  endpoint
 912 * @req: request handled
 913 *
 914 * Caller must release lock. Put the port in test mode if test mode
 915 * feature is selected.
 916 */
 917static void
 918isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
 919{
 920        struct ci_hdrc *ci = req->context;
 921        unsigned long flags;
 922
 923        if (ci->setaddr) {
 924                hw_usb_set_address(ci, ci->address);
 925                ci->setaddr = false;
 926                if (ci->address)
 927                        usb_gadget_set_state(&ci->gadget, USB_STATE_ADDRESS);
 928        }
 929
 930        spin_lock_irqsave(&ci->lock, flags);
 931        if (ci->test_mode)
 932                hw_port_test_set(ci, ci->test_mode);
 933        spin_unlock_irqrestore(&ci->lock, flags);
 934}
 935
 936/**
  937 * isr_setup_status_phase: queues the status phase of a setup transaction
 938 * @ci: ci struct
 939 *
 940 * This function returns an error code
 941 */
 942static int isr_setup_status_phase(struct ci_hdrc *ci)
 943{
 944        struct ci_hw_ep *hwep;
 945
 946        /*
 947         * Unexpected USB controller behavior, caused by bad signal integrity
 948         * or ground reference problems, can lead to isr_setup_status_phase
 949         * being called with ci->status equal to NULL.
 950         * If this situation occurs, you should review your USB hardware design.
 951         */
 952        if (WARN_ON_ONCE(!ci->status))
 953                return -EPIPE;
 954
 955        hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
 956        ci->status->context = ci;
 957        ci->status->complete = isr_setup_status_complete;
 958
 959        return _ep_queue(&hwep->ep, ci->status, GFP_ATOMIC);
 960}
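/*
 * Note: the status stage runs in the direction opposite to the data
 * stage, which is why ep0_dir == TX picks ep0out above and ep0_dir == RX
 * picks ep0in; the inverted assignment is not a typo.
 */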
 961
 962/**
 963 * isr_tr_complete_low: transaction complete low level handler
 964 * @hwep: endpoint
 965 *
 966 * This function returns an error code
 967 * Caller must hold lock
 968 */
 969static int isr_tr_complete_low(struct ci_hw_ep *hwep)
 970__releases(hwep->lock)
 971__acquires(hwep->lock)
 972{
 973        struct ci_hw_req *hwreq, *hwreqtemp;
 974        struct ci_hw_ep *hweptemp = hwep;
 975        int retval = 0;
 976
 977        list_for_each_entry_safe(hwreq, hwreqtemp, &hwep->qh.queue,
 978                        queue) {
 979                retval = _hardware_dequeue(hwep, hwreq);
 980                if (retval < 0)
 981                        break;
 982                list_del_init(&hwreq->queue);
 983                if (hwreq->req.complete != NULL) {
 984                        spin_unlock(hwep->lock);
 985                        if ((hwep->type == USB_ENDPOINT_XFER_CONTROL) &&
 986                                        hwreq->req.length)
 987                                hweptemp = hwep->ci->ep0in;
 988                        usb_gadget_giveback_request(&hweptemp->ep, &hwreq->req);
 989                        spin_lock(hwep->lock);
 990                }
 991        }
 992
 993        if (retval == -EBUSY)
 994                retval = 0;
 995
 996        return retval;
 997}
 998
 999static int otg_a_alt_hnp_support(struct ci_hdrc *ci)
1000{
1001        dev_warn(&ci->gadget.dev,
1002                "connect the device to an alternate port if you want HNP\n");
1003        return isr_setup_status_phase(ci);
1004}
1005
1006/**
1007 * isr_setup_packet_handler: setup packet handler
1008 * @ci: UDC descriptor
1009 *
 1010 * This function handles setup packets
1011 */
1012static void isr_setup_packet_handler(struct ci_hdrc *ci)
1013__releases(ci->lock)
1014__acquires(ci->lock)
1015{
1016        struct ci_hw_ep *hwep = &ci->ci_hw_ep[0];
1017        struct usb_ctrlrequest req;
1018        int type, num, dir, err = -EINVAL;
1019        u8 tmode = 0;
1020
1021        /*
1022         * Flush data and handshake transactions of previous
1023         * setup packet.
1024         */
1025        _ep_nuke(ci->ep0out);
1026        _ep_nuke(ci->ep0in);
1027
1028        /* read_setup_packet */
1029        do {
1030                hw_test_and_set_setup_guard(ci);
1031                memcpy(&req, &hwep->qh.ptr->setup, sizeof(req));
1032        } while (!hw_test_and_clear_setup_guard(ci));
1033
1034        type = req.bRequestType;
1035
1036        ci->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
1037
1038        switch (req.bRequest) {
1039        case USB_REQ_CLEAR_FEATURE:
1040                if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
1041                                le16_to_cpu(req.wValue) ==
1042                                USB_ENDPOINT_HALT) {
1043                        if (req.wLength != 0)
1044                                break;
1045                        num  = le16_to_cpu(req.wIndex);
1046                        dir = (num & USB_ENDPOINT_DIR_MASK) ? TX : RX;
1047                        num &= USB_ENDPOINT_NUMBER_MASK;
1048                        if (dir == TX)
1049                                num += ci->hw_ep_max / 2;
1050                        if (!ci->ci_hw_ep[num].wedge) {
1051                                spin_unlock(&ci->lock);
1052                                err = usb_ep_clear_halt(
1053                                        &ci->ci_hw_ep[num].ep);
1054                                spin_lock(&ci->lock);
1055                                if (err)
1056                                        break;
1057                        }
1058                        err = isr_setup_status_phase(ci);
1059                } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
1060                                le16_to_cpu(req.wValue) ==
1061                                USB_DEVICE_REMOTE_WAKEUP) {
1062                        if (req.wLength != 0)
1063                                break;
1064                        ci->remote_wakeup = 0;
1065                        err = isr_setup_status_phase(ci);
1066                } else {
1067                        goto delegate;
1068                }
1069                break;
1070        case USB_REQ_GET_STATUS:
1071                if ((type != (USB_DIR_IN|USB_RECIP_DEVICE) ||
1072                        le16_to_cpu(req.wIndex) == OTG_STS_SELECTOR) &&
1073                    type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
1074                    type != (USB_DIR_IN|USB_RECIP_INTERFACE))
1075                        goto delegate;
1076                if (le16_to_cpu(req.wLength) != 2 ||
1077                    le16_to_cpu(req.wValue)  != 0)
1078                        break;
1079                err = isr_get_status_response(ci, &req);
1080                break;
1081        case USB_REQ_SET_ADDRESS:
1082                if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
1083                        goto delegate;
1084                if (le16_to_cpu(req.wLength) != 0 ||
1085                    le16_to_cpu(req.wIndex)  != 0)
1086                        break;
1087                ci->address = (u8)le16_to_cpu(req.wValue);
1088                ci->setaddr = true;
1089                err = isr_setup_status_phase(ci);
1090                break;
1091        case USB_REQ_SET_FEATURE:
1092                if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
1093                                le16_to_cpu(req.wValue) ==
1094                                USB_ENDPOINT_HALT) {
1095                        if (req.wLength != 0)
1096                                break;
1097                        num  = le16_to_cpu(req.wIndex);
1098                        dir = (num & USB_ENDPOINT_DIR_MASK) ? TX : RX;
1099                        num &= USB_ENDPOINT_NUMBER_MASK;
1100                        if (dir == TX)
1101                                num += ci->hw_ep_max / 2;
1102
1103                        spin_unlock(&ci->lock);
1104                        err = _ep_set_halt(&ci->ci_hw_ep[num].ep, 1, false);
1105                        spin_lock(&ci->lock);
1106                        if (!err)
1107                                isr_setup_status_phase(ci);
1108                } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
1109                        if (req.wLength != 0)
1110                                break;
1111                        switch (le16_to_cpu(req.wValue)) {
1112                        case USB_DEVICE_REMOTE_WAKEUP:
1113                                ci->remote_wakeup = 1;
1114                                err = isr_setup_status_phase(ci);
1115                                break;
1116                        case USB_DEVICE_TEST_MODE:
1117                                tmode = le16_to_cpu(req.wIndex) >> 8;
1118                                switch (tmode) {
1119                                case TEST_J:
1120                                case TEST_K:
1121                                case TEST_SE0_NAK:
1122                                case TEST_PACKET:
1123                                case TEST_FORCE_EN:
1124                                        ci->test_mode = tmode;
1125                                        err = isr_setup_status_phase(
1126                                                        ci);
1127                                        break;
1128                                default:
1129                                        break;
1130                                }
1131                                break;
1132                        case USB_DEVICE_B_HNP_ENABLE:
1133                                if (ci_otg_is_fsm_mode(ci)) {
1134                                        ci->gadget.b_hnp_enable = 1;
1135                                        err = isr_setup_status_phase(
1136                                                        ci);
1137                                }
1138                                break;
1139                        case USB_DEVICE_A_ALT_HNP_SUPPORT:
1140                                if (ci_otg_is_fsm_mode(ci))
1141                                        err = otg_a_alt_hnp_support(ci);
1142                                break;
1143                        case USB_DEVICE_A_HNP_SUPPORT:
1144                                if (ci_otg_is_fsm_mode(ci)) {
1145                                        ci->gadget.a_hnp_support = 1;
1146                                        err = isr_setup_status_phase(
1147                                                        ci);
1148                                }
1149                                break;
1150                        default:
1151                                goto delegate;
1152                        }
1153                } else {
1154                        goto delegate;
1155                }
1156                break;
1157        default:
1158delegate:
1159                if (req.wLength == 0)   /* no data phase */
1160                        ci->ep0_dir = TX;
1161
1162                spin_unlock(&ci->lock);
1163                err = ci->driver->setup(&ci->gadget, &req);
1164                spin_lock(&ci->lock);
1165                break;
1166        }
1167
1168        if (err < 0) {
1169                spin_unlock(&ci->lock);
1170                if (_ep_set_halt(&hwep->ep, 1, false))
1171                        dev_err(ci->dev, "error: _ep_set_halt\n");
1172                spin_lock(&ci->lock);
1173        }
1174}
1175
1176/**
1177 * isr_tr_complete_handler: transaction complete interrupt handler
1178 * @ci: UDC descriptor
1179 *
1180 * This function handles traffic events
1181 */
1182static void isr_tr_complete_handler(struct ci_hdrc *ci)
1183__releases(ci->lock)
1184__acquires(ci->lock)
1185{
1186        unsigned i;
1187        int err;
1188
1189        for (i = 0; i < ci->hw_ep_max; i++) {
1190                struct ci_hw_ep *hwep  = &ci->ci_hw_ep[i];
1191
1192                if (hwep->ep.desc == NULL)
1193                        continue;   /* not configured */
1194
1195                if (hw_test_and_clear_complete(ci, i)) {
1196                        err = isr_tr_complete_low(hwep);
1197                        if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
1198                                if (err > 0)   /* needs status phase */
1199                                        err = isr_setup_status_phase(ci);
1200                                if (err < 0) {
1201                                        spin_unlock(&ci->lock);
1202                                        if (_ep_set_halt(&hwep->ep, 1, false))
1203                                                dev_err(ci->dev,
1204                                                "error: _ep_set_halt\n");
1205                                        spin_lock(&ci->lock);
1206                                }
1207                        }
1208                }
1209
1210                /* Only handle setup packet below */
1211                if (i == 0 &&
1212                        hw_test_and_clear(ci, OP_ENDPTSETUPSTAT, BIT(0)))
1213                        isr_setup_packet_handler(ci);
1214        }
1215}
1216
1217/******************************************************************************
1218 * ENDPT block
1219 *****************************************************************************/
1220/**
1221 * ep_enable: configure endpoint, making it usable
1222 *
1223 * Check usb_ep_enable() at "usb_gadget.h" for details
1224 */
1225static int ep_enable(struct usb_ep *ep,
1226                     const struct usb_endpoint_descriptor *desc)
1227{
1228        struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1229        int retval = 0;
1230        unsigned long flags;
1231        u32 cap = 0;
1232
1233        if (ep == NULL || desc == NULL)
1234                return -EINVAL;
1235
1236        spin_lock_irqsave(hwep->lock, flags);
1237
1238        /* only internal SW should enable ctrl endpts */
1239
1240        if (!list_empty(&hwep->qh.queue)) {
1241                dev_warn(hwep->ci->dev, "enabling a non-empty endpoint!\n");
1242                spin_unlock_irqrestore(hwep->lock, flags);
1243                return -EBUSY;
1244        }
1245
1246        hwep->ep.desc = desc;
1247
1248        hwep->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
1249        hwep->num  = usb_endpoint_num(desc);
1250        hwep->type = usb_endpoint_type(desc);
1251
1252        hwep->ep.maxpacket = usb_endpoint_maxp(desc);
1253        hwep->ep.mult = usb_endpoint_maxp_mult(desc);
1254
1255        if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1256                cap |= QH_IOS;
1257
1258        cap |= QH_ZLT;
1259        cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
1260        /*
1261         * For ISO-TX, we set mult at QH as the largest value, and use
1262         * MultO at TD as real mult value.
1263         */
1264        if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX)
1265                cap |= 3 << __ffs(QH_MULT);
1266
1267        hwep->qh.ptr->cap = cpu_to_le32(cap);
1268
1269        hwep->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE);   /* needed? */
1270
1271        if (hwep->num != 0 && hwep->type == USB_ENDPOINT_XFER_CONTROL) {
1272                dev_err(hwep->ci->dev, "Set control xfer at non-ep0\n");
1273                retval = -EINVAL;
1274        }
1275
1276        /*
1277         * Enable endpoints in the HW other than ep0 as ep0
1278         * is always enabled
1279         */
1280        if (hwep->num)
1281                retval |= hw_ep_enable(hwep->ci, hwep->num, hwep->dir,
1282                                       hwep->type);
1283
1284        spin_unlock_irqrestore(hwep->lock, flags);
1285        return retval;
1286}
1287
1288/**
1289 * ep_disable: endpoint is no longer usable
1290 *
1291 * Check usb_ep_disable() at "usb_gadget.h" for details
1292 */
1293static int ep_disable(struct usb_ep *ep)
1294{
1295        struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1296        int direction, retval = 0;
1297        unsigned long flags;
1298
1299        if (ep == NULL)
1300                return -EINVAL;
1301        else if (hwep->ep.desc == NULL)
1302                return -EBUSY;
1303
1304        spin_lock_irqsave(hwep->lock, flags);
1305
1306        /* only internal SW should disable ctrl endpts */
1307
1308        direction = hwep->dir;
1309        do {
1310                retval |= _ep_nuke(hwep);
1311                retval |= hw_ep_disable(hwep->ci, hwep->num, hwep->dir);
1312
1313                if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1314                        hwep->dir = (hwep->dir == TX) ? RX : TX;
1315
1316        } while (hwep->dir != direction);
1317
1318        hwep->ep.desc = NULL;
1319
1320        spin_unlock_irqrestore(hwep->lock, flags);
1321        return retval;
1322}
1323
1324/**
1325 * ep_alloc_request: allocate a request object to use with this endpoint
1326 *
1327 * Check usb_ep_alloc_request() at "usb_gadget.h" for details
1328 */
1329static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1330{
1331        struct ci_hw_req *hwreq = NULL;
1332
1333        if (ep == NULL)
1334                return NULL;
1335
1336        hwreq = kzalloc(sizeof(struct ci_hw_req), gfp_flags);
1337        if (hwreq != NULL) {
1338                INIT_LIST_HEAD(&hwreq->queue);
1339                INIT_LIST_HEAD(&hwreq->tds);
1340        }
1341
1342        return (hwreq == NULL) ? NULL : &hwreq->req;
1343}
1344
1345/**
1346 * ep_free_request: frees a request object
1347 *
1348 * Check usb_ep_free_request() at "usb_gadget.h" for details
1349 */
1350static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
1351{
1352        struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
1353        struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
1354        struct td_node *node, *tmpnode;
1355        unsigned long flags;
1356
1357        if (ep == NULL || req == NULL) {
1358                return;
1359        } else if (!list_empty(&hwreq->queue)) {
1360                dev_err(hwep->ci->dev, "freeing queued request\n");
1361                return;
1362        }
1363
1364        spin_lock_irqsave(hwep->lock, flags);
1365
1366        list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
1367                dma_pool_free(hwep->td_pool, node->ptr, node->dma);
1368                list_del_init(&node->td);
1369                node->ptr = NULL;
1370                kfree(node);
1371        }
1372
1373        kfree(hwreq);
1374
1375        spin_unlock_irqrestore(hwep->lock, flags);
1376}
1377
1378/**
1379 * ep_queue: queues (submits) an I/O request to an endpoint
1380 *
 1381 * Check usb_ep_queue() at "usb_gadget.h" for details
1382 */
1383static int ep_queue(struct usb_ep *ep, struct usb_request *req,
1384                    gfp_t __maybe_unused gfp_flags)
1385{
1386        struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
1387        int retval = 0;
1388        unsigned long flags;
1389
1390        if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
1391                return -EINVAL;
1392
1393        spin_lock_irqsave(hwep->lock, flags);
1394        retval = _ep_queue(ep, req, gfp_flags);
1395        spin_unlock_irqrestore(hwep->lock, flags);
1396        return retval;
1397}
1398
1399/**
1400 * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
1401 *
1402 * Check usb_ep_dequeue() at "usb_gadget.h" for details
1403 */
1404static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
1405{
1406        struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
1407        struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
1408        unsigned long flags;
1409        struct td_node *node, *tmpnode;
1410
1411        if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY ||
1412                hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
1413                list_empty(&hwep->qh.queue))
1414                return -EINVAL;
1415
1416        spin_lock_irqsave(hwep->lock, flags);
1417
1418        hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1419
1420        list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
1421                dma_pool_free(hwep->td_pool, node->ptr, node->dma);
1422                list_del(&node->td);
1423                kfree(node);
1424        }
1425
1426        /* pop request */
1427        list_del_init(&hwreq->queue);
1428
1429        usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir);
1430
1431        req->status = -ECONNRESET;
1432
1433        if (hwreq->req.complete != NULL) {
1434                spin_unlock(hwep->lock);
1435                usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
1436                spin_lock(hwep->lock);
1437        }
1438
1439        spin_unlock_irqrestore(hwep->lock, flags);
1440        return 0;
1441}
1442
1443/**
1444 * ep_set_halt: sets the endpoint halt feature
1445 *
1446 * Check usb_ep_set_halt() at "usb_gadget.h" for details
1447 */
1448static int ep_set_halt(struct usb_ep *ep, int value)
1449{
1450        return _ep_set_halt(ep, value, true);
1451}
1452
1453/**
1454 * ep_set_wedge: sets the halt feature and ignores clear requests
1455 *
1456 * Check usb_ep_set_wedge() at "usb_gadget.h" for details
1457 */
1458static int ep_set_wedge(struct usb_ep *ep)
1459{
1460        struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1461        unsigned long flags;
1462
1463        if (ep == NULL || hwep->ep.desc == NULL)
1464                return -EINVAL;
1465
1466        spin_lock_irqsave(hwep->lock, flags);
1467        hwep->wedge = 1;
1468        spin_unlock_irqrestore(hwep->lock, flags);
1469
1470        return usb_ep_set_halt(ep);
1471}
1472
1473/**
1474 * ep_fifo_flush: flushes contents of a fifo
1475 *
1476 * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
1477 */
1478static void ep_fifo_flush(struct usb_ep *ep)
1479{
1480        struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1481        unsigned long flags;
1482
1483        if (ep == NULL) {
1484                dev_err(hwep->ci->dev, "%02X: -EINVAL\n", _usb_addr(hwep));
1485                return;
1486        }
1487
1488        spin_lock_irqsave(hwep->lock, flags);
1489
1490        hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1491
1492        spin_unlock_irqrestore(hwep->lock, flags);
1493}
1494
1495/**
1496 * Endpoint-specific part of the API to the USB controller hardware
1497 * Check "usb_gadget.h" for details
1498 */
1499static const struct usb_ep_ops usb_ep_ops = {
1500        .enable        = ep_enable,
1501        .disable       = ep_disable,
1502        .alloc_request = ep_alloc_request,
1503        .free_request  = ep_free_request,
1504        .queue         = ep_queue,
1505        .dequeue       = ep_dequeue,
1506        .set_halt      = ep_set_halt,
1507        .set_wedge     = ep_set_wedge,
1508        .fifo_flush    = ep_fifo_flush,
1509};
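/*
 * Illustration only (not part of this driver): gadget function drivers
 * reach the ops above indirectly through the usb_ep API, roughly:
 *
 *	usb_ep_enable(ep);                           maps to ep_enable()
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);  -> ep_alloc_request()
 *	req->buf = buf; req->length = len; req->complete = done_cb;
 *	usb_ep_queue(ep, req, GFP_ATOMIC);           -> ep_queue()
 *
 * "buf", "len" and "done_cb" are placeholders, not symbols from this
 * file.
 */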
1510
1511/******************************************************************************
1512 * GADGET block
1513 *****************************************************************************/
1514static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
1515{
1516        struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1517        unsigned long flags;
1518        int gadget_ready = 0;
1519
1520        spin_lock_irqsave(&ci->lock, flags);
1521        ci->vbus_active = is_active;
1522        if (ci->driver)
1523                gadget_ready = 1;
1524        spin_unlock_irqrestore(&ci->lock, flags);
1525
1526        if (ci->usb_phy)
1527                usb_phy_set_charger_state(ci->usb_phy, is_active ?
1528                        USB_CHARGER_PRESENT : USB_CHARGER_ABSENT);
1529
1530        if (gadget_ready) {
1531                if (is_active) {
1532                        pm_runtime_get_sync(&_gadget->dev);
1533                        hw_device_reset(ci);
1534                        hw_device_state(ci, ci->ep0out->qh.dma);
1535                        usb_gadget_set_state(_gadget, USB_STATE_POWERED);
1536                        usb_udc_vbus_handler(_gadget, true);
1537                } else {
1538                        usb_udc_vbus_handler(_gadget, false);
1539                        if (ci->driver)
1540                                ci->driver->disconnect(&ci->gadget);
1541                        hw_device_state(ci, 0);
1542                        if (ci->platdata->notify_event)
1543                                ci->platdata->notify_event(ci,
1544                                CI_HDRC_CONTROLLER_STOPPED_EVENT);
1545                        _gadget_stop_activity(&ci->gadget);
1546                        pm_runtime_put_sync(&_gadget->dev);
1547                        usb_gadget_set_state(_gadget, USB_STATE_NOTATTACHED);
1548                }
1549        }
1550
1551        return 0;
1552}
1553
1554static int ci_udc_wakeup(struct usb_gadget *_gadget)
1555{
1556        struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1557        unsigned long flags;
1558        int ret = 0;
1559
1560        spin_lock_irqsave(&ci->lock, flags);
1561        if (!ci->remote_wakeup) {
1562                ret = -EOPNOTSUPP;
1563                goto out;
1564        }
1565        if (!hw_read(ci, OP_PORTSC, PORTSC_SUSP)) {
1566                ret = -EINVAL;
1567                goto out;
1568        }
1569        hw_write(ci, OP_PORTSC, PORTSC_FPR, PORTSC_FPR);
1570out:
1571        spin_unlock_irqrestore(&ci->lock, flags);
1572        return ret;
1573}
1574
1575static int ci_udc_vbus_draw(struct usb_gadget *_gadget, unsigned ma)
1576{
1577        struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1578
1579        if (ci->usb_phy)
1580                return usb_phy_set_power(ci->usb_phy, ma);
1581        return -ENOTSUPP;
1582}
1583
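/*
 * The gadget stack reports the configured power budget through
 * usb_gadget_vbus_draw() once a configuration has been selected; this op
 * simply forwards it to the PHY/charger code. A rough sketch of that
 * caller, with a hypothetical helper name (bMaxPower is in 2 mA units at
 * full/high speed):
 */
#if 0	/* illustrative sketch, not driver code */
static int my_report_power_budget(struct usb_gadget *gadget,
				  const struct usb_config_descriptor *cfg)
{
	unsigned mA = cfg->bMaxPower * 2;	/* 2 mA units at FS/HS */

	return usb_gadget_vbus_draw(gadget, mA);	/* -> ci_udc_vbus_draw() */
}
#endif
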
1584static int ci_udc_selfpowered(struct usb_gadget *_gadget, int is_on)
1585{
1586        struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1587        struct ci_hw_ep *hwep = ci->ep0in;
1588        unsigned long flags;
1589
1590        spin_lock_irqsave(hwep->lock, flags);
1591        _gadget->is_selfpowered = (is_on != 0);
1592        spin_unlock_irqrestore(hwep->lock, flags);
1593
1594        return 0;
1595}
1596
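/*
 * The flag set here is what a GET_STATUS(device) request reports back to
 * the host. Composite/function drivers toggle it with the wrappers below;
 * "my_update_selfpowered" is a hypothetical helper.
 */
#if 0	/* illustrative sketch, not driver code */
static int my_update_selfpowered(struct usb_gadget *gadget, bool external_power)
{
	return external_power ? usb_gadget_set_selfpowered(gadget) :
				usb_gadget_clear_selfpowered(gadget);
}
#endif
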
1597/* Change Data+ pullup status;
1598 * this function is used by usb_gadget_connect/disconnect
1599 */
1600static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
1601{
1602        struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1603
1604        /*
1605         * Data+ pullup controlled by OTG state machine in OTG fsm mode;
1606         * and don't touch Data+ in host mode for dual role config.
1607         */
1608        if (ci_otg_is_fsm_mode(ci) || ci->role == CI_ROLE_HOST)
1609                return 0;
1610
1611        pm_runtime_get_sync(&ci->gadget.dev);
1612        if (is_on)
1613                hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS);
1614        else
1615                hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
1616        pm_runtime_put_sync(&ci->gadget.dev);
1617
1618        return 0;
1619}
1620
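/*
 * The pullup op implements "soft connect": USBCMD.RS drives the D+ pull-up,
 * so clearing it makes the device disappear from the host without removing
 * VBUS. Users reach it via usb_gadget_connect()/usb_gadget_disconnect();
 * a hypothetical toggle is sketched below.
 */
#if 0	/* illustrative sketch, not driver code */
static int my_soft_connect(struct usb_gadget *gadget, bool connect)
{
	/* Both wrappers end up in .pullup, i.e. ci_udc_pullup() */
	return connect ? usb_gadget_connect(gadget) :
			 usb_gadget_disconnect(gadget);
}
#endif
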
1621static int ci_udc_start(struct usb_gadget *gadget,
1622                         struct usb_gadget_driver *driver);
1623static int ci_udc_stop(struct usb_gadget *gadget);
1624/*
1625 * Device operations part of the API to the USB controller hardware,
1626 * which don't involve endpoints (or i/o)
1627 * Check "usb_gadget.h" for details
1628 */
1629static const struct usb_gadget_ops usb_gadget_ops = {
1630        .vbus_session   = ci_udc_vbus_session,
1631        .wakeup         = ci_udc_wakeup,
1632        .set_selfpowered        = ci_udc_selfpowered,
1633        .pullup         = ci_udc_pullup,
1634        .vbus_draw      = ci_udc_vbus_draw,
1635        .udc_start      = ci_udc_start,
1636        .udc_stop       = ci_udc_stop,
1637};
1638
1639static int init_eps(struct ci_hdrc *ci)
1640{
1641        int retval = 0, i, j;
1642
1643        for (i = 0; i < ci->hw_ep_max/2; i++)
1644                for (j = RX; j <= TX; j++) {
1645                        int k = i + j * ci->hw_ep_max/2;
1646                        struct ci_hw_ep *hwep = &ci->ci_hw_ep[k];
1647
1648                        scnprintf(hwep->name, sizeof(hwep->name), "ep%i%s", i,
1649                                        (j == TX)  ? "in" : "out");
1650
1651                        hwep->ci          = ci;
1652                        hwep->lock         = &ci->lock;
1653                        hwep->td_pool      = ci->td_pool;
1654
1655                        hwep->ep.name      = hwep->name;
1656                        hwep->ep.ops       = &usb_ep_ops;
1657
1658                        if (i == 0) {
1659                                hwep->ep.caps.type_control = true;
1660                        } else {
1661                                hwep->ep.caps.type_iso = true;
1662                                hwep->ep.caps.type_bulk = true;
1663                                hwep->ep.caps.type_int = true;
1664                        }
1665
1666                        if (j == TX)
1667                                hwep->ep.caps.dir_in = true;
1668                        else
1669                                hwep->ep.caps.dir_out = true;
1670
1671                        /*
1672                         * for ep0, maxP is defined in the descriptor;
1673                         * for the other eps, maxP is set by
1674                         * usb_ep_autoconfig(), called by the gadget layer
1675                         */
1676                        usb_ep_set_maxpacket_limit(&hwep->ep, (unsigned short)~0);
1677
1678                        INIT_LIST_HEAD(&hwep->qh.queue);
1679                        hwep->qh.ptr = dma_pool_zalloc(ci->qh_pool, GFP_KERNEL,
1680                                                       &hwep->qh.dma);
1681                        if (hwep->qh.ptr == NULL)
1682                                retval = -ENOMEM;
1683
1684                        /*
1685                         * set up shorthands for ep0 out and in endpoints,
1686                         * don't add to gadget's ep_list
1687                         */
1688                        if (i == 0) {
1689                                if (j == RX)
1690                                        ci->ep0out = hwep;
1691                                else
1692                                        ci->ep0in = hwep;
1693
1694                                usb_ep_set_maxpacket_limit(&hwep->ep, CTRL_PAYLOAD_MAX);
1695                                continue;
1696                        }
1697
1698                        list_add_tail(&hwep->ep.ep_list, &ci->gadget.ep_list);
1699                }
1700
1701        return retval;
1702}
1703
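/*
 * Layout of the ci_hw_ep[] array built above, assuming hw_ep_max == 8
 * (i.e. 4 bidirectional hardware endpoints): the RX (OUT) halves occupy
 * the first half of the array and the TX (IN) halves the second half,
 * because k = i + j * hw_ep_max/2.
 *
 *   k:     0       1       2       3       4      5      6      7
 *   name:  ep0out  ep1out  ep2out  ep3out  ep0in  ep1in  ep2in  ep3in
 *
 * ep0out/ep0in are additionally capped at CTRL_PAYLOAD_MAX and are kept
 * off gadget.ep_list so the gadget layer cannot auto-allocate them.
 */
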
1704static void destroy_eps(struct ci_hdrc *ci)
1705{
1706        int i;
1707
1708        for (i = 0; i < ci->hw_ep_max; i++) {
1709                struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
1710
1711                if (hwep->pending_td)
1712                        free_pending_td(hwep);
1713                dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma);
1714        }
1715}
1716
1717/**
1718 * ci_udc_start: register a gadget driver
1719 * @gadget: our gadget
1720 * @driver: the driver being registered
1721 *
1722 * Interrupts are enabled here.
1723 */
1724static int ci_udc_start(struct usb_gadget *gadget,
1725                         struct usb_gadget_driver *driver)
1726{
1727        struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
1728        int retval = -ENOMEM;
1729
1730        if (driver->disconnect == NULL)
1731                return -EINVAL;
1732
1734        ci->ep0out->ep.desc = &ctrl_endpt_out_desc;
1735        retval = usb_ep_enable(&ci->ep0out->ep);
1736        if (retval)
1737                return retval;
1738
1739        ci->ep0in->ep.desc = &ctrl_endpt_in_desc;
1740        retval = usb_ep_enable(&ci->ep0in->ep);
1741        if (retval)
1742                return retval;
1743
1744        ci->driver = driver;
1745
1746        /* Start otg fsm for B-device */
1747        if (ci_otg_is_fsm_mode(ci) && ci->fsm.id) {
1748                ci_hdrc_otg_fsm_start(ci);
1749                return retval;
1750        }
1751
1752        pm_runtime_get_sync(&ci->gadget.dev);
1753        if (ci->vbus_active) {
1754                hw_device_reset(ci);
1755        } else {
1756                usb_udc_vbus_handler(&ci->gadget, false);
1757                pm_runtime_put_sync(&ci->gadget.dev);
1758                return retval;
1759        }
1760
1761        retval = hw_device_state(ci, ci->ep0out->qh.dma);
1762        if (retval)
1763                pm_runtime_put_sync(&ci->gadget.dev);
1764
1765        return retval;
1766}
1767
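/*
 * ci_udc_start() runs when a gadget (function) driver is bound to this
 * UDC by the gadget core. A skeleton of such a driver is sketched below;
 * all "my_*" callbacks are hypothetical. Note that ->disconnect is
 * mandatory here, otherwise ci_udc_start() returns -EINVAL.
 */
#if 0	/* illustrative sketch, not driver code */
static int my_bind(struct usb_gadget *gadget, struct usb_gadget_driver *drv)
{
	/* allocate requests, usb_ep_autoconfig() the endpoints, ... */
	return 0;
}

static void my_unbind(struct usb_gadget *gadget)
{
}

static int my_setup(struct usb_gadget *gadget,
		    const struct usb_ctrlrequest *ctrl)
{
	/* handle class/vendor ep0 requests here */
	return -EOPNOTSUPP;
}

static void my_disconnect(struct usb_gadget *gadget)
{
}

static struct usb_gadget_driver my_gadget_driver = {
	.max_speed	= USB_SPEED_HIGH,
	.bind		= my_bind,
	.unbind		= my_unbind,
	.setup		= my_setup,
	.disconnect	= my_disconnect,	/* required by ci_udc_start() */
};
#endif
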
1768static void ci_udc_stop_for_otg_fsm(struct ci_hdrc *ci)
1769{
1770        if (!ci_otg_is_fsm_mode(ci))
1771                return;
1772
1773        mutex_lock(&ci->fsm.lock);
1774        if (ci->fsm.otg->state == OTG_STATE_A_PERIPHERAL) {
1775                ci->fsm.a_bidl_adis_tmout = 1;
1776                ci_hdrc_otg_fsm_start(ci);
1777        } else if (ci->fsm.otg->state == OTG_STATE_B_PERIPHERAL) {
1778                ci->fsm.protocol = PROTO_UNDEF;
1779                ci->fsm.otg->state = OTG_STATE_UNDEFINED;
1780        }
1781        mutex_unlock(&ci->fsm.lock);
1782}
1783
1784/**
1785 * ci_udc_stop: unregister a gadget driver
     * @gadget: our gadget
1786 */
1787static int ci_udc_stop(struct usb_gadget *gadget)
1788{
1789        struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
1790        unsigned long flags;
1791
1792        spin_lock_irqsave(&ci->lock, flags);
1793
1794        if (ci->vbus_active) {
1795                hw_device_state(ci, 0);
1796                spin_unlock_irqrestore(&ci->lock, flags);
1797                if (ci->platdata->notify_event)
1798                        ci->platdata->notify_event(ci,
1799                        CI_HDRC_CONTROLLER_STOPPED_EVENT);
1800                _gadget_stop_activity(&ci->gadget);
1801                spin_lock_irqsave(&ci->lock, flags);
1802                pm_runtime_put(&ci->gadget.dev);
1803        }
1804
1805        ci->driver = NULL;
1806        spin_unlock_irqrestore(&ci->lock, flags);
1807
1808        ci_udc_stop_for_otg_fsm(ci);
1809        return 0;
1810}
1811
1812/******************************************************************************
1813 * BUS block
1814 *****************************************************************************/
1815/**
1816 * udc_irq: ci interrupt handler
     * @ci: the controller
1817 *
1818 * This function returns IRQ_HANDLED if the IRQ has been handled,
1819 * or IRQ_NONE otherwise. It locks access to the registers.
1820 */
1821static irqreturn_t udc_irq(struct ci_hdrc *ci)
1822{
1823        irqreturn_t retval;
1824        u32 intr;
1825
1826        if (ci == NULL)
1827                return IRQ_HANDLED;
1828
1829        spin_lock(&ci->lock);
1830
1831        if (ci->platdata->flags & CI_HDRC_REGS_SHARED) {
1832                if (hw_read(ci, OP_USBMODE, USBMODE_CM) !=
1833                                USBMODE_CM_DC) {
1834                        spin_unlock(&ci->lock);
1835                        return IRQ_NONE;
1836                }
1837        }
1838        intr = hw_test_and_clear_intr_active(ci);
1839
1840        if (intr) {
1841                /* order defines priority - do NOT change it */
1842                if (USBi_URI & intr)
1843                        isr_reset_handler(ci);
1844
1845                if (USBi_PCI & intr) {
1846                        ci->gadget.speed = hw_port_is_high_speed(ci) ?
1847                                USB_SPEED_HIGH : USB_SPEED_FULL;
1848                        if (ci->suspended) {
1849                                if (ci->driver->resume) {
1850                                        spin_unlock(&ci->lock);
1851                                        ci->driver->resume(&ci->gadget);
1852                                        spin_lock(&ci->lock);
1853                                }
1854                                ci->suspended = 0;
1855                                usb_gadget_set_state(&ci->gadget,
1856                                                ci->resume_state);
1857                        }
1858                }
1859
1860                if (USBi_UI  & intr)
1861                        isr_tr_complete_handler(ci);
1862
1863                if ((USBi_SLI & intr) && !(ci->suspended)) {
1864                        ci->suspended = 1;
1865                        ci->resume_state = ci->gadget.state;
1866                        if (ci->gadget.speed != USB_SPEED_UNKNOWN &&
1867                            ci->driver->suspend) {
1868                                spin_unlock(&ci->lock);
1869                                ci->driver->suspend(&ci->gadget);
1870                                spin_lock(&ci->lock);
1871                        }
1872                        usb_gadget_set_state(&ci->gadget,
1873                                        USB_STATE_SUSPENDED);
1874                }
1875                retval = IRQ_HANDLED;
1876        } else {
1877                retval = IRQ_NONE;
1878        }
1879        spin_unlock(&ci->lock);
1880
1881        return retval;
1882}
1883
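/*
 * udc_irq() is not requested with request_irq() directly; it is installed
 * as the gadget role's ->irq hook (see ci_hdrc_gadget_init() below) and the
 * chipidea core's shared interrupt handler dispatches to the active role.
 * A simplified sketch of that dispatch pattern follows; the real handler
 * in core.c additionally deals with OTG ID/VBUS interrupts.
 */
#if 0	/* illustrative sketch, not driver code */
static irqreturn_t my_shared_irq(int irq, void *data)
{
	struct ci_hdrc *ci = data;

	if (ci->role == CI_ROLE_END)
		return IRQ_NONE;

	return ci->roles[ci->role]->irq(ci);	/* udc_irq() in gadget mode */
}
#endif
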
1884/**
1885 * udc_start: initialize gadget role
1886 * @ci: chipidea controller
1887 */
1888static int udc_start(struct ci_hdrc *ci)
1889{
1890        struct device *dev = ci->dev;
1891        struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps;
1892        int retval = 0;
1893
1894        ci->gadget.ops          = &usb_gadget_ops;
1895        ci->gadget.speed        = USB_SPEED_UNKNOWN;
1896        ci->gadget.max_speed    = USB_SPEED_HIGH;
1897        ci->gadget.name         = ci->platdata->name;
1898        ci->gadget.otg_caps     = otg_caps;
1899
1900        if (ci->platdata->flags & CI_HDRC_REQUIRES_ALIGNED_DMA)
1901                ci->gadget.quirk_avoids_skb_reserve = 1;
1902
1903        if (ci->is_otg && (otg_caps->hnp_support || otg_caps->srp_support ||
1904                                                otg_caps->adp_support))
1905                ci->gadget.is_otg = 1;
1906
1907        INIT_LIST_HEAD(&ci->gadget.ep_list);
1908
1909        /* alloc resources */
1910        ci->qh_pool = dma_pool_create("ci_hw_qh", dev->parent,
1911                                       sizeof(struct ci_hw_qh),
1912                                       64, CI_HDRC_PAGE_SIZE);
1913        if (ci->qh_pool == NULL)
1914                return -ENOMEM;
1915
1916        ci->td_pool = dma_pool_create("ci_hw_td", dev->parent,
1917                                       sizeof(struct ci_hw_td),
1918                                       64, CI_HDRC_PAGE_SIZE);
1919        if (ci->td_pool == NULL) {
1920                retval = -ENOMEM;
1921                goto free_qh_pool;
1922        }
1923
1924        retval = init_eps(ci);
1925        if (retval)
1926                goto free_pools;
1927
1928        ci->gadget.ep0 = &ci->ep0in->ep;
1929
1930        retval = usb_add_gadget_udc(dev, &ci->gadget);
1931        if (retval)
1932                goto destroy_eps;
1933
1934        pm_runtime_no_callbacks(&ci->gadget.dev);
1935        pm_runtime_enable(&ci->gadget.dev);
1936
1937        return retval;
1938
1939destroy_eps:
1940        destroy_eps(ci);
1941free_pools:
1942        dma_pool_destroy(ci->td_pool);
1943free_qh_pool:
1944        dma_pool_destroy(ci->qh_pool);
1945        return retval;
1946}
1947
1948/**
1949 * ci_hdrc_gadget_destroy: parent remove must call this to remove UDC
     * @ci: the controller
1950 *
1951 * No interrupts active; the IRQ has been released
1952 */
1953void ci_hdrc_gadget_destroy(struct ci_hdrc *ci)
1954{
1955        if (!ci->roles[CI_ROLE_GADGET])
1956                return;
1957
1958        usb_del_gadget_udc(&ci->gadget);
1959
1960        destroy_eps(ci);
1961
1962        dma_pool_destroy(ci->td_pool);
1963        dma_pool_destroy(ci->qh_pool);
1964}
1965
1966static int udc_id_switch_for_device(struct ci_hdrc *ci)
1967{
1968        if (ci->is_otg)
1969                /* Clear and enable BSV irq */
1970                hw_write_otgsc(ci, OTGSC_BSVIS | OTGSC_BSVIE,
1971                                        OTGSC_BSVIS | OTGSC_BSVIE);
1972
1973        return 0;
1974}
1975
1976static void udc_id_switch_for_host(struct ci_hdrc *ci)
1977{
1978        /*
1979         * the host doesn't care about the B_SESSION_VALID event,
1980         * so clear and disable the BSV irq
1981         */
1982        if (ci->is_otg)
1983                hw_write_otgsc(ci, OTGSC_BSVIE | OTGSC_BSVIS, OTGSC_BSVIS);
1984
1985        ci->vbus_active = 0;
1986}
1987
1988/**
1989 * ci_hdrc_gadget_init - initialize device related bits
1990 * @ci: the controller
1991 *
1992 * This function initializes the gadget, if the device is "device capable".
1993 */
1994int ci_hdrc_gadget_init(struct ci_hdrc *ci)
1995{
1996        struct ci_role_driver *rdrv;
1997        int ret;
1998
1999        if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
2000                return -ENXIO;
2001
2002        rdrv = devm_kzalloc(ci->dev, sizeof(*rdrv), GFP_KERNEL);
2003        if (!rdrv)
2004                return -ENOMEM;
2005
2006        rdrv->start     = udc_id_switch_for_device;
2007        rdrv->stop      = udc_id_switch_for_host;
2008        rdrv->irq       = udc_irq;
2009        rdrv->name      = "gadget";
2010
2011        ret = udc_start(ci);
2012        if (!ret)
2013                ci->roles[CI_ROLE_GADGET] = rdrv;
2014
2015        return ret;
2016}
2017
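/*
 * ci_hdrc_gadget_init() only registers the role; starting and stopping it
 * is done later by the core/OTG code through the ->start/->stop hooks set
 * above, which is when udc_id_switch_for_device()/udc_id_switch_for_host()
 * run. A rough sketch of that consumer side ("my_switch_to_gadget" is a
 * hypothetical helper; the real role switch lives in core.c/otg.c):
 */
#if 0	/* illustrative sketch, not driver code */
static int my_switch_to_gadget(struct ci_hdrc *ci)
{
	struct ci_role_driver *rdrv = ci->roles[CI_ROLE_GADGET];

	if (!rdrv)
		return -ENXIO;		/* controller is not device capable */

	return rdrv->start(ci);		/* -> udc_id_switch_for_device() */
}
#endif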