linux/drivers/usb/chipidea/udc.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * udc.c - ChipIdea UDC driver
   4 *
   5 * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
   6 *
   7 * Author: David Lopo
   8 */
   9
  10#include <linux/delay.h>
  11#include <linux/device.h>
  12#include <linux/dmapool.h>
  13#include <linux/err.h>
  14#include <linux/irqreturn.h>
  15#include <linux/kernel.h>
  16#include <linux/slab.h>
  17#include <linux/pm_runtime.h>
  18#include <linux/pinctrl/consumer.h>
  19#include <linux/usb/ch9.h>
  20#include <linux/usb/gadget.h>
  21#include <linux/usb/otg-fsm.h>
  22#include <linux/usb/chipidea.h>
  23
  24#include "ci.h"
  25#include "udc.h"
  26#include "bits.h"
  27#include "otg.h"
  28#include "otg_fsm.h"
  29#include "trace.h"
  30
  31/* control endpoint description */
  32static const struct usb_endpoint_descriptor
  33ctrl_endpt_out_desc = {
  34        .bLength         = USB_DT_ENDPOINT_SIZE,
  35        .bDescriptorType = USB_DT_ENDPOINT,
  36
  37        .bEndpointAddress = USB_DIR_OUT,
  38        .bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
  39        .wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
  40};
  41
  42static const struct usb_endpoint_descriptor
  43ctrl_endpt_in_desc = {
  44        .bLength         = USB_DT_ENDPOINT_SIZE,
  45        .bDescriptorType = USB_DT_ENDPOINT,
  46
  47        .bEndpointAddress = USB_DIR_IN,
  48        .bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
  49        .wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
  50};
  51
  52/**
  53 * hw_ep_bit: calculates the bit number
  54 * @num: endpoint number
  55 * @dir: endpoint direction
  56 *
   57 * This function returns the bit number
  58 */
  59static inline int hw_ep_bit(int num, int dir)
  60{
  61        return num + ((dir == TX) ? 16 : 0);
  62}
  63
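     /*
      * The driver keeps OUT endpoints in the first half of ci->ci_hw_ep[]
      * and IN endpoints in the second half, while the ENDPTxxx registers
      * keep RX bits in [0..15] and TX bits in [16..31].  ep_to_bit() maps an
      * endpoint index to its register bit, padding the gap when fewer than
      * 16 endpoints per direction are implemented.
      */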
  64static inline int ep_to_bit(struct ci_hdrc *ci, int n)
  65{
  66        int fill = 16 - ci->hw_ep_max / 2;
  67
  68        if (n >= ci->hw_ep_max / 2)
  69                n += fill;
  70
  71        return n;
  72}
  73
  74/**
  75 * hw_device_state: enables/disables interrupts (execute without interruption)
  76 * @ci: the controller
  77 * @dma: 0 => disable, !0 => enable and set dma engine
  78 *
  79 * This function returns an error code
  80 */
  81static int hw_device_state(struct ci_hdrc *ci, u32 dma)
  82{
  83        if (dma) {
  84                hw_write(ci, OP_ENDPTLISTADDR, ~0, dma);
  85                /* interrupt, error, port change, reset, sleep/suspend */
  86                hw_write(ci, OP_USBINTR, ~0,
  87                             USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
  88        } else {
  89                hw_write(ci, OP_USBINTR, ~0, 0);
  90        }
  91        return 0;
  92}
  93
  94/**
  95 * hw_ep_flush: flush endpoint fifo (execute without interruption)
  96 * @ci: the controller
  97 * @num: endpoint number
  98 * @dir: endpoint direction
  99 *
 100 * This function returns an error code
 101 */
 102static int hw_ep_flush(struct ci_hdrc *ci, int num, int dir)
 103{
 104        int n = hw_ep_bit(num, dir);
 105
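             /*
              * The flush may not take effect if it races with ongoing
              * endpoint activity, so retry until ENDPTSTAT no longer
              * reports a primed buffer for this endpoint.
              */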
 106        do {
 107                /* flush any pending transfer */
 108                hw_write(ci, OP_ENDPTFLUSH, ~0, BIT(n));
 109                while (hw_read(ci, OP_ENDPTFLUSH, BIT(n)))
 110                        cpu_relax();
 111        } while (hw_read(ci, OP_ENDPTSTAT, BIT(n)));
 112
 113        return 0;
 114}
 115
 116/**
 117 * hw_ep_disable: disables endpoint (execute without interruption)
 118 * @ci: the controller
 119 * @num: endpoint number
 120 * @dir: endpoint direction
 121 *
 122 * This function returns an error code
 123 */
 124static int hw_ep_disable(struct ci_hdrc *ci, int num, int dir)
 125{
 126        hw_write(ci, OP_ENDPTCTRL + num,
 127                 (dir == TX) ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
 128        return 0;
 129}
 130
 131/**
 132 * hw_ep_enable: enables endpoint (execute without interruption)
 133 * @ci: the controller
 134 * @num:  endpoint number
 135 * @dir:  endpoint direction
 136 * @type: endpoint type
 137 *
 138 * This function returns an error code
 139 */
 140static int hw_ep_enable(struct ci_hdrc *ci, int num, int dir, int type)
 141{
 142        u32 mask, data;
 143
 144        if (dir == TX) {
 145                mask  = ENDPTCTRL_TXT;  /* type    */
 146                data  = type << __ffs(mask);
 147
 148                mask |= ENDPTCTRL_TXS;  /* unstall */
 149                mask |= ENDPTCTRL_TXR;  /* reset data toggle */
 150                data |= ENDPTCTRL_TXR;
 151                mask |= ENDPTCTRL_TXE;  /* enable  */
 152                data |= ENDPTCTRL_TXE;
 153        } else {
 154                mask  = ENDPTCTRL_RXT;  /* type    */
 155                data  = type << __ffs(mask);
 156
 157                mask |= ENDPTCTRL_RXS;  /* unstall */
 158                mask |= ENDPTCTRL_RXR;  /* reset data toggle */
 159                data |= ENDPTCTRL_RXR;
 160                mask |= ENDPTCTRL_RXE;  /* enable  */
 161                data |= ENDPTCTRL_RXE;
 162        }
 163        hw_write(ci, OP_ENDPTCTRL + num, mask, data);
 164        return 0;
 165}
 166
 167/**
 168 * hw_ep_get_halt: return endpoint halt status
 169 * @ci: the controller
 170 * @num: endpoint number
 171 * @dir: endpoint direction
 172 *
  173 * This function returns 1 if the endpoint is halted
 174 */
 175static int hw_ep_get_halt(struct ci_hdrc *ci, int num, int dir)
 176{
 177        u32 mask = (dir == TX) ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
 178
 179        return hw_read(ci, OP_ENDPTCTRL + num, mask) ? 1 : 0;
 180}
 181
 182/**
 183 * hw_ep_prime: primes endpoint (execute without interruption)
 184 * @ci: the controller
 185 * @num:     endpoint number
 186 * @dir:     endpoint direction
 187 * @is_ctrl: true if control endpoint
 188 *
 189 * This function returns an error code
 190 */
 191static int hw_ep_prime(struct ci_hdrc *ci, int num, int dir, int is_ctrl)
 192{
 193        int n = hw_ep_bit(num, dir);
 194
 195        /* Synchronize before ep prime */
 196        wmb();
 197
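             /*
              * Do not prime a control OUT endpoint while a SETUP packet is
              * pending; the same condition is re-checked after priming below.
              */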
 198        if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
 199                return -EAGAIN;
 200
 201        hw_write(ci, OP_ENDPTPRIME, ~0, BIT(n));
 202
 203        while (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
 204                cpu_relax();
 205        if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
 206                return -EAGAIN;
 207
  208        /* status should be tested according to the manual, but it doesn't work */
 209        return 0;
 210}
 211
 212/**
 213 * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
 214 *                 without interruption)
 215 * @ci: the controller
 216 * @num:   endpoint number
 217 * @dir:   endpoint direction
 218 * @value: true => stall, false => unstall
 219 *
 220 * This function returns an error code
 221 */
 222static int hw_ep_set_halt(struct ci_hdrc *ci, int num, int dir, int value)
 223{
 224        if (value != 0 && value != 1)
 225                return -EINVAL;
 226
 227        do {
 228                enum ci_hw_regs reg = OP_ENDPTCTRL + num;
 229                u32 mask_xs = (dir == TX) ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
 230                u32 mask_xr = (dir == TX) ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;
 231
 232                /* data toggle - reserved for EP0 but it's in ESS */
 233                hw_write(ci, reg, mask_xs|mask_xr,
 234                          value ? mask_xs : mask_xr);
 235        } while (value != hw_ep_get_halt(ci, num, dir));
 236
 237        return 0;
 238}
 239
 240/**
  241 * hw_port_is_high_speed: test if the port is high speed
  242 * @ci: the controller
  243 *
  244 * This function returns true if the port is high speed
 245 */
 246static int hw_port_is_high_speed(struct ci_hdrc *ci)
 247{
 248        return ci->hw_bank.lpm ? hw_read(ci, OP_DEVLC, DEVLC_PSPD) :
 249                hw_read(ci, OP_PORTSC, PORTSC_HSP);
 250}
 251
 252/**
 253 * hw_test_and_clear_complete: test & clear complete status (execute without
 254 *                             interruption)
 255 * @ci: the controller
 256 * @n: endpoint number
 257 *
 258 * This function returns complete status
 259 */
 260static int hw_test_and_clear_complete(struct ci_hdrc *ci, int n)
 261{
 262        n = ep_to_bit(ci, n);
 263        return hw_test_and_clear(ci, OP_ENDPTCOMPLETE, BIT(n));
 264}
 265
 266/**
 267 * hw_test_and_clear_intr_active: test & clear active interrupts (execute
 268 *                                without interruption)
 269 * @ci: the controller
 270 *
  271 * This function returns the active interrupts
 272 */
 273static u32 hw_test_and_clear_intr_active(struct ci_hdrc *ci)
 274{
 275        u32 reg = hw_read_intr_status(ci) & hw_read_intr_enable(ci);
 276
 277        hw_write(ci, OP_USBSTS, ~0, reg);
 278        return reg;
 279}
 280
 281/**
 282 * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
 283 *                                interruption)
 284 * @ci: the controller
 285 *
 286 * This function returns guard value
 287 */
 288static int hw_test_and_clear_setup_guard(struct ci_hdrc *ci)
 289{
 290        return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, 0);
 291}
 292
 293/**
 294 * hw_test_and_set_setup_guard: test & set setup guard (execute without
 295 *                              interruption)
 296 * @ci: the controller
 297 *
 298 * This function returns guard value
 299 */
 300static int hw_test_and_set_setup_guard(struct ci_hdrc *ci)
 301{
 302        return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
 303}
 304
 305/**
 306 * hw_usb_set_address: configures USB address (execute without interruption)
 307 * @ci: the controller
 308 * @value: new USB address
 309 *
 310 * This function explicitly sets the address, without the "USBADRA" (advance)
 311 * feature, which is not supported by older versions of the controller.
 312 */
 313static void hw_usb_set_address(struct ci_hdrc *ci, u8 value)
 314{
 315        hw_write(ci, OP_DEVICEADDR, DEVICEADDR_USBADR,
 316                 value << __ffs(DEVICEADDR_USBADR));
 317}
 318
 319/**
 320 * hw_usb_reset: restart device after a bus reset (execute without
 321 *               interruption)
 322 * @ci: the controller
 323 *
 324 * This function returns an error code
 325 */
 326static int hw_usb_reset(struct ci_hdrc *ci)
 327{
 328        hw_usb_set_address(ci, 0);
 329
 330        /* ESS flushes only at end?!? */
 331        hw_write(ci, OP_ENDPTFLUSH,    ~0, ~0);
 332
 333        /* clear setup token semaphores */
 334        hw_write(ci, OP_ENDPTSETUPSTAT, 0,  0);
 335
 336        /* clear complete status */
 337        hw_write(ci, OP_ENDPTCOMPLETE,  0,  0);
 338
 339        /* wait until all bits cleared */
 340        while (hw_read(ci, OP_ENDPTPRIME, ~0))
 341                udelay(10);             /* not RTOS friendly */
 342
 343        /* reset all endpoints ? */
 344
 345        /* reset internal status and wait for further instructions
 346           no need to verify the port reset status (ESS does it) */
 347
 348        return 0;
 349}
 350
 351/******************************************************************************
 352 * UTIL block
 353 *****************************************************************************/
 354
 355static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
 356                        unsigned int length, struct scatterlist *s)
 357{
 358        int i;
 359        u32 temp;
 360        struct td_node *lastnode, *node = kzalloc(sizeof(struct td_node),
 361                                                  GFP_ATOMIC);
 362
 363        if (node == NULL)
 364                return -ENOMEM;
 365
 366        node->ptr = dma_pool_zalloc(hwep->td_pool, GFP_ATOMIC, &node->dma);
 367        if (node->ptr == NULL) {
 368                kfree(node);
 369                return -ENOMEM;
 370        }
 371
 372        node->ptr->token = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES));
 373        node->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES);
 374        node->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE);
 375        if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX) {
 376                u32 mul = hwreq->req.length / hwep->ep.maxpacket;
 377
 378                if (hwreq->req.length == 0
 379                                || hwreq->req.length % hwep->ep.maxpacket)
 380                        mul++;
 381                node->ptr->token |= cpu_to_le32(mul << __ffs(TD_MULTO));
 382        }
 383
 384        if (s) {
 385                temp = (u32) (sg_dma_address(s) + hwreq->req.actual);
 386                node->td_remaining_size = CI_MAX_BUF_SIZE - length;
 387        } else {
 388                temp = (u32) (hwreq->req.dma + hwreq->req.actual);
 389        }
 390
 391        if (length) {
 392                node->ptr->page[0] = cpu_to_le32(temp);
 393                for (i = 1; i < TD_PAGE_COUNT; i++) {
 394                        u32 page = temp + i * CI_HDRC_PAGE_SIZE;
 395                        page &= ~TD_RESERVED_MASK;
 396                        node->ptr->page[i] = cpu_to_le32(page);
 397                }
 398        }
 399
 400        hwreq->req.actual += length;
 401
 402        if (!list_empty(&hwreq->tds)) {
 403                /* get the last entry */
 404                lastnode = list_entry(hwreq->tds.prev,
 405                                struct td_node, td);
 406                lastnode->ptr->next = cpu_to_le32(node->dma);
 407        }
 408
 409        INIT_LIST_HEAD(&node->td);
 410        list_add_tail(&node->td, &hwreq->tds);
 411
 412        return 0;
 413}
 414
 415/**
 416 * _usb_addr: calculates endpoint address from direction & number
 417 * @ep:  endpoint
 418 */
 419static inline u8 _usb_addr(struct ci_hw_ep *ep)
 420{
 421        return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
 422}
 423
 424static int prepare_td_for_non_sg(struct ci_hw_ep *hwep,
 425                struct ci_hw_req *hwreq)
 426{
 427        unsigned int rest = hwreq->req.length;
 428        int pages = TD_PAGE_COUNT;
 429        int ret = 0;
 430
 431        if (rest == 0) {
 432                ret = add_td_to_list(hwep, hwreq, 0, NULL);
 433                if (ret < 0)
 434                        return ret;
 435        }
 436
 437        /*
  438 * The first buffer might not be page aligned.
  439 * In that case we have to spill over into one extra td.
 440         */
 441        if (hwreq->req.dma % PAGE_SIZE)
 442                pages--;
 443
 444        while (rest > 0) {
 445                unsigned int count = min(hwreq->req.length - hwreq->req.actual,
 446                        (unsigned int)(pages * CI_HDRC_PAGE_SIZE));
 447
 448                ret = add_td_to_list(hwep, hwreq, count, NULL);
 449                if (ret < 0)
 450                        return ret;
 451
 452                rest -= count;
 453        }
 454
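             /*
              * If the gadget requested a zero-length packet (req->zero) and
              * an IN transfer is an exact multiple of maxpacket, queue one
              * extra empty TD so the transfer is terminated with a ZLP.
              */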
 455        if (hwreq->req.zero && hwreq->req.length && hwep->dir == TX
 456            && (hwreq->req.length % hwep->ep.maxpacket == 0)) {
 457                ret = add_td_to_list(hwep, hwreq, 0, NULL);
 458                if (ret < 0)
 459                        return ret;
 460        }
 461
 462        return ret;
 463}
 464
 465static int prepare_td_per_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
 466                struct scatterlist *s)
 467{
 468        unsigned int rest = sg_dma_len(s);
 469        int ret = 0;
 470
 471        hwreq->req.actual = 0;
 472        while (rest > 0) {
 473                unsigned int count = min_t(unsigned int, rest,
 474                                CI_MAX_BUF_SIZE);
 475
 476                ret = add_td_to_list(hwep, hwreq, count, s);
 477                if (ret < 0)
 478                        return ret;
 479
 480                rest -= count;
 481        }
 482
 483        return ret;
 484}
 485
 486static void ci_add_buffer_entry(struct td_node *node, struct scatterlist *s)
 487{
 488        int empty_td_slot_index = (CI_MAX_BUF_SIZE - node->td_remaining_size)
 489                        / CI_HDRC_PAGE_SIZE;
 490        int i;
 491        u32 token;
 492
 493        token = le32_to_cpu(node->ptr->token) + (sg_dma_len(s) << __ffs(TD_TOTAL_BYTES));
 494        node->ptr->token = cpu_to_le32(token);
 495
 496        for (i = empty_td_slot_index; i < TD_PAGE_COUNT; i++) {
 497                u32 page = (u32) sg_dma_address(s) +
 498                        (i - empty_td_slot_index) * CI_HDRC_PAGE_SIZE;
 499
 500                page &= ~TD_RESERVED_MASK;
 501                node->ptr->page[i] = cpu_to_le32(page);
 502        }
 503}
 504
 505static int prepare_td_for_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
 506{
 507        struct usb_request *req = &hwreq->req;
 508        struct scatterlist *s = req->sg;
 509        int ret = 0, i = 0;
 510        struct td_node *node = NULL;
 511
 512        if (!s || req->zero || req->length == 0) {
 513                dev_err(hwep->ci->dev, "not supported operation for sg\n");
 514                return -EINVAL;
 515        }
 516
 517        while (i++ < req->num_mapped_sgs) {
 518                if (sg_dma_address(s) % PAGE_SIZE) {
 519                        dev_err(hwep->ci->dev, "not page aligned sg buffer\n");
 520                        return -EINVAL;
 521                }
 522
 523                if (node && (node->td_remaining_size >= sg_dma_len(s))) {
 524                        ci_add_buffer_entry(node, s);
 525                        node->td_remaining_size -= sg_dma_len(s);
 526                } else {
 527                        ret = prepare_td_per_sg(hwep, hwreq, s);
 528                        if (ret)
 529                                return ret;
 530
 531                        node = list_entry(hwreq->tds.prev,
 532                                struct td_node, td);
 533                }
 534
 535                s = sg_next(s);
 536        }
 537
 538        return ret;
 539}
 540
 541/**
 542 * _hardware_enqueue: configures a request at hardware level
 543 * @hwep:   endpoint
 544 * @hwreq:  request
 545 *
 546 * This function returns an error code
 547 */
 548static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
 549{
 550        struct ci_hdrc *ci = hwep->ci;
 551        int ret = 0;
 552        struct td_node *firstnode, *lastnode;
 553
 554        /* don't queue twice */
 555        if (hwreq->req.status == -EALREADY)
 556                return -EALREADY;
 557
 558        hwreq->req.status = -EALREADY;
 559
 560        ret = usb_gadget_map_request_by_dev(ci->dev->parent,
 561                                            &hwreq->req, hwep->dir);
 562        if (ret)
 563                return ret;
 564
 565        if (hwreq->req.num_mapped_sgs)
 566                ret = prepare_td_for_sg(hwep, hwreq);
 567        else
 568                ret = prepare_td_for_non_sg(hwep, hwreq);
 569
 570        if (ret)
 571                return ret;
 572
 573        lastnode = list_entry(hwreq->tds.prev,
 574                struct td_node, td);
 575
 576        lastnode->ptr->next = cpu_to_le32(TD_TERMINATE);
 577        if (!hwreq->req.no_interrupt)
 578                lastnode->ptr->token |= cpu_to_le32(TD_IOC);
 579
 580        list_for_each_entry_safe(firstnode, lastnode, &hwreq->tds, td)
 581                trace_ci_prepare_td(hwep, hwreq, firstnode);
 582
 583        firstnode = list_first_entry(&hwreq->tds, struct td_node, td);
 584
 585        wmb();
 586
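             /*
              * req.actual was used as a running buffer offset while building
              * the TD list above; reset it before the transfer starts.
              */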
 587        hwreq->req.actual = 0;
 588        if (!list_empty(&hwep->qh.queue)) {
 589                struct ci_hw_req *hwreqprev;
 590                int n = hw_ep_bit(hwep->num, hwep->dir);
 591                int tmp_stat;
 592                struct td_node *prevlastnode;
 593                u32 next = firstnode->dma & TD_ADDR_MASK;
 594
 595                hwreqprev = list_entry(hwep->qh.queue.prev,
 596                                struct ci_hw_req, queue);
 597                prevlastnode = list_entry(hwreqprev->tds.prev,
 598                                struct td_node, td);
 599
 600                prevlastnode->ptr->next = cpu_to_le32(next);
 601                wmb();
 602                if (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
 603                        goto done;
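                     /*
                      * Append to a possibly active dTD list using the "add
                      * dTD tripwire" (ATDTW) semaphore: set ATDTW, sample
                      * ENDPTSTAT, and retry if the controller cleared ATDTW
                      * in the meantime, so the sampled status is consistent.
                      */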
 604                do {
 605                        hw_write(ci, OP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
 606                        tmp_stat = hw_read(ci, OP_ENDPTSTAT, BIT(n));
 607                } while (!hw_read(ci, OP_USBCMD, USBCMD_ATDTW));
 608                hw_write(ci, OP_USBCMD, USBCMD_ATDTW, 0);
 609                if (tmp_stat)
 610                        goto done;
 611        }
 612
 613        /*  QH configuration */
 614        hwep->qh.ptr->td.next = cpu_to_le32(firstnode->dma);
 615        hwep->qh.ptr->td.token &=
 616                cpu_to_le32(~(TD_STATUS_HALTED|TD_STATUS_ACTIVE));
 617
 618        if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == RX) {
 619                u32 mul = hwreq->req.length / hwep->ep.maxpacket;
 620
 621                if (hwreq->req.length == 0
 622                                || hwreq->req.length % hwep->ep.maxpacket)
 623                        mul++;
 624                hwep->qh.ptr->cap |= cpu_to_le32(mul << __ffs(QH_MULT));
 625        }
 626
 627        ret = hw_ep_prime(ci, hwep->num, hwep->dir,
 628                           hwep->type == USB_ENDPOINT_XFER_CONTROL);
 629done:
 630        return ret;
 631}
 632
 633/**
 634 * free_pending_td: remove a pending request for the endpoint
 635 * @hwep: endpoint
 636 */
 637static void free_pending_td(struct ci_hw_ep *hwep)
 638{
 639        struct td_node *pending = hwep->pending_td;
 640
 641        dma_pool_free(hwep->td_pool, pending->ptr, pending->dma);
 642        hwep->pending_td = NULL;
 643        kfree(pending);
 644}
 645
 646static int reprime_dtd(struct ci_hdrc *ci, struct ci_hw_ep *hwep,
 647                                           struct td_node *node)
 648{
 649        hwep->qh.ptr->td.next = cpu_to_le32(node->dma);
 650        hwep->qh.ptr->td.token &=
 651                cpu_to_le32(~(TD_STATUS_HALTED | TD_STATUS_ACTIVE));
 652
 653        return hw_ep_prime(ci, hwep->num, hwep->dir,
 654                                hwep->type == USB_ENDPOINT_XFER_CONTROL);
 655}
 656
 657/**
 658 * _hardware_dequeue: handles a request at hardware level
 659 * @hwep: endpoint
 660 * @hwreq:  request
 661 *
 662 * This function returns an error code
 663 */
 664static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
 665{
 666        u32 tmptoken;
 667        struct td_node *node, *tmpnode;
 668        unsigned remaining_length;
 669        unsigned actual = hwreq->req.length;
 670        struct ci_hdrc *ci = hwep->ci;
 671
 672        if (hwreq->req.status != -EALREADY)
 673                return -EINVAL;
 674
 675        hwreq->req.status = 0;
 676
 677        list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
 678                tmptoken = le32_to_cpu(node->ptr->token);
 679                trace_ci_complete_td(hwep, hwreq, node);
 680                if ((TD_STATUS_ACTIVE & tmptoken) != 0) {
 681                        int n = hw_ep_bit(hwep->num, hwep->dir);
 682
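                             /*
                              * Revision 2.4 controllers can apparently lose
                              * the prime for a still-active dTD; reprime it
                              * if ENDPTSTAT no longer shows the endpoint as
                              * primed.
                              */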
 683                        if (ci->rev == CI_REVISION_24)
 684                                if (!hw_read(ci, OP_ENDPTSTAT, BIT(n)))
 685                                        reprime_dtd(ci, hwep, node);
 686                        hwreq->req.status = -EALREADY;
 687                        return -EBUSY;
 688                }
 689
 690                remaining_length = (tmptoken & TD_TOTAL_BYTES);
 691                remaining_length >>= __ffs(TD_TOTAL_BYTES);
 692                actual -= remaining_length;
 693
 694                hwreq->req.status = tmptoken & TD_STATUS;
 695                if ((TD_STATUS_HALTED & hwreq->req.status)) {
 696                        hwreq->req.status = -EPIPE;
 697                        break;
 698                } else if ((TD_STATUS_DT_ERR & hwreq->req.status)) {
 699                        hwreq->req.status = -EPROTO;
 700                        break;
 701                } else if ((TD_STATUS_TR_ERR & hwreq->req.status)) {
 702                        hwreq->req.status = -EILSEQ;
 703                        break;
 704                }
 705
 706                if (remaining_length) {
 707                        if (hwep->dir == TX) {
 708                                hwreq->req.status = -EPROTO;
 709                                break;
 710                        }
 711                }
 712                /*
  713                 * As the hardware could still address the freed td,
  714                 * which would render the udc unusable, the cleanup of
  715                 * the td has to be delayed by one.
 716                 */
 717                if (hwep->pending_td)
 718                        free_pending_td(hwep);
 719
 720                hwep->pending_td = node;
 721                list_del_init(&node->td);
 722        }
 723
 724        usb_gadget_unmap_request_by_dev(hwep->ci->dev->parent,
 725                                        &hwreq->req, hwep->dir);
 726
 727        hwreq->req.actual += actual;
 728
 729        if (hwreq->req.status)
 730                return hwreq->req.status;
 731
 732        return hwreq->req.actual;
 733}
 734
 735/**
 736 * _ep_nuke: dequeues all endpoint requests
 737 * @hwep: endpoint
 738 *
 739 * This function returns an error code
 740 * Caller must hold lock
 741 */
 742static int _ep_nuke(struct ci_hw_ep *hwep)
 743__releases(hwep->lock)
 744__acquires(hwep->lock)
 745{
 746        struct td_node *node, *tmpnode;
 747        if (hwep == NULL)
 748                return -EINVAL;
 749
 750        hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
 751
 752        while (!list_empty(&hwep->qh.queue)) {
 753
 754                /* pop oldest request */
 755                struct ci_hw_req *hwreq = list_entry(hwep->qh.queue.next,
 756                                                     struct ci_hw_req, queue);
 757
 758                list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
 759                        dma_pool_free(hwep->td_pool, node->ptr, node->dma);
 760                        list_del_init(&node->td);
 761                        node->ptr = NULL;
 762                        kfree(node);
 763                }
 764
 765                list_del_init(&hwreq->queue);
 766                hwreq->req.status = -ESHUTDOWN;
 767
 768                if (hwreq->req.complete != NULL) {
 769                        spin_unlock(hwep->lock);
 770                        usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
 771                        spin_lock(hwep->lock);
 772                }
 773        }
 774
 775        if (hwep->pending_td)
 776                free_pending_td(hwep);
 777
 778        return 0;
 779}
 780
 781static int _ep_set_halt(struct usb_ep *ep, int value, bool check_transfer)
 782{
 783        struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
 784        int direction, retval = 0;
 785        unsigned long flags;
 786
 787        if (ep == NULL || hwep->ep.desc == NULL)
 788                return -EINVAL;
 789
 790        if (usb_endpoint_xfer_isoc(hwep->ep.desc))
 791                return -EOPNOTSUPP;
 792
 793        spin_lock_irqsave(hwep->lock, flags);
 794
 795        if (value && hwep->dir == TX && check_transfer &&
 796                !list_empty(&hwep->qh.queue) &&
 797                        !usb_endpoint_xfer_control(hwep->ep.desc)) {
 798                spin_unlock_irqrestore(hwep->lock, flags);
 799                return -EAGAIN;
 800        }
 801
 802        direction = hwep->dir;
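             /*
              * Control endpoints are stalled in both directions: the loop
              * below toggles hwep->dir so TX and RX are both updated before
              * the original direction is restored.
              */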
 803        do {
 804                retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
 805
 806                if (!value)
 807                        hwep->wedge = 0;
 808
 809                if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
 810                        hwep->dir = (hwep->dir == TX) ? RX : TX;
 811
 812        } while (hwep->dir != direction);
 813
 814        spin_unlock_irqrestore(hwep->lock, flags);
 815        return retval;
 816}
 817
 818
 819/**
 820 * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
 821 * @gadget: gadget
 822 *
 823 * This function returns an error code
 824 */
 825static int _gadget_stop_activity(struct usb_gadget *gadget)
 826{
 827        struct usb_ep *ep;
 828        struct ci_hdrc    *ci = container_of(gadget, struct ci_hdrc, gadget);
 829        unsigned long flags;
 830
 831        /* flush all endpoints */
 832        gadget_for_each_ep(ep, gadget) {
 833                usb_ep_fifo_flush(ep);
 834        }
 835        usb_ep_fifo_flush(&ci->ep0out->ep);
 836        usb_ep_fifo_flush(&ci->ep0in->ep);
 837
 838        /* make sure to disable all endpoints */
 839        gadget_for_each_ep(ep, gadget) {
 840                usb_ep_disable(ep);
 841        }
 842
 843        if (ci->status != NULL) {
 844                usb_ep_free_request(&ci->ep0in->ep, ci->status);
 845                ci->status = NULL;
 846        }
 847
 848        spin_lock_irqsave(&ci->lock, flags);
 849        ci->gadget.speed = USB_SPEED_UNKNOWN;
 850        ci->remote_wakeup = 0;
 851        ci->suspended = 0;
 852        spin_unlock_irqrestore(&ci->lock, flags);
 853
 854        return 0;
 855}
 856
 857/******************************************************************************
 858 * ISR block
 859 *****************************************************************************/
 860/**
 861 * isr_reset_handler: USB reset interrupt handler
 862 * @ci: UDC device
 863 *
  864 * This function resets the USB engine after a bus reset occurred
 865 */
 866static void isr_reset_handler(struct ci_hdrc *ci)
 867__releases(ci->lock)
 868__acquires(ci->lock)
 869{
 870        int retval;
 871
 872        spin_unlock(&ci->lock);
 873        if (ci->gadget.speed != USB_SPEED_UNKNOWN)
 874                usb_gadget_udc_reset(&ci->gadget, ci->driver);
 875
 876        retval = _gadget_stop_activity(&ci->gadget);
 877        if (retval)
 878                goto done;
 879
 880        retval = hw_usb_reset(ci);
 881        if (retval)
 882                goto done;
 883
 884        ci->status = usb_ep_alloc_request(&ci->ep0in->ep, GFP_ATOMIC);
 885        if (ci->status == NULL)
 886                retval = -ENOMEM;
 887
 888done:
 889        spin_lock(&ci->lock);
 890
 891        if (retval)
 892                dev_err(ci->dev, "error: %i\n", retval);
 893}
 894
 895/**
 896 * isr_get_status_complete: get_status request complete function
 897 * @ep:  endpoint
 898 * @req: request handled
 899 *
 900 * Caller must release lock
 901 */
 902static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
 903{
 904        if (ep == NULL || req == NULL)
 905                return;
 906
 907        kfree(req->buf);
 908        usb_ep_free_request(ep, req);
 909}
 910
 911/**
 912 * _ep_queue: queues (submits) an I/O request to an endpoint
 913 * @ep:        endpoint
 914 * @req:       request
 915 * @gfp_flags: GFP flags (not used)
 916 *
 917 * Caller must hold lock
 918 * This function returns an error code
 919 */
 920static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
 921                    gfp_t __maybe_unused gfp_flags)
 922{
 923        struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
 924        struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
 925        struct ci_hdrc *ci = hwep->ci;
 926        int retval = 0;
 927
 928        if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
 929                return -EINVAL;
 930
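             /*
              * For ep0, a request with a data stage is queued on the
              * endpoint matching the current control transfer direction;
              * anything still queued there (e.g. an unsent status) is nuked
              * first.
              */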
 931        if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
 932                if (req->length)
 933                        hwep = (ci->ep0_dir == RX) ?
 934                               ci->ep0out : ci->ep0in;
 935                if (!list_empty(&hwep->qh.queue)) {
 936                        _ep_nuke(hwep);
 937                        dev_warn(hwep->ci->dev, "endpoint ctrl %X nuked\n",
 938                                 _usb_addr(hwep));
 939                }
 940        }
 941
 942        if (usb_endpoint_xfer_isoc(hwep->ep.desc) &&
 943            hwreq->req.length > hwep->ep.mult * hwep->ep.maxpacket) {
 944                dev_err(hwep->ci->dev, "request length too big for isochronous\n");
 945                return -EMSGSIZE;
 946        }
 947
  948        /* first nuke then test link, e.g. previous status has not been sent */
 949        if (!list_empty(&hwreq->queue)) {
 950                dev_err(hwep->ci->dev, "request already in queue\n");
 951                return -EBUSY;
 952        }
 953
 954        /* push request */
 955        hwreq->req.status = -EINPROGRESS;
 956        hwreq->req.actual = 0;
 957
 958        retval = _hardware_enqueue(hwep, hwreq);
 959
 960        if (retval == -EALREADY)
 961                retval = 0;
 962        if (!retval)
 963                list_add_tail(&hwreq->queue, &hwep->qh.queue);
 964
 965        return retval;
 966}
 967
 968/**
 969 * isr_get_status_response: get_status request response
 970 * @ci: ci struct
 971 * @setup: setup request packet
 972 *
 973 * This function returns an error code
 974 */
 975static int isr_get_status_response(struct ci_hdrc *ci,
 976                                   struct usb_ctrlrequest *setup)
 977__releases(hwep->lock)
 978__acquires(hwep->lock)
 979{
 980        struct ci_hw_ep *hwep = ci->ep0in;
 981        struct usb_request *req = NULL;
 982        gfp_t gfp_flags = GFP_ATOMIC;
 983        int dir, num, retval;
 984
 985        if (hwep == NULL || setup == NULL)
 986                return -EINVAL;
 987
 988        spin_unlock(hwep->lock);
 989        req = usb_ep_alloc_request(&hwep->ep, gfp_flags);
 990        spin_lock(hwep->lock);
 991        if (req == NULL)
 992                return -ENOMEM;
 993
 994        req->complete = isr_get_status_complete;
 995        req->length   = 2;
 996        req->buf      = kzalloc(req->length, gfp_flags);
 997        if (req->buf == NULL) {
 998                retval = -ENOMEM;
 999                goto err_free_req;
1000        }
1001
1002        if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
1003                *(u16 *)req->buf = (ci->remote_wakeup << 1) |
1004                        ci->gadget.is_selfpowered;
1005        } else if ((setup->bRequestType & USB_RECIP_MASK) \
1006                   == USB_RECIP_ENDPOINT) {
1007                dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
1008                        TX : RX;
1009                num =  le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
1010                *(u16 *)req->buf = hw_ep_get_halt(ci, num, dir);
1011        }
1012        /* else do nothing; reserved for future use */
1013
1014        retval = _ep_queue(&hwep->ep, req, gfp_flags);
1015        if (retval)
1016                goto err_free_buf;
1017
1018        return 0;
1019
1020 err_free_buf:
1021        kfree(req->buf);
1022 err_free_req:
1023        spin_unlock(hwep->lock);
1024        usb_ep_free_request(&hwep->ep, req);
1025        spin_lock(hwep->lock);
1026        return retval;
1027}
1028
1029/**
1030 * isr_setup_status_complete: setup_status request complete function
1031 * @ep:  endpoint
1032 * @req: request handled
1033 *
1034 * Caller must release lock. Put the port in test mode if test mode
1035 * feature is selected.
1036 */
1037static void
1038isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
1039{
1040        struct ci_hdrc *ci = req->context;
1041        unsigned long flags;
1042
1043        if (ci->setaddr) {
1044                hw_usb_set_address(ci, ci->address);
1045                ci->setaddr = false;
1046                if (ci->address)
1047                        usb_gadget_set_state(&ci->gadget, USB_STATE_ADDRESS);
1048        }
1049
1050        spin_lock_irqsave(&ci->lock, flags);
1051        if (ci->test_mode)
1052                hw_port_test_set(ci, ci->test_mode);
1053        spin_unlock_irqrestore(&ci->lock, flags);
1054}
1055
1056/**
 1057 * isr_setup_status_phase: queues the status phase of a setup transaction
1058 * @ci: ci struct
1059 *
1060 * This function returns an error code
1061 */
1062static int isr_setup_status_phase(struct ci_hdrc *ci)
1063{
1064        struct ci_hw_ep *hwep;
1065
1066        /*
1067         * Unexpected USB controller behavior, caused by bad signal integrity
1068         * or ground reference problems, can lead to isr_setup_status_phase
1069         * being called with ci->status equal to NULL.
1070         * If this situation occurs, you should review your USB hardware design.
1071         */
1072        if (WARN_ON_ONCE(!ci->status))
1073                return -EPIPE;
1074
1075        hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
1076        ci->status->context = ci;
1077        ci->status->complete = isr_setup_status_complete;
1078
1079        return _ep_queue(&hwep->ep, ci->status, GFP_ATOMIC);
1080}
1081
1082/**
1083 * isr_tr_complete_low: transaction complete low level handler
1084 * @hwep: endpoint
1085 *
1086 * This function returns an error code
1087 * Caller must hold lock
1088 */
1089static int isr_tr_complete_low(struct ci_hw_ep *hwep)
1090__releases(hwep->lock)
1091__acquires(hwep->lock)
1092{
1093        struct ci_hw_req *hwreq, *hwreqtemp;
1094        struct ci_hw_ep *hweptemp = hwep;
1095        int retval = 0;
1096
1097        list_for_each_entry_safe(hwreq, hwreqtemp, &hwep->qh.queue,
1098                        queue) {
1099                retval = _hardware_dequeue(hwep, hwreq);
1100                if (retval < 0)
1101                        break;
1102                list_del_init(&hwreq->queue);
1103                if (hwreq->req.complete != NULL) {
1104                        spin_unlock(hwep->lock);
1105                        if ((hwep->type == USB_ENDPOINT_XFER_CONTROL) &&
1106                                        hwreq->req.length)
1107                                hweptemp = hwep->ci->ep0in;
1108                        usb_gadget_giveback_request(&hweptemp->ep, &hwreq->req);
1109                        spin_lock(hwep->lock);
1110                }
1111        }
1112
1113        if (retval == -EBUSY)
1114                retval = 0;
1115
1116        return retval;
1117}
1118
1119static int otg_a_alt_hnp_support(struct ci_hdrc *ci)
1120{
1121        dev_warn(&ci->gadget.dev,
1122                "connect the device to an alternate port if you want HNP\n");
1123        return isr_setup_status_phase(ci);
1124}
1125
1126/**
1127 * isr_setup_packet_handler: setup packet handler
1128 * @ci: UDC descriptor
1129 *
 1130 * This function handles the setup packet
1131 */
1132static void isr_setup_packet_handler(struct ci_hdrc *ci)
1133__releases(ci->lock)
1134__acquires(ci->lock)
1135{
1136        struct ci_hw_ep *hwep = &ci->ci_hw_ep[0];
1137        struct usb_ctrlrequest req;
1138        int type, num, dir, err = -EINVAL;
1139        u8 tmode = 0;
1140
1141        /*
1142         * Flush data and handshake transactions of previous
1143         * setup packet.
1144         */
1145        _ep_nuke(ci->ep0out);
1146        _ep_nuke(ci->ep0in);
1147
1148        /* read_setup_packet */
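             /*
              * The setup tripwire (SUTW) guards the 8-byte copy: the
              * controller clears SUTW if a new SETUP packet arrives while it
              * is being read, in which case the copy is repeated.
              */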
1149        do {
1150                hw_test_and_set_setup_guard(ci);
1151                memcpy(&req, &hwep->qh.ptr->setup, sizeof(req));
1152        } while (!hw_test_and_clear_setup_guard(ci));
1153
1154        type = req.bRequestType;
1155
1156        ci->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
1157
1158        switch (req.bRequest) {
1159        case USB_REQ_CLEAR_FEATURE:
1160                if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
1161                                le16_to_cpu(req.wValue) ==
1162                                USB_ENDPOINT_HALT) {
1163                        if (req.wLength != 0)
1164                                break;
1165                        num  = le16_to_cpu(req.wIndex);
1166                        dir = (num & USB_ENDPOINT_DIR_MASK) ? TX : RX;
1167                        num &= USB_ENDPOINT_NUMBER_MASK;
1168                        if (dir == TX)
1169                                num += ci->hw_ep_max / 2;
1170                        if (!ci->ci_hw_ep[num].wedge) {
1171                                spin_unlock(&ci->lock);
1172                                err = usb_ep_clear_halt(
1173                                        &ci->ci_hw_ep[num].ep);
1174                                spin_lock(&ci->lock);
1175                                if (err)
1176                                        break;
1177                        }
1178                        err = isr_setup_status_phase(ci);
1179                } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
1180                                le16_to_cpu(req.wValue) ==
1181                                USB_DEVICE_REMOTE_WAKEUP) {
1182                        if (req.wLength != 0)
1183                                break;
1184                        ci->remote_wakeup = 0;
1185                        err = isr_setup_status_phase(ci);
1186                } else {
1187                        goto delegate;
1188                }
1189                break;
1190        case USB_REQ_GET_STATUS:
1191                if ((type != (USB_DIR_IN|USB_RECIP_DEVICE) ||
1192                        le16_to_cpu(req.wIndex) == OTG_STS_SELECTOR) &&
1193                    type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
1194                    type != (USB_DIR_IN|USB_RECIP_INTERFACE))
1195                        goto delegate;
1196                if (le16_to_cpu(req.wLength) != 2 ||
1197                    le16_to_cpu(req.wValue)  != 0)
1198                        break;
1199                err = isr_get_status_response(ci, &req);
1200                break;
1201        case USB_REQ_SET_ADDRESS:
1202                if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
1203                        goto delegate;
1204                if (le16_to_cpu(req.wLength) != 0 ||
1205                    le16_to_cpu(req.wIndex)  != 0)
1206                        break;
1207                ci->address = (u8)le16_to_cpu(req.wValue);
1208                ci->setaddr = true;
1209                err = isr_setup_status_phase(ci);
1210                break;
1211        case USB_REQ_SET_FEATURE:
1212                if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
1213                                le16_to_cpu(req.wValue) ==
1214                                USB_ENDPOINT_HALT) {
1215                        if (req.wLength != 0)
1216                                break;
1217                        num  = le16_to_cpu(req.wIndex);
1218                        dir = (num & USB_ENDPOINT_DIR_MASK) ? TX : RX;
1219                        num &= USB_ENDPOINT_NUMBER_MASK;
1220                        if (dir == TX)
1221                                num += ci->hw_ep_max / 2;
1222
1223                        spin_unlock(&ci->lock);
1224                        err = _ep_set_halt(&ci->ci_hw_ep[num].ep, 1, false);
1225                        spin_lock(&ci->lock);
1226                        if (!err)
1227                                isr_setup_status_phase(ci);
1228                } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
1229                        if (req.wLength != 0)
1230                                break;
1231                        switch (le16_to_cpu(req.wValue)) {
1232                        case USB_DEVICE_REMOTE_WAKEUP:
1233                                ci->remote_wakeup = 1;
1234                                err = isr_setup_status_phase(ci);
1235                                break;
1236                        case USB_DEVICE_TEST_MODE:
1237                                tmode = le16_to_cpu(req.wIndex) >> 8;
1238                                switch (tmode) {
1239                                case USB_TEST_J:
1240                                case USB_TEST_K:
1241                                case USB_TEST_SE0_NAK:
1242                                case USB_TEST_PACKET:
1243                                case USB_TEST_FORCE_ENABLE:
1244                                        ci->test_mode = tmode;
1245                                        err = isr_setup_status_phase(
1246                                                        ci);
1247                                        break;
1248                                default:
1249                                        break;
1250                                }
1251                                break;
1252                        case USB_DEVICE_B_HNP_ENABLE:
1253                                if (ci_otg_is_fsm_mode(ci)) {
1254                                        ci->gadget.b_hnp_enable = 1;
1255                                        err = isr_setup_status_phase(
1256                                                        ci);
1257                                }
1258                                break;
1259                        case USB_DEVICE_A_ALT_HNP_SUPPORT:
1260                                if (ci_otg_is_fsm_mode(ci))
1261                                        err = otg_a_alt_hnp_support(ci);
1262                                break;
1263                        case USB_DEVICE_A_HNP_SUPPORT:
1264                                if (ci_otg_is_fsm_mode(ci)) {
1265                                        ci->gadget.a_hnp_support = 1;
1266                                        err = isr_setup_status_phase(
1267                                                        ci);
1268                                }
1269                                break;
1270                        default:
1271                                goto delegate;
1272                        }
1273                } else {
1274                        goto delegate;
1275                }
1276                break;
1277        default:
1278delegate:
1279                if (req.wLength == 0)   /* no data phase */
1280                        ci->ep0_dir = TX;
1281
1282                spin_unlock(&ci->lock);
1283                err = ci->driver->setup(&ci->gadget, &req);
1284                spin_lock(&ci->lock);
1285                break;
1286        }
1287
1288        if (err < 0) {
1289                spin_unlock(&ci->lock);
1290                if (_ep_set_halt(&hwep->ep, 1, false))
1291                        dev_err(ci->dev, "error: _ep_set_halt\n");
1292                spin_lock(&ci->lock);
1293        }
1294}
1295
1296/**
1297 * isr_tr_complete_handler: transaction complete interrupt handler
1298 * @ci: UDC descriptor
1299 *
1300 * This function handles traffic events
1301 */
1302static void isr_tr_complete_handler(struct ci_hdrc *ci)
1303__releases(ci->lock)
1304__acquires(ci->lock)
1305{
1306        unsigned i;
1307        int err;
1308
1309        for (i = 0; i < ci->hw_ep_max; i++) {
1310                struct ci_hw_ep *hwep  = &ci->ci_hw_ep[i];
1311
1312                if (hwep->ep.desc == NULL)
1313                        continue;   /* not configured */
1314
1315                if (hw_test_and_clear_complete(ci, i)) {
1316                        err = isr_tr_complete_low(hwep);
1317                        if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
1318                                if (err > 0)   /* needs status phase */
1319                                        err = isr_setup_status_phase(ci);
1320                                if (err < 0) {
1321                                        spin_unlock(&ci->lock);
1322                                        if (_ep_set_halt(&hwep->ep, 1, false))
1323                                                dev_err(ci->dev,
1324                                                "error: _ep_set_halt\n");
1325                                        spin_lock(&ci->lock);
1326                                }
1327                        }
1328                }
1329
1330                /* Only handle setup packet below */
1331                if (i == 0 &&
1332                        hw_test_and_clear(ci, OP_ENDPTSETUPSTAT, BIT(0)))
1333                        isr_setup_packet_handler(ci);
1334        }
1335}
1336
1337/******************************************************************************
1338 * ENDPT block
1339 *****************************************************************************/
1340/*
1341 * ep_enable: configure endpoint, making it usable
1342 *
1343 * Check usb_ep_enable() at "usb_gadget.h" for details
1344 */
1345static int ep_enable(struct usb_ep *ep,
1346                     const struct usb_endpoint_descriptor *desc)
1347{
1348        struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1349        int retval = 0;
1350        unsigned long flags;
1351        u32 cap = 0;
1352
1353        if (ep == NULL || desc == NULL)
1354                return -EINVAL;
1355
1356        spin_lock_irqsave(hwep->lock, flags);
1357
1358        /* only internal SW should enable ctrl endpts */
1359
1360        if (!list_empty(&hwep->qh.queue)) {
1361                dev_warn(hwep->ci->dev, "enabling a non-empty endpoint!\n");
1362                spin_unlock_irqrestore(hwep->lock, flags);
1363                return -EBUSY;
1364        }
1365
1366        hwep->ep.desc = desc;
1367
1368        hwep->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
1369        hwep->num  = usb_endpoint_num(desc);
1370        hwep->type = usb_endpoint_type(desc);
1371
1372        hwep->ep.maxpacket = usb_endpoint_maxp(desc);
1373        hwep->ep.mult = usb_endpoint_maxp_mult(desc);
1374
1375        if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1376                cap |= QH_IOS;
1377
1378        cap |= QH_ZLT;
1379        cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
1380        /*
1381         * For ISO-TX, we set mult at QH as the largest value, and use
1382         * MultO at TD as real mult value.
1383         */
1384        if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX)
1385                cap |= 3 << __ffs(QH_MULT);
1386
1387        hwep->qh.ptr->cap = cpu_to_le32(cap);
1388
1389        hwep->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE);   /* needed? */
1390
1391        if (hwep->num != 0 && hwep->type == USB_ENDPOINT_XFER_CONTROL) {
1392                dev_err(hwep->ci->dev, "Set control xfer at non-ep0\n");
1393                retval = -EINVAL;
1394        }
1395
1396        /*
1397         * Enable endpoints in the HW other than ep0 as ep0
1398         * is always enabled
1399         */
1400        if (hwep->num)
1401                retval |= hw_ep_enable(hwep->ci, hwep->num, hwep->dir,
1402                                       hwep->type);
1403
1404        spin_unlock_irqrestore(hwep->lock, flags);
1405        return retval;
1406}
1407
1408/*
1409 * ep_disable: endpoint is no longer usable
1410 *
1411 * Check usb_ep_disable() at "usb_gadget.h" for details
1412 */
1413static int ep_disable(struct usb_ep *ep)
1414{
1415        struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1416        int direction, retval = 0;
1417        unsigned long flags;
1418
1419        if (ep == NULL)
1420                return -EINVAL;
1421        else if (hwep->ep.desc == NULL)
1422                return -EBUSY;
1423
1424        spin_lock_irqsave(hwep->lock, flags);
1425        if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1426                spin_unlock_irqrestore(hwep->lock, flags);
1427                return 0;
1428        }
1429
1430        /* only internal SW should disable ctrl endpts */
1431
1432        direction = hwep->dir;
1433        do {
1434                retval |= _ep_nuke(hwep);
1435                retval |= hw_ep_disable(hwep->ci, hwep->num, hwep->dir);
1436
1437                if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1438                        hwep->dir = (hwep->dir == TX) ? RX : TX;
1439
1440        } while (hwep->dir != direction);
1441
1442        hwep->ep.desc = NULL;
1443
1444        spin_unlock_irqrestore(hwep->lock, flags);
1445        return retval;
1446}
1447
1448/*
1449 * ep_alloc_request: allocate a request object to use with this endpoint
1450 *
1451 * Check usb_ep_alloc_request() at "usb_gadget.h" for details
1452 */
1453static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1454{
1455        struct ci_hw_req *hwreq = NULL;
1456
1457        if (ep == NULL)
1458                return NULL;
1459
1460        hwreq = kzalloc(sizeof(struct ci_hw_req), gfp_flags);
1461        if (hwreq != NULL) {
1462                INIT_LIST_HEAD(&hwreq->queue);
1463                INIT_LIST_HEAD(&hwreq->tds);
1464        }
1465
1466        return (hwreq == NULL) ? NULL : &hwreq->req;
1467}
1468
1469/*
1470 * ep_free_request: frees a request object
1471 *
1472 * Check usb_ep_free_request() at "usb_gadget.h" for details
1473 */
1474static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
1475{
1476        struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
1477        struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
1478        struct td_node *node, *tmpnode;
1479        unsigned long flags;
1480
1481        if (ep == NULL || req == NULL) {
1482                return;
1483        } else if (!list_empty(&hwreq->queue)) {
1484                dev_err(hwep->ci->dev, "freeing queued request\n");
1485                return;
1486        }
1487
1488        spin_lock_irqsave(hwep->lock, flags);
1489
1490        list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
1491                dma_pool_free(hwep->td_pool, node->ptr, node->dma);
1492                list_del_init(&node->td);
1493                node->ptr = NULL;
1494                kfree(node);
1495        }
1496
1497        kfree(hwreq);
1498
1499        spin_unlock_irqrestore(hwep->lock, flags);
1500}
1501
1502/*
1503 * ep_queue: queues (submits) an I/O request to an endpoint
1504 *
 1505 * Check usb_ep_queue() at "usb_gadget.h" for details
1506 */
1507static int ep_queue(struct usb_ep *ep, struct usb_request *req,
1508                    gfp_t __maybe_unused gfp_flags)
1509{
1510        struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
1511        int retval = 0;
1512        unsigned long flags;
1513
1514        if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
1515                return -EINVAL;
1516
1517        spin_lock_irqsave(hwep->lock, flags);
1518        if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1519                spin_unlock_irqrestore(hwep->lock, flags);
1520                return 0;
1521        }
1522        retval = _ep_queue(ep, req, gfp_flags);
1523        spin_unlock_irqrestore(hwep->lock, flags);
1524        return retval;
1525}
1526
1527/*
1528 * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
1529 *
1530 * Check usb_ep_dequeue() at "usb_gadget.h" for details
1531 */
1532static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
1533{
1534        struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
1535        struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
1536        unsigned long flags;
1537        struct td_node *node, *tmpnode;
1538
1539        if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY ||
1540                hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
1541                list_empty(&hwep->qh.queue))
1542                return -EINVAL;
1543
1544        spin_lock_irqsave(hwep->lock, flags);
1545        if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN)
1546                hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1547
1548        list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
1549                dma_pool_free(hwep->td_pool, node->ptr, node->dma);
1550                list_del(&node->td);
1551                kfree(node);
1552        }
1553
1554        /* pop request */
1555        list_del_init(&hwreq->queue);
1556
1557        usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir);
1558
1559        req->status = -ECONNRESET;
1560
1561        if (hwreq->req.complete != NULL) {
1562                spin_unlock(hwep->lock);
1563                usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
1564                spin_lock(hwep->lock);
1565        }
1566
1567        spin_unlock_irqrestore(hwep->lock, flags);
1568        return 0;
1569}
1570
1571/*
1572 * ep_set_halt: sets the endpoint halt feature
1573 *
1574 * Check usb_ep_set_halt() at "usb_gadget.h" for details
1575 */
1576static int ep_set_halt(struct usb_ep *ep, int value)
1577{
1578        return _ep_set_halt(ep, value, true);
1579}
1580
1581/*
1582 * ep_set_wedge: sets the halt feature and ignores clear requests
1583 *
1584 * Check usb_ep_set_wedge() at "usb_gadget.h" for details
1585 */
1586static int ep_set_wedge(struct usb_ep *ep)
1587{
1588        struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1589        unsigned long flags;
1590
1591        if (ep == NULL || hwep->ep.desc == NULL)
1592                return -EINVAL;
1593
1594        spin_lock_irqsave(hwep->lock, flags);
1595        hwep->wedge = 1;
1596        spin_unlock_irqrestore(hwep->lock, flags);
1597
1598        return usb_ep_set_halt(ep);
1599}
1600
1601/*
1602 * ep_fifo_flush: flushes contents of a fifo
1603 *
1604 * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
1605 */
1606static void ep_fifo_flush(struct usb_ep *ep)
1607{
1608        struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1609        unsigned long flags;
1610
1611        if (ep == NULL) {
1612                dev_err(hwep->ci->dev, "%02X: -EINVAL\n", _usb_addr(hwep));
1613                return;
1614        }
1615
1616        spin_lock_irqsave(hwep->lock, flags);
1617        if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1618                spin_unlock_irqrestore(hwep->lock, flags);
1619                return;
1620        }
1621
1622        hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1623
1624        spin_unlock_irqrestore(hwep->lock, flags);
1625}
1626
1627/*
1628 * Endpoint-specific part of the API to the USB controller hardware
1629 * Check "usb_gadget.h" for details
1630 */
1631static const struct usb_ep_ops usb_ep_ops = {
1632        .enable        = ep_enable,
1633        .disable       = ep_disable,
1634        .alloc_request = ep_alloc_request,
1635        .free_request  = ep_free_request,
1636        .queue         = ep_queue,
1637        .dequeue       = ep_dequeue,
1638        .set_halt      = ep_set_halt,
1639        .set_wedge     = ep_set_wedge,
1640        .fifo_flush    = ep_fifo_flush,
1641};
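/*
 * Illustrative sketch (not part of the driver): a function driver that has
 * claimed one of these endpoints reaches the ops above through the generic
 * gadget API, e.g. (buf, len and my_complete are hypothetical):
 *
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	req->buf      = buf;
 *	req->length   = len;
 *	req->complete = my_complete;
 *	ret = usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * which lands in ep_alloc_request() and ep_queue() respectively.
 */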
1642
1643/******************************************************************************
1644 * GADGET block
1645 *****************************************************************************/
1646/*
1647 * ci_hdrc_gadget_connect: caller makes sure the gadget driver is bound
1648 */
1649static void ci_hdrc_gadget_connect(struct usb_gadget *_gadget, int is_active)
1650{
1651        struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1652
1653        if (is_active) {
1654                pm_runtime_get_sync(ci->dev);
1655                hw_device_reset(ci);
1656                spin_lock_irq(&ci->lock);
1657                if (ci->driver) {
1658                        hw_device_state(ci, ci->ep0out->qh.dma);
1659                        usb_gadget_set_state(_gadget, USB_STATE_POWERED);
1660                        spin_unlock_irq(&ci->lock);
1661                        usb_udc_vbus_handler(_gadget, true);
1662                } else {
1663                        spin_unlock_irq(&ci->lock);
1664                }
1665        } else {
1666                usb_udc_vbus_handler(_gadget, false);
1667                if (ci->driver)
1668                        ci->driver->disconnect(&ci->gadget);
1669                hw_device_state(ci, 0);
1670                if (ci->platdata->notify_event)
1671                        ci->platdata->notify_event(ci,
1672                        CI_HDRC_CONTROLLER_STOPPED_EVENT);
1673                _gadget_stop_activity(&ci->gadget);
1674                pm_runtime_put_sync(ci->dev);
1675                usb_gadget_set_state(_gadget, USB_STATE_NOTATTACHED);
1676        }
1677}
1678
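/*
 * ci_udc_vbus_session: called when the VBUS level changes
 *
 * Records the new session state, updates the PHY charger state, notifies
 * the platform glue and, if a gadget driver is bound, connects or
 * disconnects the controller accordingly.
 */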
1679static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
1680{
1681        struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1682        unsigned long flags;
1683        int ret = 0;
1684
1685        spin_lock_irqsave(&ci->lock, flags);
1686        ci->vbus_active = is_active;
1687        spin_unlock_irqrestore(&ci->lock, flags);
1688
1689        if (ci->usb_phy)
1690                usb_phy_set_charger_state(ci->usb_phy, is_active ?
1691                        USB_CHARGER_PRESENT : USB_CHARGER_ABSENT);
1692
1693        if (ci->platdata->notify_event)
1694                ret = ci->platdata->notify_event(ci,
1695                                CI_HDRC_CONTROLLER_VBUS_EVENT);
1696
1697        if (ci->driver)
1698                ci_hdrc_gadget_connect(_gadget, is_active);
1699
1700        return ret;
1701}
1702
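/*
 * ci_udc_wakeup: starts a remote wakeup
 *
 * Only succeeds if the host has enabled remote wakeup and the port is
 * currently suspended; signals resume by setting PORTSC_FPR.
 */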
1703static int ci_udc_wakeup(struct usb_gadget *_gadget)
1704{
1705        struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1706        unsigned long flags;
1707        int ret = 0;
1708
1709        spin_lock_irqsave(&ci->lock, flags);
1710        if (ci->gadget.speed == USB_SPEED_UNKNOWN) {
1711                spin_unlock_irqrestore(&ci->lock, flags);
1712                return 0;
1713        }
1714        if (!ci->remote_wakeup) {
1715                ret = -EOPNOTSUPP;
1716                goto out;
1717        }
1718        if (!hw_read(ci, OP_PORTSC, PORTSC_SUSP)) {
1719                ret = -EINVAL;
1720                goto out;
1721        }
1722        hw_write(ci, OP_PORTSC, PORTSC_FPR, PORTSC_FPR);
1723out:
1724        spin_unlock_irqrestore(&ci->lock, flags);
1725        return ret;
1726}
1727
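/*
 * ci_udc_vbus_draw: reports to the PHY how much current (in mA) the device
 * is allowed to draw from VBUS
 */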
1728static int ci_udc_vbus_draw(struct usb_gadget *_gadget, unsigned ma)
1729{
1730        struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1731
1732        if (ci->usb_phy)
1733                return usb_phy_set_power(ci->usb_phy, ma);
1734        return -ENOTSUPP;
1735}
1736
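/*
 * ci_udc_selfpowered: records whether the gadget is currently self-powered
 */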
1737static int ci_udc_selfpowered(struct usb_gadget *_gadget, int is_on)
1738{
1739        struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1740        struct ci_hw_ep *hwep = ci->ep0in;
1741        unsigned long flags;
1742
1743        spin_lock_irqsave(hwep->lock, flags);
1744        _gadget->is_selfpowered = (is_on != 0);
1745        spin_unlock_irqrestore(hwep->lock, flags);
1746
1747        return 0;
1748}
1749
1750/* Change the Data+ pullup status;
1751 * this function is used by usb_gadget_connect/disconnect
1752 */
1753static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
1754{
1755        struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1756
1757        /*
1758         * The Data+ pullup is controlled by the OTG state machine in OTG
1759         * fsm mode; don't touch Data+ in host mode for a dual-role config.
1760         */
1761        if (ci_otg_is_fsm_mode(ci) || ci->role == CI_ROLE_HOST)
1762                return 0;
1763
1764        pm_runtime_get_sync(ci->dev);
1765        if (is_on)
1766                hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS);
1767        else
1768                hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
1769        pm_runtime_put_sync(ci->dev);
1770
1771        return 0;
1772}
1773
1774static int ci_udc_start(struct usb_gadget *gadget,
1775                         struct usb_gadget_driver *driver);
1776static int ci_udc_stop(struct usb_gadget *gadget);
1777
1778/* Match ISOC IN requests to the highest-numbered unclaimed IN endpoint */
1779static struct usb_ep *ci_udc_match_ep(struct usb_gadget *gadget,
1780                              struct usb_endpoint_descriptor *desc,
1781                              struct usb_ss_ep_comp_descriptor *comp_desc)
1782{
1783        struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
1784        struct usb_ep *ep;
1785
1786        if (usb_endpoint_xfer_isoc(desc) && usb_endpoint_dir_in(desc)) {
1787                list_for_each_entry_reverse(ep, &ci->gadget.ep_list, ep_list) {
1788                        if (ep->caps.dir_in && !ep->claimed)
1789                                return ep;
1790                }
1791        }
1792
1793        return NULL;
1794}
1795
1796/*
1797 * Device operations part of the API to the USB controller hardware,
1798 * which don't involve endpoints (or I/O)
1799 * Check "usb_gadget.h" for details
1800 */
1801static const struct usb_gadget_ops usb_gadget_ops = {
1802        .vbus_session   = ci_udc_vbus_session,
1803        .wakeup         = ci_udc_wakeup,
1804        .set_selfpowered        = ci_udc_selfpowered,
1805        .pullup         = ci_udc_pullup,
1806        .vbus_draw      = ci_udc_vbus_draw,
1807        .udc_start      = ci_udc_start,
1808        .udc_stop       = ci_udc_stop,
1809        .match_ep       = ci_udc_match_ep,
1810};
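/*
 * Illustrative note: the UDC core invokes this table on behalf of the
 * gadget/composite layer, e.g. usb_gadget_connect() ends up in
 * ci_udc_pullup() and usb_gadget_wakeup() in ci_udc_wakeup().
 */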
1811
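/*
 * init_eps: initializes the endpoint data structures
 *
 * Allocates a queue head from the QH DMA pool for every hardware endpoint,
 * sets up the ep0 IN/OUT shorthands and adds all other endpoints to the
 * gadget's ep_list.
 */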
1812static int init_eps(struct ci_hdrc *ci)
1813{
1814        int retval = 0, i, j;
1815
1816        for (i = 0; i < ci->hw_ep_max/2; i++)
1817                for (j = RX; j <= TX; j++) {
1818                        int k = i + j * ci->hw_ep_max/2;
1819                        struct ci_hw_ep *hwep = &ci->ci_hw_ep[k];
1820
1821                        scnprintf(hwep->name, sizeof(hwep->name), "ep%i%s", i,
1822                                        (j == TX)  ? "in" : "out");
1823
1824                        hwep->ci          = ci;
1825                        hwep->lock         = &ci->lock;
1826                        hwep->td_pool      = ci->td_pool;
1827
1828                        hwep->ep.name      = hwep->name;
1829                        hwep->ep.ops       = &usb_ep_ops;
1830
1831                        if (i == 0) {
1832                                hwep->ep.caps.type_control = true;
1833                        } else {
1834                                hwep->ep.caps.type_iso = true;
1835                                hwep->ep.caps.type_bulk = true;
1836                                hwep->ep.caps.type_int = true;
1837                        }
1838
1839                        if (j == TX)
1840                                hwep->ep.caps.dir_in = true;
1841                        else
1842                                hwep->ep.caps.dir_out = true;
1843
1844                        /*
1845                         * for ep0: maxP is defined in the descriptor; for
1846                         * other eps, maxP is set by usb_ep_autoconfig(),
1847                         * called by the gadget layer
1848                         */
1849                        usb_ep_set_maxpacket_limit(&hwep->ep, (unsigned short)~0);
1850
1851                        INIT_LIST_HEAD(&hwep->qh.queue);
1852                        hwep->qh.ptr = dma_pool_zalloc(ci->qh_pool, GFP_KERNEL,
1853                                                       &hwep->qh.dma);
1854                        if (hwep->qh.ptr == NULL)
1855                                retval = -ENOMEM;
1856
1857                        /*
1858                         * set up shorthands for ep0 out and in endpoints,
1859                         * don't add to gadget's ep_list
1860                         */
1861                        if (i == 0) {
1862                                if (j == RX)
1863                                        ci->ep0out = hwep;
1864                                else
1865                                        ci->ep0in = hwep;
1866
1867                                usb_ep_set_maxpacket_limit(&hwep->ep, CTRL_PAYLOAD_MAX);
1868                                continue;
1869                        }
1870
1871                        list_add_tail(&hwep->ep.ep_list, &ci->gadget.ep_list);
1872                }
1873
1874        return retval;
1875}
1876
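/*
 * destroy_eps: frees the per-endpoint resources allocated by init_eps()
 */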
1877static void destroy_eps(struct ci_hdrc *ci)
1878{
1879        int i;
1880
1881        for (i = 0; i < ci->hw_ep_max; i++) {
1882                struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
1883
1884                if (hwep->pending_td)
1885                        free_pending_td(hwep);
1886                dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma);
1887        }
1888}
1889
1890/**
1891 * ci_udc_start: register a gadget driver
1892 * @gadget: our gadget
1893 * @driver: the driver being registered
1894 *
1895 * Interrupts are enabled here.
1896 */
1897static int ci_udc_start(struct usb_gadget *gadget,
1898                         struct usb_gadget_driver *driver)
1899{
1900        struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
1901        int retval;
1902
1903        if (driver->disconnect == NULL)
1904                return -EINVAL;
1905
1906        ci->ep0out->ep.desc = &ctrl_endpt_out_desc;
1907        retval = usb_ep_enable(&ci->ep0out->ep);
1908        if (retval)
1909                return retval;
1910
1911        ci->ep0in->ep.desc = &ctrl_endpt_in_desc;
1912        retval = usb_ep_enable(&ci->ep0in->ep);
1913        if (retval)
1914                return retval;
1915
1916        ci->driver = driver;
1917
1918        /* Start otg fsm for B-device */
1919        if (ci_otg_is_fsm_mode(ci) && ci->fsm.id) {
1920                ci_hdrc_otg_fsm_start(ci);
1921                return retval;
1922        }
1923
1924        if (ci->vbus_active)
1925                ci_hdrc_gadget_connect(gadget, 1);
1926        else
1927                usb_udc_vbus_handler(&ci->gadget, false);
1928
1929        return retval;
1930}
1931
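/*
 * ci_udc_stop_for_otg_fsm: updates the OTG state machine when the gadget
 * driver goes away (no-op unless in OTG fsm mode)
 */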
1932static void ci_udc_stop_for_otg_fsm(struct ci_hdrc *ci)
1933{
1934        if (!ci_otg_is_fsm_mode(ci))
1935                return;
1936
1937        mutex_lock(&ci->fsm.lock);
1938        if (ci->fsm.otg->state == OTG_STATE_A_PERIPHERAL) {
1939                ci->fsm.a_bidl_adis_tmout = 1;
1940                ci_hdrc_otg_fsm_start(ci);
1941        } else if (ci->fsm.otg->state == OTG_STATE_B_PERIPHERAL) {
1942                ci->fsm.protocol = PROTO_UNDEF;
1943                ci->fsm.otg->state = OTG_STATE_UNDEFINED;
1944        }
1945        mutex_unlock(&ci->fsm.lock);
1946}
1947
1948/*
1949 * ci_udc_stop: unregister a gadget driver
1950 */
1951static int ci_udc_stop(struct usb_gadget *gadget)
1952{
1953        struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
1954        unsigned long flags;
1955
1956        spin_lock_irqsave(&ci->lock, flags);
1957        ci->driver = NULL;
1958
1959        if (ci->vbus_active) {
1960                hw_device_state(ci, 0);
1961                spin_unlock_irqrestore(&ci->lock, flags);
1962                if (ci->platdata->notify_event)
1963                        ci->platdata->notify_event(ci,
1964                        CI_HDRC_CONTROLLER_STOPPED_EVENT);
1965                _gadget_stop_activity(&ci->gadget);
1966                spin_lock_irqsave(&ci->lock, flags);
1967                pm_runtime_put(ci->dev);
1968        }
1969
1970        spin_unlock_irqrestore(&ci->lock, flags);
1971
1972        ci_udc_stop_for_otg_fsm(ci);
1973        return 0;
1974}
1975
1976/******************************************************************************
1977 * BUS block
1978 *****************************************************************************/
1979/*
1980 * udc_irq: ci interrupt handler
1981 *
1982 * This function returns IRQ_HANDLED if the IRQ has been handled;
1983 * it locks access to the registers
1984 */
1985static irqreturn_t udc_irq(struct ci_hdrc *ci)
1986{
1987        irqreturn_t retval;
1988        u32 intr;
1989
1990        if (ci == NULL)
1991                return IRQ_HANDLED;
1992
1993        spin_lock(&ci->lock);
1994
1995        if (ci->platdata->flags & CI_HDRC_REGS_SHARED) {
1996                if (hw_read(ci, OP_USBMODE, USBMODE_CM) !=
1997                                USBMODE_CM_DC) {
1998                        spin_unlock(&ci->lock);
1999                        return IRQ_NONE;
2000                }
2001        }
2002        intr = hw_test_and_clear_intr_active(ci);
2003
2004        if (intr) {
2005                /* order defines priority - do NOT change it */
2006                if (USBi_URI & intr)
2007                        isr_reset_handler(ci);
2008
2009                if (USBi_PCI & intr) {
2010                        ci->gadget.speed = hw_port_is_high_speed(ci) ?
2011                                USB_SPEED_HIGH : USB_SPEED_FULL;
2012                        if (ci->suspended) {
2013                                if (ci->driver->resume) {
2014                                        spin_unlock(&ci->lock);
2015                                        ci->driver->resume(&ci->gadget);
2016                                        spin_lock(&ci->lock);
2017                                }
2018                                ci->suspended = 0;
2019                                usb_gadget_set_state(&ci->gadget,
2020                                                ci->resume_state);
2021                        }
2022                }
2023
2024                if (USBi_UI  & intr)
2025                        isr_tr_complete_handler(ci);
2026
2027                if ((USBi_SLI & intr) && !(ci->suspended)) {
2028                        ci->suspended = 1;
2029                        ci->resume_state = ci->gadget.state;
2030                        if (ci->gadget.speed != USB_SPEED_UNKNOWN &&
2031                            ci->driver->suspend) {
2032                                spin_unlock(&ci->lock);
2033                                ci->driver->suspend(&ci->gadget);
2034                                spin_lock(&ci->lock);
2035                        }
2036                        usb_gadget_set_state(&ci->gadget,
2037                                        USB_STATE_SUSPENDED);
2038                }
2039                retval = IRQ_HANDLED;
2040        } else {
2041                retval = IRQ_NONE;
2042        }
2043        spin_unlock(&ci->lock);
2044
2045        return retval;
2046}
2047
2048/**
2049 * udc_start: initialize gadget role
2050 * @ci: chipidea controller
2051 */
2052static int udc_start(struct ci_hdrc *ci)
2053{
2054        struct device *dev = ci->dev;
2055        struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps;
2056        int retval = 0;
2057
2058        ci->gadget.ops          = &usb_gadget_ops;
2059        ci->gadget.speed        = USB_SPEED_UNKNOWN;
2060        ci->gadget.max_speed    = USB_SPEED_HIGH;
2061        ci->gadget.name         = ci->platdata->name;
2062        ci->gadget.otg_caps     = otg_caps;
2063        ci->gadget.sg_supported = 1;
2064
2065        if (ci->platdata->flags & CI_HDRC_REQUIRES_ALIGNED_DMA)
2066                ci->gadget.quirk_avoids_skb_reserve = 1;
2067
2068        if (ci->is_otg && (otg_caps->hnp_support || otg_caps->srp_support ||
2069                                                otg_caps->adp_support))
2070                ci->gadget.is_otg = 1;
2071
2072        INIT_LIST_HEAD(&ci->gadget.ep_list);
2073
2074        /* alloc resources */
2075        ci->qh_pool = dma_pool_create("ci_hw_qh", dev->parent,
2076                                       sizeof(struct ci_hw_qh),
2077                                       64, CI_HDRC_PAGE_SIZE);
2078        if (ci->qh_pool == NULL)
2079                return -ENOMEM;
2080
2081        ci->td_pool = dma_pool_create("ci_hw_td", dev->parent,
2082                                       sizeof(struct ci_hw_td),
2083                                       64, CI_HDRC_PAGE_SIZE);
2084        if (ci->td_pool == NULL) {
2085                retval = -ENOMEM;
2086                goto free_qh_pool;
2087        }
2088
2089        retval = init_eps(ci);
2090        if (retval)
2091                goto free_pools;
2092
2093        ci->gadget.ep0 = &ci->ep0in->ep;
2094
2095        retval = usb_add_gadget_udc(dev, &ci->gadget);
2096        if (retval)
2097                goto destroy_eps;
2098
2099        return retval;
2100
2101destroy_eps:
2102        destroy_eps(ci);
2103free_pools:
2104        dma_pool_destroy(ci->td_pool);
2105free_qh_pool:
2106        dma_pool_destroy(ci->qh_pool);
2107        return retval;
2108}
2109
2110/*
2111 * ci_hdrc_gadget_destroy: the parent remove path must call this to remove the UDC
2112 *
2113 * No interrupts active, the IRQ has been released
2114 */
2115void ci_hdrc_gadget_destroy(struct ci_hdrc *ci)
2116{
2117        if (!ci->roles[CI_ROLE_GADGET])
2118                return;
2119
2120        usb_del_gadget_udc(&ci->gadget);
2121
2122        destroy_eps(ci);
2123
2124        dma_pool_destroy(ci->td_pool);
2125        dma_pool_destroy(ci->qh_pool);
2126}
2127
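/*
 * udc_id_switch_for_device: role-switch hook that prepares the controller
 * for the device (gadget) role: selects the device pinctrl state and, on
 * OTG controllers, clears and enables the BSV interrupt.
 */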
2128static int udc_id_switch_for_device(struct ci_hdrc *ci)
2129{
2130        if (ci->platdata->pins_device)
2131                pinctrl_select_state(ci->platdata->pctl,
2132                                     ci->platdata->pins_device);
2133
2134        if (ci->is_otg)
2135                /* Clear and enable BSV irq */
2136                hw_write_otgsc(ci, OTGSC_BSVIS | OTGSC_BSVIE,
2137                                        OTGSC_BSVIS | OTGSC_BSVIE);
2138
2139        return 0;
2140}
2141
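/*
 * udc_id_switch_for_host: role-switch hook called when the controller
 * leaves the device role for the host role
 */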
2142static void udc_id_switch_for_host(struct ci_hdrc *ci)
2143{
2144        /*
2145         * The host role doesn't care about the B_SESSION_VALID event,
2146         * so clear and disable the BSV irq
2147         */
2148        if (ci->is_otg)
2149                hw_write_otgsc(ci, OTGSC_BSVIE | OTGSC_BSVIS, OTGSC_BSVIS);
2150
2151        ci->vbus_active = 0;
2152
2153        if (ci->platdata->pins_device && ci->platdata->pins_default)
2154                pinctrl_select_state(ci->platdata->pctl,
2155                                     ci->platdata->pins_default);
2156}
2157
2158/**
2159 * ci_hdrc_gadget_init - initialize device related bits
2160 * @ci: the controller
2161 *
2162 * This function initializes the gadget, if the device is "device capable".
2163 */
2164int ci_hdrc_gadget_init(struct ci_hdrc *ci)
2165{
2166        struct ci_role_driver *rdrv;
2167        int ret;
2168
2169        if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
2170                return -ENXIO;
2171
2172        rdrv = devm_kzalloc(ci->dev, sizeof(*rdrv), GFP_KERNEL);
2173        if (!rdrv)
2174                return -ENOMEM;
2175
2176        rdrv->start     = udc_id_switch_for_device;
2177        rdrv->stop      = udc_id_switch_for_host;
2178        rdrv->irq       = udc_irq;
2179        rdrv->name      = "gadget";
2180
2181        ret = udc_start(ci);
2182        if (!ret)
2183                ci->roles[CI_ROLE_GADGET] = rdrv;
2184
2185        return ret;
2186}
2187