linux/drivers/usb/dwc2/hcd_intr.c
   1// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
   2/*
   3 * hcd_intr.c - DesignWare HS OTG Controller host-mode interrupt handling
   4 *
   5 * Copyright (C) 2004-2013 Synopsys, Inc.
   6 *
   7 * Redistribution and use in source and binary forms, with or without
   8 * modification, are permitted provided that the following conditions
   9 * are met:
  10 * 1. Redistributions of source code must retain the above copyright
  11 *    notice, this list of conditions, and the following disclaimer,
  12 *    without modification.
  13 * 2. Redistributions in binary form must reproduce the above copyright
  14 *    notice, this list of conditions and the following disclaimer in the
  15 *    documentation and/or other materials provided with the distribution.
  16 * 3. The names of the above-listed copyright holders may not be used
  17 *    to endorse or promote products derived from this software without
  18 *    specific prior written permission.
  19 *
  20 * ALTERNATIVELY, this software may be distributed under the terms of the
  21 * GNU General Public License ("GPL") as published by the Free Software
  22 * Foundation; either version 2 of the License, or (at your option) any
  23 * later version.
  24 *
  25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
  26 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
  27 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
  29 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  30 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  31 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
  32 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
  33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
  34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  36 */
  37
  38/*
  39 * This file contains the interrupt handlers for Host mode
  40 */
  41#include <linux/kernel.h>
  42#include <linux/module.h>
  43#include <linux/spinlock.h>
  44#include <linux/interrupt.h>
  45#include <linux/dma-mapping.h>
  46#include <linux/io.h>
  47#include <linux/slab.h>
  48#include <linux/usb.h>
  49
  50#include <linux/usb/hcd.h>
  51#include <linux/usb/ch11.h>
  52
  53#include "core.h"
  54#include "hcd.h"
  55
  56/*
  57 * If we get this many NAKs on a split transaction we'll slow down
  58 * retransmission.  A 1 here means delay after the first NAK.
  59 */
  60#define DWC2_NAKS_BEFORE_DELAY          3
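     /*
      * Note: this threshold is applied in dwc2_hc_nak_intr() below, which
      * sets qh->want_wait for split transactions once enough NAKs have been
      * seen so that the retry is deferred rather than re-queued immediately.
      */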
  61
  62/* This function is for debug only */
  63static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
  64{
  65        u16 curr_frame_number = hsotg->frame_number;
  66        u16 expected = dwc2_frame_num_inc(hsotg->last_frame_num, 1);
  67
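             /*
              * Frame numbers wrap at HFNUM_MAX_FRNUM; dwc2_frame_num_inc()
              * applies that wrap, so the comparison below also works across
              * frame-number rollover.
              */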
  68        if (expected != curr_frame_number)
  69                dwc2_sch_vdbg(hsotg, "MISSED SOF %04x != %04x\n",
  70                              expected, curr_frame_number);
  71
  72#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
  73        if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
  74                if (expected != curr_frame_number) {
  75                        hsotg->frame_num_array[hsotg->frame_num_idx] =
  76                                        curr_frame_number;
  77                        hsotg->last_frame_num_array[hsotg->frame_num_idx] =
  78                                        hsotg->last_frame_num;
  79                        hsotg->frame_num_idx++;
  80                }
  81        } else if (!hsotg->dumped_frame_num_array) {
  82                int i;
  83
  84                dev_info(hsotg->dev, "Frame     Last Frame\n");
  85                dev_info(hsotg->dev, "-----     ----------\n");
  86                for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
  87                        dev_info(hsotg->dev, "0x%04x    0x%04x\n",
  88                                 hsotg->frame_num_array[i],
  89                                 hsotg->last_frame_num_array[i]);
  90                }
  91                hsotg->dumped_frame_num_array = 1;
  92        }
  93#endif
  94        hsotg->last_frame_num = curr_frame_number;
  95}
  96
  97static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
  98                                    struct dwc2_host_chan *chan,
  99                                    struct dwc2_qtd *qtd)
 100{
 101        struct usb_device *root_hub = dwc2_hsotg_to_hcd(hsotg)->self.root_hub;
 102        struct urb *usb_urb;
 103
 104        if (!chan->qh)
 105                return;
 106
 107        if (chan->qh->dev_speed == USB_SPEED_HIGH)
 108                return;
 109
 110        if (!qtd->urb)
 111                return;
 112
 113        usb_urb = qtd->urb->priv;
 114        if (!usb_urb || !usb_urb->dev || !usb_urb->dev->tt)
 115                return;
 116
 117        /*
 118         * The root hub doesn't really have a TT, but Linux thinks it
 119         * does because how could you have a "high speed hub" that
  120         * talks directly to low speed devices without a TT?
 121         * It's all lies.  Lies, I tell you.
 122         */
 123        if (usb_urb->dev->tt->hub == root_hub)
 124                return;
 125
 126        if (qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) {
 127                chan->qh->tt_buffer_dirty = 1;
 128                if (usb_hub_clear_tt_buffer(usb_urb))
 129                        /* Clear failed; let's hope things work anyway */
 130                        chan->qh->tt_buffer_dirty = 0;
 131        }
 132}
 133
 134/*
 135 * Handles the start-of-frame interrupt in host mode. Non-periodic
 136 * transactions may be queued to the DWC_otg controller for the current
 137 * (micro)frame. Periodic transactions may be queued to the controller
 138 * for the next (micro)frame.
 139 */
 140static void dwc2_sof_intr(struct dwc2_hsotg *hsotg)
 141{
 142        struct list_head *qh_entry;
 143        struct dwc2_qh *qh;
 144        enum dwc2_transaction_type tr_type;
 145
 146        /* Clear interrupt */
 147        dwc2_writel(hsotg, GINTSTS_SOF, GINTSTS);
 148
 149#ifdef DEBUG_SOF
 150        dev_vdbg(hsotg->dev, "--Start of Frame Interrupt--\n");
 151#endif
 152
 153        hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
 154
 155        dwc2_track_missed_sofs(hsotg);
 156
 157        /* Determine whether any periodic QHs should be executed */
 158        qh_entry = hsotg->periodic_sched_inactive.next;
 159        while (qh_entry != &hsotg->periodic_sched_inactive) {
 160                qh = list_entry(qh_entry, struct dwc2_qh, qh_list_entry);
 161                qh_entry = qh_entry->next;
 162                if (dwc2_frame_num_le(qh->next_active_frame,
 163                                      hsotg->frame_number)) {
 164                        dwc2_sch_vdbg(hsotg, "QH=%p ready fn=%04x, nxt=%04x\n",
 165                                      qh, hsotg->frame_number,
 166                                      qh->next_active_frame);
 167
 168                        /*
 169                         * Move QH to the ready list to be executed next
 170                         * (micro)frame
 171                         */
 172                        list_move_tail(&qh->qh_list_entry,
 173                                       &hsotg->periodic_sched_ready);
 174                }
 175        }
 176        tr_type = dwc2_hcd_select_transactions(hsotg);
 177        if (tr_type != DWC2_TRANSACTION_NONE)
 178                dwc2_hcd_queue_transactions(hsotg, tr_type);
 179}
 180
 181/*
 182 * Handles the Rx FIFO Level Interrupt, which indicates that there is
 183 * at least one packet in the Rx FIFO. The packets are moved from the FIFO to
 184 * memory if the DWC_otg controller is operating in Slave mode.
 185 */
 186static void dwc2_rx_fifo_level_intr(struct dwc2_hsotg *hsotg)
 187{
 188        u32 grxsts, chnum, bcnt, dpid, pktsts;
 189        struct dwc2_host_chan *chan;
 190
 191        if (dbg_perio())
 192                dev_vdbg(hsotg->dev, "--RxFIFO Level Interrupt--\n");
 193
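             /*
              * GRXSTSP is a read-and-pop register: this read removes the
              * status entry from the receive status queue.
              */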
 194        grxsts = dwc2_readl(hsotg, GRXSTSP);
 195        chnum = (grxsts & GRXSTS_HCHNUM_MASK) >> GRXSTS_HCHNUM_SHIFT;
 196        chan = hsotg->hc_ptr_array[chnum];
 197        if (!chan) {
 198                dev_err(hsotg->dev, "Unable to get corresponding channel\n");
 199                return;
 200        }
 201
 202        bcnt = (grxsts & GRXSTS_BYTECNT_MASK) >> GRXSTS_BYTECNT_SHIFT;
 203        dpid = (grxsts & GRXSTS_DPID_MASK) >> GRXSTS_DPID_SHIFT;
 204        pktsts = (grxsts & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT;
 205
 206        /* Packet Status */
 207        if (dbg_perio()) {
 208                dev_vdbg(hsotg->dev, "    Ch num = %d\n", chnum);
 209                dev_vdbg(hsotg->dev, "    Count = %d\n", bcnt);
 210                dev_vdbg(hsotg->dev, "    DPID = %d, chan.dpid = %d\n", dpid,
 211                         chan->data_pid_start);
 212                dev_vdbg(hsotg->dev, "    PStatus = %d\n", pktsts);
 213        }
 214
 215        switch (pktsts) {
 216        case GRXSTS_PKTSTS_HCHIN:
 217                /* Read the data into the host buffer */
 218                if (bcnt > 0) {
 219                        dwc2_read_packet(hsotg, chan->xfer_buf, bcnt);
 220
 221                        /* Update the HC fields for the next packet received */
 222                        chan->xfer_count += bcnt;
 223                        chan->xfer_buf += bcnt;
 224                }
 225                break;
 226        case GRXSTS_PKTSTS_HCHIN_XFER_COMP:
 227        case GRXSTS_PKTSTS_DATATOGGLEERR:
 228        case GRXSTS_PKTSTS_HCHHALTED:
 229                /* Handled in interrupt, just ignore data */
 230                break;
 231        default:
 232                dev_err(hsotg->dev,
 233                        "RxFIFO Level Interrupt: Unknown status %d\n", pktsts);
 234                break;
 235        }
 236}
 237
 238/*
 239 * This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
 240 * data packets may be written to the FIFO for OUT transfers. More requests
 241 * may be written to the non-periodic request queue for IN transfers. This
 242 * interrupt is enabled only in Slave mode.
 243 */
 244static void dwc2_np_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
 245{
 246        dev_vdbg(hsotg->dev, "--Non-Periodic TxFIFO Empty Interrupt--\n");
 247        dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_NON_PERIODIC);
 248}
 249
 250/*
 251 * This interrupt occurs when the periodic Tx FIFO is half-empty. More data
 252 * packets may be written to the FIFO for OUT transfers. More requests may be
 253 * written to the periodic request queue for IN transfers. This interrupt is
 254 * enabled only in Slave mode.
 255 */
 256static void dwc2_perio_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
 257{
 258        if (dbg_perio())
 259                dev_vdbg(hsotg->dev, "--Periodic TxFIFO Empty Interrupt--\n");
 260        dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_PERIODIC);
 261}
 262
 263static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
 264                              u32 *hprt0_modify)
 265{
 266        struct dwc2_core_params *params = &hsotg->params;
 267        int do_reset = 0;
 268        u32 usbcfg;
 269        u32 prtspd;
 270        u32 hcfg;
 271        u32 fslspclksel;
 272        u32 hfir;
 273
 274        dev_vdbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
 275
  276        /* Recalculate HFIR.FrInterval each time the port is enabled */
 277        hfir = dwc2_readl(hsotg, HFIR);
 278        hfir &= ~HFIR_FRINT_MASK;
 279        hfir |= dwc2_calc_frame_interval(hsotg) << HFIR_FRINT_SHIFT &
 280                HFIR_FRINT_MASK;
 281        dwc2_writel(hsotg, hfir, HFIR);
 282
 283        /* Check if we need to adjust the PHY clock speed for low power */
 284        if (!params->host_support_fs_ls_low_power) {
 285                /* Port has been enabled, set the reset change flag */
 286                hsotg->flags.b.port_reset_change = 1;
 287                return;
 288        }
 289
 290        usbcfg = dwc2_readl(hsotg, GUSBCFG);
 291        prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
 292
 293        if (prtspd == HPRT0_SPD_LOW_SPEED || prtspd == HPRT0_SPD_FULL_SPEED) {
 294                /* Low power */
 295                if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL)) {
 296                        /* Set PHY low power clock select for FS/LS devices */
 297                        usbcfg |= GUSBCFG_PHY_LP_CLK_SEL;
 298                        dwc2_writel(hsotg, usbcfg, GUSBCFG);
 299                        do_reset = 1;
 300                }
 301
 302                hcfg = dwc2_readl(hsotg, HCFG);
 303                fslspclksel = (hcfg & HCFG_FSLSPCLKSEL_MASK) >>
 304                              HCFG_FSLSPCLKSEL_SHIFT;
 305
 306                if (prtspd == HPRT0_SPD_LOW_SPEED &&
 307                    params->host_ls_low_power_phy_clk) {
  308                        /* 6 MHz */
 309                        dev_vdbg(hsotg->dev,
 310                                 "FS_PHY programming HCFG to 6 MHz\n");
 311                        if (fslspclksel != HCFG_FSLSPCLKSEL_6_MHZ) {
 312                                fslspclksel = HCFG_FSLSPCLKSEL_6_MHZ;
 313                                hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
 314                                hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
 315                                dwc2_writel(hsotg, hcfg, HCFG);
 316                                do_reset = 1;
 317                        }
 318                } else {
  319                        /* 48 MHz */
 320                        dev_vdbg(hsotg->dev,
 321                                 "FS_PHY programming HCFG to 48 MHz\n");
 322                        if (fslspclksel != HCFG_FSLSPCLKSEL_48_MHZ) {
 323                                fslspclksel = HCFG_FSLSPCLKSEL_48_MHZ;
 324                                hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
 325                                hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
 326                                dwc2_writel(hsotg, hcfg, HCFG);
 327                                do_reset = 1;
 328                        }
 329                }
 330        } else {
 331                /* Not low power */
 332                if (usbcfg & GUSBCFG_PHY_LP_CLK_SEL) {
 333                        usbcfg &= ~GUSBCFG_PHY_LP_CLK_SEL;
 334                        dwc2_writel(hsotg, usbcfg, GUSBCFG);
 335                        do_reset = 1;
 336                }
 337        }
 338
 339        if (do_reset) {
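                     /*
                      * Assert the port reset here; the queued reset work is
                      * expected to deassert HPRT0_RST after the delay and
                      * set the port reset change flag.
                      */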
 340                *hprt0_modify |= HPRT0_RST;
 341                dwc2_writel(hsotg, *hprt0_modify, HPRT0);
 342                queue_delayed_work(hsotg->wq_otg, &hsotg->reset_work,
 343                                   msecs_to_jiffies(60));
 344        } else {
 345                /* Port has been enabled, set the reset change flag */
 346                hsotg->flags.b.port_reset_change = 1;
 347        }
 348}
 349
 350/*
 351 * There are multiple conditions that can cause a port interrupt. This function
 352 * determines which interrupt conditions have occurred and handles them
 353 * appropriately.
 354 */
 355static void dwc2_port_intr(struct dwc2_hsotg *hsotg)
 356{
 357        u32 hprt0;
 358        u32 hprt0_modify;
 359
 360        dev_vdbg(hsotg->dev, "--Port Interrupt--\n");
 361
 362        hprt0 = dwc2_readl(hsotg, HPRT0);
 363        hprt0_modify = hprt0;
 364
 365        /*
 366         * Clear appropriate bits in HPRT0 to clear the interrupt bit in
 367         * GINTSTS
 368         */
 369        hprt0_modify &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG |
 370                          HPRT0_OVRCURRCHG);
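             /*
              * HPRT0_ENA is write-1-to-disable and the change bits are
              * write-1-to-clear, so they are masked out here and only the
              * bit being acknowledged is OR'ed back in below.
              */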
 371
 372        /*
 373         * Port Connect Detected
 374         * Set flag and clear if detected
 375         */
 376        if (hprt0 & HPRT0_CONNDET) {
 377                dwc2_writel(hsotg, hprt0_modify | HPRT0_CONNDET, HPRT0);
 378
 379                dev_vdbg(hsotg->dev,
 380                         "--Port Interrupt HPRT0=0x%08x Port Connect Detected--\n",
 381                         hprt0);
 382                dwc2_hcd_connect(hsotg);
 383
 384                /*
 385                 * The Hub driver asserts a reset when it sees port connect
 386                 * status change flag
 387                 */
 388        }
 389
 390        /*
 391         * Port Enable Changed
 392         * Clear if detected - Set internal flag if disabled
 393         */
 394        if (hprt0 & HPRT0_ENACHG) {
 395                dwc2_writel(hsotg, hprt0_modify | HPRT0_ENACHG, HPRT0);
 396                dev_vdbg(hsotg->dev,
 397                         "  --Port Interrupt HPRT0=0x%08x Port Enable Changed (now %d)--\n",
 398                         hprt0, !!(hprt0 & HPRT0_ENA));
 399                if (hprt0 & HPRT0_ENA) {
 400                        hsotg->new_connection = true;
 401                        dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify);
 402                } else {
 403                        hsotg->flags.b.port_enable_change = 1;
 404                        if (hsotg->params.dma_desc_fs_enable) {
 405                                u32 hcfg;
 406
 407                                hsotg->params.dma_desc_enable = false;
 408                                hsotg->new_connection = false;
 409                                hcfg = dwc2_readl(hsotg, HCFG);
 410                                hcfg &= ~HCFG_DESCDMA;
 411                                dwc2_writel(hsotg, hcfg, HCFG);
 412                        }
 413                }
 414        }
 415
 416        /* Overcurrent Change Interrupt */
 417        if (hprt0 & HPRT0_OVRCURRCHG) {
 418                dwc2_writel(hsotg, hprt0_modify | HPRT0_OVRCURRCHG,
 419                            HPRT0);
 420                dev_vdbg(hsotg->dev,
 421                         "  --Port Interrupt HPRT0=0x%08x Port Overcurrent Changed--\n",
 422                         hprt0);
 423                hsotg->flags.b.port_over_current_change = 1;
 424        }
 425}
 426
 427/*
 428 * Gets the actual length of a transfer after the transfer halts. halt_status
 429 * holds the reason for the halt.
 430 *
 431 * For IN transfers where halt_status is DWC2_HC_XFER_COMPLETE, *short_read
  432 * is set to 1 upon return if fewer than the requested number of bytes were
 433 * transferred. short_read may also be NULL on entry, in which case it remains
 434 * unchanged.
 435 */
 436static u32 dwc2_get_actual_xfer_length(struct dwc2_hsotg *hsotg,
 437                                       struct dwc2_host_chan *chan, int chnum,
 438                                       struct dwc2_qtd *qtd,
 439                                       enum dwc2_halt_status halt_status,
 440                                       int *short_read)
 441{
 442        u32 hctsiz, count, length;
 443
 444        hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
 445
 446        if (halt_status == DWC2_HC_XFER_COMPLETE) {
 447                if (chan->ep_is_in) {
 448                        count = (hctsiz & TSIZ_XFERSIZE_MASK) >>
 449                                TSIZ_XFERSIZE_SHIFT;
 450                        length = chan->xfer_len - count;
 451                        if (short_read)
 452                                *short_read = (count != 0);
 453                } else if (chan->qh->do_split) {
 454                        length = qtd->ssplit_out_xfer_count;
 455                } else {
 456                        length = chan->xfer_len;
 457                }
 458        } else {
 459                /*
 460                 * Must use the hctsiz.pktcnt field to determine how much data
 461                 * has been transferred. This field reflects the number of
 462                 * packets that have been transferred via the USB. This is
 463                 * always an integral number of packets if the transfer was
 464                 * halted before its normal completion. (Can't use the
 465                 * hctsiz.xfersize field because that reflects the number of
 466                 * bytes transferred via the AHB, not the USB).
 467                 */
 468                count = (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT;
 469                length = (chan->start_pkt_count - count) * chan->max_packet;
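                     /*
                      * For example, if 10 packets were programmed and pktcnt
                      * reads back as 4, then 6 packets completed and length
                      * is 6 * max_packet.
                      */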
 470        }
 471
 472        return length;
 473}
 474
 475/**
 476 * dwc2_update_urb_state() - Updates the state of the URB after a Transfer
 477 * Complete interrupt on the host channel. Updates the actual_length field
 478 * of the URB based on the number of bytes transferred via the host channel.
 479 * Sets the URB status if the data transfer is finished.
 480 *
 481 * @hsotg: Programming view of the DWC_otg controller
 482 * @chan: Programming view of host channel
 483 * @chnum: Channel number
 484 * @urb: Processing URB
 485 * @qtd: Queue transfer descriptor
 486 *
 487 * Return: 1 if the data transfer specified by the URB is completely finished,
 488 * 0 otherwise
 489 */
 490static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
 491                                 struct dwc2_host_chan *chan, int chnum,
 492                                 struct dwc2_hcd_urb *urb,
 493                                 struct dwc2_qtd *qtd)
 494{
 495        u32 hctsiz;
 496        int xfer_done = 0;
 497        int short_read = 0;
 498        int xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
 499                                                      DWC2_HC_XFER_COMPLETE,
 500                                                      &short_read);
 501
 502        if (urb->actual_length + xfer_length > urb->length) {
 503                dev_dbg(hsotg->dev, "%s(): trimming xfer length\n", __func__);
 504                xfer_length = urb->length - urb->actual_length;
 505        }
 506
 507        dev_vdbg(hsotg->dev, "urb->actual_length=%d xfer_length=%d\n",
 508                 urb->actual_length, xfer_length);
 509        urb->actual_length += xfer_length;
 510
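             /*
              * A bulk OUT URB with URB_SEND_ZERO_PACKET that ends exactly on
              * a max-packet boundary still needs a trailing zero-length
              * packet, so it is not marked done yet.
              */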
 511        if (xfer_length && chan->ep_type == USB_ENDPOINT_XFER_BULK &&
 512            (urb->flags & URB_SEND_ZERO_PACKET) &&
 513            urb->actual_length >= urb->length &&
 514            !(urb->length % chan->max_packet)) {
 515                xfer_done = 0;
 516        } else if (short_read || urb->actual_length >= urb->length) {
 517                xfer_done = 1;
 518                urb->status = 0;
 519        }
 520
 521        hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
 522        dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
 523                 __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
 524        dev_vdbg(hsotg->dev, "  chan->xfer_len %d\n", chan->xfer_len);
 525        dev_vdbg(hsotg->dev, "  hctsiz.xfersize %d\n",
 526                 (hctsiz & TSIZ_XFERSIZE_MASK) >> TSIZ_XFERSIZE_SHIFT);
 527        dev_vdbg(hsotg->dev, "  urb->transfer_buffer_length %d\n", urb->length);
 528        dev_vdbg(hsotg->dev, "  urb->actual_length %d\n", urb->actual_length);
 529        dev_vdbg(hsotg->dev, "  short_read %d, xfer_done %d\n", short_read,
 530                 xfer_done);
 531
 532        return xfer_done;
 533}
 534
 535/*
 536 * Save the starting data toggle for the next transfer. The data toggle is
 537 * saved in the QH for non-control transfers and it's saved in the QTD for
 538 * control transfers.
 539 */
 540void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
 541                               struct dwc2_host_chan *chan, int chnum,
 542                               struct dwc2_qtd *qtd)
 543{
 544        u32 hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
 545        u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
 546
 547        if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
 548                if (WARN(!chan || !chan->qh,
 549                         "chan->qh must be specified for non-control eps\n"))
 550                        return;
 551
 552                if (pid == TSIZ_SC_MC_PID_DATA0)
 553                        chan->qh->data_toggle = DWC2_HC_PID_DATA0;
 554                else
 555                        chan->qh->data_toggle = DWC2_HC_PID_DATA1;
 556        } else {
 557                if (WARN(!qtd,
 558                         "qtd must be specified for control eps\n"))
 559                        return;
 560
 561                if (pid == TSIZ_SC_MC_PID_DATA0)
 562                        qtd->data_toggle = DWC2_HC_PID_DATA0;
 563                else
 564                        qtd->data_toggle = DWC2_HC_PID_DATA1;
 565        }
 566}
 567
 568/**
 569 * dwc2_update_isoc_urb_state() - Updates the state of an Isochronous URB when
 570 * the transfer is stopped for any reason. The fields of the current entry in
 571 * the frame descriptor array are set based on the transfer state and the input
 572 * halt_status. Completes the Isochronous URB if all the URB frames have been
 573 * completed.
 574 *
 575 * @hsotg: Programming view of the DWC_otg controller
 576 * @chan: Programming view of host channel
 577 * @chnum: Channel number
 578 * @halt_status: Reason for halting a host channel
 579 * @qtd: Queue transfer descriptor
 580 *
 581 * Return: DWC2_HC_XFER_COMPLETE if there are more frames remaining to be
 582 * transferred in the URB. Otherwise return DWC2_HC_XFER_URB_COMPLETE.
 583 */
 584static enum dwc2_halt_status dwc2_update_isoc_urb_state(
 585                struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
 586                int chnum, struct dwc2_qtd *qtd,
 587                enum dwc2_halt_status halt_status)
 588{
 589        struct dwc2_hcd_iso_packet_desc *frame_desc;
 590        struct dwc2_hcd_urb *urb = qtd->urb;
 591
 592        if (!urb)
 593                return DWC2_HC_XFER_NO_HALT_STATUS;
 594
 595        frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
 596
 597        switch (halt_status) {
 598        case DWC2_HC_XFER_COMPLETE:
 599                frame_desc->status = 0;
 600                frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
 601                                        chan, chnum, qtd, halt_status, NULL);
 602                break;
 603        case DWC2_HC_XFER_FRAME_OVERRUN:
 604                urb->error_count++;
 605                if (chan->ep_is_in)
 606                        frame_desc->status = -ENOSR;
 607                else
 608                        frame_desc->status = -ECOMM;
 609                frame_desc->actual_length = 0;
 610                break;
 611        case DWC2_HC_XFER_BABBLE_ERR:
 612                urb->error_count++;
 613                frame_desc->status = -EOVERFLOW;
 614                /* Don't need to update actual_length in this case */
 615                break;
 616        case DWC2_HC_XFER_XACT_ERR:
 617                urb->error_count++;
 618                frame_desc->status = -EPROTO;
 619                frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
 620                                        chan, chnum, qtd, halt_status, NULL);
 621
 622                /* Skip whole frame */
 623                if (chan->qh->do_split &&
 624                    chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
 625                    hsotg->params.host_dma) {
 626                        qtd->complete_split = 0;
 627                        qtd->isoc_split_offset = 0;
 628                }
 629
 630                break;
 631        default:
 632                dev_err(hsotg->dev, "Unhandled halt_status (%d)\n",
 633                        halt_status);
 634                break;
 635        }
 636
 637        if (++qtd->isoc_frame_index == urb->packet_count) {
 638                /*
 639                 * urb->status is not used for isoc transfers. The individual
 640                 * frame_desc statuses are used instead.
 641                 */
 642                dwc2_host_complete(hsotg, qtd, 0);
 643                halt_status = DWC2_HC_XFER_URB_COMPLETE;
 644        } else {
 645                halt_status = DWC2_HC_XFER_COMPLETE;
 646        }
 647
 648        return halt_status;
 649}
 650
 651/*
 652 * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
 653 * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
 654 * still linked to the QH, the QH is added to the end of the inactive
 655 * non-periodic schedule. For periodic QHs, removes the QH from the periodic
 656 * schedule if no more QTDs are linked to the QH.
 657 */
 658static void dwc2_deactivate_qh(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
 659                               int free_qtd)
 660{
 661        int continue_split = 0;
 662        struct dwc2_qtd *qtd;
 663
 664        if (dbg_qh(qh))
 665                dev_vdbg(hsotg->dev, "  %s(%p,%p,%d)\n", __func__,
 666                         hsotg, qh, free_qtd);
 667
 668        if (list_empty(&qh->qtd_list)) {
 669                dev_dbg(hsotg->dev, "## QTD list empty ##\n");
 670                goto no_qtd;
 671        }
 672
 673        qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
 674
 675        if (qtd->complete_split)
 676                continue_split = 1;
 677        else if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_MID ||
 678                 qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_END)
 679                continue_split = 1;
 680
 681        if (free_qtd) {
 682                dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
 683                continue_split = 0;
 684        }
 685
 686no_qtd:
 687        qh->channel = NULL;
 688        dwc2_hcd_qh_deactivate(hsotg, qh, continue_split);
 689}
 690
 691/**
 692 * dwc2_release_channel() - Releases a host channel for use by other transfers
 693 *
 694 * @hsotg:       The HCD state structure
 695 * @chan:        The host channel to release
 696 * @qtd:         The QTD associated with the host channel. This QTD may be
 697 *               freed if the transfer is complete or an error has occurred.
 698 * @halt_status: Reason the channel is being released. This status
 699 *               determines the actions taken by this function.
 700 *
 701 * Also attempts to select and queue more transactions since at least one host
 702 * channel is available.
 703 */
 704static void dwc2_release_channel(struct dwc2_hsotg *hsotg,
 705                                 struct dwc2_host_chan *chan,
 706                                 struct dwc2_qtd *qtd,
 707                                 enum dwc2_halt_status halt_status)
 708{
 709        enum dwc2_transaction_type tr_type;
 710        u32 haintmsk;
 711        int free_qtd = 0;
 712
 713        if (dbg_hc(chan))
 714                dev_vdbg(hsotg->dev, "  %s: channel %d, halt_status %d\n",
 715                         __func__, chan->hc_num, halt_status);
 716
 717        switch (halt_status) {
 718        case DWC2_HC_XFER_URB_COMPLETE:
 719                free_qtd = 1;
 720                break;
 721        case DWC2_HC_XFER_AHB_ERR:
 722        case DWC2_HC_XFER_STALL:
 723        case DWC2_HC_XFER_BABBLE_ERR:
 724                free_qtd = 1;
 725                break;
 726        case DWC2_HC_XFER_XACT_ERR:
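                     /* Three consecutive transaction errors exhaust the retries */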
 727                if (qtd && qtd->error_count >= 3) {
 728                        dev_vdbg(hsotg->dev,
 729                                 "  Complete URB with transaction error\n");
 730                        free_qtd = 1;
 731                        dwc2_host_complete(hsotg, qtd, -EPROTO);
 732                }
 733                break;
 734        case DWC2_HC_XFER_URB_DEQUEUE:
 735                /*
 736                 * The QTD has already been removed and the QH has been
 737                 * deactivated. Don't want to do anything except release the
 738                 * host channel and try to queue more transfers.
 739                 */
 740                goto cleanup;
 741        case DWC2_HC_XFER_PERIODIC_INCOMPLETE:
 742                dev_vdbg(hsotg->dev, "  Complete URB with I/O error\n");
 743                free_qtd = 1;
 744                dwc2_host_complete(hsotg, qtd, -EIO);
 745                break;
 746        case DWC2_HC_XFER_NO_HALT_STATUS:
 747        default:
 748                break;
 749        }
 750
 751        dwc2_deactivate_qh(hsotg, chan->qh, free_qtd);
 752
 753cleanup:
 754        /*
 755         * Release the host channel for use by other transfers. The cleanup
 756         * function clears the channel interrupt enables and conditions, so
 757         * there's no need to clear the Channel Halted interrupt separately.
 758         */
 759        if (!list_empty(&chan->hc_list_entry))
 760                list_del(&chan->hc_list_entry);
 761        dwc2_hc_cleanup(hsotg, chan);
 762        list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
 763
 764        if (hsotg->params.uframe_sched) {
 765                hsotg->available_host_channels++;
 766        } else {
 767                switch (chan->ep_type) {
 768                case USB_ENDPOINT_XFER_CONTROL:
 769                case USB_ENDPOINT_XFER_BULK:
 770                        hsotg->non_periodic_channels--;
 771                        break;
 772                default:
 773                        /*
 774                         * Don't release reservations for periodic channels
 775                         * here. That's done when a periodic transfer is
 776                         * descheduled (i.e. when the QH is removed from the
 777                         * periodic schedule).
 778                         */
 779                        break;
 780                }
 781        }
 782
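             /*
              * Mask this channel's interrupt in HAINTMSK; it is unmasked
              * again when the channel is set up for a new transfer.
              */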
 783        haintmsk = dwc2_readl(hsotg, HAINTMSK);
 784        haintmsk &= ~(1 << chan->hc_num);
 785        dwc2_writel(hsotg, haintmsk, HAINTMSK);
 786
 787        /* Try to queue more transfers now that there's a free channel */
 788        tr_type = dwc2_hcd_select_transactions(hsotg);
 789        if (tr_type != DWC2_TRANSACTION_NONE)
 790                dwc2_hcd_queue_transactions(hsotg, tr_type);
 791}
 792
 793/*
 794 * Halts a host channel. If the channel cannot be halted immediately because
 795 * the request queue is full, this function ensures that the FIFO empty
 796 * interrupt for the appropriate queue is enabled so that the halt request can
 797 * be queued when there is space in the request queue.
 798 *
 799 * This function may also be called in DMA mode. In that case, the channel is
 800 * simply released since the core always halts the channel automatically in
 801 * DMA mode.
 802 */
 803static void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
 804                              struct dwc2_host_chan *chan, struct dwc2_qtd *qtd,
 805                              enum dwc2_halt_status halt_status)
 806{
 807        if (dbg_hc(chan))
 808                dev_vdbg(hsotg->dev, "%s()\n", __func__);
 809
 810        if (hsotg->params.host_dma) {
 811                if (dbg_hc(chan))
 812                        dev_vdbg(hsotg->dev, "DMA enabled\n");
 813                dwc2_release_channel(hsotg, chan, qtd, halt_status);
 814                return;
 815        }
 816
 817        /* Slave mode processing */
 818        dwc2_hc_halt(hsotg, chan, halt_status);
 819
 820        if (chan->halt_on_queue) {
 821                u32 gintmsk;
 822
 823                dev_vdbg(hsotg->dev, "Halt on queue\n");
 824                if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
 825                    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
 826                        dev_vdbg(hsotg->dev, "control/bulk\n");
 827                        /*
 828                         * Make sure the Non-periodic Tx FIFO empty interrupt
 829                         * is enabled so that the non-periodic schedule will
 830                         * be processed
 831                         */
 832                        gintmsk = dwc2_readl(hsotg, GINTMSK);
 833                        gintmsk |= GINTSTS_NPTXFEMP;
 834                        dwc2_writel(hsotg, gintmsk, GINTMSK);
 835                } else {
 836                        dev_vdbg(hsotg->dev, "isoc/intr\n");
 837                        /*
 838                         * Move the QH from the periodic queued schedule to
 839                         * the periodic assigned schedule. This allows the
 840                         * halt to be queued when the periodic schedule is
 841                         * processed.
 842                         */
 843                        list_move_tail(&chan->qh->qh_list_entry,
 844                                       &hsotg->periodic_sched_assigned);
 845
 846                        /*
 847                         * Make sure the Periodic Tx FIFO Empty interrupt is
 848                         * enabled so that the periodic schedule will be
 849                         * processed
 850                         */
 851                        gintmsk = dwc2_readl(hsotg, GINTMSK);
 852                        gintmsk |= GINTSTS_PTXFEMP;
 853                        dwc2_writel(hsotg, gintmsk, GINTMSK);
 854                }
 855        }
 856}
 857
 858/*
 859 * Performs common cleanup for non-periodic transfers after a Transfer
 860 * Complete interrupt. This function should be called after any endpoint type
 861 * specific handling is finished to release the host channel.
 862 */
 863static void dwc2_complete_non_periodic_xfer(struct dwc2_hsotg *hsotg,
 864                                            struct dwc2_host_chan *chan,
 865                                            int chnum, struct dwc2_qtd *qtd,
 866                                            enum dwc2_halt_status halt_status)
 867{
 868        dev_vdbg(hsotg->dev, "%s()\n", __func__);
 869
 870        qtd->error_count = 0;
 871
 872        if (chan->hcint & HCINTMSK_NYET) {
 873                /*
 874                 * Got a NYET on the last transaction of the transfer. This
 875                 * means that the endpoint should be in the PING state at the
 876                 * beginning of the next transfer.
 877                 */
 878                dev_vdbg(hsotg->dev, "got NYET\n");
 879                chan->qh->ping_state = 1;
 880        }
 881
 882        /*
 883         * Always halt and release the host channel to make it available for
 884         * more transfers. There may still be more phases for a control
 885         * transfer or more data packets for a bulk transfer at this point,
 886         * but the host channel is still halted. A channel will be reassigned
 887         * to the transfer when the non-periodic schedule is processed after
 888         * the channel is released. This allows transactions to be queued
 889         * properly via dwc2_hcd_queue_transactions, which also enables the
 890         * Tx FIFO Empty interrupt if necessary.
 891         */
 892        if (chan->ep_is_in) {
 893                /*
 894                 * IN transfers in Slave mode require an explicit disable to
 895                 * halt the channel. (In DMA mode, this call simply releases
 896                 * the channel.)
 897                 */
 898                dwc2_halt_channel(hsotg, chan, qtd, halt_status);
 899        } else {
 900                /*
 901                 * The channel is automatically disabled by the core for OUT
 902                 * transfers in Slave mode
 903                 */
 904                dwc2_release_channel(hsotg, chan, qtd, halt_status);
 905        }
 906}
 907
 908/*
 909 * Performs common cleanup for periodic transfers after a Transfer Complete
 910 * interrupt. This function should be called after any endpoint type specific
 911 * handling is finished to release the host channel.
 912 */
 913static void dwc2_complete_periodic_xfer(struct dwc2_hsotg *hsotg,
 914                                        struct dwc2_host_chan *chan, int chnum,
 915                                        struct dwc2_qtd *qtd,
 916                                        enum dwc2_halt_status halt_status)
 917{
 918        u32 hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
 919
 920        qtd->error_count = 0;
 921
 922        if (!chan->ep_is_in || (hctsiz & TSIZ_PKTCNT_MASK) == 0)
 923                /* Core halts channel in these cases */
 924                dwc2_release_channel(hsotg, chan, qtd, halt_status);
 925        else
 926                /* Flush any outstanding requests from the Tx queue */
 927                dwc2_halt_channel(hsotg, chan, qtd, halt_status);
 928}
 929
 930static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
 931                                       struct dwc2_host_chan *chan, int chnum,
 932                                       struct dwc2_qtd *qtd)
 933{
 934        struct dwc2_hcd_iso_packet_desc *frame_desc;
 935        u32 len;
 936        u32 hctsiz;
 937        u32 pid;
 938
 939        if (!qtd->urb)
 940                return 0;
 941
 942        frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
 943        len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
 944                                          DWC2_HC_XFER_COMPLETE, NULL);
 945        if (!len && !qtd->isoc_split_offset) {
 946                qtd->complete_split = 0;
 947                return 0;
 948        }
 949
 950        frame_desc->actual_length += len;
 951
 952        if (chan->align_buf) {
 953                dev_vdbg(hsotg->dev, "non-aligned buffer\n");
 954                dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
 955                                 DWC2_KMEM_UNALIGNED_BUF_SIZE, DMA_FROM_DEVICE);
 956                memcpy(qtd->urb->buf + (chan->xfer_dma - qtd->urb->dma),
 957                       chan->qh->dw_align_buf, len);
 958        }
 959
 960        qtd->isoc_split_offset += len;
 961
 962        hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
 963        pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
 964
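             /*
              * For an isochronous CSPLIT IN, intermediate chunks arrive with
              * an MDATA PID and the final chunk with DATA0, so a DATA0 PID
              * also marks the frame as finished.
              */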
 965        if (frame_desc->actual_length >= frame_desc->length || pid == 0) {
 966                frame_desc->status = 0;
 967                qtd->isoc_frame_index++;
 968                qtd->complete_split = 0;
 969                qtd->isoc_split_offset = 0;
 970        }
 971
 972        if (qtd->isoc_frame_index == qtd->urb->packet_count) {
 973                dwc2_host_complete(hsotg, qtd, 0);
 974                dwc2_release_channel(hsotg, chan, qtd,
 975                                     DWC2_HC_XFER_URB_COMPLETE);
 976        } else {
 977                dwc2_release_channel(hsotg, chan, qtd,
 978                                     DWC2_HC_XFER_NO_HALT_STATUS);
 979        }
 980
 981        return 1;       /* Indicates that channel released */
 982}
 983
 984/*
 985 * Handles a host channel Transfer Complete interrupt. This handler may be
 986 * called in either DMA mode or Slave mode.
 987 */
 988static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
 989                                  struct dwc2_host_chan *chan, int chnum,
 990                                  struct dwc2_qtd *qtd)
 991{
 992        struct dwc2_hcd_urb *urb = qtd->urb;
 993        enum dwc2_halt_status halt_status = DWC2_HC_XFER_COMPLETE;
 994        int pipe_type;
 995        int urb_xfer_done;
 996
 997        if (dbg_hc(chan))
 998                dev_vdbg(hsotg->dev,
 999                         "--Host Channel %d Interrupt: Transfer Complete--\n",
1000                         chnum);
1001
1002        if (!urb)
1003                goto handle_xfercomp_done;
1004
1005        pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
1006
1007        if (hsotg->params.dma_desc_enable) {
1008                dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status);
1009                if (pipe_type == USB_ENDPOINT_XFER_ISOC)
1010                        /* Do not disable the interrupt, just clear it */
1011                        return;
1012                goto handle_xfercomp_done;
1013        }
1014
1015        /* Handle xfer complete on CSPLIT */
1016        if (chan->qh->do_split) {
1017                if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
1018                    hsotg->params.host_dma) {
1019                        if (qtd->complete_split &&
1020                            dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum,
1021                                                        qtd))
1022                                goto handle_xfercomp_done;
1023                } else {
1024                        qtd->complete_split = 0;
1025                }
1026        }
1027
1028        /* Update the QTD and URB states */
1029        switch (pipe_type) {
1030        case USB_ENDPOINT_XFER_CONTROL:
1031                switch (qtd->control_phase) {
1032                case DWC2_CONTROL_SETUP:
1033                        if (urb->length > 0)
1034                                qtd->control_phase = DWC2_CONTROL_DATA;
1035                        else
1036                                qtd->control_phase = DWC2_CONTROL_STATUS;
1037                        dev_vdbg(hsotg->dev,
1038                                 "  Control setup transaction done\n");
1039                        halt_status = DWC2_HC_XFER_COMPLETE;
1040                        break;
1041                case DWC2_CONTROL_DATA:
1042                        urb_xfer_done = dwc2_update_urb_state(hsotg, chan,
1043                                                              chnum, urb, qtd);
1044                        if (urb_xfer_done) {
1045                                qtd->control_phase = DWC2_CONTROL_STATUS;
1046                                dev_vdbg(hsotg->dev,
1047                                         "  Control data transfer done\n");
1048                        } else {
1049                                dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
1050                                                          qtd);
1051                        }
1052                        halt_status = DWC2_HC_XFER_COMPLETE;
1053                        break;
1054                case DWC2_CONTROL_STATUS:
1055                        dev_vdbg(hsotg->dev, "  Control transfer complete\n");
1056                        if (urb->status == -EINPROGRESS)
1057                                urb->status = 0;
1058                        dwc2_host_complete(hsotg, qtd, urb->status);
1059                        halt_status = DWC2_HC_XFER_URB_COMPLETE;
1060                        break;
1061                }
1062
1063                dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
1064                                                halt_status);
1065                break;
1066        case USB_ENDPOINT_XFER_BULK:
1067                dev_vdbg(hsotg->dev, "  Bulk transfer complete\n");
1068                urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
1069                                                      qtd);
1070                if (urb_xfer_done) {
1071                        dwc2_host_complete(hsotg, qtd, urb->status);
1072                        halt_status = DWC2_HC_XFER_URB_COMPLETE;
1073                } else {
1074                        halt_status = DWC2_HC_XFER_COMPLETE;
1075                }
1076
1077                dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1078                dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
1079                                                halt_status);
1080                break;
1081        case USB_ENDPOINT_XFER_INT:
1082                dev_vdbg(hsotg->dev, "  Interrupt transfer complete\n");
1083                urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
1084                                                      qtd);
1085
1086                /*
1087                 * Interrupt URB is done on the first transfer complete
1088                 * interrupt
1089                 */
1090                if (urb_xfer_done) {
1091                        dwc2_host_complete(hsotg, qtd, urb->status);
1092                        halt_status = DWC2_HC_XFER_URB_COMPLETE;
1093                } else {
1094                        halt_status = DWC2_HC_XFER_COMPLETE;
1095                }
1096
1097                dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1098                dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
1099                                            halt_status);
1100                break;
1101        case USB_ENDPOINT_XFER_ISOC:
1102                if (dbg_perio())
1103                        dev_vdbg(hsotg->dev, "  Isochronous transfer complete\n");
1104                if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_ALL)
1105                        halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
1106                                                        chnum, qtd,
1107                                                        DWC2_HC_XFER_COMPLETE);
1108                dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
1109                                            halt_status);
1110                break;
1111        }
1112
1113handle_xfercomp_done:
1114        disable_hc_int(hsotg, chnum, HCINTMSK_XFERCOMPL);
1115}
1116
1117/*
1118 * Handles a host channel STALL interrupt. This handler may be called in
1119 * either DMA mode or Slave mode.
1120 */
1121static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
1122                               struct dwc2_host_chan *chan, int chnum,
1123                               struct dwc2_qtd *qtd)
1124{
1125        struct dwc2_hcd_urb *urb = qtd->urb;
1126        int pipe_type;
1127
1128        dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n",
1129                chnum);
1130
1131        if (hsotg->params.dma_desc_enable) {
1132                dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1133                                            DWC2_HC_XFER_STALL);
1134                goto handle_stall_done;
1135        }
1136
1137        if (!urb)
1138                goto handle_stall_halt;
1139
1140        pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
1141
1142        if (pipe_type == USB_ENDPOINT_XFER_CONTROL)
1143                dwc2_host_complete(hsotg, qtd, -EPIPE);
1144
1145        if (pipe_type == USB_ENDPOINT_XFER_BULK ||
1146            pipe_type == USB_ENDPOINT_XFER_INT) {
1147                dwc2_host_complete(hsotg, qtd, -EPIPE);
1148                /*
1149                 * USB protocol requires resetting the data toggle for bulk
1150                 * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
1151                 * setup command is issued to the endpoint. Anticipate the
1152                 * CLEAR_FEATURE command since a STALL has occurred and reset
1153                 * the data toggle now.
1154                 */
1155                chan->qh->data_toggle = 0;
1156        }
1157
1158handle_stall_halt:
1159        dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_STALL);
1160
1161handle_stall_done:
1162        disable_hc_int(hsotg, chnum, HCINTMSK_STALL);
1163}
1164
1165/*
1166 * Updates the state of the URB when a transfer has been stopped due to an
1167 * abnormal condition before the transfer completes. Modifies the
1168 * actual_length field of the URB to reflect the number of bytes that have
1169 * actually been transferred via the host channel.
1170 */
1171static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
1172                                      struct dwc2_host_chan *chan, int chnum,
1173                                      struct dwc2_hcd_urb *urb,
1174                                      struct dwc2_qtd *qtd,
1175                                      enum dwc2_halt_status halt_status)
1176{
1177        u32 xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum,
1178                                                      qtd, halt_status, NULL);
1179        u32 hctsiz;
1180
1181        if (urb->actual_length + xfer_length > urb->length) {
1182                dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
1183                xfer_length = urb->length - urb->actual_length;
1184        }
1185
1186        urb->actual_length += xfer_length;
1187
1188        hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
1189        dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
1190                 __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
1191        dev_vdbg(hsotg->dev, "  chan->start_pkt_count %d\n",
1192                 chan->start_pkt_count);
1193        dev_vdbg(hsotg->dev, "  hctsiz.pktcnt %d\n",
1194                 (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT);
1195        dev_vdbg(hsotg->dev, "  chan->max_packet %d\n", chan->max_packet);
1196        dev_vdbg(hsotg->dev, "  bytes_transferred %d\n",
1197                 xfer_length);
1198        dev_vdbg(hsotg->dev, "  urb->actual_length %d\n",
1199                 urb->actual_length);
1200        dev_vdbg(hsotg->dev, "  urb->transfer_buffer_length %d\n",
1201                 urb->length);
1202}
1203
1204/*
1205 * Handles a host channel NAK interrupt. This handler may be called in either
1206 * DMA mode or Slave mode.
1207 */
1208static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
1209                             struct dwc2_host_chan *chan, int chnum,
1210                             struct dwc2_qtd *qtd)
1211{
1212        if (!qtd) {
1213                dev_dbg(hsotg->dev, "%s: qtd is NULL\n", __func__);
1214                return;
1215        }
1216
1217        if (!qtd->urb) {
1218                dev_dbg(hsotg->dev, "%s: qtd->urb is NULL\n", __func__);
1219                return;
1220        }
1221
1222        if (dbg_hc(chan))
1223                dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NAK Received--\n",
1224                         chnum);
1225
1226        /*
1227         * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
1228         * interrupt. Re-start the SSPLIT transfer.
1229         *
1230         * Normally for non-periodic transfers we'll retry right away, but to
1231         * avoid interrupt storms we'll wait before retrying if we've got
1232         * several NAKs. If we didn't do this we'd retry directly from the
1233         * interrupt handler and could end up quickly getting another
1234         * interrupt (another NAK), which we'd retry. Note that we do not
1235         * delay retries for IN parts of control requests, as those are expected
 1236         * to complete fairly quickly, and if we delay them we risk
 1237         * confusing the device and causing it to issue a STALL.
1238         *
1239         * Note that in DMA mode software only gets involved to re-send NAKed
1240         * transfers for split transactions, so we only need to apply this
1241         * delaying logic when handling splits. In non-DMA mode presumably we
1242         * might want a similar delay if someone can demonstrate this problem
1243         * affects that code path too.
1244         */
1245        if (chan->do_split) {
1246                if (chan->complete_split)
1247                        qtd->error_count = 0;
1248                qtd->complete_split = 0;
1249                qtd->num_naks++;
1250                qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY &&
1251                                !(chan->ep_type == USB_ENDPOINT_XFER_CONTROL &&
1252                                  chan->ep_is_in);
1253                dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1254                goto handle_nak_done;
1255        }
1256
1257        switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1258        case USB_ENDPOINT_XFER_CONTROL:
1259        case USB_ENDPOINT_XFER_BULK:
1260                if (hsotg->params.host_dma && chan->ep_is_in) {
1261                        /*
1262                         * NAK interrupts are enabled on bulk/control IN
1263                         * transfers in DMA mode for the sole purpose of
1264                         * resetting the error count after a transaction error
1265                         * occurs. The core will continue transferring data.
1266                         */
1267                        qtd->error_count = 0;
1268                        break;
1269                }
1270
1271                /*
1272                 * NAK interrupts normally occur during OUT transfers in DMA
1273                 * or Slave mode. For IN transfers, more requests will be
1274                 * queued as request queue space is available.
1275                 */
1276                qtd->error_count = 0;
1277
1278                if (!chan->qh->ping_state) {
1279                        dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1280                                                  qtd, DWC2_HC_XFER_NAK);
1281                        dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1282
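                            /* The PING protocol only exists for high-speed devices */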
1283                        if (chan->speed == USB_SPEED_HIGH)
1284                                chan->qh->ping_state = 1;
1285                }
1286
1287                /*
1288                 * Halt the channel so the transfer can be re-started from
1289                 * the appropriate point or the PING protocol will
1290                 * start/continue
1291                 */
1292                dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1293                break;
1294        case USB_ENDPOINT_XFER_INT:
1295                qtd->error_count = 0;
1296                dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1297                break;
1298        case USB_ENDPOINT_XFER_ISOC:
1299                /* Should never get called for isochronous transfers */
1300                dev_err(hsotg->dev, "NAK interrupt for ISOC transfer\n");
1301                break;
1302        }
1303
1304handle_nak_done:
1305        disable_hc_int(hsotg, chnum, HCINTMSK_NAK);
1306}
1307
1308/*
1309 * Handles a host channel ACK interrupt. This interrupt is enabled when
1310 * performing the PING protocol in Slave mode, when errors occur during
1311 * either Slave mode or DMA mode, and during Start Split transactions.
1312 */
1313static void dwc2_hc_ack_intr(struct dwc2_hsotg *hsotg,
1314                             struct dwc2_host_chan *chan, int chnum,
1315                             struct dwc2_qtd *qtd)
1316{
1317        struct dwc2_hcd_iso_packet_desc *frame_desc;
1318
1319        if (dbg_hc(chan))
1320                dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: ACK Received--\n",
1321                         chnum);
1322
1323        if (chan->do_split) {
1324                /* Handle ACK on SSPLIT. ACK should not occur in CSPLIT. */
1325                if (!chan->ep_is_in &&
1326                    chan->data_pid_start != DWC2_HC_PID_SETUP)
1327                        qtd->ssplit_out_xfer_count = chan->xfer_len;
1328
1329                if (chan->ep_type != USB_ENDPOINT_XFER_ISOC || chan->ep_is_in) {
1330                        qtd->complete_split = 1;
1331                        dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
1332                } else {
1333                        /* ISOC OUT */
1334                        switch (chan->xact_pos) {
1335                        case DWC2_HCSPLT_XACTPOS_ALL:
1336                                break;
1337                        case DWC2_HCSPLT_XACTPOS_END:
1338                                qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
1339                                qtd->isoc_split_offset = 0;
1340                                break;
1341                        case DWC2_HCSPLT_XACTPOS_BEGIN:
1342                        case DWC2_HCSPLT_XACTPOS_MID:
1343                                /*
1344                                 * For BEGIN or MID, calculate the length for
1345                                 * the next microframe to determine the correct
1346                                 * SSPLIT token, either MID or END
1347                                 */
1348                                frame_desc = &qtd->urb->iso_descs[
1349                                                qtd->isoc_frame_index];
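                                    /*
                                     * Each ISOC OUT start-split carries at
                                     * most 188 bytes per microframe, so step
                                     * the offset by that amount.
                                     */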
1350                                qtd->isoc_split_offset += 188;
1351
1352                                if (frame_desc->length - qtd->isoc_split_offset
1353                                                        <= 188)
1354                                        qtd->isoc_split_pos =
1355                                                        DWC2_HCSPLT_XACTPOS_END;
1356                                else
1357                                        qtd->isoc_split_pos =
1358                                                        DWC2_HCSPLT_XACTPOS_MID;
1359                                break;
1360                        }
1361                }
1362        } else {
1363                qtd->error_count = 0;
1364
1365                if (chan->qh->ping_state) {
1366                        chan->qh->ping_state = 0;
1367                        /*
1368                         * Halt the channel so the transfer can be re-started
1369                         * from the appropriate point. This only happens in
1370                         * Slave mode. In DMA mode, the ping_state is cleared
1371                         * when the transfer is started because the core
1372                         * automatically executes the PING, then the transfer.
1373                         */
1374                        dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
1375                }
1376        }
1377
1378        /*
1379         * If the ACK occurred when _not_ in the PING state, let the channel
1380         * continue transferring data after clearing the error count
1381         */
1382        disable_hc_int(hsotg, chnum, HCINTMSK_ACK);
1383}
1384
1385/*
1386 * Handles a host channel NYET interrupt. This interrupt should only occur on
1387 * Bulk and Control OUT endpoints and for complete split transactions. If a
1388 * NYET occurs at the same time as a Transfer Complete interrupt, it is
1389 * handled in the xfercomp interrupt handler, not here. This handler may be
1390 * called in either DMA mode or Slave mode.
1391 */
1392static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
1393                              struct dwc2_host_chan *chan, int chnum,
1394                              struct dwc2_qtd *qtd)
1395{
1396        if (dbg_hc(chan))
1397                dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NYET Received--\n",
1398                         chnum);
1399
1400        /*
1401         * NYET on CSPLIT: re-do the CSPLIT immediately on non-periodic
1402         * transfers
1403         */
1404        if (chan->do_split && chan->complete_split) {
1405                if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC &&
1406                    hsotg->params.host_dma) {
1407                        qtd->complete_split = 0;
1408                        qtd->isoc_split_offset = 0;
1409                        qtd->isoc_frame_index++;
1410                        if (qtd->urb &&
1411                            qtd->isoc_frame_index == qtd->urb->packet_count) {
1412                                dwc2_host_complete(hsotg, qtd, 0);
1413                                dwc2_release_channel(hsotg, chan, qtd,
1414                                                     DWC2_HC_XFER_URB_COMPLETE);
1415                        } else {
1416                                dwc2_release_channel(hsotg, chan, qtd,
1417                                                DWC2_HC_XFER_NO_HALT_STATUS);
1418                        }
1419                        goto handle_nyet_done;
1420                }
1421
1422                if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1423                    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1424                        struct dwc2_qh *qh = chan->qh;
1425                        bool past_end;
1426
1427                        if (!hsotg->params.uframe_sched) {
1428                                int frnum = dwc2_hcd_get_frame_number(hsotg);
1429
1430                                /* We don't have num_hs_transfers here; use a simple check */
1431                                past_end = dwc2_full_frame_num(frnum) !=
1432                                     dwc2_full_frame_num(qh->next_active_frame);
1433                        } else {
1434                                int end_frnum;
1435
1436                                /*
1437                                 * Figure out the end frame based on
1438                                 * schedule.
1439                                 *
1440                                 * We don't want to go on trying again
1441                                 * and again forever. Let's stop when
1442                                 * we've done all the transfers that
1443                                 * were scheduled.
1444                                 *
1445                                 * We're going to be comparing
1446                                 * start_active_frame and
1447                                 * next_active_frame, both of which
1448                                 * are 1 before the time the packet
1449                                 * goes on the wire, so that cancels
1450                                 * out. Basically if we had 1 transfer
1451                                 * and we saw 1 NYET then we're done.
1452                                 * We're getting a NYET here so if
1453                                 * next >= (start + num_transfers)
1454                                 * we're done. The complexity is that
1455                                 * for all but ISOC_OUT we skip one
1456                                 * slot.
1457                                 */
1458                                end_frnum = dwc2_frame_num_inc(
1459                                        qh->start_active_frame,
1460                                        qh->num_hs_transfers);
1461
1462                                if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
1463                                    qh->ep_is_in)
1464                                        end_frnum =
1465                                               dwc2_frame_num_inc(end_frnum, 1);
1466
1467                                past_end = dwc2_frame_num_le(
1468                                        end_frnum, qh->next_active_frame);
1469                        }
1470
1471                        if (past_end) {
1472                                /* Treat this as a transaction error. */
1473#if 0
1474                                /*
1475                                 * Todo: Fix system performance so this can
1476                                 * be treated as an error. Right now complete
1477                                 * splits cannot be scheduled precisely enough
1478                                 * due to other system activity, so this error
1479                                 * occurs regularly in Slave mode.
1480                                 */
1481                                qtd->error_count++;
1482#endif
1483                                qtd->complete_split = 0;
1484                                dwc2_halt_channel(hsotg, chan, qtd,
1485                                                  DWC2_HC_XFER_XACT_ERR);
1486                                /* Todo: add support for isoc release */
1487                                goto handle_nyet_done;
1488                        }
1489                }
1490
1491                dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
1492                goto handle_nyet_done;
1493        }
1494
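            /*
             * NYET on a non-split BULK/CONTROL OUT: the device accepted the
             * data but has no room for more yet, so switch to the PING
             * protocol before retrying.
             */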
1495        chan->qh->ping_state = 1;
1496        qtd->error_count = 0;
1497
1498        dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, qtd,
1499                                  DWC2_HC_XFER_NYET);
1500        dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1501
1502        /*
1503         * Halt the channel and re-start the transfer so the PING protocol
1504         * will start
1505         */
1506        dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
1507
1508handle_nyet_done:
1509        disable_hc_int(hsotg, chnum, HCINTMSK_NYET);
1510}
1511
1512/*
1513 * Handles a host channel babble interrupt. This handler may be called in
1514 * either DMA mode or Slave mode.
1515 */
1516static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
1517                                struct dwc2_host_chan *chan, int chnum,
1518                                struct dwc2_qtd *qtd)
1519{
1520        dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Babble Error--\n",
1521                chnum);
1522
1523        dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1524
1525        if (hsotg->params.dma_desc_enable) {
1526                dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1527                                            DWC2_HC_XFER_BABBLE_ERR);
1528                goto disable_int;
1529        }
1530
1531        if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
1532                dwc2_host_complete(hsotg, qtd, -EOVERFLOW);
1533                dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_BABBLE_ERR);
1534        } else {
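                    /*
                     * For isochronous transfers, record the error in the
                     * current frame descriptor and let the ISOC URB state
                     * update choose the halt status.
                     */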
1535                enum dwc2_halt_status halt_status;
1536
1537                halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1538                                                qtd, DWC2_HC_XFER_BABBLE_ERR);
1539                dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1540        }
1541
1542disable_int:
1543        disable_hc_int(hsotg, chnum, HCINTMSK_BBLERR);
1544}
1545
1546/*
1547 * Handles a host channel AHB error interrupt. This handler is only called in
1548 * DMA mode.
1549 */
1550static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
1551                                struct dwc2_host_chan *chan, int chnum,
1552                                struct dwc2_qtd *qtd)
1553{
1554        struct dwc2_hcd_urb *urb = qtd->urb;
1555        char *pipetype, *speed;
1556        u32 hcchar;
1557        u32 hcsplt;
1558        u32 hctsiz;
1559        u32 hc_dma;
1560
1561        dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: AHB Error--\n",
1562                chnum);
1563
1564        if (!urb)
1565                goto handle_ahberr_halt;
1566
1567        dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1568
1569        hcchar = dwc2_readl(hsotg, HCCHAR(chnum));
1570        hcsplt = dwc2_readl(hsotg, HCSPLT(chnum));
1571        hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
1572        hc_dma = dwc2_readl(hsotg, HCDMA(chnum));
1573
1574        dev_err(hsotg->dev, "AHB ERROR, Channel %d\n", chnum);
1575        dev_err(hsotg->dev, "  hcchar 0x%08x, hcsplt 0x%08x\n", hcchar, hcsplt);
1576        dev_err(hsotg->dev, "  hctsiz 0x%08x, hc_dma 0x%08x\n", hctsiz, hc_dma);
1577        dev_err(hsotg->dev, "  Device address: %d\n",
1578                dwc2_hcd_get_dev_addr(&urb->pipe_info));
1579        dev_err(hsotg->dev, "  Endpoint: %d, %s\n",
1580                dwc2_hcd_get_ep_num(&urb->pipe_info),
1581                dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");
1582
1583        switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
1584        case USB_ENDPOINT_XFER_CONTROL:
1585                pipetype = "CONTROL";
1586                break;
1587        case USB_ENDPOINT_XFER_BULK:
1588                pipetype = "BULK";
1589                break;
1590        case USB_ENDPOINT_XFER_INT:
1591                pipetype = "INTERRUPT";
1592                break;
1593        case USB_ENDPOINT_XFER_ISOC:
1594                pipetype = "ISOCHRONOUS";
1595                break;
1596        default:
1597                pipetype = "UNKNOWN";
1598                break;
1599        }
1600
1601        dev_err(hsotg->dev, "  Endpoint type: %s\n", pipetype);
1602
1603        switch (chan->speed) {
1604        case USB_SPEED_HIGH:
1605                speed = "HIGH";
1606                break;
1607        case USB_SPEED_FULL:
1608                speed = "FULL";
1609                break;
1610        case USB_SPEED_LOW:
1611                speed = "LOW";
1612                break;
1613        default:
1614                speed = "UNKNOWN";
1615                break;
1616        }
1617
1618        dev_err(hsotg->dev, "  Speed: %s\n", speed);
1619
1620        dev_err(hsotg->dev, "  Max packet size: %d (mult %d)\n",
1621                dwc2_hcd_get_maxp(&urb->pipe_info),
1622                dwc2_hcd_get_maxp_mult(&urb->pipe_info));
1623        dev_err(hsotg->dev, "  Data buffer length: %d\n", urb->length);
1624        dev_err(hsotg->dev, "  Transfer buffer: %p, Transfer DMA: %08lx\n",
1625                urb->buf, (unsigned long)urb->dma);
1626        dev_err(hsotg->dev, "  Setup buffer: %p, Setup DMA: %08lx\n",
1627                urb->setup_packet, (unsigned long)urb->setup_dma);
1628        dev_err(hsotg->dev, "  Interval: %d\n", urb->interval);
1629
1630        /* Core halts the channel for Descriptor DMA mode */
1631        if (hsotg->params.dma_desc_enable) {
1632                dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1633                                            DWC2_HC_XFER_AHB_ERR);
1634                goto handle_ahberr_done;
1635        }
1636
1637        dwc2_host_complete(hsotg, qtd, -EIO);
1638
1639handle_ahberr_halt:
1640        /*
1641         * Force a channel halt. Don't call dwc2_halt_channel because that won't
1642         * write to the HCCHARn register in DMA mode to force the halt.
1643         */
1644        dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_AHB_ERR);
1645
1646handle_ahberr_done:
1647        disable_hc_int(hsotg, chnum, HCINTMSK_AHBERR);
1648}
1649
1650/*
1651 * Handles a host channel transaction error interrupt. This handler may be
1652 * called in either DMA mode or Slave mode.
1653 */
1654static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
1655                                 struct dwc2_host_chan *chan, int chnum,
1656                                 struct dwc2_qtd *qtd)
1657{
1658        dev_dbg(hsotg->dev,
1659                "--Host Channel %d Interrupt: Transaction Error--\n", chnum);
1660
1661        dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1662
1663        if (hsotg->params.dma_desc_enable) {
1664                dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1665                                            DWC2_HC_XFER_XACT_ERR);
1666                goto handle_xacterr_done;
1667        }
1668
1669        switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1670        case USB_ENDPOINT_XFER_CONTROL:
1671        case USB_ENDPOINT_XFER_BULK:
1672                qtd->error_count++;
1673                if (!chan->qh->ping_state) {
1674                        dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1675                                                  qtd, DWC2_HC_XFER_XACT_ERR);
1676                        dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1677                        if (!chan->ep_is_in && chan->speed == USB_SPEED_HIGH)
1678                                chan->qh->ping_state = 1;
1679                }
1680
1681                /*
1682                 * Halt the channel so the transfer can be re-started from
1683                 * the appropriate point or the PING protocol will start
1684                 */
1685                dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1686                break;
1687        case USB_ENDPOINT_XFER_INT:
1688                qtd->error_count++;
1689                if (chan->do_split && chan->complete_split)
1690                        qtd->complete_split = 0;
1691                dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1692                break;
1693        case USB_ENDPOINT_XFER_ISOC:
1694                {
1695                        enum dwc2_halt_status halt_status;
1696
1697                        halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
1698                                         chnum, qtd, DWC2_HC_XFER_XACT_ERR);
1699                        dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1700                }
1701                break;
1702        }
1703
1704handle_xacterr_done:
1705        disable_hc_int(hsotg, chnum, HCINTMSK_XACTERR);
1706}
1707
1708/*
1709 * Handles a host channel frame overrun interrupt. This handler may be called
1710 * in either DMA mode or Slave mode.
1711 */
1712static void dwc2_hc_frmovrun_intr(struct dwc2_hsotg *hsotg,
1713                                  struct dwc2_host_chan *chan, int chnum,
1714                                  struct dwc2_qtd *qtd)
1715{
1716        enum dwc2_halt_status halt_status;
1717
1718        if (dbg_hc(chan))
1719                dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Frame Overrun--\n",
1720                        chnum);
1721
1722        dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1723
1724        switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1725        case USB_ENDPOINT_XFER_CONTROL:
1726        case USB_ENDPOINT_XFER_BULK:
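                    /* Nothing to do for non-periodic transfers */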
1727                break;
1728        case USB_ENDPOINT_XFER_INT:
1729                dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1730                break;
1731        case USB_ENDPOINT_XFER_ISOC:
1732                halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1733                                        qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1734                dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1735                break;
1736        }
1737
1738        disable_hc_int(hsotg, chnum, HCINTMSK_FRMOVRUN);
1739}
1740
1741/*
1742 * Handles a host channel data toggle error interrupt. This handler may be
1743 * called in either DMA mode or Slave mode.
1744 */
1745static void dwc2_hc_datatglerr_intr(struct dwc2_hsotg *hsotg,
1746                                    struct dwc2_host_chan *chan, int chnum,
1747                                    struct dwc2_qtd *qtd)
1748{
1749        dev_dbg(hsotg->dev,
1750                "--Host Channel %d Interrupt: Data Toggle Error--\n", chnum);
1751
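            /*
             * For IN transfers just reset the error count and let the
             * transfer continue. A data toggle error on an OUT transfer is
             * unexpected, so only log it.
             */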
1752        if (chan->ep_is_in)
1753                qtd->error_count = 0;
1754        else
1755                dev_err(hsotg->dev,
1756                        "Data Toggle Error on OUT transfer, channel %d\n",
1757                        chnum);
1758
1759        dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1760        disable_hc_int(hsotg, chnum, HCINTMSK_DATATGLERR);
1761}
1762
1763/*
1764 * For debug only. It checks that a valid halt status is set and that
1765 * HCCHARn.chdis is clear. If there's a problem, corrective action is
1766 * taken and a warning is issued.
1767 *
1768 * Return: true if halt status is ok, false otherwise
1769 */
1770static bool dwc2_halt_status_ok(struct dwc2_hsotg *hsotg,
1771                                struct dwc2_host_chan *chan, int chnum,
1772                                struct dwc2_qtd *qtd)
1773{
1774#ifdef DEBUG
1775        u32 hcchar;
1776        u32 hctsiz;
1777        u32 hcintmsk;
1778        u32 hcsplt;
1779
1780        if (chan->halt_status == DWC2_HC_XFER_NO_HALT_STATUS) {
1781                /*
1782                 * This code is here only as a check. This condition should
1783                 * never happen. Ignore the halt if it does occur.
1784                 */
1785                hcchar = dwc2_readl(hsotg, HCCHAR(chnum));
1786                hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
1787                hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
1788                hcsplt = dwc2_readl(hsotg, HCSPLT(chnum));
1789                dev_dbg(hsotg->dev,
1790                        "%s: chan->halt_status DWC2_HC_XFER_NO_HALT_STATUS,\n",
1791                         __func__);
1792                dev_dbg(hsotg->dev,
1793                        "channel %d, hcchar 0x%08x, hctsiz 0x%08x,\n",
1794                        chnum, hcchar, hctsiz);
1795                dev_dbg(hsotg->dev,
1796                        "hcint 0x%08x, hcintmsk 0x%08x, hcsplt 0x%08x,\n",
1797                        chan->hcint, hcintmsk, hcsplt);
1798                if (qtd)
1799                        dev_dbg(hsotg->dev, "qtd->complete_split %d\n",
1800                                qtd->complete_split);
1801                dev_warn(hsotg->dev,
1802                         "%s: no halt status, channel %d, ignoring interrupt\n",
1803                         __func__, chnum);
1804                return false;
1805        }
1806
1807        /*
1808         * This code is here only as a check. hcchar.chdis should never be set
1809         * when the halt interrupt occurs. Halt the channel again if it does
1810         * occur.
1811         */
1812        hcchar = dwc2_readl(hsotg, HCCHAR(chnum));
1813        if (hcchar & HCCHAR_CHDIS) {
1814                dev_warn(hsotg->dev,
1815                         "%s: hcchar.chdis set unexpectedly, hcchar 0x%08x, trying to halt again\n",
1816                         __func__, hcchar);
1817                chan->halt_pending = 0;
1818                dwc2_halt_channel(hsotg, chan, qtd, chan->halt_status);
1819                return false;
1820        }
1821#endif
1822
1823        return true;
1824}
1825
1826/*
1827 * Handles a host Channel Halted interrupt in DMA mode. This handler
1828 * determines the reason the channel halted and proceeds accordingly.
1829 */
1830static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
1831                                    struct dwc2_host_chan *chan, int chnum,
1832                                    struct dwc2_qtd *qtd)
1833{
1834        u32 hcintmsk;
1835        int out_nak_enh = 0;
1836
1837        if (dbg_hc(chan))
1838                dev_vdbg(hsotg->dev,
1839                         "--Host Channel %d Interrupt: DMA Channel Halted--\n",
1840                         chnum);
1841
1842        /*
1843         * For cores with the OUT NAK enhancement, the flow for high-speed
1844         * CONTROL/BULK OUT is handled a little differently
1845         */
1846        if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_71a) {
1847                if (chan->speed == USB_SPEED_HIGH && !chan->ep_is_in &&
1848                    (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1849                     chan->ep_type == USB_ENDPOINT_XFER_BULK)) {
1850                        out_nak_enh = 1;
1851                }
1852        }
1853
1854        if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
1855            (chan->halt_status == DWC2_HC_XFER_AHB_ERR &&
1856             !hsotg->params.dma_desc_enable)) {
1857                if (hsotg->params.dma_desc_enable)
1858                        dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1859                                                    chan->halt_status);
1860                else
1861                        /*
1862                         * Just release the channel. A dequeue can happen on a
1863                         * transfer timeout. In the case of an AHB Error, the
1864                         * channel was forced to halt because there's no way to
1865                         * gracefully recover.
1866                         */
1867                        dwc2_release_channel(hsotg, chan, qtd,
1868                                             chan->halt_status);
1869                return;
1870        }
1871
1872        hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
1873
1874        if (chan->hcint & HCINTMSK_XFERCOMPL) {
1875                /*
1876                 * Todo: This is here because of a possible hardware bug. The
1877                 * spec says that on SPLIT-ISOC OUT transfers in DMA mode a
1878                 * HALT interrupt with the ACK bit set should occur, but only
1879                 * the XFERCOMP bit is seen, even with ACK masked out. This is
1880                 * a workaround; it should be removed when the hardware is fixed.
1881                 */
1882                if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && !chan->ep_is_in)
1883                        dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
1884                dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
1885        } else if (chan->hcint & HCINTMSK_STALL) {
1886                dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
1887        } else if ((chan->hcint & HCINTMSK_XACTERR) &&
1888                   !hsotg->params.dma_desc_enable) {
1889                if (out_nak_enh) {
1890                        if (chan->hcint &
1891                            (HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) {
1892                                dev_vdbg(hsotg->dev,
1893                                         "XactErr with NYET/NAK/ACK\n");
1894                                qtd->error_count = 0;
1895                        } else {
1896                                dev_vdbg(hsotg->dev,
1897                                         "XactErr without NYET/NAK/ACK\n");
1898                        }
1899                }
1900
1901                /*
1902                 * Must handle xacterr before nak or ack. Could get a xacterr
1903                 * at the same time as either of these on a BULK/CONTROL OUT
1904                 * that started with a PING. The xacterr takes precedence.
1905                 */
1906                dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
1907        } else if ((chan->hcint & HCINTMSK_XCS_XACT) &&
1908                   hsotg->params.dma_desc_enable) {
1909                dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
1910        } else if ((chan->hcint & HCINTMSK_AHBERR) &&
1911                   hsotg->params.dma_desc_enable) {
1912                dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
1913        } else if (chan->hcint & HCINTMSK_BBLERR) {
1914                dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
1915        } else if (chan->hcint & HCINTMSK_FRMOVRUN) {
1916                dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
1917        } else if (!out_nak_enh) {
1918                if (chan->hcint & HCINTMSK_NYET) {
1919                        /*
1920                         * Must handle nyet before nak or ack. Could get a nyet
1921                         * at the same time as either of those on a BULK/CONTROL
1922                         * OUT that started with a PING. The nyet takes
1923                         * precedence.
1924                         */
1925                        dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
1926                } else if ((chan->hcint & HCINTMSK_NAK) &&
1927                           !(hcintmsk & HCINTMSK_NAK)) {
1928                        /*
1929                         * If nak is not masked, it's because a non-split IN
1930                         * transfer is in an error state. In that case, the nak
1931                         * is handled by the nak interrupt handler, not here.
1932                         * Handle nak here for BULK/CONTROL OUT transfers, which
1933                         * halt on a NAK to allow rewinding the buffer pointer.
1934                         */
1935                        dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
1936                } else if ((chan->hcint & HCINTMSK_ACK) &&
1937                           !(hcintmsk & HCINTMSK_ACK)) {
1938                        /*
1939                         * If ack is not masked, it's because a non-split IN
1940                         * transfer is in an error state. In that case, the ack
1941                         * is handled by the ack interrupt handler, not here.
1942                         * Handle ack here for split transfers. Start splits
1943                         * halt on ACK.
1944                         */
1945                        dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
1946                } else {
1947                        if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1948                            chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1949                                /*
1950                                 * A periodic transfer halted with no other
1951                                 * channel interrupts set. Assume it was halted
1952                                 * by the core because it could not be completed
1953                                 * in its scheduled (micro)frame.
1954                                 */
1955                                dev_dbg(hsotg->dev,
1956                                        "%s: Halt channel %d (assume incomplete periodic transfer)\n",
1957                                        __func__, chnum);
1958                                dwc2_halt_channel(hsotg, chan, qtd,
1959                                        DWC2_HC_XFER_PERIODIC_INCOMPLETE);
1960                        } else {
1961                                dev_err(hsotg->dev,
1962                                        "%s: Channel %d - ChHltd set, but reason is unknown\n",
1963                                        __func__, chnum);
1964                                dev_err(hsotg->dev,
1965                                        "hcint 0x%08x, intsts 0x%08x\n",
1966                                        chan->hcint,
1967                                        dwc2_readl(hsotg, GINTSTS));
1968                                goto error;
1969                        }
1970                }
1971        } else {
1972                dev_info(hsotg->dev,
1973                         "NYET/NAK/ACK/other in non-error case, 0x%08x\n",
1974                         chan->hcint);
1975error:
1976                /* Fall through: use the 3-strikes rule */
1977                qtd->error_count++;
1978                dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1979                                          qtd, DWC2_HC_XFER_XACT_ERR);
1980                /*
1981                 * We can get here after a completed transaction
1982                 * (urb->actual_length >= urb->length) which was not reported
1983                 * as completed. If that is the case, and we do not abort
1984                 * the transfer, a transfer of size 0 will be enqueued
1985                 * subsequently. If urb->actual_length is not DMA-aligned,
1986                 * the buffer will then point to an unaligned address, and
1987                 * the resulting behavior is undefined. Bail out in that
1988                 * situation.
1989                 */
1990                if (qtd->urb->actual_length >= qtd->urb->length)
1991                        qtd->error_count = 3;
1992                dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1993                dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1994        }
1995}
1996
1997/*
1998 * Handles a host channel Channel Halted interrupt
1999 *
2000 * In slave mode, this handler is called only when the driver specifically
2001 * requests a halt. This occurs during handling other host channel interrupts
2002 * (e.g. nak, xacterr, stall, nyet, etc.).
2003 *
2004 * In DMA mode, this is the interrupt that occurs when the core has finished
2005 * processing a transfer on a channel. Other host channel interrupts (except
2006 * ahberr) are disabled in DMA mode.
2007 */
2008static void dwc2_hc_chhltd_intr(struct dwc2_hsotg *hsotg,
2009                                struct dwc2_host_chan *chan, int chnum,
2010                                struct dwc2_qtd *qtd)
2011{
2012        if (dbg_hc(chan))
2013                dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n",
2014                         chnum);
2015
2016        if (hsotg->params.host_dma) {
2017                dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd);
2018        } else {
2019                if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd))
2020                        return;
2021                dwc2_release_channel(hsotg, chan, qtd, chan->halt_status);
2022        }
2023}
2024
2025/*
2026 * Check if the given qtd is still the top of the list (and thus valid).
2027 *
2028 * If dwc2_hcd_qtd_unlink_and_free() has been called since we grabbed
2029 * the qtd from the top of the list, this will return false (otherwise true).
2030 */
2031static bool dwc2_check_qtd_still_ok(struct dwc2_qtd *qtd, struct dwc2_qh *qh)
2032{
2033        struct dwc2_qtd *cur_head;
2034
2035        if (!qh)
2036                return false;
2037
2038        cur_head = list_first_entry(&qh->qtd_list, struct dwc2_qtd,
2039                                    qtd_list_entry);
2040        return (cur_head == qtd);
2041}
2042
2043/* Handles interrupt for a specific Host Channel */
2044static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
2045{
2046        struct dwc2_qtd *qtd;
2047        struct dwc2_host_chan *chan;
2048        u32 hcint, hcintmsk;
2049
2050        chan = hsotg->hc_ptr_array[chnum];
2051
2052        hcint = dwc2_readl(hsotg, HCINT(chnum));
2053        hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
2054        if (!chan) {
2055                dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
2056                dwc2_writel(hsotg, hcint, HCINT(chnum));
2057                return;
2058        }
2059
2060        if (dbg_hc(chan)) {
2061                dev_vdbg(hsotg->dev, "--Host Channel Interrupt--, Channel %d\n",
2062                         chnum);
2063                dev_vdbg(hsotg->dev,
2064                         "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
2065                         hcint, hcintmsk, hcint & hcintmsk);
2066        }
2067
2068        dwc2_writel(hsotg, hcint, HCINT(chnum));
2069
2070        /*
2071         * If we got an interrupt after someone called
2072         * dwc2_hcd_endpoint_disable() we don't want to crash below
2073         */
2074        if (!chan->qh) {
2075                dev_warn(hsotg->dev, "Interrupt on disabled channel\n");
2076                return;
2077        }
2078
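            /*
             * Save the raw interrupt status for the channel-halted handler,
             * then only act on the unmasked bits below.
             */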
2079        chan->hcint = hcint;
2080        hcint &= hcintmsk;
2081
2082        /*
2083         * If the channel was halted due to a dequeue, the qtd list might
2084         * be empty or at least the first entry will not be the active qtd.
2085         * In this case, take a shortcut and just release the channel.
2086         */
2087        if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
2088                /*
2089                 * If the channel was halted, this should be the only
2090                 * interrupt unmasked
2091                 */
2092                WARN_ON(hcint != HCINTMSK_CHHLTD);
2093                if (hsotg->params.dma_desc_enable)
2094                        dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
2095                                                    chan->halt_status);
2096                else
2097                        dwc2_release_channel(hsotg, chan, NULL,
2098                                             chan->halt_status);
2099                return;
2100        }
2101
2102        if (list_empty(&chan->qh->qtd_list)) {
2103                /*
2104                 * TODO: Will this ever happen with the
2105                 * DWC2_HC_XFER_URB_DEQUEUE handling above?
2106                 */
2107                dev_dbg(hsotg->dev, "## no QTD queued for channel %d ##\n",
2108                        chnum);
2109                dev_dbg(hsotg->dev,
2110                        "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
2111                        chan->hcint, hcintmsk, hcint);
2112                chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
2113                disable_hc_int(hsotg, chnum, HCINTMSK_CHHLTD);
2114                chan->hcint = 0;
2115                return;
2116        }
2117
2118        qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd,
2119                               qtd_list_entry);
2120
2121        if (!hsotg->params.host_dma) {
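                    /*
                     * In Slave mode the halt is requested from one of the
                     * condition handlers, so if the halt arrived together
                     * with another condition, clear CHHLTD here and let the
                     * condition-specific handlers below run first.
                     */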
2122                if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
2123                        hcint &= ~HCINTMSK_CHHLTD;
2124        }
2125
2126        if (hcint & HCINTMSK_XFERCOMPL) {
2127                dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
2128                /*
2129                 * If NYET occurred at same time as Xfer Complete, the NYET is
2130                 * handled by the Xfer Complete interrupt handler. Don't want
2131                 * to call the NYET interrupt handler in this case.
2132                 */
2133                hcint &= ~HCINTMSK_NYET;
2134        }
2135
2136        if (hcint & HCINTMSK_CHHLTD) {
2137                dwc2_hc_chhltd_intr(hsotg, chan, chnum, qtd);
2138                if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2139                        goto exit;
2140        }
2141        if (hcint & HCINTMSK_AHBERR) {
2142                dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
2143                if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2144                        goto exit;
2145        }
2146        if (hcint & HCINTMSK_STALL) {
2147                dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
2148                if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2149                        goto exit;
2150        }
2151        if (hcint & HCINTMSK_NAK) {
2152                dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
2153                if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2154                        goto exit;
2155        }
2156        if (hcint & HCINTMSK_ACK) {
2157                dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
2158                if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2159                        goto exit;
2160        }
2161        if (hcint & HCINTMSK_NYET) {
2162                dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
2163                if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2164                        goto exit;
2165        }
2166        if (hcint & HCINTMSK_XACTERR) {
2167                dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
2168                if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2169                        goto exit;
2170        }
2171        if (hcint & HCINTMSK_BBLERR) {
2172                dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
2173                if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2174                        goto exit;
2175        }
2176        if (hcint & HCINTMSK_FRMOVRUN) {
2177                dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
2178                if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2179                        goto exit;
2180        }
2181        if (hcint & HCINTMSK_DATATGLERR) {
2182                dwc2_hc_datatglerr_intr(hsotg, chan, chnum, qtd);
2183                if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2184                        goto exit;
2185        }
2186
2187exit:
2188        chan->hcint = 0;
2189}
2190
2191/*
2192 * This interrupt indicates that one or more host channels have a pending
2193 * interrupt. There are multiple conditions that can cause each host channel
2194 * interrupt. This function determines which conditions have occurred for each
2195 * host channel interrupt and handles them appropriately.
2196 */
2197static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
2198{
2199        u32 haint;
2200        int i;
2201        struct dwc2_host_chan *chan, *chan_tmp;
2202
2203        haint = dwc2_readl(hsotg, HAINT);
2204        if (dbg_perio()) {
2205                dev_vdbg(hsotg->dev, "%s()\n", __func__);
2206
2207                dev_vdbg(hsotg->dev, "HAINT=%08x\n", haint);
2208        }
2209
2210        /*
2211         * According to USB 2.0 spec section 11.18.8, a host must
2212         * issue complete-split transactions in a microframe for a
2213         * set of full-/low-speed endpoints in the same relative
2214         * order as the corresponding start-splits were issued.
2215         */
2216        list_for_each_entry_safe(chan, chan_tmp, &hsotg->split_order,
2217                                 split_order_list_entry) {
2218                int hc_num = chan->hc_num;
2219
2220                if (haint & (1 << hc_num)) {
2221                        dwc2_hc_n_intr(hsotg, hc_num);
2222                        haint &= ~(1 << hc_num);
2223                }
2224        }
2225
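            /* Handle the remaining (non-split) channels */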
2226        for (i = 0; i < hsotg->params.host_channels; i++) {
2227                if (haint & (1 << i))
2228                        dwc2_hc_n_intr(hsotg, i);
2229        }
2230}
2231
2232/* This function handles interrupts for the HCD */
2233irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg)
2234{
2235        u32 gintsts, dbg_gintsts;
2236        irqreturn_t retval = IRQ_NONE;
2237
2238        if (!dwc2_is_controller_alive(hsotg)) {
2239                dev_warn(hsotg->dev, "Controller is dead\n");
2240                return retval;
2241        }
2242
2243        spin_lock(&hsotg->lock);
2244
2245        /* Check if HOST Mode */
2246        if (dwc2_is_host_mode(hsotg)) {
2247                gintsts = dwc2_read_core_intr(hsotg);
2248                if (!gintsts) {
2249                        spin_unlock(&hsotg->lock);
2250                        return retval;
2251                }
2252
2253                retval = IRQ_HANDLED;
2254
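                    /*
                     * Copy for debug printing only; mask out the noisy SOF
                     * and periodic bits unless their debugging is enabled.
                     */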
2255                dbg_gintsts = gintsts;
2256#ifndef DEBUG_SOF
2257                dbg_gintsts &= ~GINTSTS_SOF;
2258#endif
2259                if (!dbg_perio())
2260                        dbg_gintsts &= ~(GINTSTS_HCHINT | GINTSTS_RXFLVL |
2261                                         GINTSTS_PTXFEMP);
2262
2263                /* Only print if there are any non-suppressed interrupts left */
2264                if (dbg_gintsts)
2265                        dev_vdbg(hsotg->dev,
2266                                 "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n",
2267                                 gintsts);
2268
2269                if (gintsts & GINTSTS_SOF)
2270                        dwc2_sof_intr(hsotg);
2271                if (gintsts & GINTSTS_RXFLVL)
2272                        dwc2_rx_fifo_level_intr(hsotg);
2273                if (gintsts & GINTSTS_NPTXFEMP)
2274                        dwc2_np_tx_fifo_empty_intr(hsotg);
2275                if (gintsts & GINTSTS_PRTINT)
2276                        dwc2_port_intr(hsotg);
2277                if (gintsts & GINTSTS_HCHINT)
2278                        dwc2_hc_intr(hsotg);
2279                if (gintsts & GINTSTS_PTXFEMP)
2280                        dwc2_perio_tx_fifo_empty_intr(hsotg);
2281
2282                if (dbg_gintsts) {
2283                        dev_vdbg(hsotg->dev,
2284                                 "DWC OTG HCD Finished Servicing Interrupts\n");
2285                        dev_vdbg(hsotg->dev,
2286                                 "DWC OTG HCD gintsts=0x%08x gintmsk=0x%08x\n",
2287                                 dwc2_readl(hsotg, GINTSTS),
2288                                 dwc2_readl(hsotg, GINTMSK));
2289                }
2290        }
2291
2292        spin_unlock(&hsotg->lock);
2293
2294        return retval;
2295}
2296