linux/drivers/usb/host/xhci-ring.c
   1/*
   2 * xHCI host controller driver
   3 *
   4 * Copyright (C) 2008 Intel Corp.
   5 *
   6 * Author: Sarah Sharp
   7 * Some code borrowed from the Linux EHCI driver.
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License version 2 as
  11 * published by the Free Software Foundation.
  12 *
  13 * This program is distributed in the hope that it will be useful, but
  14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  15 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  16 * for more details.
  17 *
  18 * You should have received a copy of the GNU General Public License
  19 * along with this program; if not, write to the Free Software Foundation,
  20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  21 */
  22
  23/*
  24 * Ring initialization rules:
  25 * 1. Each segment is initialized to zero, except for link TRBs.
  26 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
  27 *    Consumer Cycle State (CCS), depending on ring function.
  28 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
  29 *
  30 * Ring behavior rules:
  31 * 1. A ring is empty if enqueue == dequeue.  This means there will always be at
  32 *    least one free TRB in the ring.  This is useful if you want to turn that
  33 *    into a link TRB and expand the ring.
  34 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
  35 *    link TRB, then load the pointer with the address in the link TRB.  If the
  36 *    link TRB had its toggle bit set, you may need to update the ring cycle
  37 *    state (see cycle bit rules).  You may have to do this multiple times
  38 *    until you reach a non-link TRB.
  39 * 3. A ring is full if enqueue++ (for the definition of increment above)
  40 *    equals the dequeue pointer.
  41 *
  42 * Cycle bit rules:
  43 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
  44 *    in a link TRB, it must toggle the ring cycle state.
  45 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
  46 *    in a link TRB, it must toggle the ring cycle state.
  47 *
  48 * Producer rules:
  49 * 1. Check if ring is full before you enqueue.
  50 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
  51 *    Update enqueue pointer between each write (which may update the ring
  52 *    cycle state).
  53 * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
  54 *    and endpoint rings.  If HC is the producer for the event ring,
  55 *    it generates an interrupt according to interrupt modulation rules.
  56 *
  57 * Consumer rules:
  58 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
  59 *    the TRB is owned by the consumer.
  60 * 2. Update dequeue pointer (which may update the ring cycle state) and
  61 *    continue processing TRBs until you reach a TRB which is not owned by you.
  62 * 3. Notify the producer.  SW is the consumer for the event ring, and it
  63 *   updates event ring dequeue pointer.  HC is the consumer for the command and
  64 *   endpoint rings; it generates events on the event ring for these.
  65 */
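
/*
 * Illustrative sketch (compiled out): one way a consumer could apply the
 * "Consumer rules" above to decide whether the TRB at its dequeue pointer
 * is owned by it.  The helper name is hypothetical; the driver proper does
 * this check inline where events are handled.
 */
#if 0
static bool example_consumer_owns_trb(struct xhci_ring *ring)
{
        u32 control = le32_to_cpu(ring->dequeue->generic.field[3]);

        /* Consumer rule 1: the TRB belongs to the consumer when its cycle
         * bit matches the ring cycle state.
         */
        return (control & TRB_CYCLE) == ring->cycle_state;
}
#endif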
  66
  67#include <linux/scatterlist.h>
  68#include <linux/slab.h>
  69#include <linux/dma-mapping.h>
  70#include "xhci.h"
  71#include "xhci-trace.h"
  72#include "xhci-mtk.h"
  73
  74/*
  75 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
  76 * address of the TRB.
  77 */
  78dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
  79                union xhci_trb *trb)
  80{
  81        unsigned long segment_offset;
  82
  83        if (!seg || !trb || trb < seg->trbs)
  84                return 0;
  85        /* offset in TRBs */
  86        segment_offset = trb - seg->trbs;
  87        if (segment_offset >= TRBS_PER_SEGMENT)
  88                return 0;
  89        return seg->dma + (segment_offset * sizeof(*trb));
  90}
  91
  92static bool trb_is_link(union xhci_trb *trb)
  93{
  94        return TRB_TYPE_LINK_LE32(trb->link.control);
  95}
  96
  97static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
  98{
  99        return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
 100}
 101
 102static bool last_trb_on_ring(struct xhci_ring *ring,
 103                        struct xhci_segment *seg, union xhci_trb *trb)
 104{
 105        return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
 106}
 107
 108static bool link_trb_toggles_cycle(union xhci_trb *trb)
 109{
 110        return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
 111}
 112
 113/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 114 * TRB is in a new segment.  This does not skip over link TRBs, and it does not
 115 * affect the ring dequeue or enqueue pointers.
 116 */
 117static void next_trb(struct xhci_hcd *xhci,
 118                struct xhci_ring *ring,
 119                struct xhci_segment **seg,
 120                union xhci_trb **trb)
 121{
 122        if (trb_is_link(*trb)) {
 123                *seg = (*seg)->next;
 124                *trb = ((*seg)->trbs);
 125        } else {
 126                (*trb)++;
 127        }
 128}
 129
 130/*
 131 * See Cycle bit rules. SW is the consumer for the event ring only.
 132 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 133 */
 134static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
 135{
 136        ring->deq_updates++;
 137
 138        /* event ring doesn't have link trbs, check for last trb */
 139        if (ring->type == TYPE_EVENT) {
 140                if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
 141                        ring->dequeue++;
 142                        return;
 143                }
 144                if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
 145                        ring->cycle_state ^= 1;
 146                ring->deq_seg = ring->deq_seg->next;
 147                ring->dequeue = ring->deq_seg->trbs;
 148                return;
 149        }
 150
 151        /* All other rings have link trbs */
 152        if (!trb_is_link(ring->dequeue)) {
 153                ring->dequeue++;
 154                ring->num_trbs_free++;
 155        }
 156        while (trb_is_link(ring->dequeue)) {
 157                ring->deq_seg = ring->deq_seg->next;
 158                ring->dequeue = ring->deq_seg->trbs;
 159        }
 160        return;
 161}
 162
 163/*
 164 * See Cycle bit rules. SW is the consumer for the event ring only.
 165 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 166 *
 167 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 168 * chain bit is set), then set the chain bit in all the following link TRBs.
 169 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 170 * have their chain bit cleared (so that each Link TRB is a separate TD).
 171 *
 172 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 173 * set, but other sections talk about dealing with the chain bit set.  This was
 174 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 175 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 176 *
 177 * @more_trbs_coming:   Will you enqueue more TRBs before calling
 178 *                      prepare_transfer()?
 179 */
 180static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 181                        bool more_trbs_coming)
 182{
 183        u32 chain;
 184        union xhci_trb *next;
 185
 186        chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
 187        /* If this is not event ring, there is one less usable TRB */
 188        if (!trb_is_link(ring->enqueue))
 189                ring->num_trbs_free--;
 190        next = ++(ring->enqueue);
 191
 192        ring->enq_updates++;
 193        /* Update the enqueue pointer further if that was a link TRB */
 194        while (trb_is_link(next)) {
 195
 196                /*
 197                 * If the caller doesn't plan on enqueueing more TDs before
 198                 * ringing the doorbell, then we don't want to give the link TRB
 199                 * to the hardware just yet. We'll give the link TRB back in
 200                 * prepare_ring() just before we enqueue the TD at the top of
 201                 * the ring.
 202                 */
 203                if (!chain && !more_trbs_coming)
 204                        break;
 205
 206                /* If we're not dealing with 0.95 hardware or isoc rings on
 207                 * AMD 0.96 host, carry over the chain bit of the previous TRB
 208                 * (which may mean the chain bit is cleared).
 209                 */
 210                if (!(ring->type == TYPE_ISOC &&
 211                      (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
 212                    !xhci_link_trb_quirk(xhci)) {
 213                        next->link.control &= cpu_to_le32(~TRB_CHAIN);
 214                        next->link.control |= cpu_to_le32(chain);
 215                }
 216                /* Give this link TRB to the hardware */
 217                wmb();
 218                next->link.control ^= cpu_to_le32(TRB_CYCLE);
 219
 220                /* Toggle the cycle bit after the last ring segment. */
 221                if (link_trb_toggles_cycle(next))
 222                        ring->cycle_state ^= 1;
 223
 224                ring->enq_seg = ring->enq_seg->next;
 225                ring->enqueue = ring->enq_seg->trbs;
 226                next = ring->enqueue;
 227        }
 228}
 229
 230/*
 231 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 232 * enqueue pointer will not advance into dequeue segment. See rules above.
 233 */
 234static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
 235                unsigned int num_trbs)
 236{
 237        int num_trbs_in_deq_seg;
 238
 239        if (ring->num_trbs_free < num_trbs)
 240                return 0;
 241
 242        if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
 243                num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
 244                if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
 245                        return 0;
 246        }
 247
 248        return 1;
 249}
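
/*
 * Illustrative sketch (compiled out): how a caller might gate queueing on
 * room_on_ring().  The function name and error handling are hypothetical;
 * in this driver the check is made from prepare_ring()/prepare_transfer(),
 * mentioned in the inc_enq() comments above.
 */
#if 0
static int example_prepare_transfer(struct xhci_hcd *xhci,
                struct xhci_ring *ring, unsigned int num_trbs)
{
        /* Producer rule 1: check that the ring has room before enqueueing. */
        if (!room_on_ring(xhci, ring, num_trbs))
                return -ENOMEM; /* a real caller might try to expand the ring */

        /* ... queue num_trbs TRBs with inc_enq() and notify the consumer ... */
        return 0;
}
#endif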
 250
 251/* Ring the host controller doorbell after placing a command on the ring */
 252void xhci_ring_cmd_db(struct xhci_hcd *xhci)
 253{
 254        if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
 255                return;
 256
 257        xhci_dbg(xhci, "// Ding dong!\n");
 258        writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
 259        /* Flush PCI posted writes */
 260        readl(&xhci->dba->doorbell[0]);
 261}
 262
 263static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
 264{
 265        u64 temp_64;
 266        int ret;
 267
 268        xhci_dbg(xhci, "Abort command ring\n");
 269
 270        temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
 271        xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
 272
 273        /*
 274         * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
 275         * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
 276         * but the completion event is never sent. Use the cmd timeout timer to
 277         * handle those cases. Use twice the time to cover the bit polling retry
 278         */
 279        mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT));
 280        xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
 281                        &xhci->op_regs->cmd_ring);
 282
 283        /* Section 4.6.1.2 of xHCI 1.0 spec says software should
 284         * time the completion of all xHCI commands, including
 285         * the Command Abort operation. If software doesn't see
 286         * CRR negated in a timely manner (e.g. longer than 5
 287         * seconds), then it should assume that there are
 288         * larger problems with the xHC and assert HCRST.
 289         */
 290        ret = xhci_handshake(&xhci->op_regs->cmd_ring,
 291                        CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
 292        if (ret < 0) {
 293                /* we are about to kill xhci, give it one more chance */
 294                xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
 295                              &xhci->op_regs->cmd_ring);
 296                udelay(1000);
 297                ret = xhci_handshake(&xhci->op_regs->cmd_ring,
 298                                     CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
 299                if (ret == 0)
 300                        return 0;
 301
 302                xhci_err(xhci, "Stopping the command ring failed, "
 303                                "maybe the host is dead\n");
 304                del_timer(&xhci->cmd_timer);
 305                xhci->xhc_state |= XHCI_STATE_DYING;
 306                xhci_quiesce(xhci);
 307                xhci_halt(xhci);
 308                return -ESHUTDOWN;
 309        }
 310
 311        return 0;
 312}
 313
 314void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
 315                unsigned int slot_id,
 316                unsigned int ep_index,
 317                unsigned int stream_id)
 318{
 319        __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
 320        struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
 321        unsigned int ep_state = ep->ep_state;
 322
 323        /* Don't ring the doorbell for this endpoint if there are pending
 324         * cancellations because we don't want to interrupt processing.
 325         * We don't want to restart any stream rings if there's a set dequeue
 326         * pointer command pending because the device can choose to start any
 327         * stream once the endpoint is on the HW schedule.
 328         */
 329        if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
 330            (ep_state & EP_HALTED))
 331                return;
 332        writel(DB_VALUE(ep_index, stream_id), db_addr);
 333        /* The CPU has better things to do at this point than wait for a
 334         * write-posting flush.  It'll get there soon enough.
 335         */
 336}
 337
 338/* Ring the doorbell for any rings with pending URBs */
 339static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
 340                unsigned int slot_id,
 341                unsigned int ep_index)
 342{
 343        unsigned int stream_id;
 344        struct xhci_virt_ep *ep;
 345
 346        ep = &xhci->devs[slot_id]->eps[ep_index];
 347
 348        /* A ring has pending URBs if its TD list is not empty */
 349        if (!(ep->ep_state & EP_HAS_STREAMS)) {
 350                if (ep->ring && !(list_empty(&ep->ring->td_list)))
 351                        xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
 352                return;
 353        }
 354
 355        for (stream_id = 1; stream_id < ep->stream_info->num_streams;
 356                        stream_id++) {
 357                struct xhci_stream_info *stream_info = ep->stream_info;
 358                if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
 359                        xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
 360                                                stream_id);
 361        }
 362}
 363
 364/* Get the right ring for the given slot_id, ep_index and stream_id.
 365 * If the endpoint supports streams, boundary check the URB's stream ID.
 366 * If the endpoint doesn't support streams, return the singular endpoint ring.
 367 */
 368struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
 369                unsigned int slot_id, unsigned int ep_index,
 370                unsigned int stream_id)
 371{
 372        struct xhci_virt_ep *ep;
 373
 374        ep = &xhci->devs[slot_id]->eps[ep_index];
 375        /* Common case: no streams */
 376        if (!(ep->ep_state & EP_HAS_STREAMS))
 377                return ep->ring;
 378
 379        if (stream_id == 0) {
 380                xhci_warn(xhci,
 381                                "WARN: Slot ID %u, ep index %u has streams, "
 382                                "but URB has no stream ID.\n",
 383                                slot_id, ep_index);
 384                return NULL;
 385        }
 386
 387        if (stream_id < ep->stream_info->num_streams)
 388                return ep->stream_info->stream_rings[stream_id];
 389
 390        xhci_warn(xhci,
 391                        "WARN: Slot ID %u, ep index %u has "
 392                        "stream IDs 1 to %u allocated, "
 393                        "but stream ID %u is requested.\n",
 394                        slot_id, ep_index,
 395                        ep->stream_info->num_streams - 1,
 396                        stream_id);
 397        return NULL;
 398}
 399
 400/*
 401 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 402 * Record the new state of the xHC's endpoint ring dequeue segment,
 403 * dequeue pointer, and new consumer cycle state in state.
 404 * Update our internal representation of the ring's dequeue pointer.
 405 *
 406 * We do this in three jumps:
 407 *  - First we update our new ring state to be the same as when the xHC stopped.
 408 *  - Then we traverse the ring to find the segment that contains
 409 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 410 *    any link TRBs with the toggle cycle bit set.
 411 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 412 *    if we've moved it past a link TRB with the toggle cycle bit set.
 413 *
 414 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 415 * with correct __le32 accesses they should work fine.  Only users of this are
 416 * in here.
 417 */
 418void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 419                unsigned int slot_id, unsigned int ep_index,
 420                unsigned int stream_id, struct xhci_td *cur_td,
 421                struct xhci_dequeue_state *state)
 422{
 423        struct xhci_virt_device *dev = xhci->devs[slot_id];
 424        struct xhci_virt_ep *ep = &dev->eps[ep_index];
 425        struct xhci_ring *ep_ring;
 426        struct xhci_segment *new_seg;
 427        union xhci_trb *new_deq;
 428        dma_addr_t addr;
 429        u64 hw_dequeue;
 430        bool cycle_found = false;
 431        bool td_last_trb_found = false;
 432
 433        ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
 434                        ep_index, stream_id);
 435        if (!ep_ring) {
 436                xhci_warn(xhci, "WARN can't find new dequeue state "
 437                                "for invalid stream ID %u.\n",
 438                                stream_id);
 439                return;
 440        }
 441
 442        /* Dig out the cycle state saved by the xHC during the stop ep cmd */
 443        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 444                        "Finding endpoint context");
 445        /* 4.6.9 the css flag is written to the stream context for streams */
 446        if (ep->ep_state & EP_HAS_STREAMS) {
 447                struct xhci_stream_ctx *ctx =
 448                        &ep->stream_info->stream_ctx_array[stream_id];
 449                hw_dequeue = le64_to_cpu(ctx->stream_ring);
 450        } else {
 451                struct xhci_ep_ctx *ep_ctx
 452                        = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
 453                hw_dequeue = le64_to_cpu(ep_ctx->deq);
 454        }
 455
 456        new_seg = ep_ring->deq_seg;
 457        new_deq = ep_ring->dequeue;
 458        state->new_cycle_state = hw_dequeue & 0x1;
 459
 460        /*
 461         * We want to find the pointer, segment and cycle state of the new trb
 462         * (the one after current TD's last_trb). We know the cycle state at
 463         * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
 464         * found.
 465         */
 466        do {
 467                if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
 468                    == (dma_addr_t)(hw_dequeue & ~0xf)) {
 469                        cycle_found = true;
 470                        if (td_last_trb_found)
 471                                break;
 472                }
 473                if (new_deq == cur_td->last_trb)
 474                        td_last_trb_found = true;
 475
 476                if (cycle_found &&
 477                    TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) &&
 478                    new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE))
 479                        state->new_cycle_state ^= 0x1;
 480
 481                next_trb(xhci, ep_ring, &new_seg, &new_deq);
 482
 483                /* Search wrapped around, bail out */
 484                if (new_deq == ep->ring->dequeue) {
 485                        xhci_err(xhci, "Error: Failed finding new dequeue state\n");
 486                        state->new_deq_seg = NULL;
 487                        state->new_deq_ptr = NULL;
 488                        return;
 489                }
 490
 491        } while (!cycle_found || !td_last_trb_found);
 492
 493        state->new_deq_seg = new_seg;
 494        state->new_deq_ptr = new_deq;
 495
 496        /* Don't update the ring cycle state for the producer (us). */
 497        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 498                        "Cycle state = 0x%x", state->new_cycle_state);
 499
 500        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 501                        "New dequeue segment = %p (virtual)",
 502                        state->new_deq_seg);
 503        addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
 504        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 505                        "New dequeue pointer = 0x%llx (DMA)",
 506                        (unsigned long long) addr);
 507}
 508
 509/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 510 * (The last TRB actually points to the ring enqueue pointer, which is not part
 511 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 512 */
 513static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 514                struct xhci_td *cur_td, bool flip_cycle)
 515{
 516        struct xhci_segment *cur_seg;
 517        union xhci_trb *cur_trb;
 518
 519        for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
 520                        true;
 521                        next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
 522                if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
 523                        /* Unchain any chained Link TRBs, but
 524                         * leave the pointers intact.
 525                         */
 526                        cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
 527                        /* Flip the cycle bit (link TRBs can't be the first
 528                         * or last TRB).
 529                         */
 530                        if (flip_cycle)
 531                                cur_trb->generic.field[3] ^=
 532                                        cpu_to_le32(TRB_CYCLE);
 533                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 534                                        "Cancel (unchain) link TRB");
 535                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 536                                        "Address = %p (0x%llx dma); "
 537                                        "in seg %p (0x%llx dma)",
 538                                        cur_trb,
 539                                        (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
 540                                        cur_seg,
 541                                        (unsigned long long)cur_seg->dma);
 542                } else {
 543                        cur_trb->generic.field[0] = 0;
 544                        cur_trb->generic.field[1] = 0;
 545                        cur_trb->generic.field[2] = 0;
 546                        /* Preserve only the cycle bit of this TRB */
 547                        cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
 548                        /* Flip the cycle bit except on the first or last TRB */
 549                        if (flip_cycle && cur_trb != cur_td->first_trb &&
 550                                        cur_trb != cur_td->last_trb)
 551                                cur_trb->generic.field[3] ^=
 552                                        cpu_to_le32(TRB_CYCLE);
 553                        cur_trb->generic.field[3] |= cpu_to_le32(
 554                                TRB_TYPE(TRB_TR_NOOP));
 555                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 556                                        "TRB to noop at offset 0x%llx",
 557                                        (unsigned long long)
 558                                        xhci_trb_virt_to_dma(cur_seg, cur_trb));
 559                }
 560                if (cur_trb == cur_td->last_trb)
 561                        break;
 562        }
 563}
 564
 565static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
 566                struct xhci_virt_ep *ep)
 567{
 568        ep->ep_state &= ~EP_HALT_PENDING;
 569        /* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
 570         * timer is running on another CPU, we don't decrement stop_cmds_pending
 571         * (since we didn't successfully stop the watchdog timer).
 572         */
 573        if (del_timer(&ep->stop_cmd_timer))
 574                ep->stop_cmds_pending--;
 575}
 576
 577/* Must be called with xhci->lock held in interrupt context */
 578static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
 579                struct xhci_td *cur_td, int status)
 580{
 581        struct usb_hcd *hcd;
 582        struct urb      *urb;
 583        struct urb_priv *urb_priv;
 584
 585        urb = cur_td->urb;
 586        urb_priv = urb->hcpriv;
 587        urb_priv->td_cnt++;
 588        hcd = bus_to_hcd(urb->dev->bus);
 589
 590        /* Only giveback urb when this is the last td in urb */
 591        if (urb_priv->td_cnt == urb_priv->length) {
 592                if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
 593                        xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
 594                        if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
 595                                if (xhci->quirks & XHCI_AMD_PLL_FIX)
 596                                        usb_amd_quirk_pll_enable();
 597                        }
 598                }
 599                usb_hcd_unlink_urb_from_ep(hcd, urb);
 600
 601                spin_unlock(&xhci->lock);
 602                usb_hcd_giveback_urb(hcd, urb, status);
 603                xhci_urb_free_priv(urb_priv);
 604                spin_lock(&xhci->lock);
 605        }
 606}
 607
 608void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_ring *ring,
 609                                 struct xhci_td *td)
 610{
 611        struct device *dev = xhci_to_hcd(xhci)->self.controller;
 612        struct xhci_segment *seg = td->bounce_seg;
 613        struct urb *urb = td->urb;
 614
 615        if (!seg || !urb)
 616                return;
 617
 618        if (usb_urb_dir_out(urb)) {
 619                dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
 620                                 DMA_TO_DEVICE);
 621                return;
 622        }
 623
 624        /* for IN transfers we need to copy the data from bounce to sg */
 625        sg_pcopy_from_buffer(urb->sg, urb->num_mapped_sgs, seg->bounce_buf,
 626                             seg->bounce_len, seg->bounce_offs);
 627        dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
 628                         DMA_FROM_DEVICE);
 629        seg->bounce_len = 0;
 630        seg->bounce_offs = 0;
 631}
 632
 633/*
 634 * When we get a command completion for a Stop Endpoint Command, we need to
 635 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 636 *
 637 *  1. If the HW was in the middle of processing the TD that needs to be
 638 *     cancelled, then we must move the ring's dequeue pointer past the last TRB
 639 *     in the TD with a Set Dequeue Pointer Command.
 640 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 641 *     bit cleared) so that the HW will skip over them.
 642 */
 643static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
 644                union xhci_trb *trb, struct xhci_event_cmd *event)
 645{
 646        unsigned int ep_index;
 647        struct xhci_ring *ep_ring;
 648        struct xhci_virt_ep *ep;
 649        struct list_head *entry;
 650        struct xhci_td *cur_td = NULL;
 651        struct xhci_td *last_unlinked_td;
 652
 653        struct xhci_dequeue_state deq_state;
 654
 655        if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
 656                if (!xhci->devs[slot_id])
 657                        xhci_warn(xhci, "Stop endpoint command "
 658                                "completion for disabled slot %u\n",
 659                                slot_id);
 660                return;
 661        }
 662
 663        memset(&deq_state, 0, sizeof(deq_state));
 664        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
 665        ep = &xhci->devs[slot_id]->eps[ep_index];
 666
 667        if (list_empty(&ep->cancelled_td_list)) {
 668                xhci_stop_watchdog_timer_in_irq(xhci, ep);
 669                ep->stopped_td = NULL;
 670                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 671                return;
 672        }
 673
 674        /* Fix up the ep ring first, so HW stops executing cancelled TDs.
 675         * We have the xHCI lock, so nothing can modify this list until we drop
 676         * it.  We're also in the event handler, so we can't get re-interrupted
 677         * if another Stop Endpoint command completes
 678         */
 679        list_for_each(entry, &ep->cancelled_td_list) {
 680                cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
 681                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 682                                "Removing canceled TD starting at 0x%llx (dma).",
 683                                (unsigned long long)xhci_trb_virt_to_dma(
 684                                        cur_td->start_seg, cur_td->first_trb));
 685                ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
 686                if (!ep_ring) {
 687                        /* This shouldn't happen unless a driver is mucking
 688                         * with the stream ID after submission.  This will
 689                         * leave the TD on the hardware ring, and the hardware
 690                         * will try to execute it, and may access a buffer
 691                         * that has already been freed.  In the best case, the
 692                         * hardware will execute it, and the event handler will
 693                         * ignore the completion event for that TD, since it was
 694                         * removed from the td_list for that endpoint.  In
 695                         * short, don't muck with the stream ID after
 696                         * submission.
 697                         */
 698                        xhci_warn(xhci, "WARN Cancelled URB %p "
 699                                        "has invalid stream ID %u.\n",
 700                                        cur_td->urb,
 701                                        cur_td->urb->stream_id);
 702                        goto remove_finished_td;
 703                }
 704                /*
 705                 * If we stopped on the TD we need to cancel, then we have to
 706                 * move the xHC endpoint ring dequeue pointer past this TD.
 707                 */
 708                if (cur_td == ep->stopped_td)
 709                        xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
 710                                        cur_td->urb->stream_id,
 711                                        cur_td, &deq_state);
 712                else
 713                        td_to_noop(xhci, ep_ring, cur_td, false);
 714remove_finished_td:
 715                /*
 716                 * The event handler won't see a completion for this TD anymore,
 717                 * so remove it from the endpoint ring's TD list.  Keep it in
 718                 * the cancelled TD list for URB completion later.
 719                 */
 720                list_del_init(&cur_td->td_list);
 721        }
 722        last_unlinked_td = cur_td;
 723        xhci_stop_watchdog_timer_in_irq(xhci, ep);
 724
 725        /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
 726        if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
 727                xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
 728                                ep->stopped_td->urb->stream_id, &deq_state);
 729                xhci_ring_cmd_db(xhci);
 730        } else {
 731                /* Otherwise ring the doorbell(s) to restart queued transfers */
 732                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 733        }
 734
 735        ep->stopped_td = NULL;
 736
 737        /*
 738         * Drop the lock and complete the URBs in the cancelled TD list.
 739         * New TDs to be cancelled might be added to the end of the list before
 740         * we can complete all the URBs for the TDs we already unlinked.
 741         * So stop when we've completed the URB for the last TD we unlinked.
 742         */
 743        do {
 744                cur_td = list_entry(ep->cancelled_td_list.next,
 745                                struct xhci_td, cancelled_td_list);
 746                list_del_init(&cur_td->cancelled_td_list);
 747
 748                /* Clean up the cancelled URB */
 749                /* Doesn't matter what we pass for status, since the core will
 750                 * just overwrite it (because the URB has been unlinked).
 751                 */
 752                ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
 753                if (ep_ring && cur_td->bounce_seg)
 754                        xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
 755
 756                if ((xhci->quirks & XHCI_STREAM_QUIRK) && ep_ring &&
 757                                ep_ring->stream_timeout_handler) {
 758                        /* We get here if the stream timer timed out and a
 759                         * stop command was issued. Send the urb status as
 760                         * -EAGAIN so that the same urb can be re-submitted.
 761                         */
 762                        xhci_giveback_urb_in_irq(xhci, cur_td, -EAGAIN);
 763                        ep_ring->stream_timeout_handler = false;
 764                } else {
 765                        xhci_giveback_urb_in_irq(xhci, cur_td, 0);
 766                }
 767
 768                /* Stop processing the cancelled list if the watchdog timer is
 769                 * running.
 770                 */
 771                if (xhci->xhc_state & XHCI_STATE_DYING)
 772                        return;
 773        } while (cur_td != last_unlinked_td);
 774
 775        /* Return to the event handler with xhci->lock re-acquired */
 776}
 777
 778static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
 779{
 780        struct xhci_td *cur_td;
 781
 782        while (!list_empty(&ring->td_list)) {
 783                cur_td = list_first_entry(&ring->td_list,
 784                                struct xhci_td, td_list);
 785                list_del_init(&cur_td->td_list);
 786                if (!list_empty(&cur_td->cancelled_td_list))
 787                        list_del_init(&cur_td->cancelled_td_list);
 788
 789                if (cur_td->bounce_seg)
 790                        xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);
 791                xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
 792        }
 793}
 794
 795static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
 796                int slot_id, int ep_index)
 797{
 798        struct xhci_td *cur_td;
 799        struct xhci_virt_ep *ep;
 800        struct xhci_ring *ring;
 801
 802        ep = &xhci->devs[slot_id]->eps[ep_index];
 803        if ((ep->ep_state & EP_HAS_STREAMS) ||
 804                        (ep->ep_state & EP_GETTING_NO_STREAMS)) {
 805                int stream_id;
 806
 807                for (stream_id = 0; stream_id < ep->stream_info->num_streams;
 808                                stream_id++) {
 809                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 810                                        "Killing URBs for slot ID %u, ep index %u, stream %u",
 811                                        slot_id, ep_index, stream_id + 1);
 812                        xhci_kill_ring_urbs(xhci,
 813                                        ep->stream_info->stream_rings[stream_id]);
 814                }
 815        } else {
 816                ring = ep->ring;
 817                if (!ring)
 818                        return;
 819                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 820                                "Killing URBs for slot ID %u, ep index %u",
 821                                slot_id, ep_index);
 822                xhci_kill_ring_urbs(xhci, ring);
 823        }
 824        while (!list_empty(&ep->cancelled_td_list)) {
 825                cur_td = list_first_entry(&ep->cancelled_td_list,
 826                                struct xhci_td, cancelled_td_list);
 827                list_del_init(&cur_td->cancelled_td_list);
 828                xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
 829        }
 830}
 831
 832/* This function is called when the stream ring timer times out.
 833 * The dwc3 host controller has an issue where it occasionally fails to
 834 * process BULK IN stream ring TDs even after the doorbell is rung for
 835 * that stream ring. When that happens the controller generates no
 836 * transfer events for the stream ring, and the transfer hangs.
 837 * xhci_stream_timeout() works around this by queueing a Stop Endpoint
 838 * command for the stream ring after the stream timer times out.
 839 */
 840void xhci_stream_timeout(unsigned long arg)
 841{
 842        struct xhci_hcd *xhci;
 843        struct xhci_virt_ep *ep;
 844        struct xhci_ring *ep_ring;
 845        unsigned int slot_id, ep_index, stream_id;
 846        struct xhci_td *td = NULL;
 847        struct urb *urb = NULL;
 848        struct urb_priv *urb_priv;
 849        struct xhci_command *command;
 850        unsigned long flags;
 851        int i;
 852
 853        ep_ring = (struct xhci_ring *) arg;
 854        xhci = ep_ring->xhci;
 855
 856        spin_lock_irqsave(&xhci->lock, flags);
 857
 858        if (!list_empty(&ep_ring->td_list)) {
 859                td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
 860                urb = td->urb;
 861                urb_priv = urb->hcpriv;
 862
 863                slot_id = urb->dev->slot_id;
 864                ep_index = xhci_get_endpoint_index(&urb->ep->desc);
 865                stream_id = ep_ring->stream_id;
 866                ep = &xhci->devs[slot_id]->eps[ep_index];
 867                ep_ring->stream_timeout_handler = true;
 868
 869                /* Delete the stream ring timer */
 870                del_timer(&ep_ring->stream_timer);
 871
 872                for (i = 0; i < urb_priv->length; i++) {
 873                        td = urb_priv->td[i];
 874                        list_add_tail(&td->cancelled_td_list,
 875                                        &ep->cancelled_td_list);
 876                }
 877
 878                /* Queue a stop endpoint command, but only if this is
 879                 * the first cancellation to be handled.
 880                 */
 881                if (!(ep->ep_state & EP_HALT_PENDING)) {
 882                        command = xhci_alloc_command(xhci, false,
 883                                        false, GFP_ATOMIC);
 884                        if (!command) {
 885                                xhci_warn(xhci,
 886                                        "%s: Failed to allocate command\n",
 887                                                __func__);
 888                                spin_unlock_irqrestore(&xhci->lock, flags);
 889                                return;
 890                        }
 891
 892                        ep->ep_state |= EP_HALT_PENDING;
 893                        ep->stop_cmds_pending++;
 894                        ep->stop_cmd_timer.expires = jiffies +
 895                                XHCI_STOP_EP_CMD_TIMEOUT * HZ;
 896                        add_timer(&ep->stop_cmd_timer);
 897                        xhci_queue_stop_endpoint(xhci, command,
 898                                        urb->dev->slot_id, ep_index, 0);
 899                        xhci_ring_cmd_db(xhci);
 900                }
 901
 902                spin_unlock_irqrestore(&xhci->lock, flags);
 903                return;
 904        }
 905
 906        spin_unlock_irqrestore(&xhci->lock, flags);
 907        /* let the upper (SCSI) layer take care of it */
 908        del_timer(&ep_ring->stream_timer);
 909}
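
/*
 * Illustrative sketch (compiled out): how the stream watchdog handled above
 * might be armed when a TD is queued on a stream ring.  The arming point and
 * timeout value are assumptions; in this tree the timer is initialized and
 * started elsewhere, and this function only handles or cancels it.
 */
#if 0
static void example_arm_stream_timer(struct xhci_hcd *xhci,
                struct xhci_ring *ep_ring)
{
        if (!(xhci->quirks & XHCI_STREAM_QUIRK))
                return;

        /* Hypothetical timeout; re-armed each time a TD is queued. */
        ep_ring->stream_timeout_handler = false;
        mod_timer(&ep_ring->stream_timer, jiffies + 5 * HZ);
}
#endif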
 910
 911/* Watchdog timer function for when a stop endpoint command fails to complete.
 912 * In this case, we assume the host controller is broken or dying or dead.  The
 913 * host may still be completing some other events, so we have to be careful to
 914 * let the event ring handler and the URB dequeueing/enqueueing functions know
 915 * through xhci->state.
 916 *
 917 * The timer may also fire if the host takes a very long time to respond to the
 918 * command, and the stop endpoint command completion handler cannot delete the
 919 * timer before the timer function is called.  Another endpoint cancellation may
 920 * sneak in before the timer function can grab the lock, and that may queue
 921 * another stop endpoint command and add the timer back.  So we cannot use a
 922 * simple flag to say whether there is a pending stop endpoint command for a
 923 * particular endpoint.
 924 *
 925 * Instead we use a combination of that flag and a counter for the number of
 926 * pending stop endpoint commands.  If the timer is the tail end of the last
 927 * stop endpoint command, and the endpoint's command is still pending, we assume
 928 * the host is dying.
 929 */
 930void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 931{
 932        struct xhci_hcd *xhci;
 933        struct xhci_virt_ep *ep;
 934        int ret, i, j;
 935        unsigned long flags;
 936
 937        ep = (struct xhci_virt_ep *) arg;
 938        xhci = ep->xhci;
 939
 940        spin_lock_irqsave(&xhci->lock, flags);
 941
 942        ep->stop_cmds_pending--;
 943        if (xhci->xhc_state & XHCI_STATE_REMOVING) {
 944                spin_unlock_irqrestore(&xhci->lock, flags);
 945                return;
 946        }
 947        if (xhci->xhc_state & XHCI_STATE_DYING) {
 948                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 949                                "Stop EP timer ran, but another timer marked "
 950                                "xHCI as DYING, exiting.");
 951                spin_unlock_irqrestore(&xhci->lock, flags);
 952                return;
 953        }
 954        if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
 955                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 956                                "Stop EP timer ran, but no command pending, "
 957                                "exiting.");
 958                spin_unlock_irqrestore(&xhci->lock, flags);
 959                return;
 960        }
 961
 962        xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
 963        xhci_warn(xhci, "Assuming host is dying, halting host.\n");
 964        /* Oops, HC is dead or dying or at least not responding to the stop
 965         * endpoint command.
 966         */
 967        xhci->xhc_state |= XHCI_STATE_DYING;
 968        /* Disable interrupts from the host controller and start halting it */
 969        xhci_quiesce(xhci);
 970        spin_unlock_irqrestore(&xhci->lock, flags);
 971
 972        ret = xhci_halt(xhci);
 973
 974        spin_lock_irqsave(&xhci->lock, flags);
 975        if (ret < 0) {
 976                /* This is bad; the host is not responding to commands and it's
 977                 * not allowing itself to be halted.  At least interrupts are
 978                 * disabled. If we call usb_hc_died(), it will attempt to
 979                 * disconnect all device drivers under this host.  Those
 980                 * disconnect() methods will wait for all URBs to be unlinked,
 981                 * so we must complete them.
 982                 */
 983                xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
 984                xhci_warn(xhci, "Completing active URBs anyway.\n");
 985                /* We could turn all TDs on the rings to no-ops.  This won't
 986                 * help if the host has cached part of the ring, and is slow if
 987                 * we want to preserve the cycle bit.  Skip it and hope the host
 988                 * doesn't touch the memory.
 989                 */
 990        }
 991        for (i = 0; i < MAX_HC_SLOTS; i++) {
 992                if (!xhci->devs[i])
 993                        continue;
 994                for (j = 0; j < 31; j++)
 995                        xhci_kill_endpoint_urbs(xhci, i, j);
 996        }
 997        spin_unlock_irqrestore(&xhci->lock, flags);
 998        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 999                        "Calling usb_hc_died()");
1000        usb_hc_died(xhci_to_hcd(xhci));
1001        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1002                        "xHCI host controller is dead.");
1003}
1004
1005
1006static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
1007                struct xhci_virt_device *dev,
1008                struct xhci_ring *ep_ring,
1009                unsigned int ep_index)
1010{
1011        union xhci_trb *dequeue_temp;
1012        int num_trbs_free_temp;
1013        bool revert = false;
1014
1015        num_trbs_free_temp = ep_ring->num_trbs_free;
1016        dequeue_temp = ep_ring->dequeue;
1017
1018        /* If we get two back-to-back stalls, and the first stalled transfer
1019         * ends just before a link TRB, the dequeue pointer will be left on
1020         * the link TRB by the code in the while loop.  So we have to update
1021         * the dequeue pointer one segment further, or we'll jump off
1022         * the segment into la-la-land.
1023         */
1024        if (trb_is_link(ep_ring->dequeue)) {
1025                ep_ring->deq_seg = ep_ring->deq_seg->next;
1026                ep_ring->dequeue = ep_ring->deq_seg->trbs;
1027        }
1028
1029        while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
1030                /* We have more usable TRBs */
1031                ep_ring->num_trbs_free++;
1032                ep_ring->dequeue++;
1033                if (trb_is_link(ep_ring->dequeue)) {
1034                        if (ep_ring->dequeue ==
1035                                        dev->eps[ep_index].queued_deq_ptr)
1036                                break;
1037                        ep_ring->deq_seg = ep_ring->deq_seg->next;
1038                        ep_ring->dequeue = ep_ring->deq_seg->trbs;
1039                }
1040                if (ep_ring->dequeue == dequeue_temp) {
1041                        revert = true;
1042                        break;
1043                }
1044        }
1045
1046        if (revert) {
1047                xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
1048                ep_ring->num_trbs_free = num_trbs_free_temp;
1049        }
1050}
1051
1052/*
1053 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
1054 * we need to clear the set deq pending flag in the endpoint ring state, so that
1055 * the TD queueing code can ring the doorbell again.  We also need to ring the
1056 * endpoint doorbell to restart the ring, but only if there aren't more
1057 * cancellations pending.
1058 */
1059static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
1060                union xhci_trb *trb, u32 cmd_comp_code)
1061{
1062        unsigned int ep_index;
1063        unsigned int stream_id;
1064        struct xhci_ring *ep_ring;
1065        struct xhci_virt_device *dev;
1066        struct xhci_virt_ep *ep;
1067        struct xhci_ep_ctx *ep_ctx;
1068        struct xhci_slot_ctx *slot_ctx;
1069
1070        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1071        stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
1072        dev = xhci->devs[slot_id];
1073        ep = &dev->eps[ep_index];
1074
1075        ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
1076        if (!ep_ring) {
1077                xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
1078                                stream_id);
1079                /* XXX: Harmless??? */
1080                goto cleanup;
1081        }
1082
1083        ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
1084        slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
1085
1086        if (cmd_comp_code != COMP_SUCCESS) {
1087                unsigned int ep_state;
1088                unsigned int slot_state;
1089
1090                switch (cmd_comp_code) {
1091                case COMP_TRB_ERR:
1092                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
1093                        break;
1094                case COMP_CTX_STATE:
1095                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
1096                        ep_state = le32_to_cpu(ep_ctx->ep_info);
1097                        ep_state &= EP_STATE_MASK;
1098                        slot_state = le32_to_cpu(slot_ctx->dev_state);
1099                        slot_state = GET_SLOT_STATE(slot_state);
1100                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1101                                        "Slot state = %u, EP state = %u",
1102                                        slot_state, ep_state);
1103                        break;
1104                case COMP_EBADSLT:
1105                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
1106                                        slot_id);
1107                        break;
1108                default:
1109                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
1110                                        cmd_comp_code);
1111                        break;
1112                }
1113                /* OK what do we do now?  The endpoint state is hosed, and we
1114                 * should never get to this point if the synchronization between
1115         * queueing and endpoint state is correct.  This might happen
1116                 * if the device gets disconnected after we've finished
1117                 * cancelling URBs, which might not be an error...
1118                 */
1119        } else {
1120                u64 deq;
1121                /* 4.6.10 deq ptr is written to the stream ctx for streams */
1122                if (ep->ep_state & EP_HAS_STREAMS) {
1123                        struct xhci_stream_ctx *ctx =
1124                                &ep->stream_info->stream_ctx_array[stream_id];
1125                        deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
1126                } else {
1127                        deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
1128                }
1129                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1130                        "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
1131                if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
1132                                         ep->queued_deq_ptr) == deq) {
1133                        /* Update the ring's dequeue segment and dequeue pointer
1134                         * to reflect the new position.
1135                         */
1136                        update_ring_for_set_deq_completion(xhci, dev,
1137                                ep_ring, ep_index);
1138                } else {
1139                        xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
1140                        xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
1141                                  ep->queued_deq_seg, ep->queued_deq_ptr);
1142                }
1143        }
1144
1145cleanup:
1146        dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
1147        dev->eps[ep_index].queued_deq_seg = NULL;
1148        dev->eps[ep_index].queued_deq_ptr = NULL;
1149        /* Restart any rings with pending URBs */
1150        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1151}
1152
1153static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
1154                union xhci_trb *trb, u32 cmd_comp_code)
1155{
1156        unsigned int ep_index;
1157
1158        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1159        /* This command will only fail if the endpoint wasn't halted,
1160         * but we don't care.
1161         */
1162        xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
1163                "Ignoring reset ep completion code of %u", cmd_comp_code);
1164
1165        /* HW with the reset endpoint quirk needs to have a configure endpoint
1166         * command complete before the endpoint can be used.  Queue that here
1167         * because the HW can't handle two commands being queued in a row.
1168         */
1169        if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
1170                struct xhci_command *command;
1171                command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
1172                if (!command) {
1173                        xhci_warn(xhci, "WARN Cannot submit cfg ep: ENOMEM\n");
1174                        return;
1175                }
1176                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1177                                "Queueing configure endpoint command");
1178                xhci_queue_configure_endpoint(xhci, command,
1179                                xhci->devs[slot_id]->in_ctx->dma, slot_id,
1180                                false);
1181                xhci_ring_cmd_db(xhci);
1182        } else {
1183                /* Clear our internal halted state */
1184                xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
1185        }
1186}
1187
1188static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
1189                u32 cmd_comp_code)
1190{
1191        if (cmd_comp_code == COMP_SUCCESS)
1192                xhci->slot_id = slot_id;
1193        else
1194                xhci->slot_id = 0;
1195}
1196
1197static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
1198{
1199        struct xhci_virt_device *virt_dev;
1200
1201        virt_dev = xhci->devs[slot_id];
1202        if (!virt_dev)
1203                return;
1204        if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
1205                /* Delete default control endpoint resources */
1206                xhci_free_device_endpoint_resources(xhci, virt_dev, true);
1207        xhci_free_virt_device(xhci, slot_id);
1208}
1209
1210static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
1211                struct xhci_event_cmd *event, u32 cmd_comp_code)
1212{
1213        struct xhci_virt_device *virt_dev;
1214        struct xhci_input_control_ctx *ctrl_ctx;
1215        unsigned int ep_index;
1216        unsigned int ep_state;
1217        u32 add_flags, drop_flags;
1218
1219        /*
1220         * Configure endpoint commands can come from the USB core
1221         * configuration or alt setting changes, or because the HW
1222         * needed an extra configure endpoint command after a reset
1223         * endpoint command or streams were being configured.
1224         * If the command was for a halted endpoint, the xHCI driver
1225         * is not waiting on the configure endpoint command.
1226         */
1227        virt_dev = xhci->devs[slot_id];
1228        ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
1229        if (!ctrl_ctx) {
1230                xhci_warn(xhci, "Could not get input context, bad type.\n");
1231                return;
1232        }
1233
1234        add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1235        drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1236        /* Input ctx add_flags are the endpoint index plus one */
1237        ep_index = xhci_last_valid_endpoint(add_flags) - 1;
1238
1239        /* A usb_set_interface() call directly after clearing a halted
1240         * condition may race on this quirky hardware.  Not worth
1241         * worrying about, since this is prototype hardware.  Not sure
1242         * if this will work for streams, but streams support was
1243         * untested on this prototype.
1244         */
1245        if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
1246                        ep_index != (unsigned int) -1 &&
1247                        add_flags - SLOT_FLAG == drop_flags) {
1248                ep_state = virt_dev->eps[ep_index].ep_state;
1249                if (!(ep_state & EP_HALTED))
1250                        return;
1251                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1252                                "Completed config ep cmd - "
1253                                "last ep index = %d, state = %d",
1254                                ep_index, ep_state);
1255                /* Clear internal halted state and restart ring(s) */
1256                virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
1257                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1258                return;
1259        }
1260        return;
1261}
1262
1263static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
1264                struct xhci_event_cmd *event)
1265{
1266        xhci_dbg(xhci, "Completed reset device command.\n");
1267        if (!xhci->devs[slot_id])
1268                xhci_warn(xhci, "Reset device command completion "
1269                                "for disabled slot %u\n", slot_id);
1270}
1271
1272static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
1273                struct xhci_event_cmd *event)
1274{
1275        if (!(xhci->quirks & XHCI_NEC_HOST)) {
1276                xhci->error_bitmask |= 1 << 6;
1277                return;
1278        }
1279        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1280                        "NEC firmware version %2x.%02x",
1281                        NEC_FW_MAJOR(le32_to_cpu(event->status)),
1282                        NEC_FW_MINOR(le32_to_cpu(event->status)));
1283}
1284
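/*
 * Remove a completed command from the command list.  If someone is waiting
 * on the command's completion, record the status and wake them; otherwise
 * nobody owns the command structure any more, so free it.
 */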
1285static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
1286{
1287        list_del(&cmd->cmd_list);
1288
1289        if (cmd->completion) {
1290                cmd->status = status;
1291                complete(cmd->completion);
1292        } else {
1293                kfree(cmd);
1294        }
1295}
1296
1297void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
1298{
1299        struct xhci_command *cur_cmd, *tmp_cmd;
1300        list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
1301                xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
1302}
1303
1304/*
1305 * Turn all commands on the command ring with status set to "aborted" into no-op TRBs.
1306 * If there are other commands waiting, then restart the ring and kick the timer.
1307 * This must be called with command ring stopped and xhci->lock held.
1308 */
1309static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
1310                                         struct xhci_command *cur_cmd)
1311{
1312        struct xhci_command *i_cmd, *tmp_cmd;
1313        u32 cycle_state;
1314
1315        /* Turn all aborted commands in list to no-ops, then restart */
1316        list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list,
1317                                 cmd_list) {
1318
1319                if (i_cmd->status != COMP_CMD_ABORT)
1320                        continue;
1321
1322                i_cmd->status = COMP_CMD_STOP;
1323
1324                xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
1325                         i_cmd->command_trb);
1326                /* get cycle state from the original cmd trb */
1327                cycle_state = le32_to_cpu(
1328                        i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
1329                /* modify the command trb to no-op command */
1330                i_cmd->command_trb->generic.field[0] = 0;
1331                i_cmd->command_trb->generic.field[1] = 0;
1332                i_cmd->command_trb->generic.field[2] = 0;
1333                i_cmd->command_trb->generic.field[3] = cpu_to_le32(
1334                        TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
1335
1336                /*
1337                 * The caller waiting on the completion is woken when the
1338                 * completion event for these no-op commands is received.
1339                 */
1340        }
1341
1342        xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
1343
1344        /* ring command ring doorbell to restart the command ring */
1345        if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
1346            !(xhci->xhc_state & XHCI_STATE_DYING)) {
1347                xhci->current_cmd = cur_cmd;
1348                mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
1349                xhci_ring_cmd_db(xhci);
1350        }
1351        return;
1352}
1353
1354
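/*
 * Command timer callback: mark the current command as aborted and, if the
 * command ring is still running, ask the xHC to abort it.  If the abort
 * fails, or this is the second timeout in a row (or the host is being
 * removed), clean up the whole command queue.  If the ring is already
 * stopped, turn the aborted commands into no-ops and restart it.
 */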
1355void xhci_handle_command_timeout(unsigned long data)
1356{
1357        struct xhci_hcd *xhci;
1358        int ret;
1359        unsigned long flags;
1360        u64 hw_ring_state;
1361        bool second_timeout = false;
1362        xhci = (struct xhci_hcd *) data;
1363
1364        /* mark this command to be cancelled */
1365        spin_lock_irqsave(&xhci->lock, flags);
1366        if (xhci->current_cmd) {
1367                if (xhci->current_cmd->status == COMP_CMD_ABORT)
1368                        second_timeout = true;
1369                xhci->current_cmd->status = COMP_CMD_ABORT;
1370        }
1371
1372        /* Make sure command ring is running before aborting it */
1373        hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
1374        if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
1375            (hw_ring_state & CMD_RING_RUNNING))  {
1376                spin_unlock_irqrestore(&xhci->lock, flags);
1377                xhci_dbg(xhci, "Command timeout\n");
1378                ret = xhci_abort_cmd_ring(xhci);
1379                if (unlikely(ret == -ESHUTDOWN)) {
1380                        xhci_err(xhci, "Abort command ring failed\n");
1381                        xhci_cleanup_command_queue(xhci);
1382                        usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
1383                        xhci_dbg(xhci, "xHCI host controller is dead.\n");
1384                }
1385                return;
1386        }
1387
1388        /* command ring failed to restart, or host removed. Bail out */
1389        if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) {
1390                spin_unlock_irqrestore(&xhci->lock, flags);
1391                xhci_dbg(xhci, "command timed out twice, ring start fail?\n");
1392                xhci_cleanup_command_queue(xhci);
1393                return;
1394        }
1395
1396        /* command timeout on stopped ring, ring can't be aborted */
1397        xhci_dbg(xhci, "Command timeout on stopped ring\n");
1398        xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
1399        spin_unlock_irqrestore(&xhci->lock, flags);
1400        return;
1401}
1402
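/*
 * Handle a command completion event: sanity-check that the event matches the
 * command at the ring's dequeue pointer, dispatch to the per-command handlers
 * above, and finally complete (or free) the xhci_command structure.
 */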
1403static void handle_cmd_completion(struct xhci_hcd *xhci,
1404                struct xhci_event_cmd *event)
1405{
1406        int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1407        u64 cmd_dma;
1408        dma_addr_t cmd_dequeue_dma;
1409        u32 cmd_comp_code;
1410        union xhci_trb *cmd_trb;
1411        struct xhci_command *cmd;
1412        u32 cmd_type;
1413
1414        cmd_dma = le64_to_cpu(event->cmd_trb);
1415        cmd_trb = xhci->cmd_ring->dequeue;
1416        cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
1417                        cmd_trb);
1418        /* Is the command ring deq ptr out of sync with the deq seg ptr? */
1419        if (cmd_dequeue_dma == 0) {
1420                xhci->error_bitmask |= 1 << 4;
1421                return;
1422        }
1423        /* Does the DMA address match our internal dequeue pointer address? */
1424        if (cmd_dma != (u64) cmd_dequeue_dma) {
1425                xhci->error_bitmask |= 1 << 5;
1426                return;
1427        }
1428
1429        cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
1430
1431        del_timer(&xhci->cmd_timer);
1432
1433        trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
1434
1435        cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
1436
1437        /* If CMD ring stopped we own the trbs between enqueue and dequeue */
1438        if (cmd_comp_code == COMP_CMD_STOP) {
1439                xhci_handle_stopped_cmd_ring(xhci, cmd);
1440                return;
1441        }
1442
1443        if (cmd->command_trb != xhci->cmd_ring->dequeue) {
1444                xhci_err(xhci,
1445                         "Command completion event does not match command\n");
1446                return;
1447        }
1448
1449        /*
1450         * Host aborted the command ring, check if the current command was
1451         * supposed to be aborted, otherwise continue normally.
1452         * The command ring is stopped now, but the xHC will issue a Command
1453         * Ring Stopped event which will cause us to restart it.
1454         */
1455        if (cmd_comp_code == COMP_CMD_ABORT) {
1456                xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
1457                if (cmd->status == COMP_CMD_ABORT)
1458                        goto event_handled;
1459        }
1460
1461        cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
1462        switch (cmd_type) {
1463        case TRB_ENABLE_SLOT:
1464                xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code);
1465                break;
1466        case TRB_DISABLE_SLOT:
1467                xhci_handle_cmd_disable_slot(xhci, slot_id);
1468                break;
1469        case TRB_CONFIG_EP:
1470                if (!cmd->completion)
1471                        xhci_handle_cmd_config_ep(xhci, slot_id, event,
1472                                                  cmd_comp_code);
1473                break;
1474        case TRB_EVAL_CONTEXT:
1475                break;
1476        case TRB_ADDR_DEV:
1477                break;
1478        case TRB_STOP_RING:
1479                WARN_ON(slot_id != TRB_TO_SLOT_ID(
1480                                le32_to_cpu(cmd_trb->generic.field[3])));
1481                xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
1482                break;
1483        case TRB_SET_DEQ:
1484                WARN_ON(slot_id != TRB_TO_SLOT_ID(
1485                                le32_to_cpu(cmd_trb->generic.field[3])));
1486                xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
1487                break;
1488        case TRB_CMD_NOOP:
1489                /* Is this an aborted command turned to NO-OP? */
1490                if (cmd->status == COMP_CMD_STOP)
1491                        cmd_comp_code = COMP_CMD_STOP;
1492                break;
1493        case TRB_RESET_EP:
1494                WARN_ON(slot_id != TRB_TO_SLOT_ID(
1495                                le32_to_cpu(cmd_trb->generic.field[3])));
1496                xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
1497                break;
1498        case TRB_RESET_DEV:
1499                /* SLOT_ID field in reset device cmd completion event TRB is 0.
1500                 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
1501                 */
1502                slot_id = TRB_TO_SLOT_ID(
1503                                le32_to_cpu(cmd_trb->generic.field[3]));
1504                xhci_handle_cmd_reset_dev(xhci, slot_id, event);
1505                break;
1506        case TRB_NEC_GET_FW:
1507                xhci_handle_cmd_nec_get_fw(xhci, event);
1508                break;
1509        default:
1510                /* Skip over unknown commands on the event ring */
1511                xhci->error_bitmask |= 1 << 6;
1512                break;
1513        }
1514
1515        /* restart timer if this wasn't the last command */
1516        if (cmd->cmd_list.next != &xhci->cmd_list) {
1517                xhci->current_cmd = list_entry(cmd->cmd_list.next,
1518                                               struct xhci_command, cmd_list);
1519                mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
1520        }
1521
1522event_handled:
1523        xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);
1524
1525        inc_deq(xhci, xhci->cmd_ring);
1526}
1527
1528static void handle_vendor_event(struct xhci_hcd *xhci,
1529                union xhci_trb *event)
1530{
1531        u32 trb_type;
1532
1533        trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
1534        xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
1535        if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
1536                handle_cmd_completion(xhci, &event->event_cmd);
1537}
1538
1539/* @port_id: the one-based port ID from the hardware (indexed from array of all
1540 * port registers -- USB 3.0 and USB 2.0).
1541 *
1542 * Returns a zero-based port number, which is suitable for indexing into each of
1543 * the split roothubs' port arrays and bus state arrays.
1544 * Add one to it in order to call xhci_find_slot_id_by_port.
1545 */
1546static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
1547                struct xhci_hcd *xhci, u32 port_id)
1548{
1549        unsigned int i;
1550        unsigned int num_similar_speed_ports = 0;
1551
1552        /* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
1553         * and usb2_ports are 0-based indexes.  Count the number of similar
1554         * speed ports, up to 1 port before this port.
1555         */
1556        for (i = 0; i < (port_id - 1); i++) {
1557                u8 port_speed = xhci->port_array[i];
1558
1559                /*
1560                 * Skip ports that don't have known speeds, or have duplicate
1561                 * Extended Capabilities port speed entries.
1562                 */
1563                if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
1564                        continue;
1565
1566                /*
1567                 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
1568                 * 1.1 ports are under the USB 2.0 hub.  If the port speed
1569                 * matches the device speed, it's a similar speed port.
1570                 */
1571                if ((port_speed == 0x03) == (hcd->speed >= HCD_USB3))
1572                        num_similar_speed_ports++;
1573        }
1574        return num_similar_speed_ports;
1575}
1576
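/*
 * A Device Notification event is generated when a device sends a notification
 * transaction packet; here it is used for device-initiated remote wake.
 * Forward the wake notification to the hub driver of the device's parent.
 */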
1577static void handle_device_notification(struct xhci_hcd *xhci,
1578                union xhci_trb *event)
1579{
1580        u32 slot_id;
1581        struct usb_device *udev;
1582
1583        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
1584        if (!xhci->devs[slot_id]) {
1585                xhci_warn(xhci, "Device Notification event for "
1586                                "unused slot %u\n", slot_id);
1587                return;
1588        }
1589
1590        xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
1591                        slot_id);
1592        udev = xhci->devs[slot_id]->udev;
1593        if (udev && udev->parent)
1594                usb_wakeup_notification(udev->parent, udev->portnum);
1595}
1596
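/*
 * Handle a Port Status Change event: work out which roothub and port the
 * event is for, handle resume and remote-wake link state transitions, and
 * then let the USB core poll the roothub for the actual status change.
 */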
1597static void handle_port_status(struct xhci_hcd *xhci,
1598                union xhci_trb *event)
1599{
1600        struct usb_hcd *hcd;
1601        u32 port_id;
1602        u32 temp, temp1;
1603        int max_ports;
1604        int slot_id;
1605        unsigned int faked_port_index;
1606        u8 major_revision;
1607        struct xhci_bus_state *bus_state;
1608        __le32 __iomem **port_array;
1609        bool bogus_port_status = false;
1610
1611        /* Port status change events always have a successful completion code */
1612        if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
1613                xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
1614                xhci->error_bitmask |= 1 << 8;
1615        }
1616        port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
1617        xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
1618
1619        max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1620        if ((port_id <= 0) || (port_id > max_ports)) {
1621                xhci_warn(xhci, "Invalid port id %d\n", port_id);
1622                inc_deq(xhci, xhci->event_ring);
1623                return;
1624        }
1625
1626        /* Figure out which usb_hcd this port is attached to:
1627         * is it a USB 3.0 port or a USB 2.0/1.1 port?
1628         */
1629        major_revision = xhci->port_array[port_id - 1];
1630
1631        /* Find the right roothub. */
1632        hcd = xhci_to_hcd(xhci);
1633        if ((major_revision == 0x03) != (hcd->speed >= HCD_USB3))
1634                hcd = xhci->shared_hcd;
1635
1636        if (major_revision == 0) {
1637                xhci_warn(xhci, "Event for port %u not in "
1638                                "Extended Capabilities, ignoring.\n",
1639                                port_id);
1640                bogus_port_status = true;
1641                goto cleanup;
1642        }
1643        if (major_revision == DUPLICATE_ENTRY) {
1644                xhci_warn(xhci, "Event for port %u duplicated in "
1645                                "Extended Capabilities, ignoring.\n",
1646                                port_id);
1647                bogus_port_status = true;
1648                goto cleanup;
1649        }
1650
1651        /*
1652         * Hardware port IDs reported by a Port Status Change Event include USB
1653         * 3.0 and USB 2.0 ports.  We want to check if the port has reported a
1654         * resume event, but we first need to translate the hardware port ID
1655         * into the index into the ports on the correct split roothub, and the
1656         * correct bus_state structure.
1657         */
1658        bus_state = &xhci->bus_state[hcd_index(hcd)];
1659        if (hcd->speed >= HCD_USB3)
1660                port_array = xhci->usb3_ports;
1661        else
1662                port_array = xhci->usb2_ports;
1663        /* Find the faked port hub number */
1664        faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
1665                        port_id);
1666
1667        temp = readl(port_array[faked_port_index]);
1668        if (hcd->state == HC_STATE_SUSPENDED) {
1669                xhci_dbg(xhci, "resume root hub\n");
1670                usb_hcd_resume_root_hub(hcd);
1671        }
1672
1673        if (hcd->speed >= HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
1674                bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
1675
1676        if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
1677                xhci_dbg(xhci, "port resume event for port %d\n", port_id);
1678
1679                temp1 = readl(&xhci->op_regs->command);
1680                if (!(temp1 & CMD_RUN)) {
1681                        xhci_warn(xhci, "xHC is not running.\n");
1682                        goto cleanup;
1683                }
1684
1685                if (DEV_SUPERSPEED_ANY(temp)) {
1686                        xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
1687                        /* Set a flag to say the port signaled remote wakeup,
1688                         * so we can tell the difference between the end of
1689                         * device and host initiated resume.
1690                         */
1691                        bus_state->port_remote_wakeup |= 1 << faked_port_index;
1692                        xhci_test_and_clear_bit(xhci, port_array,
1693                                        faked_port_index, PORT_PLC);
1694                        xhci_set_link_state(xhci, port_array, faked_port_index,
1695                                                XDEV_U0);
1696                        /* Need to wait until the next link state change
1697                         * indicates the device is actually in U0.
1698                         */
1699                        bogus_port_status = true;
1700                        goto cleanup;
1701                } else if (!test_bit(faked_port_index,
1702                                     &bus_state->resuming_ports)) {
1703                        xhci_dbg(xhci, "resume HS port %d\n", port_id);
1704                        bus_state->resume_done[faked_port_index] = jiffies +
1705                                msecs_to_jiffies(USB_RESUME_TIMEOUT);
1706                        set_bit(faked_port_index, &bus_state->resuming_ports);
1707                        mod_timer(&hcd->rh_timer,
1708                                  bus_state->resume_done[faked_port_index]);
1709                        /* Do the rest in GetPortStatus */
1710                }
1711        }
1712
1713        if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
1714                        DEV_SUPERSPEED_ANY(temp)) {
1715                xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
1716                /* We've just brought the device into U0 through either the
1717                 * Resume state after a device remote wakeup, or through the
1718                 * U3Exit state after a host-initiated resume.  If it's a device
1719                 * initiated remote wake, don't pass up the link state change,
1720                 * so the roothub behavior is consistent with external
1721                 * USB 3.0 hub behavior.
1722                 */
1723                slot_id = xhci_find_slot_id_by_port(hcd, xhci,
1724                                faked_port_index + 1);
1725                if (slot_id && xhci->devs[slot_id])
1726                        xhci_ring_device(xhci, slot_id);
1727                if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
1728                        bus_state->port_remote_wakeup &=
1729                                ~(1 << faked_port_index);
1730                        xhci_test_and_clear_bit(xhci, port_array,
1731                                        faked_port_index, PORT_PLC);
1732                        usb_wakeup_notification(hcd->self.root_hub,
1733                                        faked_port_index + 1);
1734                        bogus_port_status = true;
1735                        goto cleanup;
1736                }
1737        }
1738
1739        /*
1740         * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
1741         * RExit to a disconnect state).  If so, let the driver know it's
1742         * out of the RExit state.
1743         */
1744        if (!DEV_SUPERSPEED_ANY(temp) &&
1745                        test_and_clear_bit(faked_port_index,
1746                                &bus_state->rexit_ports)) {
1747                complete(&bus_state->rexit_done[faked_port_index]);
1748                bogus_port_status = true;
1749                goto cleanup;
1750        }
1751
1752        if (hcd->speed < HCD_USB3)
1753                xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
1754                                        PORT_PLC);
1755
1756cleanup:
1757        /* Update event ring dequeue pointer before dropping the lock */
1758        inc_deq(xhci, xhci->event_ring);
1759
1760        /* Don't make the USB core poll the roothub if we got a bad port status
1761         * change event.  Besides, at that point we can't tell which roothub
1762         * (USB 2.0 or USB 3.0) to kick.
1763         */
1764        if (bogus_port_status)
1765                return;
1766
1767        /*
1768         * xHCI port-status-change events occur when the "or" of all the
1769         * status-change bits in the portsc register changes from 0 to 1.
1770         * New status changes won't cause an event if any other change
1771         * bits are still set.  When an event occurs, switch over to
1772         * polling to avoid losing status changes.
1773         */
1774        xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1775        set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1776        spin_unlock(&xhci->lock);
1777        /* Pass this up to the core */
1778        usb_hcd_poll_rh_status(hcd);
1779        spin_lock(&xhci->lock);
1780}
1781
1782/*
1783 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
1784 * at end_trb, which may be in another segment.  If the suspect DMA address is a
1785 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
1786 * returns NULL.
1787 */
1788struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
1789                struct xhci_segment *start_seg,
1790                union xhci_trb  *start_trb,
1791                union xhci_trb  *end_trb,
1792                dma_addr_t      suspect_dma,
1793                bool            debug)
1794{
1795        dma_addr_t start_dma;
1796        dma_addr_t end_seg_dma;
1797        dma_addr_t end_trb_dma;
1798        struct xhci_segment *cur_seg;
1799
1800        start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
1801        cur_seg = start_seg;
1802
1803        do {
1804                if (start_dma == 0)
1805                        return NULL;
1806                /* We may get an event for a Link TRB in the middle of a TD */
1807                end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
1808                                &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
1809                /* If the end TRB isn't in this segment, this is set to 0 */
1810                end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
1811
1812                if (debug)
1813                        xhci_warn(xhci,
1814                                "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
1815                                (unsigned long long)suspect_dma,
1816                                (unsigned long long)start_dma,
1817                                (unsigned long long)end_trb_dma,
1818                                (unsigned long long)cur_seg->dma,
1819                                (unsigned long long)end_seg_dma);
1820
1821                if (end_trb_dma > 0) {
1822                        /* The end TRB is in this segment, so suspect should be here */
1823                        if (start_dma <= end_trb_dma) {
1824                                if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
1825                                        return cur_seg;
1826                        } else {
1827                                /* Case for one segment with
1828                                 * a TD wrapped around to the top
1829                                 */
1830                                if ((suspect_dma >= start_dma &&
1831                                                        suspect_dma <= end_seg_dma) ||
1832                                                (suspect_dma >= cur_seg->dma &&
1833                                                 suspect_dma <= end_trb_dma))
1834                                        return cur_seg;
1835                        }
1836                        return NULL;
1837                } else {
1838                        /* Might still be somewhere in this segment */
1839                        if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
1840                                return cur_seg;
1841                }
1842                cur_seg = cur_seg->next;
1843                start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
1844        } while (cur_seg != start_seg);
1845
1846        return NULL;
1847}
1848
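/*
 * Recover an endpoint that the xHC has halted: mark it halted internally,
 * queue a Reset Endpoint command, clean up the stalled ring (which moves the
 * dequeue pointer past the offending TD), and ring the command doorbell.
 */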
1849static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
1850                unsigned int slot_id, unsigned int ep_index,
1851                unsigned int stream_id,
1852                struct xhci_td *td, union xhci_trb *event_trb)
1853{
1854        struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
1855        struct xhci_command *command;
1856        command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
1857        if (!command)
1858                return;
1859
1860        ep->ep_state |= EP_HALTED;
1861        ep->stopped_stream = stream_id;
1862
1863        xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
1864        xhci_cleanup_stalled_ring(xhci, ep_index, td);
1865
1866        ep->stopped_stream = 0;
1867
1868        xhci_ring_cmd_db(xhci);
1869}
1870
1871/* Check if an error has halted the endpoint ring.  The class driver will
1872 * clean up the halt for a non-default control endpoint if we indicate a stall.
1873 * However, babble and other errors also halt the endpoint ring, and the class
1874 * driver won't clear the halt in that case, so we need to issue a Set Transfer
1875 * Ring Dequeue Pointer command manually.
1876 */
1877static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
1878                struct xhci_ep_ctx *ep_ctx,
1879                unsigned int trb_comp_code)
1880{
1881        /* TRB completion codes that may require a manual halt cleanup */
1882        if (trb_comp_code == COMP_TX_ERR ||
1883                        trb_comp_code == COMP_BABBLE ||
1884                        trb_comp_code == COMP_SPLIT_ERR)
1885                /* The 0.95 spec says a babbling control endpoint
1886                 * is not halted. The 0.96 spec says it is.  Some HW
1887                 * claims to be 0.95 compliant, but it halts the control
1888                 * endpoint anyway.  Check if a babble halted the
1889                 * endpoint.
1890                 */
1891                if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
1892                    cpu_to_le32(EP_STATE_HALTED))
1893                        return 1;
1894
1895        return 0;
1896}
1897
1898int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
1899{
1900        if (trb_comp_code >= 224 && trb_comp_code <= 255) {
1901                /* Vendor defined "informational" completion code,
1902                 * treat as not-an-error.
1903                 */
1904                xhci_dbg(xhci, "Vendor defined info completion code %u\n",
1905                                trb_comp_code);
1906                xhci_dbg(xhci, "Treating code as success.\n");
1907                return 1;
1908        }
1909        return 0;
1910}
1911
1912/*
1913 * Finish processing the TD and remove it from the endpoint's td list.
1914 * Return 1 if the URB can be given back to the USB core.
1915 */
1916static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
1917        union xhci_trb *event_trb, struct xhci_transfer_event *event,
1918        struct xhci_virt_ep *ep, int *status, bool skip)
1919{
1920        struct xhci_virt_device *xdev;
1921        struct xhci_ring *ep_ring;
1922        unsigned int slot_id;
1923        int ep_index;
1924        struct urb *urb = NULL;
1925        struct xhci_ep_ctx *ep_ctx;
1926        int ret = 0;
1927        struct urb_priv *urb_priv;
1928        u32 trb_comp_code;
1929
1930        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1931        xdev = xhci->devs[slot_id];
1932        ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1933        ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1934        ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1935        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1936
1937        if (skip)
1938                goto td_cleanup;
1939
1940        if (trb_comp_code == COMP_STOP_INVAL ||
1941                        trb_comp_code == COMP_STOP ||
1942                        trb_comp_code == COMP_STOP_SHORT) {
1943                /* The Endpoint Stop Command completion will take care of any
1944                 * stopped TDs.  A stopped TD may be restarted, so don't update
1945                 * the ring dequeue pointer or take this TD off any lists yet.
1946                 */
1947                ep->stopped_td = td;
1948                return 0;
1949        }
1950        if (trb_comp_code == COMP_STALL ||
1951                xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
1952                                                trb_comp_code)) {
1953                /* Issue a reset endpoint command to clear the host side
1954                 * halt, followed by a set dequeue command to move the
1955                 * dequeue pointer past the TD.
1956                 * The class driver clears the device side halt later.
1957                 */
1958                xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
1959                                        ep_ring->stream_id, td, event_trb);
1960        } else {
1961                /* Update ring dequeue pointer */
1962                while (ep_ring->dequeue != td->last_trb)
1963                        inc_deq(xhci, ep_ring);
1964                inc_deq(xhci, ep_ring);
1965        }
1966
1967td_cleanup:
1968        /* Clean up the endpoint's TD list */
1969        urb = td->urb;
1970        urb_priv = urb->hcpriv;
1971
1972        /* if a bounce buffer was used to align this td then unmap it */
1973        if (td->bounce_seg)
1974                xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);
1975
1976        /* Do one last check of the actual transfer length.
1977         * If the host controller said we transferred more data than the buffer
1978         * length, urb->actual_length will be a very big number (since it's
1979         * unsigned).  Play it safe and say we didn't transfer anything.
1980         */
1981        if (urb->actual_length > urb->transfer_buffer_length) {
1982                xhci_warn(xhci, "URB transfer length is wrong, xHC issue? req. len = %u, act. len = %u\n",
1983                        urb->transfer_buffer_length,
1984                        urb->actual_length);
1985                urb->actual_length = 0;
1986                if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1987                        *status = -EREMOTEIO;
1988                else
1989                        *status = 0;
1990        }
1991        list_del_init(&td->td_list);
1992        /* Was this TD slated to be cancelled but completed anyway? */
1993        if (!list_empty(&td->cancelled_td_list))
1994                list_del_init(&td->cancelled_td_list);
1995
1996        urb_priv->td_cnt++;
1997        /* Give the URB back once all of its TDs have completed */
1998        if (urb_priv->td_cnt == urb_priv->length) {
1999                ret = 1;
2000                if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
2001                        xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
2002                        if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
2003                                if (xhci->quirks & XHCI_AMD_PLL_FIX)
2004                                        usb_amd_quirk_pll_enable();
2005                        }
2006                }
2007        }
2008
2009        return ret;
2010}
2011
2012/*
2013 * Process control tds, update urb status and actual_length.
2014 */
2015static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
2016        union xhci_trb *event_trb, struct xhci_transfer_event *event,
2017        struct xhci_virt_ep *ep, int *status)
2018{
2019        struct xhci_virt_device *xdev;
2020        struct xhci_ring *ep_ring;
2021        unsigned int slot_id;
2022        int ep_index;
2023        struct xhci_ep_ctx *ep_ctx;
2024        u32 trb_comp_code;
2025
2026        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
2027        xdev = xhci->devs[slot_id];
2028        ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
2029        ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2030        ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
2031        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2032
2033        switch (trb_comp_code) {
2034        case COMP_SUCCESS:
2035                if (event_trb == ep_ring->dequeue) {
2036                        xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
2037                                        "without IOC set??\n");
2038                        *status = -ESHUTDOWN;
2039                } else if (event_trb != td->last_trb) {
2040                        xhci_warn(xhci, "WARN: Success on ctrl data TRB "
2041                                        "without IOC set??\n");
2042                        *status = -ESHUTDOWN;
2043                } else {
2044                        *status = 0;
2045                }
2046                break;
2047        case COMP_SHORT_TX:
2048                if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2049                        *status = -EREMOTEIO;
2050                else
2051                        *status = 0;
2052                break;
2053        case COMP_STOP_SHORT:
2054                if (event_trb == ep_ring->dequeue || event_trb == td->last_trb)
2055                        xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
2056                else
2057                        td->urb->actual_length =
2058                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2059
2060                return finish_td(xhci, td, event_trb, event, ep, status, false);
2061        case COMP_STOP:
2062                /* Did we stop at data stage? */
2063                if (event_trb != ep_ring->dequeue && event_trb != td->last_trb)
2064                        td->urb->actual_length =
2065                                td->urb->transfer_buffer_length -
2066                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2067                /* fall through */
2068        case COMP_STOP_INVAL:
2069                return finish_td(xhci, td, event_trb, event, ep, status, false);
2070        default:
2071                if (!xhci_requires_manual_halt_cleanup(xhci,
2072                                        ep_ctx, trb_comp_code))
2073                        break;
2074                xhci_dbg(xhci, "TRB error code %u, "
2075                                "halted endpoint index = %u\n",
2076                                trb_comp_code, ep_index);
2077                /* else fall through */
2078        case COMP_STALL:
2079                /* Did we transfer part of the data (middle) phase? */
2080                if (event_trb != ep_ring->dequeue &&
2081                                event_trb != td->last_trb)
2082                        td->urb->actual_length =
2083                                td->urb->transfer_buffer_length -
2084                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2085                else if (!td->urb_length_set)
2086                        td->urb->actual_length = 0;
2087
2088                return finish_td(xhci, td, event_trb, event, ep, status, false);
2089        }
2090        /*
2091         * Did we transfer any data, despite the errors that might have
2092         * happened?  I.e. did we get past the setup stage?
2093         */
2094        if (event_trb != ep_ring->dequeue) {
2095                /* The event was for the status stage */
2096                if (event_trb == td->last_trb) {
2097                        if (td->urb_length_set) {
2098                                /* Don't overwrite a previously set error code
2099                                 */
2100                                if ((*status == -EINPROGRESS || *status == 0) &&
2101                                                (td->urb->transfer_flags
2102                                                 & URB_SHORT_NOT_OK))
2103                                        /* Did we already see a short data
2104                                         * stage? */
2105                                        *status = -EREMOTEIO;
2106                        } else {
2107                                td->urb->actual_length =
2108                                        td->urb->transfer_buffer_length;
2109                        }
2110                } else {
2111                        /*
2112                         * Maybe the event was for the data stage? If so, update
2113                         * already the actual_length of the URB and flag it as
2114                         * set, so that it is not overwritten in the event for
2115                         * the last TRB.
2116                         */
2117                        td->urb_length_set = true;
2118                        td->urb->actual_length =
2119                                td->urb->transfer_buffer_length -
2120                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2121                        xhci_dbg(xhci, "Waiting for status "
2122                                        "stage event\n");
2123                        return 0;
2124                }
2125        }
2126
2127        return finish_td(xhci, td, event_trb, event, ep, status, false);
2128}
2129
2130/*
2131 * Process isochronous tds, update urb packet status and actual_length.
2132 */
2133static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2134        union xhci_trb *event_trb, struct xhci_transfer_event *event,
2135        struct xhci_virt_ep *ep, int *status)
2136{
2137        struct xhci_ring *ep_ring;
2138        struct urb_priv *urb_priv;
2139        int idx;
2140        int len = 0;
2141        union xhci_trb *cur_trb;
2142        struct xhci_segment *cur_seg;
2143        struct usb_iso_packet_descriptor *frame;
2144        u32 trb_comp_code;
2145        bool skip_td = false;
2146
2147        ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2148        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2149        urb_priv = td->urb->hcpriv;
2150        idx = urb_priv->td_cnt;
2151        frame = &td->urb->iso_frame_desc[idx];
2152
2153        /* handle completion code */
2154        switch (trb_comp_code) {
2155        case COMP_SUCCESS:
2156                if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
2157                        frame->status = 0;
2158                        break;
2159                }
2160                if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
2161                        trb_comp_code = COMP_SHORT_TX;
2162        /* fallthrough */
2163        case COMP_STOP_SHORT:
2164        case COMP_SHORT_TX:
2165                frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
2166                                -EREMOTEIO : 0;
2167                break;
2168        case COMP_BW_OVER:
2169                frame->status = -ECOMM;
2170                skip_td = true;
2171                break;
2172        case COMP_BUFF_OVER:
2173        case COMP_BABBLE:
2174                frame->status = -EOVERFLOW;
2175                skip_td = true;
2176                break;
2177        case COMP_DEV_ERR:
2178        case COMP_STALL:
2179                frame->status = -EPROTO;
2180                skip_td = true;
2181                break;
2182        case COMP_TX_ERR:
2183                frame->status = -EPROTO;
2184                if (event_trb != td->last_trb)
2185                        return 0;
2186                skip_td = true;
2187                break;
2188        case COMP_STOP:
2189        case COMP_STOP_INVAL:
2190                break;
2191        default:
2192                frame->status = -1;
2193                break;
2194        }
2195
2196        if (trb_comp_code == COMP_SUCCESS || skip_td) {
2197                frame->actual_length = frame->length;
2198                td->urb->actual_length += frame->length;
2199        } else if (trb_comp_code == COMP_STOP_SHORT) {
2200                frame->actual_length =
2201                        EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2202                td->urb->actual_length += frame->actual_length;
2203        } else {
2204                for (cur_trb = ep_ring->dequeue,
2205                     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
2206                     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
2207                        if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
2208                            !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
2209                                len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
2210                }
2211                len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
2212                        EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2213
2214                if (trb_comp_code != COMP_STOP_INVAL) {
2215                        frame->actual_length = len;
2216                        td->urb->actual_length += len;
2217                }
2218        }
2219
2220        return finish_td(xhci, td, event_trb, event, ep, status, false);
2221}
2222
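/*
 * Skip over an isoc TD that the host controller missed: mark the frame as
 * only partially completed (-EXDEV) with no data transferred, move the ring
 * dequeue pointer past the TD, and finish it as a skipped TD.
 */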
2223static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2224                        struct xhci_transfer_event *event,
2225                        struct xhci_virt_ep *ep, int *status)
2226{
2227        struct xhci_ring *ep_ring;
2228        struct urb_priv *urb_priv;
2229        struct usb_iso_packet_descriptor *frame;
2230        int idx;
2231
2232        ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2233        urb_priv = td->urb->hcpriv;
2234        idx = urb_priv->td_cnt;
2235        frame = &td->urb->iso_frame_desc[idx];
2236
2237        /* The transfer is partly done. */
2238        frame->status = -EXDEV;
2239
2240        /* calc actual length */
2241        frame->actual_length = 0;
2242
2243        /* Update ring dequeue pointer */
2244        while (ep_ring->dequeue != td->last_trb)
2245                inc_deq(xhci, ep_ring);
2246        inc_deq(xhci, ep_ring);
2247
2248        return finish_td(xhci, td, NULL, event, ep, status, true);
2249}
2250
2251/*
2252 * Process bulk and interrupt tds, update urb status and actual_length.
2253 */
2254static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
2255        union xhci_trb *event_trb, struct xhci_transfer_event *event,
2256        struct xhci_virt_ep *ep, int *status)
2257{
2258        struct xhci_ring *ep_ring;
2259        union xhci_trb *cur_trb;
2260        struct xhci_segment *cur_seg;
2261        u32 trb_comp_code;
2262
2263        ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2264        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2265
2266        switch (trb_comp_code) {
2267        case COMP_SUCCESS:
2268                /* Double check that the HW transferred everything. */
2269                if (event_trb != td->last_trb ||
2270                    EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
2271                        xhci_warn(xhci, "WARN Successful completion "
2272                                        "on short TX\n");
2273                        if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2274                                *status = -EREMOTEIO;
2275                        else
2276                                *status = 0;
2277                        if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
2278                                trb_comp_code = COMP_SHORT_TX;
2279                } else {
2280                        *status = 0;
2281                }
2282                break;
2283        case COMP_STOP_SHORT:
2284        case COMP_SHORT_TX:
2285                if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2286                        *status = -EREMOTEIO;
2287                else
2288                        *status = 0;
2289                break;
2290        default:
2291                /* Others already handled above */
2292                break;
2293        }
2294        if (trb_comp_code == COMP_SHORT_TX)
2295                xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
2296                                "%d bytes untransferred\n",
2297                                td->urb->ep->desc.bEndpointAddress,
2298                                td->urb->transfer_buffer_length,
2299                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
2300        /* Stopped - short packet completion */
2301        if (trb_comp_code == COMP_STOP_SHORT) {
2302                td->urb->actual_length =
2303                        EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2304
2305                if (td->urb->transfer_buffer_length <
2306                                td->urb->actual_length) {
2307                        xhci_warn(xhci, "HC gave bad length of %d bytes txed\n",
2308                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
2309                        td->urb->actual_length = 0;
2310                         /* status will be set by usb core for canceled urbs */
2311                }
2312        /* Fast path - was this the last TRB in the TD for this URB? */
2313        } else if (event_trb == td->last_trb) {
2314                if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
2315                        td->urb->actual_length =
2316                                td->urb->transfer_buffer_length -
2317                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2318                        if (td->urb->transfer_buffer_length <
2319                                        td->urb->actual_length) {
2320                                xhci_warn(xhci, "HC gave bad length "
2321                                                "of %d bytes left\n",
2322                                          EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
2323                                td->urb->actual_length = 0;
2324                                if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2325                                        *status = -EREMOTEIO;
2326                                else
2327                                        *status = 0;
2328                        }
2329                        /* Don't overwrite a previously set error code */
2330                        if (*status == -EINPROGRESS) {
2331                                if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2332                                        *status = -EREMOTEIO;
2333                                else
2334                                        *status = 0;
2335                        }
2336                } else {
2337                        td->urb->actual_length =
2338                                td->urb->transfer_buffer_length;
2339                        /* Ignore a short packet completion if the
2340                         * untransferred length was zero.
2341                         */
2342                        if (*status == -EREMOTEIO)
2343                                *status = 0;
2344                }
2345        } else {
2346                /* Slow path - walk the list, starting from the dequeue
2347                 * pointer, to get the actual length transferred.
2348                 */
2349                td->urb->actual_length = 0;
2350                for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
2351                                cur_trb != event_trb;
2352                                next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
2353                        if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
2354                            !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
2355                                td->urb->actual_length +=
2356                                        TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
2357                }
2358                /* If the ring didn't stop on a Link or No-op TRB, add
2359                 * in the actual bytes transferred from the Normal TRB
2360                 */
2361                if (trb_comp_code != COMP_STOP_INVAL)
2362                        td->urb->actual_length +=
2363                                TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
2364                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2365        }
2366
2367        return finish_td(xhci, td, event_trb, event, ep, status, false);
2368}
2369
2370/*
2371 * If this function returns an error condition, it means it got a Transfer
2372 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
2373 * At this point, the host controller is probably hosed and should be reset.
2374 */
2375static int handle_tx_event(struct xhci_hcd *xhci,
2376                struct xhci_transfer_event *event)
2377        __releases(&xhci->lock)
2378        __acquires(&xhci->lock)
2379{
2380        struct xhci_virt_device *xdev;
2381        struct xhci_virt_ep *ep;
2382        struct xhci_ring *ep_ring;
2383        unsigned int slot_id;
2384        int ep_index;
2385        struct xhci_td *td = NULL;
2386        dma_addr_t event_dma;
2387        struct xhci_segment *event_seg;
2388        union xhci_trb *event_trb;
2389        struct urb *urb = NULL;
2390        int status = -EINPROGRESS;
2391        struct urb_priv *urb_priv;
2392        struct xhci_ep_ctx *ep_ctx;
2393        struct list_head *tmp;
2394        u32 trb_comp_code;
2395        int ret = 0;
2396        int td_num = 0;
2397        bool handling_skipped_tds = false;
2398
2399        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
2400        xdev = xhci->devs[slot_id];
2401        if (!xdev) {
2402                xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
2403                xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
2404                         (unsigned long long) xhci_trb_virt_to_dma(
2405                                 xhci->event_ring->deq_seg,
2406                                 xhci->event_ring->dequeue),
2407                         lower_32_bits(le64_to_cpu(event->buffer)),
2408                         upper_32_bits(le64_to_cpu(event->buffer)),
2409                         le32_to_cpu(event->transfer_len),
2410                         le32_to_cpu(event->flags));
2411                xhci_dbg(xhci, "Event ring:\n");
2412                xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
2413                return -ENODEV;
2414        }
2415
2416        /* Endpoint ID is 1 based, our index is zero based */
2417        ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
2418        ep = &xdev->eps[ep_index];
2419        ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2420        ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
2421        if (!ep_ring ||
2422            (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
2423            EP_STATE_DISABLED) {
2424                xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
2425                                "or incorrect stream ring\n");
2426                xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
2427                         (unsigned long long) xhci_trb_virt_to_dma(
2428                                 xhci->event_ring->deq_seg,
2429                                 xhci->event_ring->dequeue),
2430                         lower_32_bits(le64_to_cpu(event->buffer)),
2431                         upper_32_bits(le64_to_cpu(event->buffer)),
2432                         le32_to_cpu(event->transfer_len),
2433                         le32_to_cpu(event->flags));
2434                xhci_dbg(xhci, "Event ring:\n");
2435                xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
2436                return -ENODEV;
2437        }
2438
2439        /* Count current td numbers if ep->skip is set */
2440        if (ep->skip) {
2441                list_for_each(tmp, &ep_ring->td_list)
2442                        td_num++;
2443        }
2444
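        /*
         * Vendor stream quirk: a transfer event on a stream ring means the
         * xHC made progress, so cancel the stall-detection timer armed when
         * the BULK IN stream URB was queued (see xhci_queue_bulk_tx()).
         */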
2445        if ((xhci->quirks & XHCI_STREAM_QUIRK) &&
2446                                (ep->ep_state & EP_HAS_STREAMS))
2447                del_timer(&ep_ring->stream_timer);
2448
2449        event_dma = le64_to_cpu(event->buffer);
2450        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2451        /* Look for common error cases */
2452        switch (trb_comp_code) {
2453        /* Skip codes that require special handling depending on
2454         * transfer type
2455         */
2456        case COMP_SUCCESS:
2457                if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
2458                        break;
2459                if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
2460                        trb_comp_code = COMP_SHORT_TX;
2461                else
2462                        xhci_warn_ratelimited(xhci,
2463                                        "WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");
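                /* Fall through to the short-transfer handling below. */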
2464        case COMP_SHORT_TX:
2465                break;
2466        case COMP_STOP:
2467                xhci_dbg(xhci, "Stopped on Transfer TRB\n");
2468                break;
2469        case COMP_STOP_INVAL:
2470                xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
2471                break;
2472        case COMP_STOP_SHORT:
2473                xhci_dbg(xhci, "Stopped with short packet transfer detected\n");
2474                break;
2475        case COMP_STALL:
2476                xhci_dbg(xhci, "Stalled endpoint\n");
2477                ep->ep_state |= EP_HALTED;
2478                status = -EPIPE;
2479                break;
2480        case COMP_TRB_ERR:
2481                xhci_warn(xhci, "WARN: TRB error on endpoint\n");
2482                status = -EILSEQ;
2483                break;
2484        case COMP_SPLIT_ERR:
2485        case COMP_TX_ERR:
2486                xhci_dbg(xhci, "Transfer error on endpoint\n");
2487                status = -EPROTO;
2488                break;
2489        case COMP_BABBLE:
2490                xhci_dbg(xhci, "Babble error on endpoint\n");
2491                status = -EOVERFLOW;
2492                break;
2493        case COMP_DB_ERR:
2494                xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
2495                status = -ENOSR;
2496                break;
2497        case COMP_BW_OVER:
2498                xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
2499                break;
2500        case COMP_BUFF_OVER:
2501                xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
2502                break;
2503        case COMP_UNDERRUN:
2504                /*
2505                 * When the Isoch ring is empty, the xHC will generate
2506                 * a Ring Overrun Event for IN Isoch endpoint or Ring
2507                 * Underrun Event for OUT Isoch endpoint.
2508                 */
2509                xhci_dbg(xhci, "underrun event on endpoint\n");
2510                if (!list_empty(&ep_ring->td_list))
2511                        xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
2512                                        "still with TDs queued?\n",
2513                                 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2514                                 ep_index);
2515                goto cleanup;
2516        case COMP_OVERRUN:
2517                xhci_dbg(xhci, "overrun event on endpoint\n");
2518                if (!list_empty(&ep_ring->td_list))
2519                        xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
2520                                        "still with TDs queued?\n",
2521                                 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2522                                 ep_index);
2523                goto cleanup;
2524        case COMP_DEV_ERR:
2525                xhci_warn(xhci, "WARN: detected an incompatible device\n");
2526                status = -EPROTO;
2527                break;
2528        case COMP_MISSED_INT:
2529                /*
2530                 * When a Missed Service Error is encountered, one or more
2531                 * isoc TDs may have been missed by the xHC.
2532                 * Set the endpoint's skip flag; complete the missed TDs as
2533                 * short transfers the next time the endpoint ring is processed.
2534                 */
2535                ep->skip = true;
2536                xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
2537                goto cleanup;
2538        case COMP_PING_ERR:
2539                ep->skip = true;
2540                xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n");
2541                goto cleanup;
2542        default:
2543                if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
2544                        status = 0;
2545                        break;
2546                }
2547                xhci_warn(xhci, "ERROR Unknown event condition %u, HC probably busted\n",
2548                          trb_comp_code);
2549                goto cleanup;
2550        }
2551
2552        do {
2553                /* This TRB should be in the TD at the head of this ring's
2554                 * TD list.
2555                 */
2556                if (list_empty(&ep_ring->td_list)) {
2557                        /*
2558                         * A stopped endpoint may generate an extra completion
2559                         * event if the device was suspended.  Don't print
2560                         * warnings.
2561                         */
2562                        if (!(trb_comp_code == COMP_STOP ||
2563                                                trb_comp_code == COMP_STOP_INVAL)) {
2564                                xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
2565                                                TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2566                                                ep_index);
2567                                xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
2568                                                (le32_to_cpu(event->flags) &
2569                                                 TRB_TYPE_BITMASK)>>10);
2570                                xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
2571                        }
2572                        if (ep->skip) {
2573                                ep->skip = false;
2574                                xhci_dbg(xhci, "td_list is empty while skip "
2575                                                "flag set. Clear skip flag.\n");
2576                        }
2577                        ret = 0;
2578                        goto cleanup;
2579                }
2580
2581                /* We've skipped all the TDs on the ep ring when ep->skip is set */
2582                if (ep->skip && td_num == 0) {
2583                        ep->skip = false;
2584                        xhci_dbg(xhci, "All tds on the ep_ring skipped. "
2585                                                "Clear skip flag.\n");
2586                        ret = 0;
2587                        goto cleanup;
2588                }
2589
2590                td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
2591                if (ep->skip)
2592                        td_num--;
2593
2594                /* Is this a TRB in the currently executing TD? */
2595                event_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
2596                                td->last_trb, event_dma, false);
2597
2598                /*
2599                 * Skip the Force Stopped Event. The event_trb (event_dma) of the
2600                 * FSE is not in the current TD pointed to by ep_ring->dequeue,
2601                 * because the hardware dequeue pointer is still at the TRB
2602                 * preceding the current TD. That TRB may be a Link TRB or the
2603                 * last TRB of the previous TD. The command completion handler
2604                 * will take care of the rest.
2605                 */
2606                if (!event_seg && (trb_comp_code == COMP_STOP ||
2607                                   trb_comp_code == COMP_STOP_INVAL)) {
2608                        ret = 0;
2609                        goto cleanup;
2610                }
2611
2612                if (!event_seg) {
2613                        if (!ep->skip ||
2614                            !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
2615                                /* Some host controllers give a spurious
2616                                 * successful event after a short transfer.
2617                                 * Ignore it.
2618                                 */
2619                                if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
2620                                                ep_ring->last_td_was_short) {
2621                                        ep_ring->last_td_was_short = false;
2622                                        ret = 0;
2623                                        goto cleanup;
2624                                }
2625                                /* HC is busted, give up! */
2626                                xhci_err(xhci,
2627                                        "ERROR Transfer event TRB DMA ptr not "
2628                                        "part of current TD ep_index %d "
2629                                        "comp_code %u\n", ep_index,
2630                                        trb_comp_code);
2631                                trb_in_td(xhci, ep_ring->deq_seg,
2632                                          ep_ring->dequeue, td->last_trb,
2633                                          event_dma, true);
2634                                return -ESHUTDOWN;
2635                        }
2636
2637                        ret = skip_isoc_td(xhci, td, event, ep, &status);
2638                        goto cleanup;
2639                }
2640                if (trb_comp_code == COMP_SHORT_TX)
2641                        ep_ring->last_td_was_short = true;
2642                else
2643                        ep_ring->last_td_was_short = false;
2644
2645                if (ep->skip) {
2646                        xhci_dbg(xhci, "Found td. Clear skip flag.\n");
2647                        ep->skip = false;
2648                }
2649
2650                event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
2651                                                sizeof(*event_trb)];
2652                /*
2653                 * No-op TRB should not trigger interrupts.
2654                 * If event_trb is a no-op TRB, it means the
2655                 * corresponding TD has been cancelled. Just ignore
2656                 * the TD.
2657                 */
2658                if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
2659                        xhci_dbg(xhci,
2660                                 "event_trb is a no-op TRB. Skip it\n");
2661                        goto cleanup;
2662                }
2663
2664                /* Now update the urb's actual_length and give back to
2665                 * the core
2666                 */
2667                if (usb_endpoint_xfer_control(&td->urb->ep->desc))
2668                        ret = process_ctrl_td(xhci, td, event_trb, event, ep,
2669                                                 &status);
2670                else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
2671                        ret = process_isoc_td(xhci, td, event_trb, event, ep,
2672                                                 &status);
2673                else
2674                        ret = process_bulk_intr_td(xhci, td, event_trb, event,
2675                                                 ep, &status);
2676
2677cleanup:
2678
2679
2680                handling_skipped_tds = ep->skip &&
2681                        trb_comp_code != COMP_MISSED_INT &&
2682                        trb_comp_code != COMP_PING_ERR;
2683
2684                /*
2685                 * Do not update event ring dequeue pointer if we're in a loop
2686                 * processing missed tds.
2687                 */
2688                if (!handling_skipped_tds)
2689                        inc_deq(xhci, xhci->event_ring);
2690
2691                if (ret) {
2692                        urb = td->urb;
2693                        urb_priv = urb->hcpriv;
2694
2695                        xhci_urb_free_priv(urb_priv);
2696
2697                        usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
2698                        if ((urb->actual_length != urb->transfer_buffer_length &&
2699                                                (urb->transfer_flags &
2700                                                 URB_SHORT_NOT_OK)) ||
2701                                        (status != 0 &&
2702                                         !usb_endpoint_xfer_isoc(&urb->ep->desc)))
2703                                xhci_dbg(xhci, "Giveback URB %p, len = %d, "
2704                                                "expected = %d, status = %d\n",
2705                                                urb, urb->actual_length,
2706                                                urb->transfer_buffer_length,
2707                                                status);
2708                        spin_unlock(&xhci->lock);
2709                        /* EHCI, UHCI, and OHCI always unconditionally set the
2710                         * urb->status of an isochronous endpoint to 0.
2711                         */
2712                        if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
2713                                status = 0;
2714                        usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
2715                        spin_lock(&xhci->lock);
2716                }
2717
2718        /*
2719         * If ep->skip is set, there are missed TDs on the endpoint ring
2720         * that still need to be taken care of.
2721         * Process them as short transfers until we reach the TD pointed
2722         * to by the event.
2723         */
2724        } while (handling_skipped_tds);
2725
2726        return 0;
2727}
2728
2729/*
2730 * This function handles all OS-owned events on the event ring.  It may drop
2731 * xhci->lock between event processing (e.g. to pass up port status changes).
2732 * Returns >0 for "possibly more events to process" (caller should call again),
2733 * otherwise 0 if done.  In future, <0 returns should indicate error code.
2734 */
2735static int xhci_handle_event(struct xhci_hcd *xhci)
2736{
2737        union xhci_trb *event;
2738        int update_ptrs = 1;
2739        int ret;
2740
2741        if (!xhci->event_ring || !xhci->event_ring->dequeue) {
2742                xhci->error_bitmask |= 1 << 1;
2743                return 0;
2744        }
2745
2746        event = xhci->event_ring->dequeue;
2747        /* Does the HC or OS own the TRB? */
2748        if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
2749            xhci->event_ring->cycle_state) {
2750                xhci->error_bitmask |= 1 << 2;
2751                return 0;
2752        }
2753
2754        /*
2755         * Barrier between reading the TRB_CYCLE (valid) flag above and any
2756         * speculative reads of the event's flags/data below.
2757         */
2758        rmb();
2759        /* FIXME: Handle more event types. */
2760        switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
2761        case TRB_TYPE(TRB_COMPLETION):
2762                handle_cmd_completion(xhci, &event->event_cmd);
2763                break;
2764        case TRB_TYPE(TRB_PORT_STATUS):
2765                handle_port_status(xhci, event);
2766                update_ptrs = 0;
2767                break;
2768        case TRB_TYPE(TRB_TRANSFER):
2769                ret = handle_tx_event(xhci, &event->trans_event);
2770                if (ret < 0)
2771                        xhci->error_bitmask |= 1 << 9;
2772                else
2773                        update_ptrs = 0;
2774                break;
2775        case TRB_TYPE(TRB_DEV_NOTE):
2776                handle_device_notification(xhci, event);
2777                break;
2778        default:
2779                if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
2780                    TRB_TYPE(48))
2781                        handle_vendor_event(xhci, event);
2782                else
2783                        xhci->error_bitmask |= 1 << 3;
2784        }
2785        /* Any of the above functions may drop and re-acquire the lock, so check
2786         * to make sure a watchdog timer didn't mark the host as non-responsive.
2787         */
2788        if (xhci->xhc_state & XHCI_STATE_DYING) {
2789                xhci_dbg(xhci, "xHCI host dying, returning from "
2790                                "event handler.\n");
2791                return 0;
2792        }
2793
2794        if (update_ptrs)
2795                /* Update SW event ring dequeue pointer */
2796                inc_deq(xhci, xhci->event_ring);
2797
2798        /* Are there more items on the event ring?  Caller will call us again to
2799         * check.
2800         */
2801        return 1;
2802}
2803
2804/*
2805 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
2806 * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
2807 * indicators of an event TRB error, but we check the status *first* to be safe.
2808 */
2809irqreturn_t xhci_irq(struct usb_hcd *hcd)
2810{
2811        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2812        u32 status;
2813        u64 temp_64;
2814        union xhci_trb *event_ring_deq;
2815        dma_addr_t deq;
2816
2817        spin_lock(&xhci->lock);
2818        /* Check if the xHC generated the interrupt, or the irq is shared */
2819        status = readl(&xhci->op_regs->status);
2820        if (status == 0xffffffff)
2821                goto hw_died;
2822
2823        if (!(status & STS_EINT)) {
2824                spin_unlock(&xhci->lock);
2825                return IRQ_NONE;
2826        }
2827        if (status & STS_FATAL) {
2828                xhci_warn(xhci, "WARNING: Host System Error\n");
2829                xhci_halt(xhci);
2830hw_died:
2831                spin_unlock(&xhci->lock);
2832                return IRQ_HANDLED;
2833        }
2834
2835        /*
2836         * Clear the op reg interrupt status first,
2837         * so we can receive interrupts from other MSI-X interrupters.
2838         * Write 1 to clear the interrupt status.
2839         */
2840        status |= STS_EINT;
2841        writel(status, &xhci->op_regs->status);
2842        /* FIXME when MSI-X is supported and there are multiple vectors */
2843        /* Clear the MSI-X event interrupt status */
2844
2845        if (hcd->irq) {
2846                u32 irq_pending;
2847                /* Acknowledge the PCI interrupt */
2848                irq_pending = readl(&xhci->ir_set->irq_pending);
2849                irq_pending |= IMAN_IP;
2850                writel(irq_pending, &xhci->ir_set->irq_pending);
2851        }
2852
2853        if (xhci->xhc_state & XHCI_STATE_DYING ||
2854            xhci->xhc_state & XHCI_STATE_HALTED) {
2855                xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
2856                                "Shouldn't IRQs be disabled?\n");
2857                /* Clear the event handler busy flag (RW1C);
2858                 * the event ring should be empty.
2859                 */
2860                temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2861                xhci_write_64(xhci, temp_64 | ERST_EHB,
2862                                &xhci->ir_set->erst_dequeue);
2863                spin_unlock(&xhci->lock);
2864
2865                return IRQ_HANDLED;
2866        }
2867
2868        event_ring_deq = xhci->event_ring->dequeue;
2869        /* FIXME this should be a delayed service routine
2870         * that clears the EHB.
2871         */
2872        while (xhci_handle_event(xhci) > 0) {}
2873
2874        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2875        /* If necessary, update the HW's version of the event ring deq ptr. */
2876        if (event_ring_deq != xhci->event_ring->dequeue) {
2877                deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2878                                xhci->event_ring->dequeue);
2879                if (deq == 0)
2880                        xhci_warn(xhci, "WARN something wrong with SW event "
2881                                        "ring dequeue ptr.\n");
2882                /* Update HC event ring dequeue pointer */
2883                temp_64 &= ERST_PTR_MASK;
2884                temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
2885        }
2886
2887        /* Clear the event handler busy flag (RW1C); event ring is empty. */
2888        temp_64 |= ERST_EHB;
2889        xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
2890
2891        spin_unlock(&xhci->lock);
2892
2893        return IRQ_HANDLED;
2894}
2895
2896irqreturn_t xhci_msi_irq(int irq, void *hcd)
2897{
2898        return xhci_irq(hcd);
2899}
2900
2901/****           Endpoint Ring Operations        ****/
2902
2903/*
2904 * Generic function for queueing a TRB on a ring.
2905 * The caller must have checked to make sure there's room on the ring.
2906 *
2907 * @more_trbs_coming:   Will you enqueue more TRBs before calling
2908 *                      prepare_transfer()?
2909 */
2910static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
2911                bool more_trbs_coming,
2912                u32 field1, u32 field2, u32 field3, u32 field4)
2913{
2914        struct xhci_generic_trb *trb;
2915
2916        trb = &ring->enqueue->generic;
2917        trb->field[0] = cpu_to_le32(field1);
2918        trb->field[1] = cpu_to_le32(field2);
2919        trb->field[2] = cpu_to_le32(field3);
2920        trb->field[3] = cpu_to_le32(field4);
2921        inc_enq(xhci, ring, more_trbs_coming);
2922}
2923
2924/*
2925 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
2926 * FIXME allocate segments if the ring is full.
2927 */
2928static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
2929                u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
2930{
2931        unsigned int num_trbs_needed;
2932
2933        /* Make sure the endpoint has been added to xHC schedule */
2934        switch (ep_state) {
2935        case EP_STATE_DISABLED:
2936                /*
2937                 * USB core changed config/interfaces without notifying us,
2938                 * or hardware is reporting the wrong state.
2939                 */
2940                xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
2941                return -ENOENT;
2942        case EP_STATE_ERROR:
2943                xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
2944                /* FIXME event handling code for error needs to clear it */
2945                /* XXX not sure if this should be -ENOENT or not */
2946                return -EINVAL;
2947        case EP_STATE_HALTED:
2948                xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
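                /* Fall through: queue the URB on the halted endpoint anyway. */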
2949        case EP_STATE_STOPPED:
2950        case EP_STATE_RUNNING:
2951                break;
2952        default:
2953                xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
2954                /*
2955                 * FIXME issue Configure Endpoint command to try to get the HC
2956                 * back into a known state.
2957                 */
2958                return -EINVAL;
2959        }
2960
2961        while (1) {
2962                if (room_on_ring(xhci, ep_ring, num_trbs))
2963                        break;
2964
2965                if (ep_ring == xhci->cmd_ring) {
2966                        xhci_err(xhci, "Do not support expand command ring\n");
2967                        return -ENOMEM;
2968                }
2969
2970                xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
2971                                "ERROR no room on ep ring, try ring expansion");
2972                num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
2973                if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
2974                                        mem_flags)) {
2975                        xhci_err(xhci, "Ring expansion failed\n");
2976                        return -ENOMEM;
2977                }
2978        }
2979
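        /*
         * The enqueue pointer may sit on one or more link TRBs; hand each of
         * them to the HC (setting or clearing the chain bit and toggling the
         * cycle bit as required) and advance to the first TRB of the next
         * segment.
         */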
2980        while (trb_is_link(ep_ring->enqueue)) {
2981                /* If we're not dealing with 0.95 hardware or isoc rings
2982                 * on AMD 0.96 host, clear the chain bit.
2983                 */
2984                if (!xhci_link_trb_quirk(xhci) &&
2985                    !(ep_ring->type == TYPE_ISOC &&
2986                      (xhci->quirks & XHCI_AMD_0x96_HOST)))
2987                        ep_ring->enqueue->link.control &=
2988                                cpu_to_le32(~TRB_CHAIN);
2989                else
2990                        ep_ring->enqueue->link.control |=
2991                                cpu_to_le32(TRB_CHAIN);
2992
2993                wmb();
2994                ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
2995
2996                /* Toggle the cycle bit after the last ring segment. */
2997                if (link_trb_toggles_cycle(ep_ring->enqueue))
2998                        ep_ring->cycle_state ^= 1;
2999
3000                ep_ring->enq_seg = ep_ring->enq_seg->next;
3001                ep_ring->enqueue = ep_ring->enq_seg->trbs;
3002        }
3003        return 0;
3004}
3005
3006static int prepare_transfer(struct xhci_hcd *xhci,
3007                struct xhci_virt_device *xdev,
3008                unsigned int ep_index,
3009                unsigned int stream_id,
3010                unsigned int num_trbs,
3011                struct urb *urb,
3012                unsigned int td_index,
3013                gfp_t mem_flags)
3014{
3015        int ret;
3016        struct urb_priv *urb_priv;
3017        struct xhci_td  *td;
3018        struct xhci_ring *ep_ring;
3019        struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3020
3021        ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
3022        if (!ep_ring) {
3023                xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
3024                                stream_id);
3025                return -EINVAL;
3026        }
3027
3028        ret = prepare_ring(xhci, ep_ring,
3029                           le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
3030                           num_trbs, mem_flags);
3031        if (ret)
3032                return ret;
3033
3034        urb_priv = urb->hcpriv;
3035        td = urb_priv->td[td_index];
3036
3037        INIT_LIST_HEAD(&td->td_list);
3038        INIT_LIST_HEAD(&td->cancelled_td_list);
3039
3040        if (td_index == 0) {
3041                ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
3042                if (unlikely(ret))
3043                        return ret;
3044        }
3045
3046        td->urb = urb;
3047        /* Add this TD to the tail of the endpoint ring's TD list */
3048        list_add_tail(&td->td_list, &ep_ring->td_list);
3049        td->start_seg = ep_ring->enq_seg;
3050        td->first_trb = ep_ring->enqueue;
3051
3052        urb_priv->td[td_index] = td;
3053
3054        return 0;
3055}
3056
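/*
 * A TRB may not cross a 64 KB boundary (TRB_MAX_BUFF_SIZE), so a buffer
 * needs one TRB per 64 KB region it touches.  Illustrative example: a
 * 32-byte buffer starting 16 bytes below a 64 KB boundary touches two
 * regions and therefore needs two TRBs; a zero-length buffer still needs one.
 */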
3057static unsigned int count_trbs(u64 addr, u64 len)
3058{
3059        unsigned int num_trbs;
3060
3061        num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
3062                        TRB_MAX_BUFF_SIZE);
3063        if (num_trbs == 0)
3064                num_trbs++;
3065
3066        return num_trbs;
3067}
3068
3069static inline unsigned int count_trbs_needed(struct urb *urb)
3070{
3071        return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
3072}
3073
3074static unsigned int count_sg_trbs_needed(struct urb *urb)
3075{
3076        struct scatterlist *sg;
3077        unsigned int i, len, full_len, num_trbs = 0;
3078
3079        full_len = urb->transfer_buffer_length;
3080
3081        for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
3082                len = sg_dma_len(sg);
3083                num_trbs += count_trbs(sg_dma_address(sg), len);
3084                len = min_t(unsigned int, len, full_len);
3085                full_len -= len;
3086                if (full_len == 0)
3087                        break;
3088        }
3089
3090        return num_trbs;
3091}
3092
3093static unsigned int count_isoc_trbs_needed(struct urb *urb, int i)
3094{
3095        u64 addr, len;
3096
3097        addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
3098        len = urb->iso_frame_desc[i].length;
3099
3100        return count_trbs(addr, len);
3101}
3102
3103static void check_trb_math(struct urb *urb, int running_total)
3104{
3105        if (unlikely(running_total != urb->transfer_buffer_length))
3106                dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
3107                                "queued %#x (%d), asked for %#x (%d)\n",
3108                                __func__,
3109                                urb->ep->desc.bEndpointAddress,
3110                                running_total, running_total,
3111                                urb->transfer_buffer_length,
3112                                urb->transfer_buffer_length);
3113}
3114
3115static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
3116                unsigned int ep_index, unsigned int stream_id, int start_cycle,
3117                struct xhci_generic_trb *start_trb)
3118{
3119        /*
3120         * Pass all the TRBs to the hardware at once and make sure this write
3121         * isn't reordered.
3122         */
3123        wmb();
3124        if (start_cycle)
3125                start_trb->field[3] |= cpu_to_le32(start_cycle);
3126        else
3127                start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
3128        xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
3129}
3130
3131static void check_interval(struct xhci_hcd *xhci, struct urb *urb,
3132                                                struct xhci_ep_ctx *ep_ctx)
3133{
3134        int xhci_interval;
3135        int ep_interval;
3136
3137        xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
3138        ep_interval = urb->interval;
3139
3140        /* Convert to microframes */
3141        if (urb->dev->speed == USB_SPEED_LOW ||
3142                        urb->dev->speed == USB_SPEED_FULL)
3143                ep_interval *= 8;
3144
3145        /* FIXME change this to a warning and a suggestion to use the new API
3146         * to set the polling interval (once the API is added).
3147         */
3148        if (xhci_interval != ep_interval) {
3149                dev_dbg_ratelimited(&urb->dev->dev,
3150                                "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
3151                                ep_interval, ep_interval == 1 ? "" : "s",
3152                                xhci_interval, xhci_interval == 1 ? "" : "s");
3153                urb->interval = xhci_interval;
3154                /* Convert back to frames for LS/FS devices */
3155                if (urb->dev->speed == USB_SPEED_LOW ||
3156                                urb->dev->speed == USB_SPEED_FULL)
3157                        urb->interval /= 8;
3158        }
3159}
3160
3161/*
3162 * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
3163 * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
3164 * (comprised of sg list entries) can take several service intervals to
3165 * transmit.
3166 */
3167int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3168                struct urb *urb, int slot_id, unsigned int ep_index)
3169{
3170        struct xhci_ep_ctx *ep_ctx;
3171
3172        ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
3173        check_interval(xhci, urb, ep_ctx);
3174
3175        return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
3176}
3177
3178/*
3179 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
3180 * packets remaining in the TD (*not* including this TRB).
3181 *
3182 * Total TD packet count = total_packet_count =
3183 *     DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
3184 *
3185 * Packets transferred up to and including this TRB = packets_transferred =
3186 *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
3187 *
3188 * TD size = total_packet_count - packets_transferred
3189 *
3190 * For xHCI 0.96 and older, TD size field should be the remaining bytes
3191 * including this TRB, right shifted by 10
3192 *
3193 * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
3194 * This is taken care of in the TRB_TD_SIZE() macro
3195 *
3196 * The last TRB in a TD must have the TD size set to zero.
3197 */
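/*
 * Worked example (illustrative): a 3000-byte TD on an endpoint with
 * wMaxPacketSize 512 gives total_packet_count = DIV_ROUND_UP(3000, 512) = 6.
 * For a first TRB covering 1024 bytes, packets_transferred = 1024 / 512 = 2,
 * so the TD size written into that TRB is 6 - 2 = 4 (xHCI 1.0 rules).
 */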
3198static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
3199                              int trb_buff_len, unsigned int td_total_len,
3200                              struct urb *urb, bool more_trbs_coming)
3201{
3202        u32 maxp, total_packet_count;
3203
3204        /* MTK xHCI is mostly 0.97 but contains some features from 1.0 */
3205        if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
3206                return ((td_total_len - transferred) >> 10);
3207
3208        /* Last TRB of the TD, a one-TRB TD, or a zero-length packet: TD size is 0. */
3209        if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
3210            trb_buff_len == td_total_len)
3211                return 0;
3212
3213        /* for MTK xHCI, TD size doesn't include this TRB */
3214        if (xhci->quirks & XHCI_MTK_HOST)
3215                trb_buff_len = 0;
3216
3217        maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
3218        total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
3219
3220        /* Queueing functions don't count the current TRB into transferred */
3221        return (total_packet_count - ((transferred + trb_buff_len) / maxp));
3222}
3223
3224
3225static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
3226                         u32 *trb_buff_len, struct xhci_segment *seg)
3227{
3228        struct device *dev = xhci_to_hcd(xhci)->self.controller;
3229        unsigned int unalign;
3230        unsigned int max_pkt;
3231        u32 new_buff_len;
3232
3233        max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
3234        unalign = (enqd_len + *trb_buff_len) % max_pkt;
3235
3236        /* we got lucky, last normal TRB data on segment is packet aligned */
3237        if (unalign == 0)
3238                return 0;
3239
3240        xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
3241                 unalign, *trb_buff_len);
3242
3243        /* Can the last normal TRB be aligned by splitting it? */
3244        if (*trb_buff_len > unalign) {
3245                *trb_buff_len -= unalign;
3246                xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
3247                return 0;
3248        }
3249
3250        /*
3251         * We want enqd_len + trb_buff_len to sum up to a number that is
3252         * divisible by the endpoint's wMaxPacketSize. IOW:
3253         * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
3254         */
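        /* e.g. (illustrative): max_pkt = 512, enqd_len = 1000 -> new_buff_len = 512 - 488 = 24 */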
3255        new_buff_len = max_pkt - (enqd_len % max_pkt);
3256
3257        if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
3258                new_buff_len = (urb->transfer_buffer_length - enqd_len);
3259
3260        /* Create a bounce buffer of at most max_pkt bytes, pointed to by the last TRB */
3261        if (usb_urb_dir_out(urb)) {
3262                sg_pcopy_to_buffer(urb->sg, urb->num_mapped_sgs,
3263                                   seg->bounce_buf, new_buff_len, enqd_len);
3264                seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3265                                                 max_pkt, DMA_TO_DEVICE);
3266        } else {
3267                seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3268                                                 max_pkt, DMA_FROM_DEVICE);
3269        }
3270
3271        if (dma_mapping_error(dev, seg->bounce_dma)) {
3272                /* try without aligning. Some host controllers survive */
3273                xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
3274                return 0;
3275        }
3276        *trb_buff_len = new_buff_len;
3277        seg->bounce_len = new_buff_len;
3278        seg->bounce_offs = enqd_len;
3279
3280        xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);
3281
3282        return 1;
3283}
3284
3285/* This is very similar to what ehci-q.c qtd_fill() does */
3286int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3287                struct urb *urb, int slot_id, unsigned int ep_index)
3288{
3289        struct xhci_ring *ring;
3290        struct urb_priv *urb_priv;
3291        struct xhci_td *td;
3292        struct xhci_generic_trb *start_trb;
3293        struct scatterlist *sg = NULL;
3294        bool more_trbs_coming = true;
3295        bool need_zero_pkt = false;
3296        bool first_trb = true;
3297        unsigned int num_trbs;
3298        unsigned int start_cycle, num_sgs = 0;
3299        unsigned int enqd_len, block_len, trb_buff_len, full_len;
3300        int sent_len, ret;
3301        u32 field, length_field, remainder;
3302        u64 addr, send_addr;
3303
3304        ring = xhci_urb_to_transfer_ring(xhci, urb);
3305        if (!ring)
3306                return -EINVAL;
3307
3308        full_len = urb->transfer_buffer_length;
3309        /* If we have scatter/gather list, we use it. */
3310        if (urb->num_sgs) {
3311                num_sgs = urb->num_mapped_sgs;
3312                sg = urb->sg;
3313                addr = (u64) sg_dma_address(sg);
3314                block_len = sg_dma_len(sg);
3315                num_trbs = count_sg_trbs_needed(urb);
3316        } else {
3317                num_trbs = count_trbs_needed(urb);
3318                addr = (u64) urb->transfer_dma;
3319                block_len = full_len;
3320        }
3321        ret = prepare_transfer(xhci, xhci->devs[slot_id],
3322                        ep_index, urb->stream_id,
3323                        num_trbs, urb, 0, mem_flags);
3324        if (unlikely(ret < 0))
3325                return ret;
3326
3327        urb_priv = urb->hcpriv;
3328
3329        /* Deal with URB_ZERO_PACKET - need one more td/trb */
3330        if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->length > 1)
3331                need_zero_pkt = true;
3332
3333        td = urb_priv->td[0];
3334
3335        /*
3336         * Don't give the first TRB to the hardware (by toggling the cycle bit)
3337         * until we've finished creating all the other TRBs.  The ring's cycle
3338         * state may change as we enqueue the other TRBs, so save it too.
3339         */
3340        start_trb = &ring->enqueue->generic;
3341        start_cycle = ring->cycle_state;
3342        send_addr = addr;
3343
3344        /* Queue the TRBs, even if they are zero-length */
3345        for (enqd_len = 0; first_trb || enqd_len < full_len;
3346                        enqd_len += trb_buff_len) {
3347                field = TRB_TYPE(TRB_NORMAL);
3348
3349                /* TRB buffer should not cross 64KB boundaries */
3350                trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
3351                trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);
3352
3353                if (enqd_len + trb_buff_len > full_len)
3354                        trb_buff_len = full_len - enqd_len;
3355
3356                /* Don't change the cycle bit of the first TRB until later */
3357                if (first_trb) {
3358                        first_trb = false;
3359                        if (start_cycle == 0)
3360                                field |= TRB_CYCLE;
3361                } else
3362                        field |= ring->cycle_state;
3363
3364                /* Chain all the TRBs together; clear the chain bit in the last
3365                 * TRB to indicate it's the last TRB in the chain.
3366                 */
3367                if (enqd_len + trb_buff_len < full_len) {
3368                        field |= TRB_CHAIN;
3369                        if (trb_is_link(ring->enqueue + 1)) {
3370                                if (xhci_align_td(xhci, urb, enqd_len,
3371                                                  &trb_buff_len,
3372                                                  ring->enq_seg)) {
3373                                        send_addr = ring->enq_seg->bounce_dma;
3374                                        /* assuming TD won't span 2 segs */
3375                                        td->bounce_seg = ring->enq_seg;
3376                                }
3377                        }
3378                }
3379                if (enqd_len + trb_buff_len >= full_len) {
3380                        field &= ~TRB_CHAIN;
3381                        field |= TRB_IOC;
3382                        more_trbs_coming = false;
3383                        td->last_trb = ring->enqueue;
3384                }
3385
3386                /* Only set interrupt on short packet for IN endpoints */
3387                if (usb_urb_dir_in(urb))
3388                        field |= TRB_ISP;
3389
3390                /* Set the TRB length, TD size, and interrupter fields. */
3391                remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
3392                                              full_len, urb, more_trbs_coming);
3393
3394                length_field = TRB_LEN(trb_buff_len) |
3395                        TRB_TD_SIZE(remainder) |
3396                        TRB_INTR_TARGET(0);
3397
3398                queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
3399                                lower_32_bits(send_addr),
3400                                upper_32_bits(send_addr),
3401                                length_field,
3402                                field);
3403
3404                addr += trb_buff_len;
3405                sent_len = trb_buff_len;
3406
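                /*
                 * If this TRB consumed the rest of the current sg entry (or
                 * more), advance to the next mapped entry.
                 */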
3407                while (sg && sent_len >= block_len) {
3408                        /* New sg entry */
3409                        --num_sgs;
3410                        sent_len -= block_len;
3411                        if (num_sgs != 0) {
3412                                sg = sg_next(sg);
3413                                block_len = sg_dma_len(sg);
3414                                addr = (u64) sg_dma_address(sg);
3415                                addr += sent_len;
3416                        }
3417                }
3418                block_len -= sent_len;
3419                send_addr = addr;
3420        }
3421
3422        if (need_zero_pkt) {
3423                ret = prepare_transfer(xhci, xhci->devs[slot_id],
3424                                       ep_index, urb->stream_id,
3425                                       1, urb, 1, mem_flags);
3426                urb_priv->td[1]->last_trb = ring->enqueue;
3427                field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
3428                queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
3429        }
3430
3431        check_trb_math(urb, enqd_len);
3432
3433        if ((xhci->quirks & XHCI_STREAM_QUIRK) && (urb->stream_id > 0) &&
3434                                (usb_endpoint_dir_in(&urb->ep->desc) == 1)) {
3435                /* Start the stream timer so that xhci_stream_timeout() can be
3436                 * triggered if xhci is stuck while processing BULK IN streams.
3437                 */
3438                ring->stream_timeout_handler = false;
3439                mod_timer(&ring->stream_timer, jiffies + 5 * HZ);
3440        }
3441        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3442                        start_cycle, start_trb);
3443        return 0;
3444}
3445
3446/* Caller must have locked xhci->lock */
3447int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3448                struct urb *urb, int slot_id, unsigned int ep_index)
3449{
3450        struct xhci_ring *ep_ring;
3451        int num_trbs;
3452        int ret;
3453        struct usb_ctrlrequest *setup;
3454        struct xhci_generic_trb *start_trb;
3455        int start_cycle;
3456        u32 field, length_field, remainder;
3457        struct urb_priv *urb_priv;
3458        struct xhci_td *td;
3459
3460        ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3461        if (!ep_ring)
3462                return -EINVAL;
3463
3464        /*
3465         * Need to copy setup packet into setup TRB, so we can't use the setup
3466         * DMA address.
3467         */
3468        if (!urb->setup_packet)
3469                return -EINVAL;
3470
3471        /* 1 TRB for setup, 1 for status */
3472        num_trbs = 2;
3473        /*
3474         * Don't need to check if we need additional event data and normal TRBs,
3475         * since data in control transfers will never get bigger than 16MB
3476         * XXX: can we get a buffer that crosses 64KB boundaries?
3477         */
3478        if (urb->transfer_buffer_length > 0)
3479                num_trbs++;
3480        ret = prepare_transfer(xhci, xhci->devs[slot_id],
3481                        ep_index, urb->stream_id,
3482                        num_trbs, urb, 0, mem_flags);
3483        if (ret < 0)
3484                return ret;
3485
3486        urb_priv = urb->hcpriv;
3487        td = urb_priv->td[0];
3488
3489        /*
3490         * Don't give the first TRB to the hardware (by toggling the cycle bit)
3491         * until we've finished creating all the other TRBs.  The ring's cycle
3492         * state may change as we enqueue the other TRBs, so save it too.
3493         */
3494        start_trb = &ep_ring->enqueue->generic;
3495        start_cycle = ep_ring->cycle_state;
3496
3497        /* Queue setup TRB - see section 6.4.1.2.1 */
3498        /* FIXME better way to translate setup_packet into two u32 fields? */
3499        setup = (struct usb_ctrlrequest *) urb->setup_packet;
3500        field = 0;
3501        field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
3502        if (start_cycle == 0)
3503                field |= 0x1;
3504
3505        /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
3506        if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) {
3507                if (urb->transfer_buffer_length > 0) {
3508                        if (setup->bRequestType & USB_DIR_IN)
3509                                field |= TRB_TX_TYPE(TRB_DATA_IN);
3510                        else
3511                                field |= TRB_TX_TYPE(TRB_DATA_OUT);
3512                }
3513        }
3514
3515        queue_trb(xhci, ep_ring, true,
3516                  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
3517                  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
3518                  TRB_LEN(8) | TRB_INTR_TARGET(0),
3519                  /* Immediate data in pointer */
3520                  field);
3521
3522        /* If there's data, queue data TRBs */
3523        /* Only set interrupt on short packet for IN endpoints */
3524        if (usb_urb_dir_in(urb))
3525                field = TRB_ISP | TRB_TYPE(TRB_DATA);
3526        else
3527                field = TRB_TYPE(TRB_DATA);
3528
3529        remainder = xhci_td_remainder(xhci, 0,
3530                                   urb->transfer_buffer_length,
3531                                   urb->transfer_buffer_length,
3532                                   urb, 1);
3533
3534        length_field = TRB_LEN(urb->transfer_buffer_length) |
3535                TRB_TD_SIZE(remainder) |
3536                TRB_INTR_TARGET(0);
3537
3538        if (urb->transfer_buffer_length > 0) {
3539                if (setup->bRequestType & USB_DIR_IN)
3540                        field |= TRB_DIR_IN;
3541                queue_trb(xhci, ep_ring, true,
3542                                lower_32_bits(urb->transfer_dma),
3543                                upper_32_bits(urb->transfer_dma),
3544                                length_field,
3545                                field | ep_ring->cycle_state);
3546        }
3547
3548        /* Save the DMA address of the last TRB in the TD */
3549        td->last_trb = ep_ring->enqueue;
3550
3551        /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
3552        /* If the device sent data, the status stage is an OUT transfer */
3553        if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
3554                field = 0;
3555        else
3556                field = TRB_DIR_IN;
3557        queue_trb(xhci, ep_ring, false,
3558                        0,
3559                        0,
3560                        TRB_INTR_TARGET(0),
3561                        /* Event on completion */
3562                        field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
3563
3564        giveback_first_trb(xhci, slot_id, ep_index, 0,
3565                        start_cycle, start_trb);
3566        return 0;
3567}
3568
3569/*
3570 * The transfer burst count field of the isochronous TRB defines the number of
3571 * bursts that are required to move all packets in this TD.  Only SuperSpeed
3572 * devices can burst up to bMaxBurst number of packets per service interval.
3573 * This field is zero based, meaning a value of zero in the field means one
3574 * burst.  Basically, for everything but SuperSpeed devices, this field will be
3575 * zero.  Only xHCI 1.0 host controllers support this field.
3576 */
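/*
 * Illustrative example: with bMaxBurst = 3 (4 packets per burst) and
 * total_packet_count = 10, DIV_ROUND_UP(10, 4) - 1 = 2, i.e. three bursts
 * are needed and the zero-based field is written as 2.
 */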
3577static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
3578                struct urb *urb, unsigned int total_packet_count)
3579{
3580        unsigned int max_burst;
3581
3582        if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER)
3583                return 0;
3584
3585        max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3586        return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
3587}
3588
3589/*
3590 * Returns the number of packets in the last "burst" of packets.  This field is
3591 * valid for all speeds of devices.  USB 2.0 devices can only do one "burst", so
3592 * the last burst packet count is equal to the total number of packets in the
3593 * TD.  SuperSpeed endpoints can have up to 3 bursts.  All but the last burst
3594 * must contain (bMaxBurst + 1) number of packets, but the last burst can
3595 * contain 1 to (bMaxBurst + 1) packets.
3596 */
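/*
 * Illustrative example: with bMaxBurst = 3 and total_packet_count = 10,
 * residue = 10 % 4 = 2, so the last burst holds two packets and the
 * zero-based value returned is 1.
 */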
3597static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
3598                struct urb *urb, unsigned int total_packet_count)
3599{
3600        unsigned int max_burst;
3601        unsigned int residue;
3602
3603        if (xhci->hci_version < 0x100)
3604                return 0;
3605
3606        if (urb->dev->speed >= USB_SPEED_SUPER) {
3607                /* bMaxBurst is zero based: 0 means 1 packet per burst */
3608                max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3609                residue = total_packet_count % (max_burst + 1);
3610                /* If residue is zero, the last burst contains (max_burst + 1)
3611                 * number of packets, but the TLBPC field is zero-based.
3612                 */
3613                if (residue == 0)
3614                        return max_burst;
3615                return residue - 1;
3616        }
3617        if (total_packet_count == 0)
3618                return 0;
3619        return total_packet_count - 1;
3620}
3621
3622/*
3623 * Calculates the Frame ID field of the isochronous TRB, which identifies
3624 * the target frame that the interval associated with this Isochronous
3625 * Transfer Descriptor will start on. Refer to 4.11.2.5 in the xHCI 1.1 spec.
3626 *
3627 * Returns actual frame id on success, negative value on error.
3628 */
3629static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
3630                struct urb *urb, int index)
3631{
3632        int start_frame, ist, ret = 0;
3633        int start_frame_id, end_frame_id, current_frame_id;
3634
3635        if (urb->dev->speed == USB_SPEED_LOW ||
3636                        urb->dev->speed == USB_SPEED_FULL)
3637                start_frame = urb->start_frame + index * urb->interval;
3638        else
3639                start_frame = (urb->start_frame + index * urb->interval) >> 3;
3640
3641        /* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
3642         *
3643         * If bit [3] of IST is cleared to '0', software can add a TRB no
3644         * later than IST[2:0] Microframes before that TRB is scheduled to
3645         * be executed.
3646         * If bit [3] of IST is set to '1', software can add a TRB no later
3647         * than IST[2:0] Frames before that TRB is scheduled to be executed.
3648         */
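        /*
         * e.g. (illustrative): an IST field of 0b1010 has bit [3] set and
         * IST[2:0] = 2, so the computed threshold is 2 << 3 = 16 microframes
         * (2 frames).
         */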
3649        ist = HCS_IST(xhci->hcs_params2) & 0x7;
3650        if (HCS_IST(xhci->hcs_params2) & (1 << 3))
3651                ist <<= 3;
3652
3653        /* Software shall not schedule an Isoch TD with a Frame ID value that
3654         * is less than the Start Frame ID or greater than the End Frame ID,
3655         * where:
3656         *
3657         * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
3658         * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
3659         *
3660         * Both the End Frame ID and Start Frame ID values are calculated
3661         * in microframes. When software determines the valid Frame ID value;
3662         * The End Frame ID value should be rounded down to the nearest Frame
3663         * boundary, and the Start Frame ID value should be rounded up to the
3664         * nearest Frame boundary.
3665         */
3666        current_frame_id = readl(&xhci->run_regs->microframe_index);
3667        start_frame_id = roundup(current_frame_id + ist + 1, 8);
3668        end_frame_id = rounddown(current_frame_id + 895 * 8, 8);
3669
3670        start_frame &= 0x7ff;
3671        start_frame_id = (start_frame_id >> 3) & 0x7ff;
3672        end_frame_id = (end_frame_id >> 3) & 0x7ff;
3673
3674        xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
3675                 __func__, index, readl(&xhci->run_regs->microframe_index),
3676                 start_frame_id, end_frame_id, start_frame);
3677
3678        if (start_frame_id < end_frame_id) {
3679                if (start_frame > end_frame_id ||
3680                                start_frame < start_frame_id)
3681                        ret = -EINVAL;
3682        } else if (start_frame_id > end_frame_id) {
3683                if ((start_frame > end_frame_id &&
3684                                start_frame < start_frame_id))
3685                        ret = -EINVAL;
3686        } else {
3687                        ret = -EINVAL;
3688        }
3689
3690        if (index == 0) {
3691                if (ret == -EINVAL || start_frame == start_frame_id) {
3692                        start_frame = start_frame_id + 1;
3693                        if (urb->dev->speed == USB_SPEED_LOW ||
3694                                        urb->dev->speed == USB_SPEED_FULL)
3695                                urb->start_frame = start_frame;
3696                        else
3697                                urb->start_frame = start_frame << 3;
3698                        ret = 0;
3699                }
3700        }
3701
3702        if (ret) {
3703                xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
3704                                start_frame, current_frame_id, index,
3705                                start_frame_id, end_frame_id);
3706                xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
3707                return ret;
3708        }
3709
3710        return start_frame;
3711}
3712
3713/* This is for isoc transfer */
3714static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3715                struct urb *urb, int slot_id, unsigned int ep_index)
3716{
3717        struct xhci_ring *ep_ring;
3718        struct urb_priv *urb_priv;
3719        struct xhci_td *td;
3720        int num_tds, trbs_per_td;
3721        struct xhci_generic_trb *start_trb;
3722        bool first_trb;
3723        int start_cycle;
3724        u32 field, length_field;
3725        int running_total, trb_buff_len, td_len, td_remain_len, ret;
3726        u64 start_addr, addr;
3727        int i, j;
3728        bool more_trbs_coming;
3729        struct xhci_virt_ep *xep;
3730        int frame_id;
3731
3732        xep = &xhci->devs[slot_id]->eps[ep_index];
3733        ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
3734
3735        num_tds = urb->number_of_packets;
3736        if (num_tds < 1) {
3737                xhci_dbg(xhci, "Isoc URB with zero packets?\n");
3738                return -EINVAL;
3739        }
3740        start_addr = (u64) urb->transfer_dma;
3741        start_trb = &ep_ring->enqueue->generic;
3742        start_cycle = ep_ring->cycle_state;
3743
3744        urb_priv = urb->hcpriv;
3745        /* Queue the TRBs for each TD, even if they are zero-length */
3746        for (i = 0; i < num_tds; i++) {
3747                unsigned int total_pkt_count, max_pkt;
3748                unsigned int burst_count, last_burst_pkt_count;
3749                u32 sia_frame_id;
3750
3751                first_trb = true;
3752                running_total = 0;
3753                addr = start_addr + urb->iso_frame_desc[i].offset;
3754                td_len = urb->iso_frame_desc[i].length;
3755                td_remain_len = td_len;
3756                max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
3757                total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
3758
3759                /* A zero-length transfer still involves at least one packet. */
3760                if (total_pkt_count == 0)
3761                        total_pkt_count++;
3762                burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
3763                last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
3764                                                        urb, total_pkt_count);
3765
3766                trbs_per_td = count_isoc_trbs_needed(urb, i);
3767
3768                ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
3769                                urb->stream_id, trbs_per_td, urb, i, mem_flags);
3770                if (ret < 0) {
3771                        if (i == 0)
3772                                return ret;
3773                        goto cleanup;
3774                }
3775                td = urb_priv->td[i];
3776
3777                /* Use SIA by default; overwrite it if a valid frame ID is found */
3778                sia_frame_id = TRB_SIA;
3779                if (!(urb->transfer_flags & URB_ISO_ASAP) &&
3780                    HCC_CFC(xhci->hcc_params)) {
3781                        frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
3782                        if (frame_id >= 0)
3783                                sia_frame_id = TRB_FRAME_ID(frame_id);
3784                }
3785                /*
3786                 * Set isoc-specific data for the first TRB in a TD.
3787                 * Prevent HW from getting the TRBs by keeping the cycle state
3788                 * inverted in the first TD's isoc TRB.
3789                 */
3790                field = TRB_TYPE(TRB_ISOC) |
3791                        TRB_TLBPC(last_burst_pkt_count) |
3792                        sia_frame_id |
3793                        (i ? ep_ring->cycle_state : !start_cycle);
3794
3795                /* xHCI 1.1 with ETE uses TD_Size field for TBC; RsvdZ on older HCs */
3796                if (!xep->use_extended_tbc)
3797                        field |= TRB_TBC(burst_count);
3798
3799                /* fill the rest of the TRB fields, and remaining normal TRBs */
3800                for (j = 0; j < trbs_per_td; j++) {
3801                        u32 remainder = 0;
3802
3803                        /* Only the first TRB is isoc; overwrite the type otherwise */
3804                        if (!first_trb)
3805                                field = TRB_TYPE(TRB_NORMAL) |
3806                                        ep_ring->cycle_state;
3807
3808                        /* Only set interrupt on short packet for IN EPs */
3809                        if (usb_urb_dir_in(urb))
3810                                field |= TRB_ISP;
3811
3812                        /* Set the chain bit for all except the last TRB  */
3813                        if (j < trbs_per_td - 1) {
3814                                more_trbs_coming = true;
3815                                field |= TRB_CHAIN;
3816                        } else {
3817                                more_trbs_coming = false;
3818                                td->last_trb = ep_ring->enqueue;
3819                                field |= TRB_IOC;
3820                                /* set BEI, except for the last TD */
3821                                if (xhci->hci_version >= 0x100 &&
3822                                    !(xhci->quirks & XHCI_AVOID_BEI) &&
3823                                    i < num_tds - 1)
3824                                        field |= TRB_BEI;
3825                        }
3826                        /* Calculate TRB length */
3827                        trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
3828                        if (trb_buff_len > td_remain_len)
3829                                trb_buff_len = td_remain_len;
3830
3831                        /* Set the TRB length, TD size, & interrupter fields. */
3832                        remainder = xhci_td_remainder(xhci, running_total,
3833                                                   trb_buff_len, td_len,
3834                                                   urb, more_trbs_coming);
3835
3836                        length_field = TRB_LEN(trb_buff_len) |
3837                                TRB_INTR_TARGET(0);
3838
3839                        /* xhci 1.1 with ETE uses TD Size field for TBC */
3840                        if (first_trb && xep->use_extended_tbc)
3841                                length_field |= TRB_TD_SIZE_TBC(burst_count);
3842                        else
3843                                length_field |= TRB_TD_SIZE(remainder);
3844                        first_trb = false;
3845
3846                        queue_trb(xhci, ep_ring, more_trbs_coming,
3847                                lower_32_bits(addr),
3848                                upper_32_bits(addr),
3849                                length_field,
3850                                field);
3851                        running_total += trb_buff_len;
3852
3853                        addr += trb_buff_len;
3854                        td_remain_len -= trb_buff_len;
3855                }
3856
3857                /* Check TD length */
3858                if (running_total != td_len) {
3859                        xhci_err(xhci, "ISOC TD length mismatch\n");
3860                        ret = -EINVAL;
3861                        goto cleanup;
3862                }
3863        }
3864
3865        /* store the next frame id */
3866        if (HCC_CFC(xhci->hcc_params))
3867                xep->next_frame_id = urb->start_frame + num_tds * urb->interval;
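        /*
         * For example (illustrative values only): with urb->start_frame = 168
         * and 8 TDs at an interval of 8 (frames, for a full-speed device), the
         * next isoc URB on this endpoint would be expected to start at frame
         * 168 + 8 * 8 = 232.
         */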
3868
3869        if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
3870                if (xhci->quirks & XHCI_AMD_PLL_FIX)
3871                        usb_amd_quirk_pll_disable();
3872        }
3873        xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
3874
3875        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3876                        start_cycle, start_trb);
3877        return 0;
3878cleanup:
3879        /* Clean up a partially enqueued isoc transfer. */
3880
3881        for (i--; i >= 0; i--)
3882                list_del_init(&urb_priv->td[i]->td_list);
3883
3884        /* Use the first TD as a temporary variable to turn the TDs we've queued
3885         * into No-ops with a software-owned cycle bit. That way the hardware
3886         * won't accidentally start executing bogus TDs when we partially
3887         * overwrite them.  td->first_trb and td->start_seg are already set.
3888         */
3889        urb_priv->td[0]->last_trb = ep_ring->enqueue;
3890        /* Every TRB except the first & last will have its cycle bit flipped. */
3891        td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
3892
3893        /* Reset the ring enqueue back to the first TRB and its cycle bit. */
3894        ep_ring->enqueue = urb_priv->td[0]->first_trb;
3895        ep_ring->enq_seg = urb_priv->td[0]->start_seg;
3896        ep_ring->cycle_state = start_cycle;
3897        ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
3898        usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
3899        return ret;
3900}
3901
3902/*
3903 * Check the transfer ring to guarantee there is enough room for the URB.
3904 * Update the ISO URB's start_frame and interval.
3905 * Update the interval as xhci_queue_intr_tx does. Use the xHCI frame index to
3906 * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or if
3907 * Contiguous Frame ID is not supported by the HC.
3908 */
3909int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
3910                struct urb *urb, int slot_id, unsigned int ep_index)
3911{
3912        struct xhci_virt_device *xdev;
3913        struct xhci_ring *ep_ring;
3914        struct xhci_ep_ctx *ep_ctx;
3915        int start_frame;
3916        int num_tds, num_trbs, i;
3917        int ret;
3918        struct xhci_virt_ep *xep;
3919        int ist;
3920
3921        xdev = xhci->devs[slot_id];
3922        xep = &xhci->devs[slot_id]->eps[ep_index];
3923        ep_ring = xdev->eps[ep_index].ring;
3924        ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3925
3926        num_trbs = 0;
3927        num_tds = urb->number_of_packets;
3928        for (i = 0; i < num_tds; i++)
3929                num_trbs += count_isoc_trbs_needed(urb, i);
3930
3931        /* Check the ring to guarantee there is enough room for the whole URB.
3932         * Do not insert any TD of the URB into the ring if the check fails.
3933         */
3934        ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
3935                           num_trbs, mem_flags);
3936        if (ret)
3937                return ret;
3938
3939        /*
3940         * Check interval value. This should be done before we start to
3941         * calculate the start frame value.
3942         */
3943        check_interval(xhci, urb, ep_ctx);
3944
3945        /* Calculate the start frame and put it in urb->start_frame. */
3946        if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
3947                if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
3948                                EP_STATE_RUNNING) {
3949                        urb->start_frame = xep->next_frame_id;
3950                        goto skip_start_over;
3951                }
3952        }
3953
3954        start_frame = readl(&xhci->run_regs->microframe_index);
3955        start_frame &= 0x3fff;
3956        /*
3957         * Round up to the next frame and account for the time before the TRB
3958         * actually gets scheduled by hardware.
3959         */
3960        ist = HCS_IST(xhci->hcs_params2) & 0x7;
3961        if (HCS_IST(xhci->hcs_params2) & (1 << 3))
3962                ist <<= 3;
3963        start_frame += ist + XHCI_CFC_DELAY;
3964        start_frame = roundup(start_frame, 8);
3965
3966        /*
3967         * Round up to the next ESIT (Endpoint Service Interval Time) if the
3968         * ESIT is greater than 8 microframes.
3969         */
3970        if (urb->dev->speed == USB_SPEED_LOW ||
3971                        urb->dev->speed == USB_SPEED_FULL) {
3972                start_frame = roundup(start_frame, urb->interval << 3);
3973                urb->start_frame = start_frame >> 3;
3974        } else {
3975                start_frame = roundup(start_frame, urb->interval);
3976                urb->start_frame = start_frame;
3977        }
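        /*
         * For example (illustrative values only): for a full-speed device with
         * urb->interval = 8 frames, an initial start_frame of 1296 microframes
         * is rounded up to a multiple of 8 << 3 = 64 microframes, giving 1344,
         * and urb->start_frame = 1344 >> 3 = 168 frames.
         */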
3978
3979skip_start_over:
3980        ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
3981
3982        return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
3983}
3984
3985/****           Command Ring Operations         ****/
3986
3987/* Generic function for queueing a command TRB on the command ring.
3988 * Check to make sure there's room on the command ring for one command TRB.
3989 * Also check that there's room reserved for commands that must not fail.
3990 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
3991 * then only check for the number of reserved spots.
3992 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
3993 * because the command event handler may want to resubmit a failed command.
3994 */
3995static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
3996                         u32 field1, u32 field2,
3997                         u32 field3, u32 field4, bool command_must_succeed)
3998{
3999        int reserved_trbs = xhci->cmd_ring_reserved_trbs;
4000        int ret;
4001
4002        if ((xhci->xhc_state & XHCI_STATE_DYING) ||
4003                (xhci->xhc_state & XHCI_STATE_HALTED)) {
4004                xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
4005                return -ESHUTDOWN;
4006        }
4007
4008        if (!command_must_succeed)
4009                reserved_trbs++;
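        /*
         * For example (illustrative value only): with cmd_ring_reserved_trbs = 2,
         * prepare_ring() below must find room for 3 TRBs for a normal command,
         * but only for the 2 reserved slots when command_must_succeed is set.
         */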
4010
4011        ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
4012                        reserved_trbs, GFP_ATOMIC);
4013        if (ret < 0) {
4014                xhci_err(xhci, "ERR: No room for command on command ring\n");
4015                if (command_must_succeed)
4016                        xhci_err(xhci, "ERR: Reserved TRB counting for "
4017                                        "unfailable commands failed.\n");
4018                return ret;
4019        }
4020
4021        cmd->command_trb = xhci->cmd_ring->enqueue;
4022        list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
4023
4024        /* If there are no other commands queued, start the timeout timer */
4025        if (xhci->cmd_list.next == &cmd->cmd_list &&
4026            !timer_pending(&xhci->cmd_timer)) {
4027                xhci->current_cmd = cmd;
4028                mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
4029        }
4030
4031        queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
4032                        field4 | xhci->cmd_ring->cycle_state);
4033        return 0;
4034}
4035
4036/* Queue a slot enable or disable request on the command ring */
4037int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
4038                u32 trb_type, u32 slot_id)
4039{
4040        return queue_command(xhci, cmd, 0, 0, 0,
4041                        TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
4042}
4043
4044/* Queue an address device command TRB */
4045int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
4046                dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
4047{
4048        return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4049                        upper_32_bits(in_ctx_ptr), 0,
4050                        TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
4051                        | (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
4052}
4053
4054int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
4055                u32 field1, u32 field2, u32 field3, u32 field4)
4056{
4057        return queue_command(xhci, cmd, field1, field2, field3, field4, false);
4058}
4059
4060/* Queue a reset device command TRB */
4061int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
4062                u32 slot_id)
4063{
4064        return queue_command(xhci, cmd, 0, 0, 0,
4065                        TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
4066                        false);
4067}
4068
4069/* Queue a configure endpoint command TRB */
4070int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
4071                struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
4072                u32 slot_id, bool command_must_succeed)
4073{
4074        return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4075                        upper_32_bits(in_ctx_ptr), 0,
4076                        TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
4077                        command_must_succeed);
4078}
4079
4080/* Queue an evaluate context command TRB */
4081int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
4082                dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
4083{
4084        return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4085                        upper_32_bits(in_ctx_ptr), 0,
4086                        TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
4087                        command_must_succeed);
4088}
4089
4090/*
4091 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
4092 * activity on an endpoint that is about to be suspended.
4093 */
4094int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
4095                             int slot_id, unsigned int ep_index, int suspend)
4096{
4097        u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4098        u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4099        u32 type = TRB_TYPE(TRB_STOP_RING);
4100        u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
4101
4102        return queue_command(xhci, cmd, 0, 0, 0,
4103                        trb_slot_id | trb_ep_index | type | trb_suspend, false);
4104}
4105
4106/* Set Transfer Ring Dequeue Pointer command */
4107void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
4108                unsigned int slot_id, unsigned int ep_index,
4109                unsigned int stream_id,
4110                struct xhci_dequeue_state *deq_state)
4111{
4112        dma_addr_t addr;
4113        u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4114        u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4115        u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
4116        u32 trb_sct = 0;
4117        u32 type = TRB_TYPE(TRB_SET_DEQ);
4118        struct xhci_virt_ep *ep;
4119        struct xhci_command *cmd;
4120        int ret;
4121
4122        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
4123                "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
4124                deq_state->new_deq_seg,
4125                (unsigned long long)deq_state->new_deq_seg->dma,
4126                deq_state->new_deq_ptr,
4127                (unsigned long long)xhci_trb_virt_to_dma(
4128                        deq_state->new_deq_seg, deq_state->new_deq_ptr),
4129                deq_state->new_cycle_state);
4130
4131        addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
4132                                    deq_state->new_deq_ptr);
4133        if (addr == 0) {
4134                xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
4135                xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
4136                          deq_state->new_deq_seg, deq_state->new_deq_ptr);
4137                return;
4138        }
4139        ep = &xhci->devs[slot_id]->eps[ep_index];
4140        if ((ep->ep_state & SET_DEQ_PENDING)) {
4141                xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
4142                xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
4143                return;
4144        }
4145
4146        /* This function gets called from contexts where it cannot sleep */
4147        cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
4148        if (!cmd) {
4149                xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr: ENOMEM\n");
4150                return;
4151        }
4152
4153        ep->queued_deq_seg = deq_state->new_deq_seg;
4154        ep->queued_deq_ptr = deq_state->new_deq_ptr;
4155        if (stream_id)
4156                trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
4157        ret = queue_command(xhci, cmd,
4158                lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
4159                upper_32_bits(addr), trb_stream_id,
4160                trb_slot_id | trb_ep_index | type, false);
4161        if (ret < 0) {
4162                xhci_free_command(xhci, cmd);
4163                return;
4164        }
4165
4166        /* Stop the TD queueing code from ringing the doorbell until
4167         * this command completes.  The HC won't set the dequeue pointer
4168         * if the ring is running, and ringing the doorbell starts the
4169         * ring running.
4170         */
4171        ep->ep_state |= SET_DEQ_PENDING;
4172}
4173
4174int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
4175                        int slot_id, unsigned int ep_index)
4176{
4177        u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4178        u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4179        u32 type = TRB_TYPE(TRB_RESET_EP);
4180
4181        return queue_command(xhci, cmd, 0, 0, 0,
4182                        trb_slot_id | trb_ep_index | type, false);
4183}
4184