linux/drivers/usb/host/xhci-ring.c
<<
>>
Prefs
   1/*
   2 * xHCI host controller driver
   3 *
   4 * Copyright (C) 2008 Intel Corp.
   5 *
   6 * Author: Sarah Sharp
   7 * Some code borrowed from the Linux EHCI driver.
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License version 2 as
  11 * published by the Free Software Foundation.
  12 *
  13 * This program is distributed in the hope that it will be useful, but
  14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  15 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  16 * for more details.
  17 *
  18 * You should have received a copy of the GNU General Public License
  19 * along with this program; if not, write to the Free Software Foundation,
  20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  21 */
  22
  23/*
  24 * Ring initialization rules:
  25 * 1. Each segment is initialized to zero, except for link TRBs.
  26 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
  27 *    Consumer Cycle State (CCS), depending on ring function.
  28 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
  29 *
  30 * Ring behavior rules:
  31 * 1. A ring is empty if enqueue == dequeue.  This means there will always be at
  32 *    least one free TRB in the ring.  This is useful if you want to turn that
  33 *    into a link TRB and expand the ring.
  34 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
  35 *    link TRB, then load the pointer with the address in the link TRB.  If the
  36 *    link TRB had its toggle bit set, you may need to update the ring cycle
  37 *    state (see cycle bit rules).  You may have to do this multiple times
  38 *    until you reach a non-link TRB.
  39 * 3. A ring is full if enqueue++ (for the definition of increment above)
  40 *    equals the dequeue pointer.
  41 *
  42 * Cycle bit rules:
  43 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
  44 *    in a link TRB, it must toggle the ring cycle state.
  45 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
  46 *    in a link TRB, it must toggle the ring cycle state.
  47 *
  48 * Producer rules:
  49 * 1. Check if ring is full before you enqueue.
  50 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
  51 *    Update enqueue pointer between each write (which may update the ring
  52 *    cycle state).
  53 * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
  54 *    and endpoint rings.  If HC is the producer for the event ring,
  55 *    and it generates an interrupt according to interrupt modulation rules.
  56 *
  57 * Consumer rules:
  58 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
  59 *    the TRB is owned by the consumer.
  60 * 2. Update dequeue pointer (which may update the ring cycle state) and
  61 *    continue processing TRBs until you reach a TRB which is not owned by you.
  62 * 3. Notify the producer.  SW is the consumer for the event ring, and it
  63 *   updates event ring dequeue pointer.  HC is the consumer for the command and
  64 *   endpoint rings; it generates events on the event ring for these.
  65 */
  66
  67#include <linux/scatterlist.h>
  68#include <linux/slab.h>
  69#include "xhci.h"
  70#include "xhci-trace.h"
  71
  72/*
  73 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
  74 * address of the TRB.
  75 */
  76dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
  77                union xhci_trb *trb)
  78{
  79        unsigned long segment_offset;
  80
  81        if (!seg || !trb || trb < seg->trbs)
  82                return 0;
  83        /* offset in TRBs */
  84        segment_offset = trb - seg->trbs;
  85        if (segment_offset > TRBS_PER_SEGMENT)
  86                return 0;
  87        return seg->dma + (segment_offset * sizeof(*trb));
  88}
  89
  90/* Does this link TRB point to the first segment in a ring,
  91 * or was the previous TRB the last TRB on the last segment in the ERST?
  92 */
  93static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
  94                struct xhci_segment *seg, union xhci_trb *trb)
  95{
  96        if (ring == xhci->event_ring)
  97                return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
  98                        (seg->next == xhci->event_ring->first_seg);
  99        else
 100                return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
 101}
 102
 103/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 104 * segment?  I.e. would the updated event TRB pointer step off the end of the
 105 * event seg?
 106 */
 107static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 108                struct xhci_segment *seg, union xhci_trb *trb)
 109{
 110        if (ring == xhci->event_ring)
 111                return trb == &seg->trbs[TRBS_PER_SEGMENT];
 112        else
 113                return TRB_TYPE_LINK_LE32(trb->link.control);
 114}
 115
 116static int enqueue_is_link_trb(struct xhci_ring *ring)
 117{
 118        struct xhci_link_trb *link = &ring->enqueue->link;
 119        return TRB_TYPE_LINK_LE32(link->control);
 120}
 121
 122/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 123 * TRB is in a new segment.  This does not skip over link TRBs, and it does not
 124 * effect the ring dequeue or enqueue pointers.
 125 */
 126static void next_trb(struct xhci_hcd *xhci,
 127                struct xhci_ring *ring,
 128                struct xhci_segment **seg,
 129                union xhci_trb **trb)
 130{
 131        if (last_trb(xhci, ring, *seg, *trb)) {
 132                *seg = (*seg)->next;
 133                *trb = ((*seg)->trbs);
 134        } else {
 135                (*trb)++;
 136        }
 137}
 138
 139/*
 140 * See Cycle bit rules. SW is the consumer for the event ring only.
 141 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 142 */
 143static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
 144{
 145        ring->deq_updates++;
 146
 147        /*
 148         * If this is not event ring, and the dequeue pointer
 149         * is not on a link TRB, there is one more usable TRB
 150         */
 151        if (ring->type != TYPE_EVENT &&
 152                        !last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
 153                ring->num_trbs_free++;
 154
 155        do {
 156                /*
 157                 * Update the dequeue pointer further if that was a link TRB or
 158                 * we're at the end of an event ring segment (which doesn't have
 159                 * link TRBS)
 160                 */
 161                if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
 162                        if (ring->type == TYPE_EVENT &&
 163                                        last_trb_on_last_seg(xhci, ring,
 164                                                ring->deq_seg, ring->dequeue)) {
 165                                ring->cycle_state ^= 1;
 166                        }
 167                        ring->deq_seg = ring->deq_seg->next;
 168                        ring->dequeue = ring->deq_seg->trbs;
 169                } else {
 170                        ring->dequeue++;
 171                }
 172        } while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
 173}
 174
 175/*
 176 * See Cycle bit rules. SW is the consumer for the event ring only.
 177 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 178 *
 179 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 180 * chain bit is set), then set the chain bit in all the following link TRBs.
 181 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 182 * have their chain bit cleared (so that each Link TRB is a separate TD).
 183 *
 184 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 185 * set, but other sections talk about dealing with the chain bit set.  This was
 186 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 187 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 188 *
 189 * @more_trbs_coming:   Will you enqueue more TRBs before calling
 190 *                      prepare_transfer()?
 191 */
 192static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 193                        bool more_trbs_coming)
 194{
 195        u32 chain;
 196        union xhci_trb *next;
 197
 198        chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
 199        /* If this is not event ring, there is one less usable TRB */
 200        if (ring->type != TYPE_EVENT &&
 201                        !last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
 202                ring->num_trbs_free--;
 203        next = ++(ring->enqueue);
 204
 205        ring->enq_updates++;
 206        /* Update the dequeue pointer further if that was a link TRB or we're at
 207         * the end of an event ring segment (which doesn't have link TRBS)
 208         */
 209        while (last_trb(xhci, ring, ring->enq_seg, next)) {
 210                if (ring->type != TYPE_EVENT) {
 211                        /*
 212                         * If the caller doesn't plan on enqueueing more
 213                         * TDs before ringing the doorbell, then we
 214                         * don't want to give the link TRB to the
 215                         * hardware just yet.  We'll give the link TRB
 216                         * back in prepare_ring() just before we enqueue
 217                         * the TD at the top of the ring.
 218                         */
 219                        if (!chain && !more_trbs_coming)
 220                                break;
 221
 222                        /* If we're not dealing with 0.95 hardware or
 223                         * isoc rings on AMD 0.96 host,
 224                         * carry over the chain bit of the previous TRB
 225                         * (which may mean the chain bit is cleared).
 226                         */
 227                        if (!(ring->type == TYPE_ISOC &&
 228                                        (xhci->quirks & XHCI_AMD_0x96_HOST))
 229                                                && !xhci_link_trb_quirk(xhci)) {
 230                                next->link.control &=
 231                                        cpu_to_le32(~TRB_CHAIN);
 232                                next->link.control |=
 233                                        cpu_to_le32(chain);
 234                        }
 235                        /* Give this link TRB to the hardware */
 236                        wmb();
 237                        next->link.control ^= cpu_to_le32(TRB_CYCLE);
 238
 239                        /* Toggle the cycle bit after the last ring segment. */
 240                        if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
 241                                ring->cycle_state ^= 1;
 242                        }
 243                }
 244                ring->enq_seg = ring->enq_seg->next;
 245                ring->enqueue = ring->enq_seg->trbs;
 246                next = ring->enqueue;
 247        }
 248}
 249
 250/*
 251 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 252 * enqueue pointer will not advance into dequeue segment. See rules above.
 253 */
 254static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
 255                unsigned int num_trbs)
 256{
 257        int num_trbs_in_deq_seg;
 258
 259        if (ring->num_trbs_free < num_trbs)
 260                return 0;
 261
 262        if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
 263                num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
 264                if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
 265                        return 0;
 266        }
 267
 268        return 1;
 269}
 270
 271/* Ring the host controller doorbell after placing a command on the ring */
 272void xhci_ring_cmd_db(struct xhci_hcd *xhci)
 273{
 274        if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
 275                return;
 276
 277        xhci_dbg(xhci, "// Ding dong!\n");
 278        writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
 279        /* Flush PCI posted writes */
 280        readl(&xhci->dba->doorbell[0]);
 281}
 282
 283static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
 284{
 285        u64 temp_64;
 286        int ret;
 287
 288        xhci_dbg(xhci, "Abort command ring\n");
 289
 290        temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
 291        xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
 292        xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
 293                        &xhci->op_regs->cmd_ring);
 294
 295        /* Section 4.6.1.2 of xHCI 1.0 spec says software should
 296         * time the completion od all xHCI commands, including
 297         * the Command Abort operation. If software doesn't see
 298         * CRR negated in a timely manner (e.g. longer than 5
 299         * seconds), then it should assume that the there are
 300         * larger problems with the xHC and assert HCRST.
 301         */
 302        ret = xhci_handshake(&xhci->op_regs->cmd_ring,
 303                        CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
 304        if (ret < 0) {
 305                xhci_err(xhci, "Stopped the command ring failed, "
 306                                "maybe the host is dead\n");
 307                xhci->xhc_state |= XHCI_STATE_DYING;
 308                xhci_quiesce(xhci);
 309                xhci_halt(xhci);
 310                return -ESHUTDOWN;
 311        }
 312
 313        return 0;
 314}
 315
 316void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
 317                unsigned int slot_id,
 318                unsigned int ep_index,
 319                unsigned int stream_id)
 320{
 321        __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
 322        struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
 323        unsigned int ep_state = ep->ep_state;
 324
 325        /* Don't ring the doorbell for this endpoint if there are pending
 326         * cancellations because we don't want to interrupt processing.
 327         * We don't want to restart any stream rings if there's a set dequeue
 328         * pointer command pending because the device can choose to start any
 329         * stream once the endpoint is on the HW schedule.
 330         */
 331        if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
 332            (ep_state & EP_HALTED))
 333                return;
 334        writel(DB_VALUE(ep_index, stream_id), db_addr);
 335        /* The CPU has better things to do at this point than wait for a
 336         * write-posting flush.  It'll get there soon enough.
 337         */
 338}
 339
 340/* Ring the doorbell for any rings with pending URBs */
 341static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
 342                unsigned int slot_id,
 343                unsigned int ep_index)
 344{
 345        unsigned int stream_id;
 346        struct xhci_virt_ep *ep;
 347
 348        ep = &xhci->devs[slot_id]->eps[ep_index];
 349
 350        /* A ring has pending URBs if its TD list is not empty */
 351        if (!(ep->ep_state & EP_HAS_STREAMS)) {
 352                if (ep->ring && !(list_empty(&ep->ring->td_list)))
 353                        xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
 354                return;
 355        }
 356
 357        for (stream_id = 1; stream_id < ep->stream_info->num_streams;
 358                        stream_id++) {
 359                struct xhci_stream_info *stream_info = ep->stream_info;
 360                if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
 361                        xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
 362                                                stream_id);
 363        }
 364}
 365
 366static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
 367                unsigned int slot_id, unsigned int ep_index,
 368                unsigned int stream_id)
 369{
 370        struct xhci_virt_ep *ep;
 371
 372        ep = &xhci->devs[slot_id]->eps[ep_index];
 373        /* Common case: no streams */
 374        if (!(ep->ep_state & EP_HAS_STREAMS))
 375                return ep->ring;
 376
 377        if (stream_id == 0) {
 378                xhci_warn(xhci,
 379                                "WARN: Slot ID %u, ep index %u has streams, "
 380                                "but URB has no stream ID.\n",
 381                                slot_id, ep_index);
 382                return NULL;
 383        }
 384
 385        if (stream_id < ep->stream_info->num_streams)
 386                return ep->stream_info->stream_rings[stream_id];
 387
 388        xhci_warn(xhci,
 389                        "WARN: Slot ID %u, ep index %u has "
 390                        "stream IDs 1 to %u allocated, "
 391                        "but stream ID %u is requested.\n",
 392                        slot_id, ep_index,
 393                        ep->stream_info->num_streams - 1,
 394                        stream_id);
 395        return NULL;
 396}
 397
 398/* Get the right ring for the given URB.
 399 * If the endpoint supports streams, boundary check the URB's stream ID.
 400 * If the endpoint doesn't support streams, return the singular endpoint ring.
 401 */
 402static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
 403                struct urb *urb)
 404{
 405        return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
 406                xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
 407}
 408
 409/*
 410 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 411 * Record the new state of the xHC's endpoint ring dequeue segment,
 412 * dequeue pointer, and new consumer cycle state in state.
 413 * Update our internal representation of the ring's dequeue pointer.
 414 *
 415 * We do this in three jumps:
 416 *  - First we update our new ring state to be the same as when the xHC stopped.
 417 *  - Then we traverse the ring to find the segment that contains
 418 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 419 *    any link TRBs with the toggle cycle bit set.
 420 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 421 *    if we've moved it past a link TRB with the toggle cycle bit set.
 422 *
 423 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 424 * with correct __le32 accesses they should work fine.  Only users of this are
 425 * in here.
 426 */
 427void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 428                unsigned int slot_id, unsigned int ep_index,
 429                unsigned int stream_id, struct xhci_td *cur_td,
 430                struct xhci_dequeue_state *state)
 431{
 432        struct xhci_virt_device *dev = xhci->devs[slot_id];
 433        struct xhci_virt_ep *ep = &dev->eps[ep_index];
 434        struct xhci_ring *ep_ring;
 435        struct xhci_segment *new_seg;
 436        union xhci_trb *new_deq;
 437        dma_addr_t addr;
 438        u64 hw_dequeue;
 439        bool cycle_found = false;
 440        bool td_last_trb_found = false;
 441
 442        ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
 443                        ep_index, stream_id);
 444        if (!ep_ring) {
 445                xhci_warn(xhci, "WARN can't find new dequeue state "
 446                                "for invalid stream ID %u.\n",
 447                                stream_id);
 448                return;
 449        }
 450
 451        /* Dig out the cycle state saved by the xHC during the stop ep cmd */
 452        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 453                        "Finding endpoint context");
 454        /* 4.6.9 the css flag is written to the stream context for streams */
 455        if (ep->ep_state & EP_HAS_STREAMS) {
 456                struct xhci_stream_ctx *ctx =
 457                        &ep->stream_info->stream_ctx_array[stream_id];
 458                hw_dequeue = le64_to_cpu(ctx->stream_ring);
 459        } else {
 460                struct xhci_ep_ctx *ep_ctx
 461                        = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
 462                hw_dequeue = le64_to_cpu(ep_ctx->deq);
 463        }
 464
 465        new_seg = ep_ring->deq_seg;
 466        new_deq = ep_ring->dequeue;
 467        state->new_cycle_state = hw_dequeue & 0x1;
 468
 469        /*
 470         * We want to find the pointer, segment and cycle state of the new trb
 471         * (the one after current TD's last_trb). We know the cycle state at
 472         * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
 473         * found.
 474         */
 475        do {
 476                if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
 477                    == (dma_addr_t)(hw_dequeue & ~0xf)) {
 478                        cycle_found = true;
 479                        if (td_last_trb_found)
 480                                break;
 481                }
 482                if (new_deq == cur_td->last_trb)
 483                        td_last_trb_found = true;
 484
 485                if (cycle_found &&
 486                    TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) &&
 487                    new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE))
 488                        state->new_cycle_state ^= 0x1;
 489
 490                next_trb(xhci, ep_ring, &new_seg, &new_deq);
 491
 492                /* Search wrapped around, bail out */
 493                if (new_deq == ep->ring->dequeue) {
 494                        xhci_err(xhci, "Error: Failed finding new dequeue state\n");
 495                        state->new_deq_seg = NULL;
 496                        state->new_deq_ptr = NULL;
 497                        return;
 498                }
 499
 500        } while (!cycle_found || !td_last_trb_found);
 501
 502        state->new_deq_seg = new_seg;
 503        state->new_deq_ptr = new_deq;
 504
 505        /* Don't update the ring cycle state for the producer (us). */
 506        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 507                        "Cycle state = 0x%x", state->new_cycle_state);
 508
 509        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 510                        "New dequeue segment = %p (virtual)",
 511                        state->new_deq_seg);
 512        addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
 513        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 514                        "New dequeue pointer = 0x%llx (DMA)",
 515                        (unsigned long long) addr);
 516}
 517
 518/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 519 * (The last TRB actually points to the ring enqueue pointer, which is not part
 520 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 521 */
 522static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 523                struct xhci_td *cur_td, bool flip_cycle)
 524{
 525        struct xhci_segment *cur_seg;
 526        union xhci_trb *cur_trb;
 527
 528        for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
 529                        true;
 530                        next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
 531                if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
 532                        /* Unchain any chained Link TRBs, but
 533                         * leave the pointers intact.
 534                         */
 535                        cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
 536                        /* Flip the cycle bit (link TRBs can't be the first
 537                         * or last TRB).
 538                         */
 539                        if (flip_cycle)
 540                                cur_trb->generic.field[3] ^=
 541                                        cpu_to_le32(TRB_CYCLE);
 542                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 543                                        "Cancel (unchain) link TRB");
 544                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 545                                        "Address = %p (0x%llx dma); "
 546                                        "in seg %p (0x%llx dma)",
 547                                        cur_trb,
 548                                        (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
 549                                        cur_seg,
 550                                        (unsigned long long)cur_seg->dma);
 551                } else {
 552                        cur_trb->generic.field[0] = 0;
 553                        cur_trb->generic.field[1] = 0;
 554                        cur_trb->generic.field[2] = 0;
 555                        /* Preserve only the cycle bit of this TRB */
 556                        cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
 557                        /* Flip the cycle bit except on the first or last TRB */
 558                        if (flip_cycle && cur_trb != cur_td->first_trb &&
 559                                        cur_trb != cur_td->last_trb)
 560                                cur_trb->generic.field[3] ^=
 561                                        cpu_to_le32(TRB_CYCLE);
 562                        cur_trb->generic.field[3] |= cpu_to_le32(
 563                                TRB_TYPE(TRB_TR_NOOP));
 564                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 565                                        "TRB to noop at offset 0x%llx",
 566                                        (unsigned long long)
 567                                        xhci_trb_virt_to_dma(cur_seg, cur_trb));
 568                }
 569                if (cur_trb == cur_td->last_trb)
 570                        break;
 571        }
 572}
 573
 574static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
 575                struct xhci_virt_ep *ep)
 576{
 577        ep->ep_state &= ~EP_HALT_PENDING;
 578        /* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
 579         * timer is running on another CPU, we don't decrement stop_cmds_pending
 580         * (since we didn't successfully stop the watchdog timer).
 581         */
 582        if (del_timer(&ep->stop_cmd_timer))
 583                ep->stop_cmds_pending--;
 584}
 585
 586/* Must be called with xhci->lock held in interrupt context */
 587static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
 588                struct xhci_td *cur_td, int status)
 589{
 590        struct usb_hcd *hcd;
 591        struct urb      *urb;
 592        struct urb_priv *urb_priv;
 593
 594        urb = cur_td->urb;
 595        urb_priv = urb->hcpriv;
 596        urb_priv->td_cnt++;
 597        hcd = bus_to_hcd(urb->dev->bus);
 598
 599        /* Only giveback urb when this is the last td in urb */
 600        if (urb_priv->td_cnt == urb_priv->length) {
 601                if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
 602                        xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
 603                        if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
 604                                if (xhci->quirks & XHCI_AMD_PLL_FIX)
 605                                        usb_amd_quirk_pll_enable();
 606                        }
 607                }
 608                usb_hcd_unlink_urb_from_ep(hcd, urb);
 609
 610                spin_unlock(&xhci->lock);
 611                usb_hcd_giveback_urb(hcd, urb, status);
 612                xhci_urb_free_priv(urb_priv);
 613                spin_lock(&xhci->lock);
 614        }
 615}
 616
 617/*
 618 * When we get a command completion for a Stop Endpoint Command, we need to
 619 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 620 *
 621 *  1. If the HW was in the middle of processing the TD that needs to be
 622 *     cancelled, then we must move the ring's dequeue pointer past the last TRB
 623 *     in the TD with a Set Dequeue Pointer Command.
 624 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 625 *     bit cleared) so that the HW will skip over them.
 626 */
 627static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
 628                union xhci_trb *trb, struct xhci_event_cmd *event)
 629{
 630        unsigned int ep_index;
 631        struct xhci_ring *ep_ring;
 632        struct xhci_virt_ep *ep;
 633        struct list_head *entry;
 634        struct xhci_td *cur_td = NULL;
 635        struct xhci_td *last_unlinked_td;
 636
 637        struct xhci_dequeue_state deq_state;
 638
 639        if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
 640                if (!xhci->devs[slot_id])
 641                        xhci_warn(xhci, "Stop endpoint command "
 642                                "completion for disabled slot %u\n",
 643                                slot_id);
 644                return;
 645        }
 646
 647        memset(&deq_state, 0, sizeof(deq_state));
 648        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
 649        ep = &xhci->devs[slot_id]->eps[ep_index];
 650
 651        if (list_empty(&ep->cancelled_td_list)) {
 652                xhci_stop_watchdog_timer_in_irq(xhci, ep);
 653                ep->stopped_td = NULL;
 654                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 655                return;
 656        }
 657
 658        /* Fix up the ep ring first, so HW stops executing cancelled TDs.
 659         * We have the xHCI lock, so nothing can modify this list until we drop
 660         * it.  We're also in the event handler, so we can't get re-interrupted
 661         * if another Stop Endpoint command completes
 662         */
 663        list_for_each(entry, &ep->cancelled_td_list) {
 664                cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
 665                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 666                                "Removing canceled TD starting at 0x%llx (dma).",
 667                                (unsigned long long)xhci_trb_virt_to_dma(
 668                                        cur_td->start_seg, cur_td->first_trb));
 669                ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
 670                if (!ep_ring) {
 671                        /* This shouldn't happen unless a driver is mucking
 672                         * with the stream ID after submission.  This will
 673                         * leave the TD on the hardware ring, and the hardware
 674                         * will try to execute it, and may access a buffer
 675                         * that has already been freed.  In the best case, the
 676                         * hardware will execute it, and the event handler will
 677                         * ignore the completion event for that TD, since it was
 678                         * removed from the td_list for that endpoint.  In
 679                         * short, don't muck with the stream ID after
 680                         * submission.
 681                         */
 682                        xhci_warn(xhci, "WARN Cancelled URB %p "
 683                                        "has invalid stream ID %u.\n",
 684                                        cur_td->urb,
 685                                        cur_td->urb->stream_id);
 686                        goto remove_finished_td;
 687                }
 688                /*
 689                 * If we stopped on the TD we need to cancel, then we have to
 690                 * move the xHC endpoint ring dequeue pointer past this TD.
 691                 */
 692                if (cur_td == ep->stopped_td)
 693                        xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
 694                                        cur_td->urb->stream_id,
 695                                        cur_td, &deq_state);
 696                else
 697                        td_to_noop(xhci, ep_ring, cur_td, false);
 698remove_finished_td:
 699                /*
 700                 * The event handler won't see a completion for this TD anymore,
 701                 * so remove it from the endpoint ring's TD list.  Keep it in
 702                 * the cancelled TD list for URB completion later.
 703                 */
 704                list_del_init(&cur_td->td_list);
 705        }
 706        last_unlinked_td = cur_td;
 707        xhci_stop_watchdog_timer_in_irq(xhci, ep);
 708
 709        /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
 710        if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
 711                xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
 712                                ep->stopped_td->urb->stream_id, &deq_state);
 713                xhci_ring_cmd_db(xhci);
 714        } else {
 715                /* Otherwise ring the doorbell(s) to restart queued transfers */
 716                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 717        }
 718
 719        ep->stopped_td = NULL;
 720
 721        /*
 722         * Drop the lock and complete the URBs in the cancelled TD list.
 723         * New TDs to be cancelled might be added to the end of the list before
 724         * we can complete all the URBs for the TDs we already unlinked.
 725         * So stop when we've completed the URB for the last TD we unlinked.
 726         */
 727        do {
 728                cur_td = list_entry(ep->cancelled_td_list.next,
 729                                struct xhci_td, cancelled_td_list);
 730                list_del_init(&cur_td->cancelled_td_list);
 731
 732                /* Clean up the cancelled URB */
 733                /* Doesn't matter what we pass for status, since the core will
 734                 * just overwrite it (because the URB has been unlinked).
 735                 */
 736                xhci_giveback_urb_in_irq(xhci, cur_td, 0);
 737
 738                /* Stop processing the cancelled list if the watchdog timer is
 739                 * running.
 740                 */
 741                if (xhci->xhc_state & XHCI_STATE_DYING)
 742                        return;
 743        } while (cur_td != last_unlinked_td);
 744
 745        /* Return to the event handler with xhci->lock re-acquired */
 746}
 747
 748static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
 749{
 750        struct xhci_td *cur_td;
 751
 752        while (!list_empty(&ring->td_list)) {
 753                cur_td = list_first_entry(&ring->td_list,
 754                                struct xhci_td, td_list);
 755                list_del_init(&cur_td->td_list);
 756                if (!list_empty(&cur_td->cancelled_td_list))
 757                        list_del_init(&cur_td->cancelled_td_list);
 758                xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
 759        }
 760}
 761
 762static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
 763                int slot_id, int ep_index)
 764{
 765        struct xhci_td *cur_td;
 766        struct xhci_virt_ep *ep;
 767        struct xhci_ring *ring;
 768
 769        ep = &xhci->devs[slot_id]->eps[ep_index];
 770        if ((ep->ep_state & EP_HAS_STREAMS) ||
 771                        (ep->ep_state & EP_GETTING_NO_STREAMS)) {
 772                int stream_id;
 773
 774                for (stream_id = 0; stream_id < ep->stream_info->num_streams;
 775                                stream_id++) {
 776                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 777                                        "Killing URBs for slot ID %u, ep index %u, stream %u",
 778                                        slot_id, ep_index, stream_id + 1);
 779                        xhci_kill_ring_urbs(xhci,
 780                                        ep->stream_info->stream_rings[stream_id]);
 781                }
 782        } else {
 783                ring = ep->ring;
 784                if (!ring)
 785                        return;
 786                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 787                                "Killing URBs for slot ID %u, ep index %u",
 788                                slot_id, ep_index);
 789                xhci_kill_ring_urbs(xhci, ring);
 790        }
 791        while (!list_empty(&ep->cancelled_td_list)) {
 792                cur_td = list_first_entry(&ep->cancelled_td_list,
 793                                struct xhci_td, cancelled_td_list);
 794                list_del_init(&cur_td->cancelled_td_list);
 795                xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
 796        }
 797}
 798
 799/* Watchdog timer function for when a stop endpoint command fails to complete.
 800 * In this case, we assume the host controller is broken or dying or dead.  The
 801 * host may still be completing some other events, so we have to be careful to
 802 * let the event ring handler and the URB dequeueing/enqueueing functions know
 803 * through xhci->state.
 804 *
 805 * The timer may also fire if the host takes a very long time to respond to the
 806 * command, and the stop endpoint command completion handler cannot delete the
 807 * timer before the timer function is called.  Another endpoint cancellation may
 808 * sneak in before the timer function can grab the lock, and that may queue
 809 * another stop endpoint command and add the timer back.  So we cannot use a
 810 * simple flag to say whether there is a pending stop endpoint command for a
 811 * particular endpoint.
 812 *
 813 * Instead we use a combination of that flag and a counter for the number of
 814 * pending stop endpoint commands.  If the timer is the tail end of the last
 815 * stop endpoint command, and the endpoint's command is still pending, we assume
 816 * the host is dying.
 817 */
 818void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 819{
 820        struct xhci_hcd *xhci;
 821        struct xhci_virt_ep *ep;
 822        int ret, i, j;
 823        unsigned long flags;
 824
 825        ep = (struct xhci_virt_ep *) arg;
 826        xhci = ep->xhci;
 827
 828        spin_lock_irqsave(&xhci->lock, flags);
 829
 830        ep->stop_cmds_pending--;
 831        if (xhci->xhc_state & XHCI_STATE_DYING) {
 832                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 833                                "Stop EP timer ran, but another timer marked "
 834                                "xHCI as DYING, exiting.");
 835                spin_unlock_irqrestore(&xhci->lock, flags);
 836                return;
 837        }
 838        if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
 839                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 840                                "Stop EP timer ran, but no command pending, "
 841                                "exiting.");
 842                spin_unlock_irqrestore(&xhci->lock, flags);
 843                return;
 844        }
 845
 846        xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
 847        xhci_warn(xhci, "Assuming host is dying, halting host.\n");
 848        /* Oops, HC is dead or dying or at least not responding to the stop
 849         * endpoint command.
 850         */
 851        xhci->xhc_state |= XHCI_STATE_DYING;
 852        /* Disable interrupts from the host controller and start halting it */
 853        xhci_quiesce(xhci);
 854        spin_unlock_irqrestore(&xhci->lock, flags);
 855
 856        ret = xhci_halt(xhci);
 857
 858        spin_lock_irqsave(&xhci->lock, flags);
 859        if (ret < 0) {
 860                /* This is bad; the host is not responding to commands and it's
 861                 * not allowing itself to be halted.  At least interrupts are
 862                 * disabled. If we call usb_hc_died(), it will attempt to
 863                 * disconnect all device drivers under this host.  Those
 864                 * disconnect() methods will wait for all URBs to be unlinked,
 865                 * so we must complete them.
 866                 */
 867                xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
 868                xhci_warn(xhci, "Completing active URBs anyway.\n");
 869                /* We could turn all TDs on the rings to no-ops.  This won't
 870                 * help if the host has cached part of the ring, and is slow if
 871                 * we want to preserve the cycle bit.  Skip it and hope the host
 872                 * doesn't touch the memory.
 873                 */
 874        }
 875        for (i = 0; i < MAX_HC_SLOTS; i++) {
 876                if (!xhci->devs[i])
 877                        continue;
 878                for (j = 0; j < 31; j++)
 879                        xhci_kill_endpoint_urbs(xhci, i, j);
 880        }
 881        spin_unlock_irqrestore(&xhci->lock, flags);
 882        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 883                        "Calling usb_hc_died()");
 884        usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
 885        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 886                        "xHCI host controller is dead.");
 887}
 888
 889
 890static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
 891                struct xhci_virt_device *dev,
 892                struct xhci_ring *ep_ring,
 893                unsigned int ep_index)
 894{
 895        union xhci_trb *dequeue_temp;
 896        int num_trbs_free_temp;
 897        bool revert = false;
 898
 899        num_trbs_free_temp = ep_ring->num_trbs_free;
 900        dequeue_temp = ep_ring->dequeue;
 901
 902        /* If we get two back-to-back stalls, and the first stalled transfer
 903         * ends just before a link TRB, the dequeue pointer will be left on
 904         * the link TRB by the code in the while loop.  So we have to update
 905         * the dequeue pointer one segment further, or we'll jump off
 906         * the segment into la-la-land.
 907         */
 908        if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
 909                ep_ring->deq_seg = ep_ring->deq_seg->next;
 910                ep_ring->dequeue = ep_ring->deq_seg->trbs;
 911        }
 912
 913        while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
 914                /* We have more usable TRBs */
 915                ep_ring->num_trbs_free++;
 916                ep_ring->dequeue++;
 917                if (last_trb(xhci, ep_ring, ep_ring->deq_seg,
 918                                ep_ring->dequeue)) {
 919                        if (ep_ring->dequeue ==
 920                                        dev->eps[ep_index].queued_deq_ptr)
 921                                break;
 922                        ep_ring->deq_seg = ep_ring->deq_seg->next;
 923                        ep_ring->dequeue = ep_ring->deq_seg->trbs;
 924                }
 925                if (ep_ring->dequeue == dequeue_temp) {
 926                        revert = true;
 927                        break;
 928                }
 929        }
 930
 931        if (revert) {
 932                xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
 933                ep_ring->num_trbs_free = num_trbs_free_temp;
 934        }
 935}
 936
 937/*
 938 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 939 * we need to clear the set deq pending flag in the endpoint ring state, so that
 940 * the TD queueing code can ring the doorbell again.  We also need to ring the
 941 * endpoint doorbell to restart the ring, but only if there aren't more
 942 * cancellations pending.
 943 */
 944static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
 945                union xhci_trb *trb, u32 cmd_comp_code)
 946{
 947        unsigned int ep_index;
 948        unsigned int stream_id;
 949        struct xhci_ring *ep_ring;
 950        struct xhci_virt_device *dev;
 951        struct xhci_virt_ep *ep;
 952        struct xhci_ep_ctx *ep_ctx;
 953        struct xhci_slot_ctx *slot_ctx;
 954
 955        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
 956        stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
 957        dev = xhci->devs[slot_id];
 958        ep = &dev->eps[ep_index];
 959
 960        ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
 961        if (!ep_ring) {
 962                xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
 963                                stream_id);
 964                /* XXX: Harmless??? */
 965                goto cleanup;
 966        }
 967
 968        ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
 969        slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
 970
 971        if (cmd_comp_code != COMP_SUCCESS) {
 972                unsigned int ep_state;
 973                unsigned int slot_state;
 974
 975                switch (cmd_comp_code) {
 976                case COMP_TRB_ERR:
 977                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
 978                        break;
 979                case COMP_CTX_STATE:
 980                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
 981                        ep_state = le32_to_cpu(ep_ctx->ep_info);
 982                        ep_state &= EP_STATE_MASK;
 983                        slot_state = le32_to_cpu(slot_ctx->dev_state);
 984                        slot_state = GET_SLOT_STATE(slot_state);
 985                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 986                                        "Slot state = %u, EP state = %u",
 987                                        slot_state, ep_state);
 988                        break;
 989                case COMP_EBADSLT:
 990                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
 991                                        slot_id);
 992                        break;
 993                default:
 994                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
 995                                        cmd_comp_code);
 996                        break;
 997                }
 998                /* OK what do we do now?  The endpoint state is hosed, and we
 999                 * should never get to this point if the synchronization between
1000                 * queueing, and endpoint state are correct.  This might happen
1001                 * if the device gets disconnected after we've finished
1002                 * cancelling URBs, which might not be an error...
1003                 */
1004        } else {
1005                u64 deq;
1006                /* 4.6.10 deq ptr is written to the stream ctx for streams */
1007                if (ep->ep_state & EP_HAS_STREAMS) {
1008                        struct xhci_stream_ctx *ctx =
1009                                &ep->stream_info->stream_ctx_array[stream_id];
1010                        deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
1011                } else {
1012                        deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
1013                }
1014                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1015                        "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
1016                if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
1017                                         ep->queued_deq_ptr) == deq) {
1018                        /* Update the ring's dequeue segment and dequeue pointer
1019                         * to reflect the new position.
1020                         */
1021                        update_ring_for_set_deq_completion(xhci, dev,
1022                                ep_ring, ep_index);
1023                } else {
1024                        xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
1025                        xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
1026                                  ep->queued_deq_seg, ep->queued_deq_ptr);
1027                }
1028        }
1029
1030cleanup:
1031        dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
1032        dev->eps[ep_index].queued_deq_seg = NULL;
1033        dev->eps[ep_index].queued_deq_ptr = NULL;
1034        /* Restart any rings with pending URBs */
1035        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1036}
1037
1038static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
1039                union xhci_trb *trb, u32 cmd_comp_code)
1040{
1041        unsigned int ep_index;
1042
1043        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1044        /* This command will only fail if the endpoint wasn't halted,
1045         * but we don't care.
1046         */
1047        xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
1048                "Ignoring reset ep completion code of %u", cmd_comp_code);
1049
1050        /* HW with the reset endpoint quirk needs to have a configure endpoint
1051         * command complete before the endpoint can be used.  Queue that here
1052         * because the HW can't handle two commands being queued in a row.
1053         */
1054        if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
1055                struct xhci_command *command;
1056                command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
1057                if (!command) {
1058                        xhci_warn(xhci, "WARN Cannot submit cfg ep: ENOMEM\n");
1059                        return;
1060                }
1061                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1062                                "Queueing configure endpoint command");
1063                xhci_queue_configure_endpoint(xhci, command,
1064                                xhci->devs[slot_id]->in_ctx->dma, slot_id,
1065                                false);
1066                xhci_ring_cmd_db(xhci);
1067        } else {
1068                /* Clear our internal halted state */
1069                xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
1070        }
1071}
1072
1073static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
1074                u32 cmd_comp_code)
1075{
1076        if (cmd_comp_code == COMP_SUCCESS)
1077                xhci->slot_id = slot_id;
1078        else
1079                xhci->slot_id = 0;
1080}
1081
1082static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
1083{
1084        struct xhci_virt_device *virt_dev;
1085
1086        virt_dev = xhci->devs[slot_id];
1087        if (!virt_dev)
1088                return;
1089        if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
1090                /* Delete default control endpoint resources */
1091                xhci_free_device_endpoint_resources(xhci, virt_dev, true);
1092        xhci_free_virt_device(xhci, slot_id);
1093}
1094
1095static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
1096                struct xhci_event_cmd *event, u32 cmd_comp_code)
1097{
1098        struct xhci_virt_device *virt_dev;
1099        struct xhci_input_control_ctx *ctrl_ctx;
1100        unsigned int ep_index;
1101        unsigned int ep_state;
1102        u32 add_flags, drop_flags;
1103
1104        /*
1105         * Configure endpoint commands can come from the USB core
1106         * configuration or alt setting changes, or because the HW
1107         * needed an extra configure endpoint command after a reset
1108         * endpoint command or streams were being configured.
1109         * If the command was for a halted endpoint, the xHCI driver
1110         * is not waiting on the configure endpoint command.
1111         */
1112        virt_dev = xhci->devs[slot_id];
1113        ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
1114        if (!ctrl_ctx) {
1115                xhci_warn(xhci, "Could not get input context, bad type.\n");
1116                return;
1117        }
1118
1119        add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1120        drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1121        /* Input ctx add_flags are the endpoint index plus one */
1122        ep_index = xhci_last_valid_endpoint(add_flags) - 1;
1123
1124        /* A usb_set_interface() call directly after clearing a halted
1125         * condition may race on this quirky hardware.  Not worth
1126         * worrying about, since this is prototype hardware.  Not sure
1127         * if this will work for streams, but streams support was
1128         * untested on this prototype.
1129         */
1130        if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
1131                        ep_index != (unsigned int) -1 &&
1132                        add_flags - SLOT_FLAG == drop_flags) {
1133                ep_state = virt_dev->eps[ep_index].ep_state;
1134                if (!(ep_state & EP_HALTED))
1135                        return;
1136                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1137                                "Completed config ep cmd - "
1138                                "last ep index = %d, state = %d",
1139                                ep_index, ep_state);
1140                /* Clear internal halted state and restart ring(s) */
1141                virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
1142                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1143                return;
1144        }
1145        return;
1146}
1147
1148static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
1149                struct xhci_event_cmd *event)
1150{
1151        xhci_dbg(xhci, "Completed reset device command.\n");
1152        if (!xhci->devs[slot_id])
1153                xhci_warn(xhci, "Reset device command completion "
1154                                "for disabled slot %u\n", slot_id);
1155}
1156
1157static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
1158                struct xhci_event_cmd *event)
1159{
1160        if (!(xhci->quirks & XHCI_NEC_HOST)) {
1161                xhci->error_bitmask |= 1 << 6;
1162                return;
1163        }
1164        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1165                        "NEC firmware version %2x.%02x",
1166                        NEC_FW_MAJOR(le32_to_cpu(event->status)),
1167                        NEC_FW_MINOR(le32_to_cpu(event->status)));
1168}
1169
1170static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
1171{
1172        list_del(&cmd->cmd_list);
1173
1174        if (cmd->completion) {
1175                cmd->status = status;
1176                complete(cmd->completion);
1177        } else {
1178                kfree(cmd);
1179        }
1180}
1181
1182void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
1183{
1184        struct xhci_command *cur_cmd, *tmp_cmd;
1185        list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
1186                xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
1187}
1188
1189/*
1190 * Turn all commands on command ring with status set to "aborted" to no-op trbs.
1191 * If there are other commands waiting then restart the ring and kick the timer.
1192 * This must be called with command ring stopped and xhci->lock held.
1193 */
1194static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
1195                                         struct xhci_command *cur_cmd)
1196{
1197        struct xhci_command *i_cmd, *tmp_cmd;
1198        u32 cycle_state;
1199
1200        /* Turn all aborted commands in list to no-ops, then restart */
1201        list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list,
1202                                 cmd_list) {
1203
1204                if (i_cmd->status != COMP_CMD_ABORT)
1205                        continue;
1206
1207                i_cmd->status = COMP_CMD_STOP;
1208
1209                xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
1210                         i_cmd->command_trb);
1211                /* get cycle state from the original cmd trb */
1212                cycle_state = le32_to_cpu(
1213                        i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
1214                /* modify the command trb to no-op command */
1215                i_cmd->command_trb->generic.field[0] = 0;
1216                i_cmd->command_trb->generic.field[1] = 0;
1217                i_cmd->command_trb->generic.field[2] = 0;
1218                i_cmd->command_trb->generic.field[3] = cpu_to_le32(
1219                        TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
1220
1221                /*
1222                 * caller waiting for completion is called when command
1223                 *  completion event is received for these no-op commands
1224                 */
1225        }
1226
1227        xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
1228
1229        /* ring command ring doorbell to restart the command ring */
1230        if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
1231            !(xhci->xhc_state & XHCI_STATE_DYING)) {
1232                xhci->current_cmd = cur_cmd;
1233                mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
1234                xhci_ring_cmd_db(xhci);
1235        }
1236        return;
1237}
1238
1239
1240void xhci_handle_command_timeout(unsigned long data)
1241{
1242        struct xhci_hcd *xhci;
1243        int ret;
1244        unsigned long flags;
1245        u64 hw_ring_state;
1246        struct xhci_command *cur_cmd = NULL;
1247        xhci = (struct xhci_hcd *) data;
1248
1249        /* mark this command to be cancelled */
1250        spin_lock_irqsave(&xhci->lock, flags);
1251        if (xhci->current_cmd) {
1252                cur_cmd = xhci->current_cmd;
1253                cur_cmd->status = COMP_CMD_ABORT;
1254        }
1255
1256
1257        /* Make sure command ring is running before aborting it */
1258        hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
1259        if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
1260            (hw_ring_state & CMD_RING_RUNNING))  {
1261
1262                spin_unlock_irqrestore(&xhci->lock, flags);
1263                xhci_dbg(xhci, "Command timeout\n");
1264                ret = xhci_abort_cmd_ring(xhci);
1265                if (unlikely(ret == -ESHUTDOWN)) {
1266                        xhci_err(xhci, "Abort command ring failed\n");
1267                        xhci_cleanup_command_queue(xhci);
1268                        usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
1269                        xhci_dbg(xhci, "xHCI host controller is dead.\n");
1270                }
1271                return;
1272        }
1273        /* command timeout on stopped ring, ring can't be aborted */
1274        xhci_dbg(xhci, "Command timeout on stopped ring\n");
1275        xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
1276        spin_unlock_irqrestore(&xhci->lock, flags);
1277        return;
1278}
1279
1280static void handle_cmd_completion(struct xhci_hcd *xhci,
1281                struct xhci_event_cmd *event)
1282{
1283        int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1284        u64 cmd_dma;
1285        dma_addr_t cmd_dequeue_dma;
1286        u32 cmd_comp_code;
1287        union xhci_trb *cmd_trb;
1288        struct xhci_command *cmd;
1289        u32 cmd_type;
1290
1291        cmd_dma = le64_to_cpu(event->cmd_trb);
1292        cmd_trb = xhci->cmd_ring->dequeue;
1293        cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
1294                        cmd_trb);
1295        /* Is the command ring deq ptr out of sync with the deq seg ptr? */
1296        if (cmd_dequeue_dma == 0) {
1297                xhci->error_bitmask |= 1 << 4;
1298                return;
1299        }
1300        /* Does the DMA address match our internal dequeue pointer address? */
1301        if (cmd_dma != (u64) cmd_dequeue_dma) {
1302                xhci->error_bitmask |= 1 << 5;
1303                return;
1304        }
1305
1306        cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
1307
1308        if (cmd->command_trb != xhci->cmd_ring->dequeue) {
1309                xhci_err(xhci,
1310                         "Command completion event does not match command\n");
1311                return;
1312        }
1313
1314        del_timer(&xhci->cmd_timer);
1315
1316        trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
1317
1318        cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
1319
1320        /* If CMD ring stopped we own the trbs between enqueue and dequeue */
1321        if (cmd_comp_code == COMP_CMD_STOP) {
1322                xhci_handle_stopped_cmd_ring(xhci, cmd);
1323                return;
1324        }
1325        /*
1326         * Host aborted the command ring, check if the current command was
1327         * supposed to be aborted, otherwise continue normally.
1328         * The command ring is stopped now, but the xHC will issue a Command
1329         * Ring Stopped event which will cause us to restart it.
1330         */
1331        if (cmd_comp_code == COMP_CMD_ABORT) {
1332                xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
1333                if (cmd->status == COMP_CMD_ABORT)
1334                        goto event_handled;
1335        }
1336
1337        cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
1338        switch (cmd_type) {
1339        case TRB_ENABLE_SLOT:
1340                xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code);
1341                break;
1342        case TRB_DISABLE_SLOT:
1343                xhci_handle_cmd_disable_slot(xhci, slot_id);
1344                break;
1345        case TRB_CONFIG_EP:
1346                if (!cmd->completion)
1347                        xhci_handle_cmd_config_ep(xhci, slot_id, event,
1348                                                  cmd_comp_code);
1349                break;
1350        case TRB_EVAL_CONTEXT:
1351                break;
1352        case TRB_ADDR_DEV:
1353                break;
1354        case TRB_STOP_RING:
1355                WARN_ON(slot_id != TRB_TO_SLOT_ID(
1356                                le32_to_cpu(cmd_trb->generic.field[3])));
1357                xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
1358                break;
1359        case TRB_SET_DEQ:
1360                WARN_ON(slot_id != TRB_TO_SLOT_ID(
1361                                le32_to_cpu(cmd_trb->generic.field[3])));
1362                xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
1363                break;
1364        case TRB_CMD_NOOP:
1365                /* Is this an aborted command turned to NO-OP? */
1366                if (cmd->status == COMP_CMD_STOP)
1367                        cmd_comp_code = COMP_CMD_STOP;
1368                break;
1369        case TRB_RESET_EP:
1370                WARN_ON(slot_id != TRB_TO_SLOT_ID(
1371                                le32_to_cpu(cmd_trb->generic.field[3])));
1372                xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
1373                break;
1374        case TRB_RESET_DEV:
1375                /* SLOT_ID field in reset device cmd completion event TRB is 0.
1376                 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
1377                 */
1378                slot_id = TRB_TO_SLOT_ID(
1379                                le32_to_cpu(cmd_trb->generic.field[3]));
1380                xhci_handle_cmd_reset_dev(xhci, slot_id, event);
1381                break;
1382        case TRB_NEC_GET_FW:
1383                xhci_handle_cmd_nec_get_fw(xhci, event);
1384                break;
1385        default:
1386                /* Skip over unknown commands on the event ring */
1387                xhci->error_bitmask |= 1 << 6;
1388                break;
1389        }
1390
1391        /* restart timer if this wasn't the last command */
1392        if (cmd->cmd_list.next != &xhci->cmd_list) {
1393                xhci->current_cmd = list_entry(cmd->cmd_list.next,
1394                                               struct xhci_command, cmd_list);
1395                mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
1396        }
1397
1398event_handled:
1399        xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);
1400
1401        inc_deq(xhci, xhci->cmd_ring);
1402}
1403
1404static void handle_vendor_event(struct xhci_hcd *xhci,
1405                union xhci_trb *event)
1406{
1407        u32 trb_type;
1408
1409        trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
1410        xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
1411        if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
1412                handle_cmd_completion(xhci, &event->event_cmd);
1413}
1414
1415/* @port_id: the one-based port ID from the hardware (indexed from array of all
1416 * port registers -- USB 3.0 and USB 2.0).
1417 *
1418 * Returns a zero-based port number, which is suitable for indexing into each of
1419 * the split roothubs' port arrays and bus state arrays.
1420 * Add one to it in order to call xhci_find_slot_id_by_port.
1421 */
1422static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
1423                struct xhci_hcd *xhci, u32 port_id)
1424{
1425        unsigned int i;
1426        unsigned int num_similar_speed_ports = 0;
1427
1428        /* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
1429         * and usb2_ports are 0-based indexes.  Count the number of similar
1430         * speed ports, up to 1 port before this port.
1431         */
1432        for (i = 0; i < (port_id - 1); i++) {
1433                u8 port_speed = xhci->port_array[i];
1434
1435                /*
1436                 * Skip ports that don't have known speeds, or have duplicate
1437                 * Extended Capabilities port speed entries.
1438                 */
1439                if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
1440                        continue;
1441
1442                /*
1443                 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
1444                 * 1.1 ports are under the USB 2.0 hub.  If the port speed
1445                 * matches the device speed, it's a similar speed port.
1446                 */
1447                if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
1448                        num_similar_speed_ports++;
1449        }
1450        return num_similar_speed_ports;
1451}
1452
1453static void handle_device_notification(struct xhci_hcd *xhci,
1454                union xhci_trb *event)
1455{
1456        u32 slot_id;
1457        struct usb_device *udev;
1458
1459        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
1460        if (!xhci->devs[slot_id]) {
1461                xhci_warn(xhci, "Device Notification event for "
1462                                "unused slot %u\n", slot_id);
1463                return;
1464        }
1465
1466        xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
1467                        slot_id);
1468        udev = xhci->devs[slot_id]->udev;
1469        if (udev && udev->parent)
1470                usb_wakeup_notification(udev->parent, udev->portnum);
1471}
1472
1473static void handle_port_status(struct xhci_hcd *xhci,
1474                union xhci_trb *event)
1475{
1476        struct usb_hcd *hcd;
1477        u32 port_id;
1478        u32 temp, temp1;
1479        int max_ports;
1480        int slot_id;
1481        unsigned int faked_port_index;
1482        u8 major_revision;
1483        struct xhci_bus_state *bus_state;
1484        __le32 __iomem **port_array;
1485        bool bogus_port_status = false;
1486
1487        /* Port status change events always have a successful completion code */
1488        if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
1489                xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
1490                xhci->error_bitmask |= 1 << 8;
1491        }
1492        port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
1493        xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
1494
1495        max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1496        if ((port_id <= 0) || (port_id > max_ports)) {
1497                xhci_warn(xhci, "Invalid port id %d\n", port_id);
1498                inc_deq(xhci, xhci->event_ring);
1499                return;
1500        }
1501
1502        /* Figure out which usb_hcd this port is attached to:
1503         * is it a USB 3.0 port or a USB 2.0/1.1 port?
1504         */
1505        major_revision = xhci->port_array[port_id - 1];
1506
1507        /* Find the right roothub. */
1508        hcd = xhci_to_hcd(xhci);
1509        if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
1510                hcd = xhci->shared_hcd;
1511
1512        if (major_revision == 0) {
1513                xhci_warn(xhci, "Event for port %u not in "
1514                                "Extended Capabilities, ignoring.\n",
1515                                port_id);
1516                bogus_port_status = true;
1517                goto cleanup;
1518        }
1519        if (major_revision == DUPLICATE_ENTRY) {
1520                xhci_warn(xhci, "Event for port %u duplicated in"
1521                                "Extended Capabilities, ignoring.\n",
1522                                port_id);
1523                bogus_port_status = true;
1524                goto cleanup;
1525        }
1526
1527        /*
1528         * Hardware port IDs reported by a Port Status Change Event include USB
1529         * 3.0 and USB 2.0 ports.  We want to check if the port has reported a
1530         * resume event, but we first need to translate the hardware port ID
1531         * into the index into the ports on the correct split roothub, and the
1532         * correct bus_state structure.
1533         */
1534        bus_state = &xhci->bus_state[hcd_index(hcd)];
1535        if (hcd->speed == HCD_USB3)
1536                port_array = xhci->usb3_ports;
1537        else
1538                port_array = xhci->usb2_ports;
1539        /* Find the faked port hub number */
1540        faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
1541                        port_id);
1542
1543        temp = readl(port_array[faked_port_index]);
1544        if (hcd->state == HC_STATE_SUSPENDED) {
1545                xhci_dbg(xhci, "resume root hub\n");
1546                usb_hcd_resume_root_hub(hcd);
1547        }
1548
1549        if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
1550                xhci_dbg(xhci, "port resume event for port %d\n", port_id);
1551
1552                temp1 = readl(&xhci->op_regs->command);
1553                if (!(temp1 & CMD_RUN)) {
1554                        xhci_warn(xhci, "xHC is not running.\n");
1555                        goto cleanup;
1556                }
1557
1558                if (DEV_SUPERSPEED(temp)) {
1559                        xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
1560                        /* Set a flag to say the port signaled remote wakeup,
1561                         * so we can tell the difference between the end of
1562                         * device and host initiated resume.
1563                         */
1564                        bus_state->port_remote_wakeup |= 1 << faked_port_index;
1565                        xhci_test_and_clear_bit(xhci, port_array,
1566                                        faked_port_index, PORT_PLC);
1567                        xhci_set_link_state(xhci, port_array, faked_port_index,
1568                                                XDEV_U0);
1569                        /* Need to wait until the next link state change
1570                         * indicates the device is actually in U0.
1571                         */
1572                        bogus_port_status = true;
1573                        goto cleanup;
1574                } else {
1575                        xhci_dbg(xhci, "resume HS port %d\n", port_id);
1576                        bus_state->resume_done[faked_port_index] = jiffies +
1577                                msecs_to_jiffies(USB_RESUME_TIMEOUT);
1578                        set_bit(faked_port_index, &bus_state->resuming_ports);
1579                        mod_timer(&hcd->rh_timer,
1580                                  bus_state->resume_done[faked_port_index]);
1581                        /* Do the rest in GetPortStatus */
1582                }
1583        }
1584
1585        if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
1586                        DEV_SUPERSPEED(temp)) {
1587                xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
1588                /* We've just brought the device into U0 through either the
1589                 * Resume state after a device remote wakeup, or through the
1590                 * U3Exit state after a host-initiated resume.  If it's a device
1591                 * initiated remote wake, don't pass up the link state change,
1592                 * so the roothub behavior is consistent with external
1593                 * USB 3.0 hub behavior.
1594                 */
1595                slot_id = xhci_find_slot_id_by_port(hcd, xhci,
1596                                faked_port_index + 1);
1597                if (slot_id && xhci->devs[slot_id])
1598                        xhci_ring_device(xhci, slot_id);
1599                if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
1600                        bus_state->port_remote_wakeup &=
1601                                ~(1 << faked_port_index);
1602                        xhci_test_and_clear_bit(xhci, port_array,
1603                                        faked_port_index, PORT_PLC);
1604                        usb_wakeup_notification(hcd->self.root_hub,
1605                                        faked_port_index + 1);
1606                        bogus_port_status = true;
1607                        goto cleanup;
1608                }
1609        }
1610
1611        /*
1612         * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
1613         * RExit to a disconnect state).  If so, let the the driver know it's
1614         * out of the RExit state.
1615         */
1616        if (!DEV_SUPERSPEED(temp) &&
1617                        test_and_clear_bit(faked_port_index,
1618                                &bus_state->rexit_ports)) {
1619                complete(&bus_state->rexit_done[faked_port_index]);
1620                bogus_port_status = true;
1621                goto cleanup;
1622        }
1623
1624        if (hcd->speed != HCD_USB3)
1625                xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
1626                                        PORT_PLC);
1627
1628cleanup:
1629        /* Update event ring dequeue pointer before dropping the lock */
1630        inc_deq(xhci, xhci->event_ring);
1631
1632        /* Don't make the USB core poll the roothub if we got a bad port status
1633         * change event.  Besides, at that point we can't tell which roothub
1634         * (USB 2.0 or USB 3.0) to kick.
1635         */
1636        if (bogus_port_status)
1637                return;
1638
1639        /*
1640         * xHCI port-status-change events occur when the "or" of all the
1641         * status-change bits in the portsc register changes from 0 to 1.
1642         * New status changes won't cause an event if any other change
1643         * bits are still set.  When an event occurs, switch over to
1644         * polling to avoid losing status changes.
1645         */
1646        xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1647        set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1648        spin_unlock(&xhci->lock);
1649        /* Pass this up to the core */
1650        usb_hcd_poll_rh_status(hcd);
1651        spin_lock(&xhci->lock);
1652}
1653
1654/*
1655 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
1656 * at end_trb, which may be in another segment.  If the suspect DMA address is a
1657 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
1658 * returns 0.
1659 */
1660struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
1661                struct xhci_segment *start_seg,
1662                union xhci_trb  *start_trb,
1663                union xhci_trb  *end_trb,
1664                dma_addr_t      suspect_dma,
1665                bool            debug)
1666{
1667        dma_addr_t start_dma;
1668        dma_addr_t end_seg_dma;
1669        dma_addr_t end_trb_dma;
1670        struct xhci_segment *cur_seg;
1671
1672        start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
1673        cur_seg = start_seg;
1674
1675        do {
1676                if (start_dma == 0)
1677                        return NULL;
1678                /* We may get an event for a Link TRB in the middle of a TD */
1679                end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
1680                                &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
1681                /* If the end TRB isn't in this segment, this is set to 0 */
1682                end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
1683
1684                if (debug)
1685                        xhci_warn(xhci,
1686                                "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
1687                                (unsigned long long)suspect_dma,
1688                                (unsigned long long)start_dma,
1689                                (unsigned long long)end_trb_dma,
1690                                (unsigned long long)cur_seg->dma,
1691                                (unsigned long long)end_seg_dma);
1692
1693                if (end_trb_dma > 0) {
1694                        /* The end TRB is in this segment, so suspect should be here */
1695                        if (start_dma <= end_trb_dma) {
1696                                if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
1697                                        return cur_seg;
1698                        } else {
1699                                /* Case for one segment with
1700                                 * a TD wrapped around to the top
1701                                 */
1702                                if ((suspect_dma >= start_dma &&
1703                                                        suspect_dma <= end_seg_dma) ||
1704                                                (suspect_dma >= cur_seg->dma &&
1705                                                 suspect_dma <= end_trb_dma))
1706                                        return cur_seg;
1707                        }
1708                        return NULL;
1709                } else {
1710                        /* Might still be somewhere in this segment */
1711                        if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
1712                                return cur_seg;
1713                }
1714                cur_seg = cur_seg->next;
1715                start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
1716        } while (cur_seg != start_seg);
1717
1718        return NULL;
1719}
1720
1721static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
1722                unsigned int slot_id, unsigned int ep_index,
1723                unsigned int stream_id,
1724                struct xhci_td *td, union xhci_trb *event_trb)
1725{
1726        struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
1727        struct xhci_command *command;
1728        command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
1729        if (!command)
1730                return;
1731
1732        ep->ep_state |= EP_HALTED;
1733        ep->stopped_stream = stream_id;
1734
1735        xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
1736        xhci_cleanup_stalled_ring(xhci, ep_index, td);
1737
1738        ep->stopped_stream = 0;
1739
1740        xhci_ring_cmd_db(xhci);
1741}
1742
1743/* Check if an error has halted the endpoint ring.  The class driver will
1744 * cleanup the halt for a non-default control endpoint if we indicate a stall.
1745 * However, a babble and other errors also halt the endpoint ring, and the class
1746 * driver won't clear the halt in that case, so we need to issue a Set Transfer
1747 * Ring Dequeue Pointer command manually.
1748 */
1749static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
1750                struct xhci_ep_ctx *ep_ctx,
1751                unsigned int trb_comp_code)
1752{
1753        /* TRB completion codes that may require a manual halt cleanup */
1754        if (trb_comp_code == COMP_TX_ERR ||
1755                        trb_comp_code == COMP_BABBLE ||
1756                        trb_comp_code == COMP_SPLIT_ERR)
1757                /* The 0.96 spec says a babbling control endpoint
1758                 * is not halted. The 0.96 spec says it is.  Some HW
1759                 * claims to be 0.95 compliant, but it halts the control
1760                 * endpoint anyway.  Check if a babble halted the
1761                 * endpoint.
1762                 */
1763                if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
1764                    cpu_to_le32(EP_STATE_HALTED))
1765                        return 1;
1766
1767        return 0;
1768}
1769
1770int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
1771{
1772        if (trb_comp_code >= 224 && trb_comp_code <= 255) {
1773                /* Vendor defined "informational" completion code,
1774                 * treat as not-an-error.
1775                 */
1776                xhci_dbg(xhci, "Vendor defined info completion code %u\n",
1777                                trb_comp_code);
1778                xhci_dbg(xhci, "Treating code as success.\n");
1779                return 1;
1780        }
1781        return 0;
1782}
1783
1784/*
1785 * Finish the td processing, remove the td from td list;
1786 * Return 1 if the urb can be given back.
1787 */
1788static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
1789        union xhci_trb *event_trb, struct xhci_transfer_event *event,
1790        struct xhci_virt_ep *ep, int *status, bool skip)
1791{
1792        struct xhci_virt_device *xdev;
1793        struct xhci_ring *ep_ring;
1794        unsigned int slot_id;
1795        int ep_index;
1796        struct urb *urb = NULL;
1797        struct xhci_ep_ctx *ep_ctx;
1798        int ret = 0;
1799        struct urb_priv *urb_priv;
1800        u32 trb_comp_code;
1801
1802        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1803        xdev = xhci->devs[slot_id];
1804        ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1805        ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1806        ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1807        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1808
1809        if (skip)
1810                goto td_cleanup;
1811
1812        if (trb_comp_code == COMP_STOP_INVAL || trb_comp_code == COMP_STOP) {
1813                /* The Endpoint Stop Command completion will take care of any
1814                 * stopped TDs.  A stopped TD may be restarted, so don't update
1815                 * the ring dequeue pointer or take this TD off any lists yet.
1816                 */
1817                ep->stopped_td = td;
1818                return 0;
1819        }
1820        if (trb_comp_code == COMP_STALL ||
1821                xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
1822                                                trb_comp_code)) {
1823                /* Issue a reset endpoint command to clear the host side
1824                 * halt, followed by a set dequeue command to move the
1825                 * dequeue pointer past the TD.
1826                 * The class driver clears the device side halt later.
1827                 */
1828                xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
1829                                        ep_ring->stream_id, td, event_trb);
1830        } else {
1831                /* Update ring dequeue pointer */
1832                while (ep_ring->dequeue != td->last_trb)
1833                        inc_deq(xhci, ep_ring);
1834                inc_deq(xhci, ep_ring);
1835        }
1836
1837td_cleanup:
1838        /* Clean up the endpoint's TD list */
1839        urb = td->urb;
1840        urb_priv = urb->hcpriv;
1841
1842        /* Do one last check of the actual transfer length.
1843         * If the host controller said we transferred more data than the buffer
1844         * length, urb->actual_length will be a very big number (since it's
1845         * unsigned).  Play it safe and say we didn't transfer anything.
1846         */
1847        if (urb->actual_length > urb->transfer_buffer_length) {
1848                xhci_warn(xhci, "URB transfer length is wrong, xHC issue? req. len = %u, act. len = %u\n",
1849                        urb->transfer_buffer_length,
1850                        urb->actual_length);
1851                urb->actual_length = 0;
1852                if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1853                        *status = -EREMOTEIO;
1854                else
1855                        *status = 0;
1856        }
1857        list_del_init(&td->td_list);
1858        /* Was this TD slated to be cancelled but completed anyway? */
1859        if (!list_empty(&td->cancelled_td_list))
1860                list_del_init(&td->cancelled_td_list);
1861
1862        urb_priv->td_cnt++;
1863        /* Giveback the urb when all the tds are completed */
1864        if (urb_priv->td_cnt == urb_priv->length) {
1865                ret = 1;
1866                if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
1867                        xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
1868                        if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
1869                                if (xhci->quirks & XHCI_AMD_PLL_FIX)
1870                                        usb_amd_quirk_pll_enable();
1871                        }
1872                }
1873        }
1874
1875        return ret;
1876}
1877
1878/*
1879 * Process control tds, update urb status and actual_length.
1880 */
1881static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1882        union xhci_trb *event_trb, struct xhci_transfer_event *event,
1883        struct xhci_virt_ep *ep, int *status)
1884{
1885        struct xhci_virt_device *xdev;
1886        struct xhci_ring *ep_ring;
1887        unsigned int slot_id;
1888        int ep_index;
1889        struct xhci_ep_ctx *ep_ctx;
1890        u32 trb_comp_code;
1891
1892        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1893        xdev = xhci->devs[slot_id];
1894        ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1895        ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1896        ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1897        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1898
1899        switch (trb_comp_code) {
1900        case COMP_SUCCESS:
1901                if (event_trb == ep_ring->dequeue) {
1902                        xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
1903                                        "without IOC set??\n");
1904                        *status = -ESHUTDOWN;
1905                } else if (event_trb != td->last_trb) {
1906                        xhci_warn(xhci, "WARN: Success on ctrl data TRB "
1907                                        "without IOC set??\n");
1908                        *status = -ESHUTDOWN;
1909                } else {
1910                        *status = 0;
1911                }
1912                break;
1913        case COMP_SHORT_TX:
1914                if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1915                        *status = -EREMOTEIO;
1916                else
1917                        *status = 0;
1918                break;
1919        case COMP_STOP_INVAL:
1920        case COMP_STOP:
1921                return finish_td(xhci, td, event_trb, event, ep, status, false);
1922        default:
1923                if (!xhci_requires_manual_halt_cleanup(xhci,
1924                                        ep_ctx, trb_comp_code))
1925                        break;
1926                xhci_dbg(xhci, "TRB error code %u, "
1927                                "halted endpoint index = %u\n",
1928                                trb_comp_code, ep_index);
1929                /* else fall through */
1930        case COMP_STALL:
1931                /* Did we transfer part of the data (middle) phase? */
1932                if (event_trb != ep_ring->dequeue &&
1933                                event_trb != td->last_trb)
1934                        td->urb->actual_length =
1935                                td->urb->transfer_buffer_length -
1936                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
1937                else
1938                        td->urb->actual_length = 0;
1939
1940                return finish_td(xhci, td, event_trb, event, ep, status, false);
1941        }
1942        /*
1943         * Did we transfer any data, despite the errors that might have
1944         * happened?  I.e. did we get past the setup stage?
1945         */
1946        if (event_trb != ep_ring->dequeue) {
1947                /* The event was for the status stage */
1948                if (event_trb == td->last_trb) {
1949                        if (td->urb_length_set) {
1950                                /* Don't overwrite a previously set error code
1951                                 */
1952                                if ((*status == -EINPROGRESS || *status == 0) &&
1953                                                (td->urb->transfer_flags
1954                                                 & URB_SHORT_NOT_OK))
1955                                        /* Did we already see a short data
1956                                         * stage? */
1957                                        *status = -EREMOTEIO;
1958                        } else {
1959                                td->urb->actual_length =
1960                                        td->urb->transfer_buffer_length;
1961                        }
1962                } else {
1963                        /*
1964                         * Maybe the event was for the data stage? If so, update
1965                         * already the actual_length of the URB and flag it as
1966                         * set, so that it is not overwritten in the event for
1967                         * the last TRB.
1968                         */
1969                        td->urb_length_set = true;
1970                        td->urb->actual_length =
1971                                td->urb->transfer_buffer_length -
1972                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
1973                        xhci_dbg(xhci, "Waiting for status "
1974                                        "stage event\n");
1975                        return 0;
1976                }
1977        }
1978
1979        return finish_td(xhci, td, event_trb, event, ep, status, false);
1980}
1981
1982/*
1983 * Process isochronous tds, update urb packet status and actual_length.
1984 */
1985static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
1986        union xhci_trb *event_trb, struct xhci_transfer_event *event,
1987        struct xhci_virt_ep *ep, int *status)
1988{
1989        struct xhci_ring *ep_ring;
1990        struct urb_priv *urb_priv;
1991        int idx;
1992        int len = 0;
1993        union xhci_trb *cur_trb;
1994        struct xhci_segment *cur_seg;
1995        struct usb_iso_packet_descriptor *frame;
1996        u32 trb_comp_code;
1997        bool skip_td = false;
1998
1999        ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2000        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2001        urb_priv = td->urb->hcpriv;
2002        idx = urb_priv->td_cnt;
2003        frame = &td->urb->iso_frame_desc[idx];
2004
2005        /* handle completion code */
2006        switch (trb_comp_code) {
2007        case COMP_SUCCESS:
2008                if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
2009                        frame->status = 0;
2010                        break;
2011                }
2012                if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
2013                        trb_comp_code = COMP_SHORT_TX;
2014        case COMP_SHORT_TX:
2015                frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
2016                                -EREMOTEIO : 0;
2017                break;
2018        case COMP_BW_OVER:
2019                frame->status = -ECOMM;
2020                skip_td = true;
2021                break;
2022        case COMP_BUFF_OVER:
2023        case COMP_BABBLE:
2024                frame->status = -EOVERFLOW;
2025                skip_td = true;
2026                break;
2027        case COMP_DEV_ERR:
2028        case COMP_STALL:
2029                frame->status = -EPROTO;
2030                skip_td = true;
2031                break;
2032        case COMP_TX_ERR:
2033                frame->status = -EPROTO;
2034                if (event_trb != td->last_trb)
2035                        return 0;
2036                skip_td = true;
2037                break;
2038        case COMP_STOP:
2039        case COMP_STOP_INVAL:
2040                break;
2041        default:
2042                frame->status = -1;
2043                break;
2044        }
2045
2046        if (trb_comp_code == COMP_SUCCESS || skip_td) {
2047                frame->actual_length = frame->length;
2048                td->urb->actual_length += frame->length;
2049        } else {
2050                for (cur_trb = ep_ring->dequeue,
2051                     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
2052                     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
2053                        if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
2054                            !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
2055                                len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
2056                }
2057                len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
2058                        EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2059
2060                if (trb_comp_code != COMP_STOP_INVAL) {
2061                        frame->actual_length = len;
2062                        td->urb->actual_length += len;
2063                }
2064        }
2065
2066        return finish_td(xhci, td, event_trb, event, ep, status, false);
2067}
2068
2069static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2070                        struct xhci_transfer_event *event,
2071                        struct xhci_virt_ep *ep, int *status)
2072{
2073        struct xhci_ring *ep_ring;
2074        struct urb_priv *urb_priv;
2075        struct usb_iso_packet_descriptor *frame;
2076        int idx;
2077
2078        ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2079        urb_priv = td->urb->hcpriv;
2080        idx = urb_priv->td_cnt;
2081        frame = &td->urb->iso_frame_desc[idx];
2082
2083        /* The transfer is partly done. */
2084        frame->status = -EXDEV;
2085
2086        /* calc actual length */
2087        frame->actual_length = 0;
2088
2089        /* Update ring dequeue pointer */
2090        while (ep_ring->dequeue != td->last_trb)
2091                inc_deq(xhci, ep_ring);
2092        inc_deq(xhci, ep_ring);
2093
2094        return finish_td(xhci, td, NULL, event, ep, status, true);
2095}
2096
2097/*
2098 * Process bulk and interrupt tds, update urb status and actual_length.
2099 */
2100static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
2101        union xhci_trb *event_trb, struct xhci_transfer_event *event,
2102        struct xhci_virt_ep *ep, int *status)
2103{
2104        struct xhci_ring *ep_ring;
2105        union xhci_trb *cur_trb;
2106        struct xhci_segment *cur_seg;
2107        u32 trb_comp_code;
2108
2109        ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2110        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2111
2112        switch (trb_comp_code) {
2113        case COMP_SUCCESS:
2114                /* Double check that the HW transferred everything. */
2115                if (event_trb != td->last_trb ||
2116                    EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
2117                        xhci_warn(xhci, "WARN Successful completion "
2118                                        "on short TX\n");
2119                        if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2120                                *status = -EREMOTEIO;
2121                        else
2122                                *status = 0;
2123                        if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
2124                                trb_comp_code = COMP_SHORT_TX;
2125                } else {
2126                        *status = 0;
2127                }
2128                break;
2129        case COMP_SHORT_TX:
2130                if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2131                        *status = -EREMOTEIO;
2132                else
2133                        *status = 0;
2134                break;
2135        default:
2136                /* Others already handled above */
2137                break;
2138        }
2139        if (trb_comp_code == COMP_SHORT_TX)
2140                xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
2141                                "%d bytes untransferred\n",
2142                                td->urb->ep->desc.bEndpointAddress,
2143                                td->urb->transfer_buffer_length,
2144                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
2145        /* Fast path - was this the last TRB in the TD for this URB? */
2146        if (event_trb == td->last_trb) {
2147                if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
2148                        td->urb->actual_length =
2149                                td->urb->transfer_buffer_length -
2150                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2151                        if (td->urb->transfer_buffer_length <
2152                                        td->urb->actual_length) {
2153                                xhci_warn(xhci, "HC gave bad length "
2154                                                "of %d bytes left\n",
2155                                          EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
2156                                td->urb->actual_length = 0;
2157                                if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2158                                        *status = -EREMOTEIO;
2159                                else
2160                                        *status = 0;
2161                        }
2162                        /* Don't overwrite a previously set error code */
2163                        if (*status == -EINPROGRESS) {
2164                                if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2165                                        *status = -EREMOTEIO;
2166                                else
2167                                        *status = 0;
2168                        }
2169                } else {
2170                        td->urb->actual_length =
2171                                td->urb->transfer_buffer_length;
2172                        /* Ignore a short packet completion if the
2173                         * untransferred length was zero.
2174                         */
2175                        if (*status == -EREMOTEIO)
2176                                *status = 0;
2177                }
2178        } else {
2179                /* Slow path - walk the list, starting from the dequeue
2180                 * pointer, to get the actual length transferred.
2181                 */
2182                td->urb->actual_length = 0;
2183                for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
2184                                cur_trb != event_trb;
2185                                next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
2186                        if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
2187                            !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
2188                                td->urb->actual_length +=
2189                                        TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
2190                }
2191                /* If the ring didn't stop on a Link or No-op TRB, add
2192                 * in the actual bytes transferred from the Normal TRB
2193                 */
2194                if (trb_comp_code != COMP_STOP_INVAL)
2195                        td->urb->actual_length +=
2196                                TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
2197                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2198        }
2199
2200        return finish_td(xhci, td, event_trb, event, ep, status, false);
2201}
2202
2203/*
2204 * If this function returns an error condition, it means it got a Transfer
2205 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
2206 * At this point, the host controller is probably hosed and should be reset.
2207 */
2208static int handle_tx_event(struct xhci_hcd *xhci,
2209                struct xhci_transfer_event *event)
2210        __releases(&xhci->lock)
2211        __acquires(&xhci->lock)
2212{
2213        struct xhci_virt_device *xdev;
2214        struct xhci_virt_ep *ep;
2215        struct xhci_ring *ep_ring;
2216        unsigned int slot_id;
2217        int ep_index;
2218        struct xhci_td *td = NULL;
2219        dma_addr_t event_dma;
2220        struct xhci_segment *event_seg;
2221        union xhci_trb *event_trb;
2222        struct urb *urb = NULL;
2223        int status = -EINPROGRESS;
2224        struct urb_priv *urb_priv;
2225        struct xhci_ep_ctx *ep_ctx;
2226        struct list_head *tmp;
2227        u32 trb_comp_code;
2228        int ret = 0;
2229        int td_num = 0;
2230
2231        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
2232        xdev = xhci->devs[slot_id];
2233        if (!xdev) {
2234                xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
2235                xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
2236                         (unsigned long long) xhci_trb_virt_to_dma(
2237                                 xhci->event_ring->deq_seg,
2238                                 xhci->event_ring->dequeue),
2239                         lower_32_bits(le64_to_cpu(event->buffer)),
2240                         upper_32_bits(le64_to_cpu(event->buffer)),
2241                         le32_to_cpu(event->transfer_len),
2242                         le32_to_cpu(event->flags));
2243                xhci_dbg(xhci, "Event ring:\n");
2244                xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
2245                return -ENODEV;
2246        }
2247
2248        /* Endpoint ID is 1 based, our index is zero based */
2249        ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
2250        ep = &xdev->eps[ep_index];
2251        ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2252        ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
2253        if (!ep_ring ||
2254            (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
2255            EP_STATE_DISABLED) {
2256                xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
2257                                "or incorrect stream ring\n");
2258                xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
2259                         (unsigned long long) xhci_trb_virt_to_dma(
2260                                 xhci->event_ring->deq_seg,
2261                                 xhci->event_ring->dequeue),
2262                         lower_32_bits(le64_to_cpu(event->buffer)),
2263                         upper_32_bits(le64_to_cpu(event->buffer)),
2264                         le32_to_cpu(event->transfer_len),
2265                         le32_to_cpu(event->flags));
2266                xhci_dbg(xhci, "Event ring:\n");
2267                xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
2268                return -ENODEV;
2269        }
2270
2271        /* Count current td numbers if ep->skip is set */
2272        if (ep->skip) {
2273                list_for_each(tmp, &ep_ring->td_list)
2274                        td_num++;
2275        }
2276
2277        event_dma = le64_to_cpu(event->buffer);
2278        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2279        /* Look for common error cases */
2280        switch (trb_comp_code) {
2281        /* Skip codes that require special handling depending on
2282         * transfer type
2283         */
2284        case COMP_SUCCESS:
2285                if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
2286                        break;
2287                if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
2288                        trb_comp_code = COMP_SHORT_TX;
2289                else
2290                        xhci_warn_ratelimited(xhci,
2291                                        "WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");
2292        case COMP_SHORT_TX:
2293                break;
2294        case COMP_STOP:
2295                xhci_dbg(xhci, "Stopped on Transfer TRB\n");
2296                break;
2297        case COMP_STOP_INVAL:
2298                xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
2299                break;
2300        case COMP_STALL:
2301                xhci_dbg(xhci, "Stalled endpoint\n");
2302                ep->ep_state |= EP_HALTED;
2303                status = -EPIPE;
2304                break;
2305        case COMP_TRB_ERR:
2306                xhci_warn(xhci, "WARN: TRB error on endpoint\n");
2307                status = -EILSEQ;
2308                break;
2309        case COMP_SPLIT_ERR:
2310        case COMP_TX_ERR:
2311                xhci_dbg(xhci, "Transfer error on endpoint\n");
2312                status = -EPROTO;
2313                break;
2314        case COMP_BABBLE:
2315                xhci_dbg(xhci, "Babble error on endpoint\n");
2316                status = -EOVERFLOW;
2317                break;
2318        case COMP_DB_ERR:
2319                xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
2320                status = -ENOSR;
2321                break;
2322        case COMP_BW_OVER:
2323                xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
2324                break;
2325        case COMP_BUFF_OVER:
2326                xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
2327                break;
2328        case COMP_UNDERRUN:
2329                /*
2330                 * When the Isoch ring is empty, the xHC will generate
2331                 * a Ring Overrun Event for IN Isoch endpoint or Ring
2332                 * Underrun Event for OUT Isoch endpoint.
2333                 */
2334                xhci_dbg(xhci, "underrun event on endpoint\n");
2335                if (!list_empty(&ep_ring->td_list))
2336                        xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
2337                                        "still with TDs queued?\n",
2338                                 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2339                                 ep_index);
2340                goto cleanup;
2341        case COMP_OVERRUN:
2342                xhci_dbg(xhci, "overrun event on endpoint\n");
2343                if (!list_empty(&ep_ring->td_list))
2344                        xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
2345                                        "still with TDs queued?\n",
2346                                 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2347                                 ep_index);
2348                goto cleanup;
2349        case COMP_DEV_ERR:
2350                xhci_warn(xhci, "WARN: detect an incompatible device");
2351                status = -EPROTO;
2352                break;
2353        case COMP_MISSED_INT:
2354                /*
2355                 * When encounter missed service error, one or more isoc tds
2356                 * may be missed by xHC.
2357                 * Set skip flag of the ep_ring; Complete the missed tds as
2358                 * short transfer when process the ep_ring next time.
2359                 */
2360                ep->skip = true;
2361                xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
2362                goto cleanup;
2363        default:
2364                if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
2365                        status = 0;
2366                        break;
2367                }
2368                xhci_warn(xhci, "ERROR Unknown event condition %u, HC probably busted\n",
2369                          trb_comp_code);
2370                goto cleanup;
2371        }
2372
2373        do {
2374                /* This TRB should be in the TD at the head of this ring's
2375                 * TD list.
2376                 */
2377                if (list_empty(&ep_ring->td_list)) {
2378                        /*
2379                         * A stopped endpoint may generate an extra completion
2380                         * event if the device was suspended.  Don't print
2381                         * warnings.
2382                         */
2383                        if (!(trb_comp_code == COMP_STOP ||
2384                                                trb_comp_code == COMP_STOP_INVAL)) {
2385                                xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
2386                                                TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2387                                                ep_index);
2388                                xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
2389                                                (le32_to_cpu(event->flags) &
2390                                                 TRB_TYPE_BITMASK)>>10);
2391                                xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
2392                        }
2393                        if (ep->skip) {
2394                                ep->skip = false;
2395                                xhci_dbg(xhci, "td_list is empty while skip "
2396                                                "flag set. Clear skip flag.\n");
2397                        }
2398                        ret = 0;
2399                        goto cleanup;
2400                }
2401
2402                /* We've skipped all the TDs on the ep ring when ep->skip set */
2403                if (ep->skip && td_num == 0) {
2404                        ep->skip = false;
2405                        xhci_dbg(xhci, "All tds on the ep_ring skipped. "
2406                                                "Clear skip flag.\n");
2407                        ret = 0;
2408                        goto cleanup;
2409                }
2410
2411                td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
2412                if (ep->skip)
2413                        td_num--;
2414
2415                /* Is this a TRB in the currently executing TD? */
2416                event_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
2417                                td->last_trb, event_dma, false);
2418
2419                /*
2420                 * Skip the Force Stopped Event. The event_trb(event_dma) of FSE
2421                 * is not in the current TD pointed by ep_ring->dequeue because
2422                 * that the hardware dequeue pointer still at the previous TRB
2423                 * of the current TD. The previous TRB maybe a Link TD or the
2424                 * last TRB of the previous TD. The command completion handle
2425                 * will take care the rest.
2426                 */
2427                if (!event_seg && (trb_comp_code == COMP_STOP ||
2428                                   trb_comp_code == COMP_STOP_INVAL)) {
2429                        ret = 0;
2430                        goto cleanup;
2431                }
2432
2433                if (!event_seg) {
2434                        if (!ep->skip ||
2435                            !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
2436                                /* Some host controllers give a spurious
2437                                 * successful event after a short transfer.
2438                                 * Ignore it.
2439                                 */
2440                                if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
2441                                                ep_ring->last_td_was_short) {
2442                                        ep_ring->last_td_was_short = false;
2443                                        ret = 0;
2444                                        goto cleanup;
2445                                }
2446                                /* HC is busted, give up! */
2447                                xhci_err(xhci,
2448                                        "ERROR Transfer event TRB DMA ptr not "
2449                                        "part of current TD ep_index %d "
2450                                        "comp_code %u\n", ep_index,
2451                                        trb_comp_code);
2452                                trb_in_td(xhci, ep_ring->deq_seg,
2453                                          ep_ring->dequeue, td->last_trb,
2454                                          event_dma, true);
2455                                return -ESHUTDOWN;
2456                        }
2457
2458                        ret = skip_isoc_td(xhci, td, event, ep, &status);
2459                        goto cleanup;
2460                }
2461                if (trb_comp_code == COMP_SHORT_TX)
2462                        ep_ring->last_td_was_short = true;
2463                else
2464                        ep_ring->last_td_was_short = false;
2465
2466                if (ep->skip) {
2467                        xhci_dbg(xhci, "Found td. Clear skip flag.\n");
2468                        ep->skip = false;
2469                }
2470
2471                event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
2472                                                sizeof(*event_trb)];
2473                /*
2474                 * No-op TRB should not trigger interrupts.
2475                 * If event_trb is a no-op TRB, it means the
2476                 * corresponding TD has been cancelled. Just ignore
2477                 * the TD.
2478                 */
2479                if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
2480                        xhci_dbg(xhci,
2481                                 "event_trb is a no-op TRB. Skip it\n");
2482                        goto cleanup;
2483                }
2484
2485                /* Now update the urb's actual_length and give back to
2486                 * the core
2487                 */
2488                if (usb_endpoint_xfer_control(&td->urb->ep->desc))
2489                        ret = process_ctrl_td(xhci, td, event_trb, event, ep,
2490                                                 &status);
2491                else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
2492                        ret = process_isoc_td(xhci, td, event_trb, event, ep,
2493                                                 &status);
2494                else
2495                        ret = process_bulk_intr_td(xhci, td, event_trb, event,
2496                                                 ep, &status);
2497
2498cleanup:
2499                /*
2500                 * Do not update event ring dequeue pointer if ep->skip is set.
2501                 * Will roll back to continue process missed tds.
2502                 */
2503                if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
2504                        inc_deq(xhci, xhci->event_ring);
2505                }
2506
2507                if (ret) {
2508                        urb = td->urb;
2509                        urb_priv = urb->hcpriv;
2510
2511                        xhci_urb_free_priv(urb_priv);
2512
2513                        usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
2514                        if ((urb->actual_length != urb->transfer_buffer_length &&
2515                                                (urb->transfer_flags &
2516                                                 URB_SHORT_NOT_OK)) ||
2517                                        (status != 0 &&
2518                                         !usb_endpoint_xfer_isoc(&urb->ep->desc)))
2519                                xhci_dbg(xhci, "Giveback URB %p, len = %d, "
2520                                                "expected = %d, status = %d\n",
2521                                                urb, urb->actual_length,
2522                                                urb->transfer_buffer_length,
2523                                                status);
2524                        spin_unlock(&xhci->lock);
2525                        /* EHCI, UHCI, and OHCI always unconditionally set the
2526                         * urb->status of an isochronous endpoint to 0.
2527                         */
2528                        if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
2529                                status = 0;
2530                        usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
2531                        spin_lock(&xhci->lock);
2532                }
2533
2534        /*
2535         * If ep->skip is set, it means there are missed tds on the
2536         * endpoint ring need to take care of.
2537         * Process them as short transfer until reach the td pointed by
2538         * the event.
2539         */
2540        } while (ep->skip && trb_comp_code != COMP_MISSED_INT);
2541
2542        return 0;
2543}
2544
2545/*
2546 * This function handles all OS-owned events on the event ring.  It may drop
2547 * xhci->lock between event processing (e.g. to pass up port status changes).
2548 * Returns >0 for "possibly more events to process" (caller should call again),
2549 * otherwise 0 if done.  In future, <0 returns should indicate error code.
2550 */
2551static int xhci_handle_event(struct xhci_hcd *xhci)
2552{
2553        union xhci_trb *event;
2554        int update_ptrs = 1;
2555        int ret;
2556
2557        if (!xhci->event_ring || !xhci->event_ring->dequeue) {
2558                xhci->error_bitmask |= 1 << 1;
2559                return 0;
2560        }
2561
2562        event = xhci->event_ring->dequeue;
2563        /* Does the HC or OS own the TRB? */
2564        if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
2565            xhci->event_ring->cycle_state) {
2566                xhci->error_bitmask |= 1 << 2;
2567                return 0;
2568        }
2569
2570        /*
2571         * Barrier between reading the TRB_CYCLE (valid) flag above and any
2572         * speculative reads of the event's flags/data below.
2573         */
2574        rmb();
2575        /* FIXME: Handle more event types. */
2576        switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
2577        case TRB_TYPE(TRB_COMPLETION):
2578                handle_cmd_completion(xhci, &event->event_cmd);
2579                break;
2580        case TRB_TYPE(TRB_PORT_STATUS):
2581                handle_port_status(xhci, event);
2582                update_ptrs = 0;
2583                break;
2584        case TRB_TYPE(TRB_TRANSFER):
2585                ret = handle_tx_event(xhci, &event->trans_event);
2586                if (ret < 0)
2587                        xhci->error_bitmask |= 1 << 9;
2588                else
2589                        update_ptrs = 0;
2590                break;
2591        case TRB_TYPE(TRB_DEV_NOTE):
2592                handle_device_notification(xhci, event);
2593                break;
2594        default:
2595                if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
2596                    TRB_TYPE(48))
2597                        handle_vendor_event(xhci, event);
2598                else
2599                        xhci->error_bitmask |= 1 << 3;
2600        }
2601        /* Any of the above functions may drop and re-acquire the lock, so check
2602         * to make sure a watchdog timer didn't mark the host as non-responsive.
2603         */
2604        if (xhci->xhc_state & XHCI_STATE_DYING) {
2605                xhci_dbg(xhci, "xHCI host dying, returning from "
2606                                "event handler.\n");
2607                return 0;
2608        }
2609
2610        if (update_ptrs)
2611                /* Update SW event ring dequeue pointer */
2612                inc_deq(xhci, xhci->event_ring);
2613
2614        /* Are there more items on the event ring?  Caller will call us again to
2615         * check.
2616         */
2617        return 1;
2618}
2619
2620/*
2621 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
2622 * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
2623 * indicators of an event TRB error, but we check the status *first* to be safe.
2624 */
2625irqreturn_t xhci_irq(struct usb_hcd *hcd)
2626{
2627        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2628        u32 status;
2629        u64 temp_64;
2630        union xhci_trb *event_ring_deq;
2631        dma_addr_t deq;
2632
2633        spin_lock(&xhci->lock);
2634        /* Check if the xHC generated the interrupt, or the irq is shared */
2635        status = readl(&xhci->op_regs->status);
2636        if (status == 0xffffffff)
2637                goto hw_died;
2638
2639        if (!(status & STS_EINT)) {
2640                spin_unlock(&xhci->lock);
2641                return IRQ_NONE;
2642        }
2643        if (status & STS_FATAL) {
2644                xhci_warn(xhci, "WARNING: Host System Error\n");
2645                xhci_halt(xhci);
2646hw_died:
2647                spin_unlock(&xhci->lock);
2648                return IRQ_HANDLED;
2649        }
2650
2651        /*
2652         * Clear the op reg interrupt status first,
2653         * so we can receive interrupts from other MSI-X interrupters.
2654         * Write 1 to clear the interrupt status.
2655         */
2656        status |= STS_EINT;
2657        writel(status, &xhci->op_regs->status);
2658        /* FIXME when MSI-X is supported and there are multiple vectors */
2659        /* Clear the MSI-X event interrupt status */
2660
2661        if (hcd->irq) {
2662                u32 irq_pending;
2663                /* Acknowledge the PCI interrupt */
2664                irq_pending = readl(&xhci->ir_set->irq_pending);
2665                irq_pending |= IMAN_IP;
2666                writel(irq_pending, &xhci->ir_set->irq_pending);
2667        }
2668
2669        if (xhci->xhc_state & XHCI_STATE_DYING) {
2670                xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
2671                                "Shouldn't IRQs be disabled?\n");
2672                /* Clear the event handler busy flag (RW1C);
2673                 * the event ring should be empty.
2674                 */
2675                temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2676                xhci_write_64(xhci, temp_64 | ERST_EHB,
2677                                &xhci->ir_set->erst_dequeue);
2678                spin_unlock(&xhci->lock);
2679
2680                return IRQ_HANDLED;
2681        }
2682
2683        event_ring_deq = xhci->event_ring->dequeue;
2684        /* FIXME this should be a delayed service routine
2685         * that clears the EHB.
2686         */
2687        while (xhci_handle_event(xhci) > 0) {}
2688
2689        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2690        /* If necessary, update the HW's version of the event ring deq ptr. */
2691        if (event_ring_deq != xhci->event_ring->dequeue) {
2692                deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2693                                xhci->event_ring->dequeue);
2694                if (deq == 0)
2695                        xhci_warn(xhci, "WARN something wrong with SW event "
2696                                        "ring dequeue ptr.\n");
2697                /* Update HC event ring dequeue pointer */
2698                temp_64 &= ERST_PTR_MASK;
2699                temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
2700        }
2701
2702        /* Clear the event handler busy flag (RW1C); event ring is empty. */
2703        temp_64 |= ERST_EHB;
2704        xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
2705
2706        spin_unlock(&xhci->lock);
2707
2708        return IRQ_HANDLED;
2709}
2710
2711irqreturn_t xhci_msi_irq(int irq, void *hcd)
2712{
2713        return xhci_irq(hcd);
2714}
2715
2716/****           Endpoint Ring Operations        ****/
2717
2718/*
2719 * Generic function for queueing a TRB on a ring.
2720 * The caller must have checked to make sure there's room on the ring.
2721 *
2722 * @more_trbs_coming:   Will you enqueue more TRBs before calling
2723 *                      prepare_transfer()?
2724 */
2725static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
2726                bool more_trbs_coming,
2727                u32 field1, u32 field2, u32 field3, u32 field4)
2728{
2729        struct xhci_generic_trb *trb;
2730
2731        trb = &ring->enqueue->generic;
2732        trb->field[0] = cpu_to_le32(field1);
2733        trb->field[1] = cpu_to_le32(field2);
2734        trb->field[2] = cpu_to_le32(field3);
2735        trb->field[3] = cpu_to_le32(field4);
2736        inc_enq(xhci, ring, more_trbs_coming);
2737}
2738
2739/*
2740 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
2741 * FIXME allocate segments if the ring is full.
2742 */
2743static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
2744                u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
2745{
2746        unsigned int num_trbs_needed;
2747
2748        /* Make sure the endpoint has been added to xHC schedule */
2749        switch (ep_state) {
2750        case EP_STATE_DISABLED:
2751                /*
2752                 * USB core changed config/interfaces without notifying us,
2753                 * or hardware is reporting the wrong state.
2754                 */
2755                xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
2756                return -ENOENT;
2757        case EP_STATE_ERROR:
2758                xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
2759                /* FIXME event handling code for error needs to clear it */
2760                /* XXX not sure if this should be -ENOENT or not */
2761                return -EINVAL;
2762        case EP_STATE_HALTED:
2763                xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
2764        case EP_STATE_STOPPED:
2765        case EP_STATE_RUNNING:
2766                break;
2767        default:
2768                xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
2769                /*
2770                 * FIXME issue Configure Endpoint command to try to get the HC
2771                 * back into a known state.
2772                 */
2773                return -EINVAL;
2774        }
2775
2776        while (1) {
2777                if (room_on_ring(xhci, ep_ring, num_trbs))
2778                        break;
2779
2780                if (ep_ring == xhci->cmd_ring) {
2781                        xhci_err(xhci, "Do not support expand command ring\n");
2782                        return -ENOMEM;
2783                }
2784
2785                xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
2786                                "ERROR no room on ep ring, try ring expansion");
2787                num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
2788                if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
2789                                        mem_flags)) {
2790                        xhci_err(xhci, "Ring expansion failed\n");
2791                        return -ENOMEM;
2792                }
2793        }
2794
2795        if (enqueue_is_link_trb(ep_ring)) {
2796                struct xhci_ring *ring = ep_ring;
2797                union xhci_trb *next;
2798
2799                next = ring->enqueue;
2800
2801                while (last_trb(xhci, ring, ring->enq_seg, next)) {
2802                        /* If we're not dealing with 0.95 hardware or isoc rings
2803                         * on AMD 0.96 host, clear the chain bit.
2804                         */
2805                        if (!xhci_link_trb_quirk(xhci) &&
2806                                        !(ring->type == TYPE_ISOC &&
2807                                         (xhci->quirks & XHCI_AMD_0x96_HOST)))
2808                                next->link.control &= cpu_to_le32(~TRB_CHAIN);
2809                        else
2810                                next->link.control |= cpu_to_le32(TRB_CHAIN);
2811
2812                        wmb();
2813                        next->link.control ^= cpu_to_le32(TRB_CYCLE);
2814
2815                        /* Toggle the cycle bit after the last ring segment. */
2816                        if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
2817                                ring->cycle_state ^= 1;
2818                        }
2819                        ring->enq_seg = ring->enq_seg->next;
2820                        ring->enqueue = ring->enq_seg->trbs;
2821                        next = ring->enqueue;
2822                }
2823        }
2824
2825        return 0;
2826}
2827
2828static int prepare_transfer(struct xhci_hcd *xhci,
2829                struct xhci_virt_device *xdev,
2830                unsigned int ep_index,
2831                unsigned int stream_id,
2832                unsigned int num_trbs,
2833                struct urb *urb,
2834                unsigned int td_index,
2835                gfp_t mem_flags)
2836{
2837        int ret;
2838        struct urb_priv *urb_priv;
2839        struct xhci_td  *td;
2840        struct xhci_ring *ep_ring;
2841        struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
2842
2843        ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
2844        if (!ep_ring) {
2845                xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
2846                                stream_id);
2847                return -EINVAL;
2848        }
2849
2850        ret = prepare_ring(xhci, ep_ring,
2851                           le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
2852                           num_trbs, mem_flags);
2853        if (ret)
2854                return ret;
2855
2856        urb_priv = urb->hcpriv;
2857        td = urb_priv->td[td_index];
2858
2859        INIT_LIST_HEAD(&td->td_list);
2860        INIT_LIST_HEAD(&td->cancelled_td_list);
2861
2862        if (td_index == 0) {
2863                ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
2864                if (unlikely(ret))
2865                        return ret;
2866        }
2867
2868        td->urb = urb;
2869        /* Add this TD to the tail of the endpoint ring's TD list */
2870        list_add_tail(&td->td_list, &ep_ring->td_list);
2871        td->start_seg = ep_ring->enq_seg;
2872        td->first_trb = ep_ring->enqueue;
2873
2874        urb_priv->td[td_index] = td;
2875
2876        return 0;
2877}
2878
2879static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
2880{
2881        int num_sgs, num_trbs, running_total, temp, i;
2882        struct scatterlist *sg;
2883
2884        sg = NULL;
2885        num_sgs = urb->num_mapped_sgs;
2886        temp = urb->transfer_buffer_length;
2887
2888        num_trbs = 0;
2889        for_each_sg(urb->sg, sg, num_sgs, i) {
2890                unsigned int len = sg_dma_len(sg);
2891
2892                /* Scatter gather list entries may cross 64KB boundaries */
2893                running_total = TRB_MAX_BUFF_SIZE -
2894                        (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
2895                running_total &= TRB_MAX_BUFF_SIZE - 1;
2896                if (running_total != 0)
2897                        num_trbs++;
2898
2899                /* How many more 64KB chunks to transfer, how many more TRBs? */
2900                while (running_total < sg_dma_len(sg) && running_total < temp) {
2901                        num_trbs++;
2902                        running_total += TRB_MAX_BUFF_SIZE;
2903                }
2904                len = min_t(int, len, temp);
2905                temp -= len;
2906                if (temp == 0)
2907                        break;
2908        }
2909        return num_trbs;
2910}
2911
2912static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
2913{
2914        if (num_trbs != 0)
2915                dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
2916                                "TRBs, %d left\n", __func__,
2917                                urb->ep->desc.bEndpointAddress, num_trbs);
2918        if (running_total != urb->transfer_buffer_length)
2919                dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
2920                                "queued %#x (%d), asked for %#x (%d)\n",
2921                                __func__,
2922                                urb->ep->desc.bEndpointAddress,
2923                                running_total, running_total,
2924                                urb->transfer_buffer_length,
2925                                urb->transfer_buffer_length);
2926}
2927
2928static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
2929                unsigned int ep_index, unsigned int stream_id, int start_cycle,
2930                struct xhci_generic_trb *start_trb)
2931{
2932        /*
2933         * Pass all the TRBs to the hardware at once and make sure this write
2934         * isn't reordered.
2935         */
2936        wmb();
2937        if (start_cycle)
2938                start_trb->field[3] |= cpu_to_le32(start_cycle);
2939        else
2940                start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
2941        xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
2942}
2943
2944/*
2945 * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
2946 * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
2947 * (comprised of sg list entries) can take several service intervals to
2948 * transmit.
2949 */
2950int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2951                struct urb *urb, int slot_id, unsigned int ep_index)
2952{
2953        struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
2954                        xhci->devs[slot_id]->out_ctx, ep_index);
2955        int xhci_interval;
2956        int ep_interval;
2957
2958        xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
2959        ep_interval = urb->interval;
2960        /* Convert to microframes */
2961        if (urb->dev->speed == USB_SPEED_LOW ||
2962                        urb->dev->speed == USB_SPEED_FULL)
2963                ep_interval *= 8;
2964        /* FIXME change this to a warning and a suggestion to use the new API
2965         * to set the polling interval (once the API is added).
2966         */
2967        if (xhci_interval != ep_interval) {
2968                dev_dbg_ratelimited(&urb->dev->dev,
2969                                "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
2970                                ep_interval, ep_interval == 1 ? "" : "s",
2971                                xhci_interval, xhci_interval == 1 ? "" : "s");
2972                urb->interval = xhci_interval;
2973                /* Convert back to frames for LS/FS devices */
2974                if (urb->dev->speed == USB_SPEED_LOW ||
2975                                urb->dev->speed == USB_SPEED_FULL)
2976                        urb->interval /= 8;
2977        }
2978        return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
2979}
2980
2981/*
2982 * The TD size is the number of bytes remaining in the TD (including this TRB),
2983 * right shifted by 10.
2984 * It must fit in bits 21:17, so it can't be bigger than 31.
2985 */
2986static u32 xhci_td_remainder(unsigned int remainder)
2987{
2988        u32 max = (1 << (21 - 17 + 1)) - 1;
2989
2990        if ((remainder >> 10) >= max)
2991                return max << 17;
2992        else
2993                return (remainder >> 10) << 17;
2994}
2995
2996/*
2997 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
2998 * packets remaining in the TD (*not* including this TRB).
2999 *
3000 * Total TD packet count = total_packet_count =
3001 *     DIV_ROUND_UP(TD size in bytes / wMaxPacketSize)
3002 *
3003 * Packets transferred up to and including this TRB = packets_transferred =
3004 *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
3005 *
3006 * TD size = total_packet_count - packets_transferred
3007 *
3008 * It must fit in bits 21:17, so it can't be bigger than 31.
3009 * The last TRB in a TD must have the TD size set to zero.
3010 */
3011static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
3012                unsigned int total_packet_count, struct urb *urb,
3013                unsigned int num_trbs_left)
3014{
3015        int packets_transferred;
3016
3017        /* One TRB with a zero-length data packet. */
3018        if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0))
3019                return 0;
3020
3021        /* All the TRB queueing functions don't count the current TRB in
3022         * running_total.
3023         */
3024        packets_transferred = (running_total + trb_buff_len) /
3025                GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
3026
3027        if ((total_packet_count - packets_transferred) > 31)
3028                return 31 << 17;
3029        return (total_packet_count - packets_transferred) << 17;
3030}
3031
3032static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3033                struct urb *urb, int slot_id, unsigned int ep_index)
3034{
3035        struct xhci_ring *ep_ring;
3036        unsigned int num_trbs;
3037        struct urb_priv *urb_priv;
3038        struct xhci_td *td;
3039        struct scatterlist *sg;
3040        int num_sgs;
3041        int trb_buff_len, this_sg_len, running_total;
3042        unsigned int total_packet_count;
3043        bool first_trb;
3044        u64 addr;
3045        bool more_trbs_coming;
3046
3047        struct xhci_generic_trb *start_trb;
3048        int start_cycle;
3049
3050        ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3051        if (!ep_ring)
3052                return -EINVAL;
3053
3054        num_trbs = count_sg_trbs_needed(xhci, urb);
3055        num_sgs = urb->num_mapped_sgs;
3056        total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
3057                        usb_endpoint_maxp(&urb->ep->desc));
3058
3059        trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
3060                        ep_index, urb->stream_id,
3061                        num_trbs, urb, 0, mem_flags);
3062        if (trb_buff_len < 0)
3063                return trb_buff_len;
3064
3065        urb_priv = urb->hcpriv;
3066        td = urb_priv->td[0];
3067
3068        /*
3069         * Don't give the first TRB to the hardware (by toggling the cycle bit)
3070         * until we've finished creating all the other TRBs.  The ring's cycle
3071         * state may change as we enqueue the other TRBs, so save it too.
3072         */
3073        start_trb = &ep_ring->enqueue->generic;
3074        start_cycle = ep_ring->cycle_state;
3075
3076        running_total = 0;
3077        /*
3078         * How much data is in the first TRB?
3079         *
3080         * There are three forces at work for TRB buffer pointers and lengths:
3081         * 1. We don't want to walk off the end of this sg-list entry buffer.
3082         * 2. The transfer length that the driver requested may be smaller than
3083         *    the amount of memory allocated for this scatter-gather list.
3084         * 3. TRBs buffers can't cross 64KB boundaries.
3085         */
3086        sg = urb->sg;
3087        addr = (u64) sg_dma_address(sg);
3088        this_sg_len = sg_dma_len(sg);
3089        trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
3090        trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
3091        if (trb_buff_len > urb->transfer_buffer_length)
3092                trb_buff_len = urb->transfer_buffer_length;
3093
3094        first_trb = true;
3095        /* Queue the first TRB, even if it's zero-length */
3096        do {
3097                u32 field = 0;
3098                u32 length_field = 0;
3099                u32 remainder = 0;
3100
3101                /* Don't change the cycle bit of the first TRB until later */
3102                if (first_trb) {
3103                        first_trb = false;
3104                        if (start_cycle == 0)
3105                                field |= 0x1;
3106                } else
3107                        field |= ep_ring->cycle_state;
3108
3109                /* Chain all the TRBs together; clear the chain bit in the last
3110                 * TRB to indicate it's the last TRB in the chain.
3111                 */
3112                if (num_trbs > 1) {
3113                        field |= TRB_CHAIN;
3114                } else {
3115                        /* FIXME - add check for ZERO_PACKET flag before this */
3116                        td->last_trb = ep_ring->enqueue;
3117                        field |= TRB_IOC;
3118                }
3119
3120                /* Only set interrupt on short packet for IN endpoints */
3121                if (usb_urb_dir_in(urb))
3122                        field |= TRB_ISP;
3123
3124                if (TRB_MAX_BUFF_SIZE -
3125                                (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
3126                        xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
3127                        xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
3128                                        (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
3129                                        (unsigned int) addr + trb_buff_len);
3130                }
3131
3132                /* Set the TRB length, TD size, and interrupter fields. */
3133                if (xhci->hci_version < 0x100) {
3134                        remainder = xhci_td_remainder(
3135                                        urb->transfer_buffer_length -
3136                                        running_total);
3137                } else {
3138                        remainder = xhci_v1_0_td_remainder(running_total,
3139                                        trb_buff_len, total_packet_count, urb,
3140                                        num_trbs - 1);
3141                }
3142                length_field = TRB_LEN(trb_buff_len) |
3143                        remainder |
3144                        TRB_INTR_TARGET(0);
3145
3146                if (num_trbs > 1)
3147                        more_trbs_coming = true;
3148                else
3149                        more_trbs_coming = false;
3150                queue_trb(xhci, ep_ring, more_trbs_coming,
3151                                lower_32_bits(addr),
3152                                upper_32_bits(addr),
3153                                length_field,
3154                                field | TRB_TYPE(TRB_NORMAL));
3155                --num_trbs;
3156                running_total += trb_buff_len;
3157
3158                /* Calculate length for next transfer --
3159                 * Are we done queueing all the TRBs for this sg entry?
3160                 */
3161                this_sg_len -= trb_buff_len;
3162                if (this_sg_len == 0) {
3163                        --num_sgs;
3164                        if (num_sgs == 0)
3165                                break;
3166                        sg = sg_next(sg);
3167                        addr = (u64) sg_dma_address(sg);
3168                        this_sg_len = sg_dma_len(sg);
3169                } else {
3170                        addr += trb_buff_len;
3171                }
3172
3173                trb_buff_len = TRB_MAX_BUFF_SIZE -
3174                        (addr & (TRB_MAX_BUFF_SIZE - 1));
3175                trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
3176                if (running_total + trb_buff_len > urb->transfer_buffer_length)
3177                        trb_buff_len =
3178                                urb->transfer_buffer_length - running_total;
3179        } while (running_total < urb->transfer_buffer_length);
3180
3181        check_trb_math(urb, num_trbs, running_total);
3182        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3183                        start_cycle, start_trb);
3184        return 0;
3185}
3186
3187/* This is very similar to what ehci-q.c qtd_fill() does */
3188int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3189                struct urb *urb, int slot_id, unsigned int ep_index)
3190{
3191        struct xhci_ring *ep_ring;
3192        struct urb_priv *urb_priv;
3193        struct xhci_td *td;
3194        int num_trbs;
3195        struct xhci_generic_trb *start_trb;
3196        bool first_trb;
3197        bool more_trbs_coming;
3198        int start_cycle;
3199        u32 field, length_field;
3200
3201        int running_total, trb_buff_len, ret;
3202        unsigned int total_packet_count;
3203        u64 addr;
3204
3205        if (urb->num_sgs)
3206                return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
3207
3208        ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3209        if (!ep_ring)
3210                return -EINVAL;
3211
3212        num_trbs = 0;
3213        /* How much data is (potentially) left before the 64KB boundary? */
3214        running_total = TRB_MAX_BUFF_SIZE -
3215                (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
3216        running_total &= TRB_MAX_BUFF_SIZE - 1;
3217
3218        /* If there's some data on this 64KB chunk, or we have to send a
3219         * zero-length transfer, we need at least one TRB
3220         */
3221        if (running_total != 0 || urb->transfer_buffer_length == 0)
3222                num_trbs++;
3223        /* How many more 64KB chunks to transfer, how many more TRBs? */
3224        while (running_total < urb->transfer_buffer_length) {
3225                num_trbs++;
3226                running_total += TRB_MAX_BUFF_SIZE;
3227        }
3228        /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
3229
3230        ret = prepare_transfer(xhci, xhci->devs[slot_id],
3231                        ep_index, urb->stream_id,
3232                        num_trbs, urb, 0, mem_flags);
3233        if (ret < 0)
3234                return ret;
3235
3236        urb_priv = urb->hcpriv;
3237        td = urb_priv->td[0];
3238
3239        /*
3240         * Don't give the first TRB to the hardware (by toggling the cycle bit)
3241         * until we've finished creating all the other TRBs.  The ring's cycle
3242         * state may change as we enqueue the other TRBs, so save it too.
3243         */
3244        start_trb = &ep_ring->enqueue->generic;
3245        start_cycle = ep_ring->cycle_state;
3246
3247        running_total = 0;
3248        total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
3249                        usb_endpoint_maxp(&urb->ep->desc));
3250        /* How much data is in the first TRB? */
3251        addr = (u64) urb->transfer_dma;
3252        trb_buff_len = TRB_MAX_BUFF_SIZE -
3253                (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
3254        if (trb_buff_len > urb->transfer_buffer_length)
3255                trb_buff_len = urb->transfer_buffer_length;
3256
3257        first_trb = true;
3258
3259        /* Queue the first TRB, even if it's zero-length */
3260        do {
3261                u32 remainder = 0;
3262                field = 0;
3263
3264                /* Don't change the cycle bit of the first TRB until later */
3265                if (first_trb) {
3266                        first_trb = false;
3267                        if (start_cycle == 0)
3268                                field |= 0x1;
3269                } else
3270                        field |= ep_ring->cycle_state;
3271
3272                /* Chain all the TRBs together; clear the chain bit in the last
3273                 * TRB to indicate it's the last TRB in the chain.
3274                 */
3275                if (num_trbs > 1) {
3276                        field |= TRB_CHAIN;
3277                } else {
3278                        /* FIXME - add check for ZERO_PACKET flag before this */
3279                        td->last_trb = ep_ring->enqueue;
3280                        field |= TRB_IOC;
3281                }
3282
3283                /* Only set interrupt on short packet for IN endpoints */
3284                if (usb_urb_dir_in(urb))
3285                        field |= TRB_ISP;
3286
3287                /* Set the TRB length, TD size, and interrupter fields. */
3288                if (xhci->hci_version < 0x100) {
3289                        remainder = xhci_td_remainder(
3290                                        urb->transfer_buffer_length -
3291                                        running_total);
3292                } else {
3293                        remainder = xhci_v1_0_td_remainder(running_total,
3294                                        trb_buff_len, total_packet_count, urb,
3295                                        num_trbs - 1);
3296                }
3297                length_field = TRB_LEN(trb_buff_len) |
3298                        remainder |
3299                        TRB_INTR_TARGET(0);
3300
3301                if (num_trbs > 1)
3302                        more_trbs_coming = true;
3303                else
3304                        more_trbs_coming = false;
3305                queue_trb(xhci, ep_ring, more_trbs_coming,
3306                                lower_32_bits(addr),
3307                                upper_32_bits(addr),
3308                                length_field,
3309                                field | TRB_TYPE(TRB_NORMAL));
3310                --num_trbs;
3311                running_total += trb_buff_len;
3312
3313                /* Calculate length for next transfer */
3314                addr += trb_buff_len;
3315                trb_buff_len = urb->transfer_buffer_length - running_total;
3316                if (trb_buff_len > TRB_MAX_BUFF_SIZE)
3317                        trb_buff_len = TRB_MAX_BUFF_SIZE;
3318        } while (running_total < urb->transfer_buffer_length);
3319
3320        check_trb_math(urb, num_trbs, running_total);
3321        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3322                        start_cycle, start_trb);
3323        return 0;
3324}
3325
3326/* Caller must have locked xhci->lock */
3327int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3328                struct urb *urb, int slot_id, unsigned int ep_index)
3329{
3330        struct xhci_ring *ep_ring;
3331        int num_trbs;
3332        int ret;
3333        struct usb_ctrlrequest *setup;
3334        struct xhci_generic_trb *start_trb;
3335        int start_cycle;
3336        u32 field, length_field;
3337        struct urb_priv *urb_priv;
3338        struct xhci_td *td;
3339
3340        ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3341        if (!ep_ring)
3342                return -EINVAL;
3343
3344        /*
3345         * Need to copy setup packet into setup TRB, so we can't use the setup
3346         * DMA address.
3347         */
3348        if (!urb->setup_packet)
3349                return -EINVAL;
3350
3351        /* 1 TRB for setup, 1 for status */
3352        num_trbs = 2;
3353        /*
3354         * Don't need to check if we need additional event data and normal TRBs,
3355         * since data in control transfers will never get bigger than 16MB
3356         * XXX: can we get a buffer that crosses 64KB boundaries?
3357         */
3358        if (urb->transfer_buffer_length > 0)
3359                num_trbs++;
3360        ret = prepare_transfer(xhci, xhci->devs[slot_id],
3361                        ep_index, urb->stream_id,
3362                        num_trbs, urb, 0, mem_flags);
3363        if (ret < 0)
3364                return ret;
3365
3366        urb_priv = urb->hcpriv;
3367        td = urb_priv->td[0];
3368
3369        /*
3370         * Don't give the first TRB to the hardware (by toggling the cycle bit)
3371         * until we've finished creating all the other TRBs.  The ring's cycle
3372         * state may change as we enqueue the other TRBs, so save it too.
3373         */
3374        start_trb = &ep_ring->enqueue->generic;
3375        start_cycle = ep_ring->cycle_state;
3376
3377        /* Queue setup TRB - see section 6.4.1.2.1 */
3378        /* FIXME better way to translate setup_packet into two u32 fields? */
3379        setup = (struct usb_ctrlrequest *) urb->setup_packet;
3380        field = 0;
3381        field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
3382        if (start_cycle == 0)
3383                field |= 0x1;
3384
3385        /* xHCI 1.0 6.4.1.2.1: Transfer Type field */
3386        if (xhci->hci_version == 0x100) {
3387                if (urb->transfer_buffer_length > 0) {
3388                        if (setup->bRequestType & USB_DIR_IN)
3389                                field |= TRB_TX_TYPE(TRB_DATA_IN);
3390                        else
3391                                field |= TRB_TX_TYPE(TRB_DATA_OUT);
3392                }
3393        }
3394
3395        queue_trb(xhci, ep_ring, true,
3396                  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
3397                  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
3398                  TRB_LEN(8) | TRB_INTR_TARGET(0),
3399                  /* Immediate data in pointer */
3400                  field);
3401
3402        /* If there's data, queue data TRBs */
3403        /* Only set interrupt on short packet for IN endpoints */
3404        if (usb_urb_dir_in(urb))
3405                field = TRB_ISP | TRB_TYPE(TRB_DATA);
3406        else
3407                field = TRB_TYPE(TRB_DATA);
3408
3409        length_field = TRB_LEN(urb->transfer_buffer_length) |
3410                xhci_td_remainder(urb->transfer_buffer_length) |
3411                TRB_INTR_TARGET(0);
3412        if (urb->transfer_buffer_length > 0) {
3413                if (setup->bRequestType & USB_DIR_IN)
3414                        field |= TRB_DIR_IN;
3415                queue_trb(xhci, ep_ring, true,
3416                                lower_32_bits(urb->transfer_dma),
3417                                upper_32_bits(urb->transfer_dma),
3418                                length_field,
3419                                field | ep_ring->cycle_state);
3420        }
3421
3422        /* Save the DMA address of the last TRB in the TD */
3423        td->last_trb = ep_ring->enqueue;
3424
3425        /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
3426        /* If the device sent data, the status stage is an OUT transfer */
3427        if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
3428                field = 0;
3429        else
3430                field = TRB_DIR_IN;
3431        queue_trb(xhci, ep_ring, false,
3432                        0,
3433                        0,
3434                        TRB_INTR_TARGET(0),
3435                        /* Event on completion */
3436                        field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
3437
3438        giveback_first_trb(xhci, slot_id, ep_index, 0,
3439                        start_cycle, start_trb);
3440        return 0;
3441}
3442
3443static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
3444                struct urb *urb, int i)
3445{
3446        int num_trbs = 0;
3447        u64 addr, td_len;
3448
3449        addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
3450        td_len = urb->iso_frame_desc[i].length;
3451
3452        num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
3453                        TRB_MAX_BUFF_SIZE);
3454        if (num_trbs == 0)
3455                num_trbs++;
3456
3457        return num_trbs;
3458}
3459
3460/*
3461 * The transfer burst count field of the isochronous TRB defines the number of
3462 * bursts that are required to move all packets in this TD.  Only SuperSpeed
3463 * devices can burst up to bMaxBurst number of packets per service interval.
3464 * This field is zero based, meaning a value of zero in the field means one
3465 * burst.  Basically, for everything but SuperSpeed devices, this field will be
3466 * zero.  Only xHCI 1.0 host controllers support this field.
3467 */
3468static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
3469                struct usb_device *udev,
3470                struct urb *urb, unsigned int total_packet_count)
3471{
3472        unsigned int max_burst;
3473
3474        if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
3475                return 0;
3476
3477        max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3478        return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
3479}
3480
3481/*
3482 * Returns the number of packets in the last "burst" of packets.  This field is
3483 * valid for all speeds of devices.  USB 2.0 devices can only do one "burst", so
3484 * the last burst packet count is equal to the total number of packets in the
3485 * TD.  SuperSpeed endpoints can have up to 3 bursts.  All but the last burst
3486 * must contain (bMaxBurst + 1) number of packets, but the last burst can
3487 * contain 1 to (bMaxBurst + 1) packets.
3488 */
3489static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
3490                struct usb_device *udev,
3491                struct urb *urb, unsigned int total_packet_count)
3492{
3493        unsigned int max_burst;
3494        unsigned int residue;
3495
3496        if (xhci->hci_version < 0x100)
3497                return 0;
3498
3499        switch (udev->speed) {
3500        case USB_SPEED_SUPER:
3501                /* bMaxBurst is zero based: 0 means 1 packet per burst */
3502                max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3503                residue = total_packet_count % (max_burst + 1);
3504                /* If residue is zero, the last burst contains (max_burst + 1)
3505                 * number of packets, but the TLBPC field is zero-based.
3506                 */
3507                if (residue == 0)
3508                        return max_burst;
3509                return residue - 1;
3510        default:
3511                if (total_packet_count == 0)
3512                        return 0;
3513                return total_packet_count - 1;
3514        }
3515}
3516
3517/* This is for isoc transfer */
3518static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3519                struct urb *urb, int slot_id, unsigned int ep_index)
3520{
3521        struct xhci_ring *ep_ring;
3522        struct urb_priv *urb_priv;
3523        struct xhci_td *td;
3524        int num_tds, trbs_per_td;
3525        struct xhci_generic_trb *start_trb;
3526        bool first_trb;
3527        int start_cycle;
3528        u32 field, length_field;
3529        int running_total, trb_buff_len, td_len, td_remain_len, ret;
3530        u64 start_addr, addr;
3531        int i, j;
3532        bool more_trbs_coming;
3533
3534        ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
3535
3536        num_tds = urb->number_of_packets;
3537        if (num_tds < 1) {
3538                xhci_dbg(xhci, "Isoc URB with zero packets?\n");
3539                return -EINVAL;
3540        }
3541
3542        start_addr = (u64) urb->transfer_dma;
3543        start_trb = &ep_ring->enqueue->generic;
3544        start_cycle = ep_ring->cycle_state;
3545
3546        urb_priv = urb->hcpriv;
3547        /* Queue the first TRB, even if it's zero-length */
3548        for (i = 0; i < num_tds; i++) {
3549                unsigned int total_packet_count;
3550                unsigned int burst_count;
3551                unsigned int residue;
3552
3553                first_trb = true;
3554                running_total = 0;
3555                addr = start_addr + urb->iso_frame_desc[i].offset;
3556                td_len = urb->iso_frame_desc[i].length;
3557                td_remain_len = td_len;
3558                total_packet_count = DIV_ROUND_UP(td_len,
3559                                GET_MAX_PACKET(
3560                                        usb_endpoint_maxp(&urb->ep->desc)));
3561                /* A zero-length transfer still involves at least one packet. */
3562                if (total_packet_count == 0)
3563                        total_packet_count++;
3564                burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
3565                                total_packet_count);
3566                residue = xhci_get_last_burst_packet_count(xhci,
3567                                urb->dev, urb, total_packet_count);
3568
3569                trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
3570
3571                ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
3572                                urb->stream_id, trbs_per_td, urb, i, mem_flags);
3573                if (ret < 0) {
3574                        if (i == 0)
3575                                return ret;
3576                        goto cleanup;
3577                }
3578
3579                td = urb_priv->td[i];
3580                for (j = 0; j < trbs_per_td; j++) {
3581                        u32 remainder = 0;
3582                        field = 0;
3583
3584                        if (first_trb) {
3585                                field = TRB_TBC(burst_count) |
3586                                        TRB_TLBPC(residue);
3587                                /* Queue the isoc TRB */
3588                                field |= TRB_TYPE(TRB_ISOC);
3589                                /* Assume URB_ISO_ASAP is set */
3590                                field |= TRB_SIA;
3591                                if (i == 0) {
3592                                        if (start_cycle == 0)
3593                                                field |= 0x1;
3594                                } else
3595                                        field |= ep_ring->cycle_state;
3596                                first_trb = false;
3597                        } else {
3598                                /* Queue other normal TRBs */
3599                                field |= TRB_TYPE(TRB_NORMAL);
3600                                field |= ep_ring->cycle_state;
3601                        }
3602
3603                        /* Only set interrupt on short packet for IN EPs */
3604                        if (usb_urb_dir_in(urb))
3605                                field |= TRB_ISP;
3606
3607                        /* Chain all the TRBs together; clear the chain bit in
3608                         * the last TRB to indicate it's the last TRB in the
3609                         * chain.
3610                         */
3611                        if (j < trbs_per_td - 1) {
3612                                field |= TRB_CHAIN;
3613                                more_trbs_coming = true;
3614                        } else {
3615                                td->last_trb = ep_ring->enqueue;
3616                                field |= TRB_IOC;
3617                                if (xhci->hci_version == 0x100 &&
3618                                                !(xhci->quirks &
3619                                                        XHCI_AVOID_BEI)) {
3620                                        /* Set BEI bit except for the last td */
3621                                        if (i < num_tds - 1)
3622                                                field |= TRB_BEI;
3623                                }
3624                                more_trbs_coming = false;
3625                        }
3626
3627                        /* Calculate TRB length */
3628                        trb_buff_len = TRB_MAX_BUFF_SIZE -
3629                                (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
3630                        if (trb_buff_len > td_remain_len)
3631                                trb_buff_len = td_remain_len;
3632
3633                        /* Set the TRB length, TD size, & interrupter fields. */
3634                        if (xhci->hci_version < 0x100) {
3635                                remainder = xhci_td_remainder(
3636                                                td_len - running_total);
3637                        } else {
3638                                remainder = xhci_v1_0_td_remainder(
3639                                                running_total, trb_buff_len,
3640                                                total_packet_count, urb,
3641                                                (trbs_per_td - j - 1));
3642                        }
3643                        length_field = TRB_LEN(trb_buff_len) |
3644                                remainder |
3645                                TRB_INTR_TARGET(0);
3646
3647                        queue_trb(xhci, ep_ring, more_trbs_coming,
3648                                lower_32_bits(addr),
3649                                upper_32_bits(addr),
3650                                length_field,
3651                                field);
3652                        running_total += trb_buff_len;
3653
3654                        addr += trb_buff_len;
3655                        td_remain_len -= trb_buff_len;
3656                }
3657
3658                /* Check TD length */
3659                if (running_total != td_len) {
3660                        xhci_err(xhci, "ISOC TD length unmatch\n");
3661                        ret = -EINVAL;
3662                        goto cleanup;
3663                }
3664        }
3665
3666        if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
3667                if (xhci->quirks & XHCI_AMD_PLL_FIX)
3668                        usb_amd_quirk_pll_disable();
3669        }
3670        xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
3671
3672        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3673                        start_cycle, start_trb);
3674        return 0;
3675cleanup:
3676        /* Clean up a partially enqueued isoc transfer. */
3677
3678        for (i--; i >= 0; i--)
3679                list_del_init(&urb_priv->td[i]->td_list);
3680
3681        /* Use the first TD as a temporary variable to turn the TDs we've queued
3682         * into No-ops with a software-owned cycle bit. That way the hardware
3683         * won't accidentally start executing bogus TDs when we partially
3684         * overwrite them.  td->first_trb and td->start_seg are already set.
3685         */
3686        urb_priv->td[0]->last_trb = ep_ring->enqueue;
3687        /* Every TRB except the first & last will have its cycle bit flipped. */
3688        td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
3689
3690        /* Reset the ring enqueue back to the first TRB and its cycle bit. */
3691        ep_ring->enqueue = urb_priv->td[0]->first_trb;
3692        ep_ring->enq_seg = urb_priv->td[0]->start_seg;
3693        ep_ring->cycle_state = start_cycle;
3694        ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
3695        usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
3696        return ret;
3697}
3698
3699/*
3700 * Check transfer ring to guarantee there is enough room for the urb.
3701 * Update ISO URB start_frame and interval.
3702 * Update interval as xhci_queue_intr_tx does. Just use xhci frame_index to
3703 * update the urb->start_frame by now.
3704 * Always assume URB_ISO_ASAP set, and NEVER use urb->start_frame as input.
3705 */
3706int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
3707                struct urb *urb, int slot_id, unsigned int ep_index)
3708{
3709        struct xhci_virt_device *xdev;
3710        struct xhci_ring *ep_ring;
3711        struct xhci_ep_ctx *ep_ctx;
3712        int start_frame;
3713        int xhci_interval;
3714        int ep_interval;
3715        int num_tds, num_trbs, i;
3716        int ret;
3717
3718        xdev = xhci->devs[slot_id];
3719        ep_ring = xdev->eps[ep_index].ring;
3720        ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3721
3722        num_trbs = 0;
3723        num_tds = urb->number_of_packets;
3724        for (i = 0; i < num_tds; i++)
3725                num_trbs += count_isoc_trbs_needed(xhci, urb, i);
3726
3727        /* Check the ring to guarantee there is enough room for the whole urb.
3728         * Do not insert any td of the urb to the ring if the check failed.
3729         */
3730        ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
3731                           num_trbs, mem_flags);
3732        if (ret)
3733                return ret;
3734
3735        start_frame = readl(&xhci->run_regs->microframe_index);
3736        start_frame &= 0x3fff;
3737
3738        urb->start_frame = start_frame;
3739        if (urb->dev->speed == USB_SPEED_LOW ||
3740                        urb->dev->speed == USB_SPEED_FULL)
3741                urb->start_frame >>= 3;
3742
3743        xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
3744        ep_interval = urb->interval;
3745        /* Convert to microframes */
3746        if (urb->dev->speed == USB_SPEED_LOW ||
3747                        urb->dev->speed == USB_SPEED_FULL)
3748                ep_interval *= 8;
3749        /* FIXME change this to a warning and a suggestion to use the new API
3750         * to set the polling interval (once the API is added).
3751         */
3752        if (xhci_interval != ep_interval) {
3753                dev_dbg_ratelimited(&urb->dev->dev,
3754                                "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
3755                                ep_interval, ep_interval == 1 ? "" : "s",
3756                                xhci_interval, xhci_interval == 1 ? "" : "s");
3757                urb->interval = xhci_interval;
3758                /* Convert back to frames for LS/FS devices */
3759                if (urb->dev->speed == USB_SPEED_LOW ||
3760                                urb->dev->speed == USB_SPEED_FULL)
3761                        urb->interval /= 8;
3762        }
3763        ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
3764
3765        return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
3766}
3767
3768/****           Command Ring Operations         ****/
3769
3770/* Generic function for queueing a command TRB on the command ring.
3771 * Check to make sure there's room on the command ring for one command TRB.
3772 * Also check that there's room reserved for commands that must not fail.
3773 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
3774 * then only check for the number of reserved spots.
3775 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
3776 * because the command event handler may want to resubmit a failed command.
3777 */
3778static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
3779                         u32 field1, u32 field2,
3780                         u32 field3, u32 field4, bool command_must_succeed)
3781{
3782        int reserved_trbs = xhci->cmd_ring_reserved_trbs;
3783        int ret;
3784        if (xhci->xhc_state & XHCI_STATE_DYING)
3785                return -ESHUTDOWN;
3786
3787        if (!command_must_succeed)
3788                reserved_trbs++;
3789
3790        ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
3791                        reserved_trbs, GFP_ATOMIC);
3792        if (ret < 0) {
3793                xhci_err(xhci, "ERR: No room for command on command ring\n");
3794                if (command_must_succeed)
3795                        xhci_err(xhci, "ERR: Reserved TRB counting for "
3796                                        "unfailable commands failed.\n");
3797                return ret;
3798        }
3799
3800        cmd->command_trb = xhci->cmd_ring->enqueue;
3801        list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
3802
3803        /* if there are no other commands queued we start the timeout timer */
3804        if (xhci->cmd_list.next == &cmd->cmd_list &&
3805            !timer_pending(&xhci->cmd_timer)) {
3806                xhci->current_cmd = cmd;
3807                mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
3808        }
3809
3810        queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
3811                        field4 | xhci->cmd_ring->cycle_state);
3812        return 0;
3813}
3814
3815/* Queue a slot enable or disable request on the command ring */
3816int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
3817                u32 trb_type, u32 slot_id)
3818{
3819        return queue_command(xhci, cmd, 0, 0, 0,
3820                        TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
3821}
3822
3823/* Queue an address device command TRB */
3824int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
3825                dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
3826{
3827        return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
3828                        upper_32_bits(in_ctx_ptr), 0,
3829                        TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
3830                        | (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
3831}
3832
3833int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
3834                u32 field1, u32 field2, u32 field3, u32 field4)
3835{
3836        return queue_command(xhci, cmd, field1, field2, field3, field4, false);
3837}
3838
3839/* Queue a reset device command TRB */
3840int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
3841                u32 slot_id)
3842{
3843        return queue_command(xhci, cmd, 0, 0, 0,
3844                        TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
3845                        false);
3846}
3847
3848/* Queue a configure endpoint command TRB */
3849int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
3850                struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
3851                u32 slot_id, bool command_must_succeed)
3852{
3853        return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
3854                        upper_32_bits(in_ctx_ptr), 0,
3855                        TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
3856                        command_must_succeed);
3857}
3858
3859/* Queue an evaluate context command TRB */
3860int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
3861                dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
3862{
3863        return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
3864                        upper_32_bits(in_ctx_ptr), 0,
3865                        TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
3866                        command_must_succeed);
3867}
3868
3869/*
3870 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
3871 * activity on an endpoint that is about to be suspended.
3872 */
3873int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
3874                             int slot_id, unsigned int ep_index, int suspend)
3875{
3876        u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3877        u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3878        u32 type = TRB_TYPE(TRB_STOP_RING);
3879        u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
3880
3881        return queue_command(xhci, cmd, 0, 0, 0,
3882                        trb_slot_id | trb_ep_index | type | trb_suspend, false);
3883}
3884
3885/* Set Transfer Ring Dequeue Pointer command */
3886void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
3887                unsigned int slot_id, unsigned int ep_index,
3888                unsigned int stream_id,
3889                struct xhci_dequeue_state *deq_state)
3890{
3891        dma_addr_t addr;
3892        u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3893        u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3894        u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
3895        u32 trb_sct = 0;
3896        u32 type = TRB_TYPE(TRB_SET_DEQ);
3897        struct xhci_virt_ep *ep;
3898        struct xhci_command *cmd;
3899        int ret;
3900
3901        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
3902                "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
3903                deq_state->new_deq_seg,
3904                (unsigned long long)deq_state->new_deq_seg->dma,
3905                deq_state->new_deq_ptr,
3906                (unsigned long long)xhci_trb_virt_to_dma(
3907                        deq_state->new_deq_seg, deq_state->new_deq_ptr),
3908                deq_state->new_cycle_state);
3909
3910        addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
3911                                    deq_state->new_deq_ptr);
3912        if (addr == 0) {
3913                xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
3914                xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
3915                          deq_state->new_deq_seg, deq_state->new_deq_ptr);
3916                return;
3917        }
3918        ep = &xhci->devs[slot_id]->eps[ep_index];
3919        if ((ep->ep_state & SET_DEQ_PENDING)) {
3920                xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
3921                xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
3922                return;
3923        }
3924
3925        /* This function gets called from contexts where it cannot sleep */
3926        cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
3927        if (!cmd) {
3928                xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr: ENOMEM\n");
3929                return;
3930        }
3931
3932        ep->queued_deq_seg = deq_state->new_deq_seg;
3933        ep->queued_deq_ptr = deq_state->new_deq_ptr;
3934        if (stream_id)
3935                trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
3936        ret = queue_command(xhci, cmd,
3937                lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
3938                upper_32_bits(addr), trb_stream_id,
3939                trb_slot_id | trb_ep_index | type, false);
3940        if (ret < 0) {
3941                xhci_free_command(xhci, cmd);
3942                return;
3943        }
3944
3945        /* Stop the TD queueing code from ringing the doorbell until
3946         * this command completes.  The HC won't set the dequeue pointer
3947         * if the ring is running, and ringing the doorbell starts the
3948         * ring running.
3949         */
3950        ep->ep_state |= SET_DEQ_PENDING;
3951}
3952
3953int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
3954                        int slot_id, unsigned int ep_index)
3955{
3956        u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3957        u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3958        u32 type = TRB_TYPE(TRB_RESET_EP);
3959
3960        return queue_command(xhci, cmd, 0, 0, 0,
3961                        trb_slot_id | trb_ep_index | type, false);
3962}
3963