linux/drivers/usb/host/xhci-ring.c
   1/*
   2 * xHCI host controller driver
   3 *
   4 * Copyright (C) 2008 Intel Corp.
   5 *
   6 * Author: Sarah Sharp
   7 * Some code borrowed from the Linux EHCI driver.
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License version 2 as
  11 * published by the Free Software Foundation.
  12 *
  13 * This program is distributed in the hope that it will be useful, but
  14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  15 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  16 * for more details.
  17 *
  18 * You should have received a copy of the GNU General Public License
  19 * along with this program; if not, write to the Free Software Foundation,
  20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  21 */
  22
  23/*
  24 * Ring initialization rules:
  25 * 1. Each segment is initialized to zero, except for link TRBs.
  26 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
  27 *    Consumer Cycle State (CCS), depending on ring function.
  28 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
  29 *
  30 * Ring behavior rules:
  31 * 1. A ring is empty if enqueue == dequeue.  This means there will always be at
  32 *    least one free TRB in the ring.  This is useful if you want to turn that
  33 *    into a link TRB and expand the ring.
  34 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
  35 *    link TRB, then load the pointer with the address in the link TRB.  If the
  36 *    link TRB had its toggle bit set, you may need to update the ring cycle
  37 *    state (see cycle bit rules).  You may have to do this multiple times
  38 *    until you reach a non-link TRB.
  39 * 3. A ring is full if enqueue++ (for the definition of increment above)
  40 *    equals the dequeue pointer.
  41 *
  42 * Cycle bit rules:
  43 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
  44 *    in a link TRB, it must toggle the ring cycle state.
  45 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
  46 *    in a link TRB, it must toggle the ring cycle state.
  47 *
  48 * Producer rules:
  49 * 1. Check if ring is full before you enqueue.
  50 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
  51 *    Update enqueue pointer between each write (which may update the ring
  52 *    cycle state).
  53 * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
  54 *    and endpoint rings.  If HC is the producer for the event ring,
   55 *    it generates an interrupt according to interrupt modulation rules.
  56 *
  57 * Consumer rules:
  58 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
  59 *    the TRB is owned by the consumer.
  60 * 2. Update dequeue pointer (which may update the ring cycle state) and
  61 *    continue processing TRBs until you reach a TRB which is not owned by you.
  62 * 3. Notify the producer.  SW is the consumer for the event ring, and it
  63 *   updates event ring dequeue pointer.  HC is the consumer for the command and
  64 *   endpoint rings; it generates events on the event ring for these.
  65 */
  66
  67#include <linux/scatterlist.h>
  68#include <linux/slab.h>
  69#include "xhci.h"
  70#include "xhci-trace.h"
  71
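/*
 * Illustrative sketch only (not used by the driver): consumer rule 1 from the
 * comment above, expressed in code.  The consumer owns the TRB at its dequeue
 * pointer when the TRB's cycle bit matches the ring cycle state.  The helper
 * name is hypothetical.
 */
static inline bool example_trb_owned_by_consumer(struct xhci_ring *ring)
{
        u32 control = le32_to_cpu(ring->dequeue->generic.field[3]);

        return (control & TRB_CYCLE) == ring->cycle_state;
}
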
  72static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
  73                struct xhci_virt_device *virt_dev,
  74                struct xhci_event_cmd *event);
  75
  76/*
  77 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
  78 * address of the TRB.
  79 */
  80dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
  81                union xhci_trb *trb)
  82{
  83        unsigned long segment_offset;
  84
  85        if (!seg || !trb || trb < seg->trbs)
  86                return 0;
  87        /* offset in TRBs */
  88        segment_offset = trb - seg->trbs;
   89        if (segment_offset >= TRBS_PER_SEGMENT)
  90                return 0;
  91        return seg->dma + (segment_offset * sizeof(*trb));
  92}
  93
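/*
 * Typical use, as in inc_deq() and inc_enq() below: translate the ring's
 * current software dequeue pointer into the DMA address the controller works
 * with, e.g.
 *
 *	addr = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
 */
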
  94/* Does this link TRB point to the first segment in a ring,
  95 * or was the previous TRB the last TRB on the last segment in the ERST?
  96 */
  97static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
  98                struct xhci_segment *seg, union xhci_trb *trb)
  99{
 100        if (ring == xhci->event_ring)
 101                return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
 102                        (seg->next == xhci->event_ring->first_seg);
 103        else
 104                return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
 105}
 106
 107/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 108 * segment?  I.e. would the updated event TRB pointer step off the end of the
 109 * event seg?
 110 */
 111static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 112                struct xhci_segment *seg, union xhci_trb *trb)
 113{
 114        if (ring == xhci->event_ring)
 115                return trb == &seg->trbs[TRBS_PER_SEGMENT];
 116        else
 117                return TRB_TYPE_LINK_LE32(trb->link.control);
 118}
 119
 120static int enqueue_is_link_trb(struct xhci_ring *ring)
 121{
 122        struct xhci_link_trb *link = &ring->enqueue->link;
 123        return TRB_TYPE_LINK_LE32(link->control);
 124}
 125
 126union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring)
 127{
 128        /* Enqueue pointer can be left pointing to the link TRB,
 129         * we must handle that
 130         */
 131        if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control))
 132                return ring->enq_seg->next->trbs;
 133        return ring->enqueue;
 134}
 135
 136/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 137 * TRB is in a new segment.  This does not skip over link TRBs, and it does not
  138 * affect the ring dequeue or enqueue pointers.
 139 */
 140static void next_trb(struct xhci_hcd *xhci,
 141                struct xhci_ring *ring,
 142                struct xhci_segment **seg,
 143                union xhci_trb **trb)
 144{
 145        if (last_trb(xhci, ring, *seg, *trb)) {
 146                *seg = (*seg)->next;
 147                *trb = ((*seg)->trbs);
 148        } else {
 149                (*trb)++;
 150        }
 151}
 152
 153/*
 154 * See Cycle bit rules. SW is the consumer for the event ring only.
 155 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 156 */
 157static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
 158{
 159        unsigned long long addr;
 160
 161        ring->deq_updates++;
 162
 163        /*
 164         * If this is not event ring, and the dequeue pointer
 165         * is not on a link TRB, there is one more usable TRB
 166         */
 167        if (ring->type != TYPE_EVENT &&
 168                        !last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
 169                ring->num_trbs_free++;
 170
 171        do {
 172                /*
 173                 * Update the dequeue pointer further if that was a link TRB or
 174                 * we're at the end of an event ring segment (which doesn't have
  175                 * link TRBs)
 176                 */
 177                if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
 178                        if (ring->type == TYPE_EVENT &&
 179                                        last_trb_on_last_seg(xhci, ring,
 180                                                ring->deq_seg, ring->dequeue)) {
 181                                ring->cycle_state ^= 1;
 182                        }
 183                        ring->deq_seg = ring->deq_seg->next;
 184                        ring->dequeue = ring->deq_seg->trbs;
 185                } else {
 186                        ring->dequeue++;
 187                }
 188        } while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
 189
 190        addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
 191}
 192
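/*
 * Illustrative sketch only (not part of the driver): the event ring consumer
 * loop implied by the rules at the top of this file.  Software keeps
 * consuming event TRBs while their cycle bit matches the ring cycle state,
 * advancing its dequeue pointer with inc_deq() as it goes.  Real event
 * handling lives in the interrupt path of this file; the helper name is
 * hypothetical.
 */
static inline void example_drain_event_ring(struct xhci_hcd *xhci)
{
        struct xhci_ring *ring = xhci->event_ring;

        while ((le32_to_cpu(ring->dequeue->generic.field[3]) & TRB_CYCLE) ==
                        ring->cycle_state) {
                /* ... process the event TRB at ring->dequeue here ... */
                inc_deq(xhci, ring);
        }
}
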
 193/*
 194 * See Cycle bit rules. SW is the consumer for the event ring only.
 195 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 196 *
 197 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 198 * chain bit is set), then set the chain bit in all the following link TRBs.
 199 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 200 * have their chain bit cleared (so that each Link TRB is a separate TD).
 201 *
 202 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 203 * set, but other sections talk about dealing with the chain bit set.  This was
 204 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 205 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 206 *
 207 * @more_trbs_coming:   Will you enqueue more TRBs before calling
 208 *                      prepare_transfer()?
 209 */
 210static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 211                        bool more_trbs_coming)
 212{
 213        u32 chain;
 214        union xhci_trb *next;
 215        unsigned long long addr;
 216
 217        chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
 218        /* If this is not event ring, there is one less usable TRB */
 219        if (ring->type != TYPE_EVENT &&
 220                        !last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
 221                ring->num_trbs_free--;
 222        next = ++(ring->enqueue);
 223
 224        ring->enq_updates++;
  225        /* Update the enqueue pointer further if that was a link TRB or we're at
  226         * the end of an event ring segment (which doesn't have link TRBs)
 227         */
 228        while (last_trb(xhci, ring, ring->enq_seg, next)) {
 229                if (ring->type != TYPE_EVENT) {
 230                        /*
 231                         * If the caller doesn't plan on enqueueing more
 232                         * TDs before ringing the doorbell, then we
 233                         * don't want to give the link TRB to the
 234                         * hardware just yet.  We'll give the link TRB
 235                         * back in prepare_ring() just before we enqueue
 236                         * the TD at the top of the ring.
 237                         */
 238                        if (!chain && !more_trbs_coming)
 239                                break;
 240
 241                        /* If we're not dealing with 0.95 hardware or
 242                         * isoc rings on AMD 0.96 host,
 243                         * carry over the chain bit of the previous TRB
 244                         * (which may mean the chain bit is cleared).
 245                         */
 246                        if (!(ring->type == TYPE_ISOC &&
 247                                        (xhci->quirks & XHCI_AMD_0x96_HOST))
 248                                                && !xhci_link_trb_quirk(xhci)) {
 249                                next->link.control &=
 250                                        cpu_to_le32(~TRB_CHAIN);
 251                                next->link.control |=
 252                                        cpu_to_le32(chain);
 253                        }
 254                        /* Give this link TRB to the hardware */
 255                        wmb();
 256                        next->link.control ^= cpu_to_le32(TRB_CYCLE);
 257
 258                        /* Toggle the cycle bit after the last ring segment. */
 259                        if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
 260                                ring->cycle_state = (ring->cycle_state ? 0 : 1);
 261                        }
 262                }
 263                ring->enq_seg = ring->enq_seg->next;
 264                ring->enqueue = ring->enq_seg->trbs;
 265                next = ring->enqueue;
 266        }
 267        addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
 268}
 269
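/*
 * Illustrative sketch only (not part of the driver): producer rule 2 from the
 * top of this file.  The caller fills in the TRB at the enqueue pointer,
 * stamps it with the current ring cycle state, and advances the enqueue
 * pointer with inc_enq().  The helper name and the 'field' parameters are
 * hypothetical; room_on_ring() is assumed to have been checked first.
 */
static inline void example_queue_one_trb(struct xhci_hcd *xhci,
                struct xhci_ring *ring, bool more_trbs_coming,
                u32 field1, u32 field2, u32 field3, u32 field4)
{
        struct xhci_generic_trb *trb = &ring->enqueue->generic;

        trb->field[0] = cpu_to_le32(field1);
        trb->field[1] = cpu_to_le32(field2);
        trb->field[2] = cpu_to_le32(field3);
        /* The cycle bit hands ownership of this TRB to the consumer */
        trb->field[3] = cpu_to_le32(field4 | ring->cycle_state);
        inc_enq(xhci, ring, more_trbs_coming);
}
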
 270/*
 271 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 272 * enqueue pointer will not advance into dequeue segment. See rules above.
 273 */
 274static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
 275                unsigned int num_trbs)
 276{
 277        int num_trbs_in_deq_seg;
 278
 279        if (ring->num_trbs_free < num_trbs)
 280                return 0;
 281
 282        if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
 283                num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
 284                if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
 285                        return 0;
 286        }
 287
 288        return 1;
 289}
 290
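/*
 * Worked example of the check above: if the dequeue pointer sits 10 TRBs into
 * its segment and a caller asks for room for 50 TRBs, room_on_ring() requires
 * num_trbs_free >= 50 + 10 = 60, so the enqueue pointer cannot advance into
 * the dequeue segment.
 */
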
 291/* Ring the host controller doorbell after placing a command on the ring */
 292void xhci_ring_cmd_db(struct xhci_hcd *xhci)
 293{
 294        if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
 295                return;
 296
 297        xhci_dbg(xhci, "// Ding dong!\n");
 298        xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
 299        /* Flush PCI posted writes */
 300        xhci_readl(xhci, &xhci->dba->doorbell[0]);
 301}
 302
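/*
 * Typical command submission pattern, as in xhci_handle_cmd_reset_ep() later
 * in this file: queue a command TRB on the command ring, then ring the host
 * doorbell so the xHC starts processing it, e.g.
 *
 *	xhci_queue_configure_endpoint(xhci, xhci->devs[slot_id]->in_ctx->dma,
 *			slot_id, false);
 *	xhci_ring_cmd_db(xhci);
 */
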
 303static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
 304{
 305        u64 temp_64;
 306        int ret;
 307
 308        xhci_dbg(xhci, "Abort command ring\n");
 309
 310        if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) {
  311                xhci_dbg(xhci, "The command ring isn't running. "
  312                                "Has the command ring been stopped?\n");
 313                return 0;
 314        }
 315
 316        temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
 317        if (!(temp_64 & CMD_RING_RUNNING)) {
 318                xhci_dbg(xhci, "Command ring had been stopped\n");
 319                return 0;
 320        }
 321        xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
 322        xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
 323                        &xhci->op_regs->cmd_ring);
 324
 325        /* Section 4.6.1.2 of xHCI 1.0 spec says software should
  326         * time the completion of all xHCI commands, including
 327         * the Command Abort operation. If software doesn't see
 328         * CRR negated in a timely manner (e.g. longer than 5
  329         * seconds), then it should assume that there are
 330         * larger problems with the xHC and assert HCRST.
 331         */
 332        ret = xhci_handshake(xhci, &xhci->op_regs->cmd_ring,
 333                        CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
 334        if (ret < 0) {
  335                xhci_err(xhci, "Stopping the command ring failed, "
 336                                "maybe the host is dead\n");
 337                xhci->xhc_state |= XHCI_STATE_DYING;
 338                xhci_quiesce(xhci);
 339                xhci_halt(xhci);
 340                return -ESHUTDOWN;
 341        }
 342
 343        return 0;
 344}
 345
 346static int xhci_queue_cd(struct xhci_hcd *xhci,
 347                struct xhci_command *command,
 348                union xhci_trb *cmd_trb)
 349{
 350        struct xhci_cd *cd;
 351        cd = kzalloc(sizeof(struct xhci_cd), GFP_ATOMIC);
 352        if (!cd)
 353                return -ENOMEM;
 354        INIT_LIST_HEAD(&cd->cancel_cmd_list);
 355
 356        cd->command = command;
 357        cd->cmd_trb = cmd_trb;
 358        list_add_tail(&cd->cancel_cmd_list, &xhci->cancel_cmd_list);
 359
 360        return 0;
 361}
 362
 363/*
  364 * Cancel a command that has been issued.
  365 *
  366 * Some commands may hang while waiting for acknowledgement from the
  367 * USB device. That is outside of the xHC's control and leaves the
  368 * command ring blocked. When that happens, software should intervene
  369 * to recover the command ring.
  370 * See xHCI spec sections 4.6.1.1 and 4.6.1.2.
 371 */
 372int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
 373                union xhci_trb *cmd_trb)
 374{
 375        int retval = 0;
 376        unsigned long flags;
 377
 378        spin_lock_irqsave(&xhci->lock, flags);
 379
 380        if (xhci->xhc_state & XHCI_STATE_DYING) {
 381                xhci_warn(xhci, "Abort the command ring,"
 382                                " but the xHCI is dead.\n");
 383                retval = -ESHUTDOWN;
 384                goto fail;
 385        }
 386
  387        /* queue the command descriptor to cancel_cmd_list */
 388        retval = xhci_queue_cd(xhci, command, cmd_trb);
 389        if (retval) {
 390                xhci_warn(xhci, "Queuing command descriptor failed.\n");
 391                goto fail;
 392        }
 393
 394        /* abort command ring */
 395        retval = xhci_abort_cmd_ring(xhci);
 396        if (retval) {
 397                xhci_err(xhci, "Abort command ring failed\n");
 398                if (unlikely(retval == -ESHUTDOWN)) {
 399                        spin_unlock_irqrestore(&xhci->lock, flags);
 400                        usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
 401                        xhci_dbg(xhci, "xHCI host controller is dead.\n");
 402                        return retval;
 403                }
 404        }
 405
 406fail:
 407        spin_unlock_irqrestore(&xhci->lock, flags);
 408        return retval;
 409}
 410
 411void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
 412                unsigned int slot_id,
 413                unsigned int ep_index,
 414                unsigned int stream_id)
 415{
 416        __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
 417        struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
 418        unsigned int ep_state = ep->ep_state;
 419
 420        /* Don't ring the doorbell for this endpoint if there are pending
 421         * cancellations because we don't want to interrupt processing.
 422         * We don't want to restart any stream rings if there's a set dequeue
 423         * pointer command pending because the device can choose to start any
 424         * stream once the endpoint is on the HW schedule.
 425         * FIXME - check all the stream rings for pending cancellations.
 426         */
 427        if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
 428            (ep_state & EP_HALTED))
 429                return;
 430        xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
 431        /* The CPU has better things to do at this point than wait for a
 432         * write-posting flush.  It'll get there soon enough.
 433         */
 434}
 435
 436/* Ring the doorbell for any rings with pending URBs */
 437static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
 438                unsigned int slot_id,
 439                unsigned int ep_index)
 440{
 441        unsigned int stream_id;
 442        struct xhci_virt_ep *ep;
 443
 444        ep = &xhci->devs[slot_id]->eps[ep_index];
 445
 446        /* A ring has pending URBs if its TD list is not empty */
 447        if (!(ep->ep_state & EP_HAS_STREAMS)) {
 448                if (ep->ring && !(list_empty(&ep->ring->td_list)))
 449                        xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
 450                return;
 451        }
 452
 453        for (stream_id = 1; stream_id < ep->stream_info->num_streams;
 454                        stream_id++) {
 455                struct xhci_stream_info *stream_info = ep->stream_info;
 456                if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
 457                        xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
 458                                                stream_id);
 459        }
 460}
 461
 462/*
 463 * Find the segment that trb is in.  Start searching in start_seg.
 464 * If we must move past a segment that has a link TRB with a toggle cycle state
 465 * bit set, then we will toggle the value pointed at by cycle_state.
 466 */
 467static struct xhci_segment *find_trb_seg(
 468                struct xhci_segment *start_seg,
 469                union xhci_trb  *trb, int *cycle_state)
 470{
 471        struct xhci_segment *cur_seg = start_seg;
 472        struct xhci_generic_trb *generic_trb;
 473
 474        while (cur_seg->trbs > trb ||
 475                        &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
 476                generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
 477                if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
 478                        *cycle_state ^= 0x1;
 479                cur_seg = cur_seg->next;
 480                if (cur_seg == start_seg)
 481                        /* Looped over the entire list.  Oops! */
 482                        return NULL;
 483        }
 484        return cur_seg;
 485}
 486
 487
 488static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
 489                unsigned int slot_id, unsigned int ep_index,
 490                unsigned int stream_id)
 491{
 492        struct xhci_virt_ep *ep;
 493
 494        ep = &xhci->devs[slot_id]->eps[ep_index];
 495        /* Common case: no streams */
 496        if (!(ep->ep_state & EP_HAS_STREAMS))
 497                return ep->ring;
 498
 499        if (stream_id == 0) {
 500                xhci_warn(xhci,
 501                                "WARN: Slot ID %u, ep index %u has streams, "
 502                                "but URB has no stream ID.\n",
 503                                slot_id, ep_index);
 504                return NULL;
 505        }
 506
 507        if (stream_id < ep->stream_info->num_streams)
 508                return ep->stream_info->stream_rings[stream_id];
 509
 510        xhci_warn(xhci,
 511                        "WARN: Slot ID %u, ep index %u has "
 512                        "stream IDs 1 to %u allocated, "
 513                        "but stream ID %u is requested.\n",
 514                        slot_id, ep_index,
 515                        ep->stream_info->num_streams - 1,
 516                        stream_id);
 517        return NULL;
 518}
 519
 520/* Get the right ring for the given URB.
 521 * If the endpoint supports streams, boundary check the URB's stream ID.
 522 * If the endpoint doesn't support streams, return the singular endpoint ring.
 523 */
 524static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
 525                struct urb *urb)
 526{
 527        return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
 528                xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
 529}
 530
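/*
 * Example of the mapping above (assuming standard xHCI endpoint indexing): a
 * bulk IN URB on endpoint 0x81 of a device in slot 3 with no streams resolves
 * to ep_index 2 (DCI 3 minus one), so the transfer ring is simply
 * xhci->devs[3]->eps[2].ring.
 */
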
 531/*
 532 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 533 * Record the new state of the xHC's endpoint ring dequeue segment,
 534 * dequeue pointer, and new consumer cycle state in state.
 535 * Update our internal representation of the ring's dequeue pointer.
 536 *
 537 * We do this in three jumps:
 538 *  - First we update our new ring state to be the same as when the xHC stopped.
 539 *  - Then we traverse the ring to find the segment that contains
 540 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 541 *    any link TRBs with the toggle cycle bit set.
 542 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 543 *    if we've moved it past a link TRB with the toggle cycle bit set.
 544 *
 545 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 546 * with correct __le32 accesses they should work fine.  Only users of this are
 547 * in here.
 548 */
 549void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 550                unsigned int slot_id, unsigned int ep_index,
 551                unsigned int stream_id, struct xhci_td *cur_td,
 552                struct xhci_dequeue_state *state)
 553{
 554        struct xhci_virt_device *dev = xhci->devs[slot_id];
 555        struct xhci_ring *ep_ring;
 556        struct xhci_generic_trb *trb;
 557        struct xhci_ep_ctx *ep_ctx;
 558        dma_addr_t addr;
 559
 560        ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
 561                        ep_index, stream_id);
 562        if (!ep_ring) {
 563                xhci_warn(xhci, "WARN can't find new dequeue state "
 564                                "for invalid stream ID %u.\n",
 565                                stream_id);
 566                return;
 567        }
 568        state->new_cycle_state = 0;
 569        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 570                        "Finding segment containing stopped TRB.");
 571        state->new_deq_seg = find_trb_seg(cur_td->start_seg,
 572                        dev->eps[ep_index].stopped_trb,
 573                        &state->new_cycle_state);
 574        if (!state->new_deq_seg) {
 575                WARN_ON(1);
 576                return;
 577        }
 578
 579        /* Dig out the cycle state saved by the xHC during the stop ep cmd */
 580        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 581                        "Finding endpoint context");
 582        ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
 583        state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
 584
 585        state->new_deq_ptr = cur_td->last_trb;
 586        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 587                        "Finding segment containing last TRB in TD.");
 588        state->new_deq_seg = find_trb_seg(state->new_deq_seg,
 589                        state->new_deq_ptr,
 590                        &state->new_cycle_state);
 591        if (!state->new_deq_seg) {
 592                WARN_ON(1);
 593                return;
 594        }
 595
 596        trb = &state->new_deq_ptr->generic;
 597        if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
 598            (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
 599                state->new_cycle_state ^= 0x1;
 600        next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
 601
 602        /*
 603         * If there is only one segment in a ring, find_trb_seg()'s while loop
 604         * will not run, and it will return before it has a chance to see if it
 605         * needs to toggle the cycle bit.  It can't tell if the stalled transfer
 606         * ended just before the link TRB on a one-segment ring, or if the TD
 607         * wrapped around the top of the ring, because it doesn't have the TD in
 608         * question.  Look for the one-segment case where stalled TRB's address
 609         * is greater than the new dequeue pointer address.
 610         */
 611        if (ep_ring->first_seg == ep_ring->first_seg->next &&
 612                        state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
 613                state->new_cycle_state ^= 0x1;
 614        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 615                        "Cycle state = 0x%x", state->new_cycle_state);
 616
 617        /* Don't update the ring cycle state for the producer (us). */
 618        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 619                        "New dequeue segment = %p (virtual)",
 620                        state->new_deq_seg);
 621        addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
 622        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 623                        "New dequeue pointer = 0x%llx (DMA)",
 624                        (unsigned long long) addr);
 625}
 626
 627/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 628 * (The last TRB actually points to the ring enqueue pointer, which is not part
 629 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 630 */
 631static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 632                struct xhci_td *cur_td, bool flip_cycle)
 633{
 634        struct xhci_segment *cur_seg;
 635        union xhci_trb *cur_trb;
 636
 637        for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
 638                        true;
 639                        next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
 640                if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
 641                        /* Unchain any chained Link TRBs, but
 642                         * leave the pointers intact.
 643                         */
 644                        cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
 645                        /* Flip the cycle bit (link TRBs can't be the first
 646                         * or last TRB).
 647                         */
 648                        if (flip_cycle)
 649                                cur_trb->generic.field[3] ^=
 650                                        cpu_to_le32(TRB_CYCLE);
 651                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 652                                        "Cancel (unchain) link TRB");
 653                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 654                                        "Address = %p (0x%llx dma); "
 655                                        "in seg %p (0x%llx dma)",
 656                                        cur_trb,
 657                                        (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
 658                                        cur_seg,
 659                                        (unsigned long long)cur_seg->dma);
 660                } else {
 661                        cur_trb->generic.field[0] = 0;
 662                        cur_trb->generic.field[1] = 0;
 663                        cur_trb->generic.field[2] = 0;
 664                        /* Preserve only the cycle bit of this TRB */
 665                        cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
 666                        /* Flip the cycle bit except on the first or last TRB */
 667                        if (flip_cycle && cur_trb != cur_td->first_trb &&
 668                                        cur_trb != cur_td->last_trb)
 669                                cur_trb->generic.field[3] ^=
 670                                        cpu_to_le32(TRB_CYCLE);
 671                        cur_trb->generic.field[3] |= cpu_to_le32(
 672                                TRB_TYPE(TRB_TR_NOOP));
 673                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 674                                        "TRB to noop at offset 0x%llx",
 675                                        (unsigned long long)
 676                                        xhci_trb_virt_to_dma(cur_seg, cur_trb));
 677                }
 678                if (cur_trb == cur_td->last_trb)
 679                        break;
 680        }
 681}
 682
 683static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
 684                unsigned int ep_index, unsigned int stream_id,
 685                struct xhci_segment *deq_seg,
 686                union xhci_trb *deq_ptr, u32 cycle_state);
 687
 688void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
 689                unsigned int slot_id, unsigned int ep_index,
 690                unsigned int stream_id,
 691                struct xhci_dequeue_state *deq_state)
 692{
 693        struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
 694
 695        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 696                        "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
 697                        "new deq ptr = %p (0x%llx dma), new cycle = %u",
 698                        deq_state->new_deq_seg,
 699                        (unsigned long long)deq_state->new_deq_seg->dma,
 700                        deq_state->new_deq_ptr,
 701                        (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
 702                        deq_state->new_cycle_state);
 703        queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
 704                        deq_state->new_deq_seg,
 705                        deq_state->new_deq_ptr,
 706                        (u32) deq_state->new_cycle_state);
 707        /* Stop the TD queueing code from ringing the doorbell until
 708         * this command completes.  The HC won't set the dequeue pointer
 709         * if the ring is running, and ringing the doorbell starts the
 710         * ring running.
 711         */
 712        ep->ep_state |= SET_DEQ_PENDING;
 713}
 714
 715static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
 716                struct xhci_virt_ep *ep)
 717{
 718        ep->ep_state &= ~EP_HALT_PENDING;
 719        /* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
 720         * timer is running on another CPU, we don't decrement stop_cmds_pending
 721         * (since we didn't successfully stop the watchdog timer).
 722         */
 723        if (del_timer(&ep->stop_cmd_timer))
 724                ep->stop_cmds_pending--;
 725}
 726
 727/* Must be called with xhci->lock held in interrupt context */
 728static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
 729                struct xhci_td *cur_td, int status)
 730{
 731        struct usb_hcd *hcd;
 732        struct urb      *urb;
 733        struct urb_priv *urb_priv;
 734
 735        urb = cur_td->urb;
 736        urb_priv = urb->hcpriv;
 737        urb_priv->td_cnt++;
 738        hcd = bus_to_hcd(urb->dev->bus);
 739
 740        /* Only giveback urb when this is the last td in urb */
 741        if (urb_priv->td_cnt == urb_priv->length) {
 742                if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
 743                        xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
 744                        if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
 745                                if (xhci->quirks & XHCI_AMD_PLL_FIX)
 746                                        usb_amd_quirk_pll_enable();
 747                        }
 748                }
 749                usb_hcd_unlink_urb_from_ep(hcd, urb);
 750
 751                spin_unlock(&xhci->lock);
 752                usb_hcd_giveback_urb(hcd, urb, status);
 753                xhci_urb_free_priv(xhci, urb_priv);
 754                spin_lock(&xhci->lock);
 755        }
 756}
 757
 758/*
 759 * When we get a command completion for a Stop Endpoint Command, we need to
 760 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 761 *
 762 *  1. If the HW was in the middle of processing the TD that needs to be
 763 *     cancelled, then we must move the ring's dequeue pointer past the last TRB
 764 *     in the TD with a Set Dequeue Pointer Command.
 765 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 766 *     bit cleared) so that the HW will skip over them.
 767 */
 768static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
 769                union xhci_trb *trb, struct xhci_event_cmd *event)
 770{
 771        unsigned int ep_index;
 772        struct xhci_virt_device *virt_dev;
 773        struct xhci_ring *ep_ring;
 774        struct xhci_virt_ep *ep;
 775        struct list_head *entry;
 776        struct xhci_td *cur_td = NULL;
 777        struct xhci_td *last_unlinked_td;
 778
 779        struct xhci_dequeue_state deq_state;
 780
 781        if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
 782                virt_dev = xhci->devs[slot_id];
 783                if (virt_dev)
 784                        handle_cmd_in_cmd_wait_list(xhci, virt_dev,
 785                                event);
 786                else
 787                        xhci_warn(xhci, "Stop endpoint command "
 788                                "completion for disabled slot %u\n",
 789                                slot_id);
 790                return;
 791        }
 792
 793        memset(&deq_state, 0, sizeof(deq_state));
 794        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
 795        ep = &xhci->devs[slot_id]->eps[ep_index];
 796
 797        if (list_empty(&ep->cancelled_td_list)) {
 798                xhci_stop_watchdog_timer_in_irq(xhci, ep);
 799                ep->stopped_td = NULL;
 800                ep->stopped_trb = NULL;
 801                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 802                return;
 803        }
 804
 805        /* Fix up the ep ring first, so HW stops executing cancelled TDs.
 806         * We have the xHCI lock, so nothing can modify this list until we drop
 807         * it.  We're also in the event handler, so we can't get re-interrupted
  808         * if another Stop Endpoint command completes.
 809         */
 810        list_for_each(entry, &ep->cancelled_td_list) {
 811                cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
 812                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 813                                "Removing canceled TD starting at 0x%llx (dma).",
 814                                (unsigned long long)xhci_trb_virt_to_dma(
 815                                        cur_td->start_seg, cur_td->first_trb));
 816                ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
 817                if (!ep_ring) {
 818                        /* This shouldn't happen unless a driver is mucking
 819                         * with the stream ID after submission.  This will
 820                         * leave the TD on the hardware ring, and the hardware
 821                         * will try to execute it, and may access a buffer
 822                         * that has already been freed.  In the best case, the
 823                         * hardware will execute it, and the event handler will
 824                         * ignore the completion event for that TD, since it was
 825                         * removed from the td_list for that endpoint.  In
 826                         * short, don't muck with the stream ID after
 827                         * submission.
 828                         */
 829                        xhci_warn(xhci, "WARN Cancelled URB %p "
 830                                        "has invalid stream ID %u.\n",
 831                                        cur_td->urb,
 832                                        cur_td->urb->stream_id);
 833                        goto remove_finished_td;
 834                }
 835                /*
 836                 * If we stopped on the TD we need to cancel, then we have to
 837                 * move the xHC endpoint ring dequeue pointer past this TD.
 838                 */
 839                if (cur_td == ep->stopped_td)
 840                        xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
 841                                        cur_td->urb->stream_id,
 842                                        cur_td, &deq_state);
 843                else
 844                        td_to_noop(xhci, ep_ring, cur_td, false);
 845remove_finished_td:
 846                /*
 847                 * The event handler won't see a completion for this TD anymore,
 848                 * so remove it from the endpoint ring's TD list.  Keep it in
 849                 * the cancelled TD list for URB completion later.
 850                 */
 851                list_del_init(&cur_td->td_list);
 852        }
 853        last_unlinked_td = cur_td;
 854        xhci_stop_watchdog_timer_in_irq(xhci, ep);
 855
 856        /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
 857        if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
 858                xhci_queue_new_dequeue_state(xhci,
 859                                slot_id, ep_index,
 860                                ep->stopped_td->urb->stream_id,
 861                                &deq_state);
 862                xhci_ring_cmd_db(xhci);
 863        } else {
 864                /* Otherwise ring the doorbell(s) to restart queued transfers */
 865                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 866        }
 867
 868        /* Clear stopped_td and stopped_trb if endpoint is not halted */
 869        if (!(ep->ep_state & EP_HALTED)) {
 870                ep->stopped_td = NULL;
 871                ep->stopped_trb = NULL;
 872        }
 873
 874        /*
 875         * Drop the lock and complete the URBs in the cancelled TD list.
 876         * New TDs to be cancelled might be added to the end of the list before
 877         * we can complete all the URBs for the TDs we already unlinked.
 878         * So stop when we've completed the URB for the last TD we unlinked.
 879         */
 880        do {
 881                cur_td = list_entry(ep->cancelled_td_list.next,
 882                                struct xhci_td, cancelled_td_list);
 883                list_del_init(&cur_td->cancelled_td_list);
 884
 885                /* Clean up the cancelled URB */
 886                /* Doesn't matter what we pass for status, since the core will
 887                 * just overwrite it (because the URB has been unlinked).
 888                 */
 889                xhci_giveback_urb_in_irq(xhci, cur_td, 0);
 890
 891                /* Stop processing the cancelled list if the watchdog timer is
 892                 * running.
 893                 */
 894                if (xhci->xhc_state & XHCI_STATE_DYING)
 895                        return;
 896        } while (cur_td != last_unlinked_td);
 897
 898        /* Return to the event handler with xhci->lock re-acquired */
 899}
 900
 901/* Watchdog timer function for when a stop endpoint command fails to complete.
 902 * In this case, we assume the host controller is broken or dying or dead.  The
 903 * host may still be completing some other events, so we have to be careful to
 904 * let the event ring handler and the URB dequeueing/enqueueing functions know
  905 * through xhci->xhc_state.
 906 *
 907 * The timer may also fire if the host takes a very long time to respond to the
 908 * command, and the stop endpoint command completion handler cannot delete the
 909 * timer before the timer function is called.  Another endpoint cancellation may
 910 * sneak in before the timer function can grab the lock, and that may queue
 911 * another stop endpoint command and add the timer back.  So we cannot use a
 912 * simple flag to say whether there is a pending stop endpoint command for a
 913 * particular endpoint.
 914 *
 915 * Instead we use a combination of that flag and a counter for the number of
 916 * pending stop endpoint commands.  If the timer is the tail end of the last
 917 * stop endpoint command, and the endpoint's command is still pending, we assume
 918 * the host is dying.
 919 */
 920void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 921{
 922        struct xhci_hcd *xhci;
 923        struct xhci_virt_ep *ep;
 924        struct xhci_virt_ep *temp_ep;
 925        struct xhci_ring *ring;
 926        struct xhci_td *cur_td;
 927        int ret, i, j;
 928        unsigned long flags;
 929
 930        ep = (struct xhci_virt_ep *) arg;
 931        xhci = ep->xhci;
 932
 933        spin_lock_irqsave(&xhci->lock, flags);
 934
 935        ep->stop_cmds_pending--;
 936        if (xhci->xhc_state & XHCI_STATE_DYING) {
 937                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 938                                "Stop EP timer ran, but another timer marked "
 939                                "xHCI as DYING, exiting.");
 940                spin_unlock_irqrestore(&xhci->lock, flags);
 941                return;
 942        }
 943        if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
 944                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 945                                "Stop EP timer ran, but no command pending, "
 946                                "exiting.");
 947                spin_unlock_irqrestore(&xhci->lock, flags);
 948                return;
 949        }
 950
 951        xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
 952        xhci_warn(xhci, "Assuming host is dying, halting host.\n");
 953        /* Oops, HC is dead or dying or at least not responding to the stop
 954         * endpoint command.
 955         */
 956        xhci->xhc_state |= XHCI_STATE_DYING;
 957        /* Disable interrupts from the host controller and start halting it */
 958        xhci_quiesce(xhci);
 959        spin_unlock_irqrestore(&xhci->lock, flags);
 960
 961        ret = xhci_halt(xhci);
 962
 963        spin_lock_irqsave(&xhci->lock, flags);
 964        if (ret < 0) {
 965                /* This is bad; the host is not responding to commands and it's
 966                 * not allowing itself to be halted.  At least interrupts are
 967                 * disabled. If we call usb_hc_died(), it will attempt to
 968                 * disconnect all device drivers under this host.  Those
 969                 * disconnect() methods will wait for all URBs to be unlinked,
 970                 * so we must complete them.
 971                 */
 972                xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
 973                xhci_warn(xhci, "Completing active URBs anyway.\n");
 974                /* We could turn all TDs on the rings to no-ops.  This won't
 975                 * help if the host has cached part of the ring, and is slow if
 976                 * we want to preserve the cycle bit.  Skip it and hope the host
 977                 * doesn't touch the memory.
 978                 */
 979        }
 980        for (i = 0; i < MAX_HC_SLOTS; i++) {
 981                if (!xhci->devs[i])
 982                        continue;
 983                for (j = 0; j < 31; j++) {
 984                        temp_ep = &xhci->devs[i]->eps[j];
 985                        ring = temp_ep->ring;
 986                        if (!ring)
 987                                continue;
 988                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 989                                        "Killing URBs for slot ID %u, "
 990                                        "ep index %u", i, j);
 991                        while (!list_empty(&ring->td_list)) {
 992                                cur_td = list_first_entry(&ring->td_list,
 993                                                struct xhci_td,
 994                                                td_list);
 995                                list_del_init(&cur_td->td_list);
 996                                if (!list_empty(&cur_td->cancelled_td_list))
 997                                        list_del_init(&cur_td->cancelled_td_list);
 998                                xhci_giveback_urb_in_irq(xhci, cur_td,
 999                                                -ESHUTDOWN);
1000                        }
1001                        while (!list_empty(&temp_ep->cancelled_td_list)) {
1002                                cur_td = list_first_entry(
1003                                                &temp_ep->cancelled_td_list,
1004                                                struct xhci_td,
1005                                                cancelled_td_list);
1006                                list_del_init(&cur_td->cancelled_td_list);
1007                                xhci_giveback_urb_in_irq(xhci, cur_td,
1008                                                -ESHUTDOWN);
1009                        }
1010                }
1011        }
1012        spin_unlock_irqrestore(&xhci->lock, flags);
1013        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1014                        "Calling usb_hc_died()");
1015        usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
1016        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1017                        "xHCI host controller is dead.");
1018}
1019
1020
1021static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
1022                struct xhci_virt_device *dev,
1023                struct xhci_ring *ep_ring,
1024                unsigned int ep_index)
1025{
1026        union xhci_trb *dequeue_temp;
1027        int num_trbs_free_temp;
1028        bool revert = false;
1029
1030        num_trbs_free_temp = ep_ring->num_trbs_free;
1031        dequeue_temp = ep_ring->dequeue;
1032
1033        /* If we get two back-to-back stalls, and the first stalled transfer
1034         * ends just before a link TRB, the dequeue pointer will be left on
1035         * the link TRB by the code in the while loop.  So we have to update
1036         * the dequeue pointer one segment further, or we'll jump off
1037         * the segment into la-la-land.
1038         */
1039        if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
1040                ep_ring->deq_seg = ep_ring->deq_seg->next;
1041                ep_ring->dequeue = ep_ring->deq_seg->trbs;
1042        }
1043
1044        while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
1045                /* We have more usable TRBs */
1046                ep_ring->num_trbs_free++;
1047                ep_ring->dequeue++;
1048                if (last_trb(xhci, ep_ring, ep_ring->deq_seg,
1049                                ep_ring->dequeue)) {
1050                        if (ep_ring->dequeue ==
1051                                        dev->eps[ep_index].queued_deq_ptr)
1052                                break;
1053                        ep_ring->deq_seg = ep_ring->deq_seg->next;
1054                        ep_ring->dequeue = ep_ring->deq_seg->trbs;
1055                }
1056                if (ep_ring->dequeue == dequeue_temp) {
1057                        revert = true;
1058                        break;
1059                }
1060        }
1061
1062        if (revert) {
1063                xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
1064                ep_ring->num_trbs_free = num_trbs_free_temp;
1065        }
1066}
1067
1068/*
1069 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
1070 * we need to clear the set deq pending flag in the endpoint ring state, so that
1071 * the TD queueing code can ring the doorbell again.  We also need to ring the
1072 * endpoint doorbell to restart the ring, but only if there aren't more
1073 * cancellations pending.
1074 */
1075static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
1076                union xhci_trb *trb, u32 cmd_comp_code)
1077{
1078        unsigned int ep_index;
1079        unsigned int stream_id;
1080        struct xhci_ring *ep_ring;
1081        struct xhci_virt_device *dev;
1082        struct xhci_ep_ctx *ep_ctx;
1083        struct xhci_slot_ctx *slot_ctx;
1084
1085        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1086        stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
1087        dev = xhci->devs[slot_id];
1088
1089        ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
1090        if (!ep_ring) {
1091                xhci_warn(xhci, "WARN Set TR deq ptr command for "
1092                                "freed stream ID %u\n",
1093                                stream_id);
1094                /* XXX: Harmless??? */
1095                dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
1096                return;
1097        }
1098
1099        ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
1100        slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
1101
1102        if (cmd_comp_code != COMP_SUCCESS) {
1103                unsigned int ep_state;
1104                unsigned int slot_state;
1105
1106                switch (cmd_comp_code) {
1107                case COMP_TRB_ERR:
1108                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
1109                                        "of stream ID configuration\n");
1110                        break;
1111                case COMP_CTX_STATE:
1112                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
1113                                        "to incorrect slot or ep state.\n");
1114                        ep_state = le32_to_cpu(ep_ctx->ep_info);
1115                        ep_state &= EP_STATE_MASK;
1116                        slot_state = le32_to_cpu(slot_ctx->dev_state);
1117                        slot_state = GET_SLOT_STATE(slot_state);
1118                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1119                                        "Slot state = %u, EP state = %u",
1120                                        slot_state, ep_state);
1121                        break;
1122                case COMP_EBADSLT:
1123                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
1124                                        "slot %u was not enabled.\n", slot_id);
1125                        break;
1126                default:
1127                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
1128                                        "completion code of %u.\n",
1129                                  cmd_comp_code);
1130                        break;
1131                }
1132                /* OK what do we do now?  The endpoint state is hosed, and we
1133                 * should never get to this point if the synchronization between
 1134                 * queueing and endpoint state is correct.  This might happen
1135                 * if the device gets disconnected after we've finished
1136                 * cancelling URBs, which might not be an error...
1137                 */
1138        } else {
1139                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1140                        "Successful Set TR Deq Ptr cmd, deq = @%08llx",
1141                         le64_to_cpu(ep_ctx->deq));
1142                if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
1143                                         dev->eps[ep_index].queued_deq_ptr) ==
1144                    (le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) {
1145                        /* Update the ring's dequeue segment and dequeue pointer
1146                         * to reflect the new position.
1147                         */
1148                        update_ring_for_set_deq_completion(xhci, dev,
1149                                ep_ring, ep_index);
1150                } else {
1151                        xhci_warn(xhci, "Mismatch between completed Set TR Deq "
1152                                        "Ptr command & xHCI internal state.\n");
1153                        xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
1154                                        dev->eps[ep_index].queued_deq_seg,
1155                                        dev->eps[ep_index].queued_deq_ptr);
1156                }
1157        }
1158
1159        dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
1160        dev->eps[ep_index].queued_deq_seg = NULL;
1161        dev->eps[ep_index].queued_deq_ptr = NULL;
1162        /* Restart any rings with pending URBs */
1163        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1164}
1165
1166static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
1167                union xhci_trb *trb, u32 cmd_comp_code)
1168{
1169        unsigned int ep_index;
1170
1171        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1172        /* This command will only fail if the endpoint wasn't halted,
1173         * but we don't care.
1174         */
1175        xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
1176                "Ignoring reset ep completion code of %u", cmd_comp_code);
1177
1178        /* HW with the reset endpoint quirk needs to have a configure endpoint
1179         * command complete before the endpoint can be used.  Queue that here
1180         * because the HW can't handle two commands being queued in a row.
1181         */
1182        if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
1183                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1184                                "Queueing configure endpoint command");
1185                xhci_queue_configure_endpoint(xhci,
1186                                xhci->devs[slot_id]->in_ctx->dma, slot_id,
1187                                false);
1188                xhci_ring_cmd_db(xhci);
1189        } else {
1190                /* Clear our internal halted state and restart the ring(s) */
1191                xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
1192                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1193        }
1194}
1195
1196/* Complete the command and delete it from the device's command queue.
1197 */
1198static void xhci_complete_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
1199                struct xhci_command *command, u32 status)
1200{
1201        command->status = status;
1202        list_del(&command->cmd_list);
1203        if (command->completion)
1204                complete(command->completion);
1205        else
1206                xhci_free_command(xhci, command);
1207}
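/*
 * A caller that needs to wait for a command typically allocates it with a
 * completion and sleeps on command->completion after queueing it; the helper
 * above then signals that completion.  Commands queued without a completion
 * are simply freed here once they finish.
 */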
1208
1209
1210/* Check to see if a command in the device's command queue matches this one.
1211 * Signal the completion or free the command, and return 1.  Return 0 if the
1212 * completed command isn't at the head of the command list.
1213 */
1214static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
1215                struct xhci_virt_device *virt_dev,
1216                struct xhci_event_cmd *event)
1217{
1218        struct xhci_command *command;
1219
1220        if (list_empty(&virt_dev->cmd_list))
1221                return 0;
1222
1223        command = list_entry(virt_dev->cmd_list.next,
1224                        struct xhci_command, cmd_list);
1225        if (xhci->cmd_ring->dequeue != command->command_trb)
1226                return 0;
1227
1228        xhci_complete_cmd_in_cmd_wait_list(xhci, command,
1229                        GET_COMP_CODE(le32_to_cpu(event->status)));
1230        return 1;
1231}
1232
1233/*
1234 * Find the command TRB that needs to be cancelled and modify it to a
1235 * No-op command.  If the command is in the device's command wait
1236 * list, finish and free it.
1237 *
1238 * If we can't find the command TRB, we assume it has already been
1239 * executed.
1240 */
1241static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd)
1242{
1243        struct xhci_segment *cur_seg;
1244        union xhci_trb *cmd_trb;
1245        u32 cycle_state;
1246
1247        if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
1248                return;
1249
1250        /* Find the current segment of the command ring */
1251        cur_seg = find_trb_seg(xhci->cmd_ring->first_seg,
1252                        xhci->cmd_ring->dequeue, &cycle_state);
1253
1254        if (!cur_seg) {
1255                xhci_warn(xhci, "Command ring mismatch, dequeue = %p %llx (dma)\n",
1256                                xhci->cmd_ring->dequeue,
1257                                (unsigned long long)
1258                                xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
1259                                        xhci->cmd_ring->dequeue));
1260                xhci_debug_ring(xhci, xhci->cmd_ring);
1261                xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
1262                return;
1263        }
1264
1265        /* Find the command TRB described by the command descriptor (cd) */
1266        for (cmd_trb = xhci->cmd_ring->dequeue;
1267                        cmd_trb != xhci->cmd_ring->enqueue;
1268                        next_trb(xhci, xhci->cmd_ring, &cur_seg, &cmd_trb)) {
1269                /* If the trb is link trb, continue */
1270                if (TRB_TYPE_LINK_LE32(cmd_trb->generic.field[3]))
1271                        continue;
1272
1273                if (cur_cd->cmd_trb == cmd_trb) {
1274
1275                        /* If the command is in the device's command wait
1276                         * list, finish it and free the command structure.
1277                         */
1278                        if (cur_cd->command)
1279                                xhci_complete_cmd_in_cmd_wait_list(xhci,
1280                                        cur_cd->command, COMP_CMD_STOP);
1281
1282                        /* Get the cycle state from the original command TRB */
1283                        cycle_state = le32_to_cpu(cmd_trb->generic.field[3])
1284                                & TRB_CYCLE;
1285
1286                        /* modify the command trb to NO OP command */
1287                        cmd_trb->generic.field[0] = 0;
1288                        cmd_trb->generic.field[1] = 0;
1289                        cmd_trb->generic.field[2] = 0;
1290                        cmd_trb->generic.field[3] = cpu_to_le32(
1291                                        TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
1292                        break;
1293                }
1294        }
1295}
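/*
 * Note that converting a queued command to a No-op leaves the TRB in place:
 * only the type field changes and the original cycle bit is preserved, so the
 * host controller still consumes the TRB and the ring's cycle-state sequence
 * is not disturbed.
 */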
1296
1297static void xhci_cancel_cmd_in_cd_list(struct xhci_hcd *xhci)
1298{
1299        struct xhci_cd *cur_cd, *next_cd;
1300
1301        if (list_empty(&xhci->cancel_cmd_list))
1302                return;
1303
1304        list_for_each_entry_safe(cur_cd, next_cd,
1305                        &xhci->cancel_cmd_list, cancel_cmd_list) {
1306                xhci_cmd_to_noop(xhci, cur_cd);
1307                list_del(&cur_cd->cancel_cmd_list);
1308                kfree(cur_cd);
1309        }
1310}
1311
1312/*
1313 * Traverse the cancel_cmd_list.  If the command descriptor corresponding
1314 * to cmd_trb is found, the function frees it and returns 1; otherwise it
1315 * returns 0.
1316 */
1317static int xhci_search_cmd_trb_in_cd_list(struct xhci_hcd *xhci,
1318                union xhci_trb *cmd_trb)
1319{
1320        struct xhci_cd *cur_cd, *next_cd;
1321
1322        if (list_empty(&xhci->cancel_cmd_list))
1323                return 0;
1324
1325        list_for_each_entry_safe(cur_cd, next_cd,
1326                        &xhci->cancel_cmd_list, cancel_cmd_list) {
1327                if (cur_cd->cmd_trb == cmd_trb) {
1328                        if (cur_cd->command)
1329                                xhci_complete_cmd_in_cmd_wait_list(xhci,
1330                                        cur_cd->command, COMP_CMD_STOP);
1331                        list_del(&cur_cd->cancel_cmd_list);
1332                        kfree(cur_cd);
1333                        return 1;
1334                }
1335        }
1336
1337        return 0;
1338}
1339
1340/*
1341 * If cmd_trb_comp_code is COMP_CMD_ABORT, we just check whether the TRB
1342 * pointed to by the command ring dequeue pointer is the TRB we want to
1343 * cancel or not.  If cmd_trb_comp_code is COMP_CMD_STOP, we traverse the
1344 * cancel_cmd_list and turn every command described by a command
1345 * descriptor into a No-op TRB.
1346 */
1347static int handle_stopped_cmd_ring(struct xhci_hcd *xhci,
1348                int cmd_trb_comp_code)
1349{
1350        int cur_trb_is_good = 0;
1351
1352        /* Searching the cmd trb pointed by the command ring dequeue
1353         * pointer in command descriptor list. If it is found, free it.
1354         */
1355        cur_trb_is_good = xhci_search_cmd_trb_in_cd_list(xhci,
1356                        xhci->cmd_ring->dequeue);
1357
1358        if (cmd_trb_comp_code == COMP_CMD_ABORT)
1359                xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
1360        else if (cmd_trb_comp_code == COMP_CMD_STOP) {
1361                /* traversing the cancel_cmd_list and canceling
1362                 * the command according to command descriptor
1363                 */
1364                xhci_cancel_cmd_in_cd_list(xhci);
1365
1366                xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
1367                /*
1368                 * ring command ring doorbell again to restart the
1369                 * command ring
1370                 */
1371                if (xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue)
1372                        xhci_ring_cmd_db(xhci);
1373        }
1374        return cur_trb_is_good;
1375}
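/*
 * After a Command Abort completion the command ring is marked stopped
 * (CMD_RING_STATE_STOPPED).  After a Command Stop completion the cancel list
 * is drained, the ring is marked running again, and the command doorbell is
 * rung if more commands are still queued.
 */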
1376
1377static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
1378                u32 cmd_comp_code)
1379{
1380        if (cmd_comp_code == COMP_SUCCESS)
1381                xhci->slot_id = slot_id;
1382        else
1383                xhci->slot_id = 0;
1384        complete(&xhci->addr_dev);
1385}
1386
1387static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
1388{
1389        struct xhci_virt_device *virt_dev;
1390
1391        virt_dev = xhci->devs[slot_id];
1392        if (!virt_dev)
1393                return;
1394        if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
1395                /* Delete default control endpoint resources */
1396                xhci_free_device_endpoint_resources(xhci, virt_dev, true);
1397        xhci_free_virt_device(xhci, slot_id);
1398}
1399
1400static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
1401                struct xhci_event_cmd *event, u32 cmd_comp_code)
1402{
1403        struct xhci_virt_device *virt_dev;
1404        struct xhci_input_control_ctx *ctrl_ctx;
1405        unsigned int ep_index;
1406        unsigned int ep_state;
1407        u32 add_flags, drop_flags;
1408
1409        virt_dev = xhci->devs[slot_id];
1410        if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
1411                return;
1412        /*
1413         * Configure endpoint commands can come from the USB core
1414         * configuration or alt setting changes, or because the HW
1415         * needed an extra configure endpoint command after a reset
1416         * endpoint command or streams were being configured.
1417         * If the command was for a halted endpoint, the xHCI driver
1418         * is not waiting on the configure endpoint command.
1419         */
1420        ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
1421        if (!ctrl_ctx) {
1422                xhci_warn(xhci, "Could not get input context, bad type.\n");
1423                return;
1424        }
1425
1426        add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1427        drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1428        /* Input ctx add_flags are the endpoint index plus one */
1429        ep_index = xhci_last_valid_endpoint(add_flags) - 1;
1430
1431        /* A usb_set_interface() call directly after clearing a halted
1432         * condition may race on this quirky hardware.  Not worth
1433         * worrying about, since this is prototype hardware.  Not sure
1434         * if this will work for streams, but streams support was
1435         * untested on this prototype.
1436         */
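        /* The add_flags - SLOT_FLAG == drop_flags test below appears to match
         * only the configure endpoint command that the driver queued itself
         * for the reset endpoint quirk: the same endpoint is both added and
         * dropped, with only the slot flag extra in add_flags.
         */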
1437        if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
1438                        ep_index != (unsigned int) -1 &&
1439                        add_flags - SLOT_FLAG == drop_flags) {
1440                ep_state = virt_dev->eps[ep_index].ep_state;
1441                if (!(ep_state & EP_HALTED))
1442                        goto bandwidth_change;
1443                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1444                                "Completed config ep cmd - "
1445                                "last ep index = %d, state = %d",
1446                                ep_index, ep_state);
1447                /* Clear internal halted state and restart ring(s) */
1448                virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
1449                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1450                return;
1451        }
1452bandwidth_change:
1453        xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
1454                        "Completed config ep cmd");
1455        virt_dev->cmd_status = cmd_comp_code;
1456        complete(&virt_dev->cmd_completion);
1457        return;
1458}
1459
1460static void xhci_handle_cmd_eval_ctx(struct xhci_hcd *xhci, int slot_id,
1461                struct xhci_event_cmd *event, u32 cmd_comp_code)
1462{
1463        struct xhci_virt_device *virt_dev;
1464
1465        virt_dev = xhci->devs[slot_id];
1466        if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
1467                return;
1468        virt_dev->cmd_status = cmd_comp_code;
1469        complete(&virt_dev->cmd_completion);
1470}
1471
1472static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id,
1473                u32 cmd_comp_code)
1474{
1475        xhci->devs[slot_id]->cmd_status = cmd_comp_code;
1476        complete(&xhci->addr_dev);
1477}
1478
1479static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
1480                struct xhci_event_cmd *event)
1481{
1482        struct xhci_virt_device *virt_dev;
1483
1484        xhci_dbg(xhci, "Completed reset device command.\n");
1485        virt_dev = xhci->devs[slot_id];
1486        if (virt_dev)
1487                handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
1488        else
1489                xhci_warn(xhci, "Reset device command completion "
1490                                "for disabled slot %u\n", slot_id);
1491}
1492
1493static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
1494                struct xhci_event_cmd *event)
1495{
1496        if (!(xhci->quirks & XHCI_NEC_HOST)) {
1497                xhci->error_bitmask |= 1 << 6;
1498                return;
1499        }
1500        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1501                        "NEC firmware version %2x.%02x",
1502                        NEC_FW_MAJOR(le32_to_cpu(event->status)),
1503                        NEC_FW_MINOR(le32_to_cpu(event->status)));
1504}
1505
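/*
 * Command Completion Event handler.  The event's TRB pointer is first checked
 * against the driver's own command ring dequeue pointer; an aborted or
 * stopped command ring is handled next, and then the handler dispatches on
 * the TRB type of the completed command before advancing the command ring
 * dequeue pointer.
 */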
1506static void handle_cmd_completion(struct xhci_hcd *xhci,
1507                struct xhci_event_cmd *event)
1508{
1509        int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1510        u64 cmd_dma;
1511        dma_addr_t cmd_dequeue_dma;
1512        u32 cmd_comp_code;
1513        union xhci_trb *cmd_trb;
1514        u32 cmd_type;
1515
1516        cmd_dma = le64_to_cpu(event->cmd_trb);
1517        cmd_trb = xhci->cmd_ring->dequeue;
1518        cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
1519                        cmd_trb);
1520        /* Is the command ring deq ptr out of sync with the deq seg ptr? */
1521        if (cmd_dequeue_dma == 0) {
1522                xhci->error_bitmask |= 1 << 4;
1523                return;
1524        }
1525        /* Does the DMA address match our internal dequeue pointer address? */
1526        if (cmd_dma != (u64) cmd_dequeue_dma) {
1527                xhci->error_bitmask |= 1 << 5;
1528                return;
1529        }
1530
1531        trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
1532
1533        cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
1534        if (cmd_comp_code == COMP_CMD_ABORT || cmd_comp_code == COMP_CMD_STOP) {
1535                /* If the return value is 0, the TRB pointed to by the
1536                 * command ring dequeue pointer is a good TRB: we don't
1537                 * want to cancel it, but it has been stopped by the host,
1538                 * so we should handle it normally.  Otherwise, the driver
1539                 * should invoke inc_deq() and return.
1540                 */
1541                if (handle_stopped_cmd_ring(xhci, cmd_comp_code)) {
1542                        inc_deq(xhci, xhci->cmd_ring);
1543                        return;
1544                }
1545                /* There is no command to handle if we get a stop event when the
1546                 * command ring is empty; event->cmd_trb points to the next
1547                 * unset command.
1548                 */
1549                if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
1550                        return;
1551        }
1552
1553        cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
1554        switch (cmd_type) {
1555        case TRB_ENABLE_SLOT:
1556                xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code);
1557                break;
1558        case TRB_DISABLE_SLOT:
1559                xhci_handle_cmd_disable_slot(xhci, slot_id);
1560                break;
1561        case TRB_CONFIG_EP:
1562                xhci_handle_cmd_config_ep(xhci, slot_id, event, cmd_comp_code);
1563                break;
1564        case TRB_EVAL_CONTEXT:
1565                xhci_handle_cmd_eval_ctx(xhci, slot_id, event, cmd_comp_code);
1566                break;
1567        case TRB_ADDR_DEV:
1568                xhci_handle_cmd_addr_dev(xhci, slot_id, cmd_comp_code);
1569                break;
1570        case TRB_STOP_RING:
1571                WARN_ON(slot_id != TRB_TO_SLOT_ID(
1572                                le32_to_cpu(cmd_trb->generic.field[3])));
1573                xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
1574                break;
1575        case TRB_SET_DEQ:
1576                WARN_ON(slot_id != TRB_TO_SLOT_ID(
1577                                le32_to_cpu(cmd_trb->generic.field[3])));
1578                xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
1579                break;
1580        case TRB_CMD_NOOP:
1581                break;
1582        case TRB_RESET_EP:
1583                WARN_ON(slot_id != TRB_TO_SLOT_ID(
1584                                le32_to_cpu(cmd_trb->generic.field[3])));
1585                xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
1586                break;
1587        case TRB_RESET_DEV:
1588                WARN_ON(slot_id != TRB_TO_SLOT_ID(
1589                                le32_to_cpu(cmd_trb->generic.field[3])));
1590                xhci_handle_cmd_reset_dev(xhci, slot_id, event);
1591                break;
1592        case TRB_NEC_GET_FW:
1593                xhci_handle_cmd_nec_get_fw(xhci, event);
1594                break;
1595        default:
1596                /* Skip over unknown commands on the event ring */
1597                xhci->error_bitmask |= 1 << 6;
1598                break;
1599        }
1600        inc_deq(xhci, xhci->cmd_ring);
1601}
1602
1603static void handle_vendor_event(struct xhci_hcd *xhci,
1604                union xhci_trb *event)
1605{
1606        u32 trb_type;
1607
1608        trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
1609        xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
1610        if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
1611                handle_cmd_completion(xhci, &event->event_cmd);
1612}
1613
1614/* @port_id: the one-based port ID from the hardware (indexed from array of all
1615 * port registers -- USB 3.0 and USB 2.0).
1616 *
1617 * Returns a zero-based port number, which is suitable for indexing into each of
1618 * the split roothubs' port arrays and bus state arrays.
1619 * Add one to it in order to call xhci_find_slot_id_by_port.
1620 */
1621static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
1622                struct xhci_hcd *xhci, u32 port_id)
1623{
1624        unsigned int i;
1625        unsigned int num_similar_speed_ports = 0;
1626
1627        /* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
1628         * and usb2_ports are 0-based indexes.  Count the number of similar
1629         * speed ports, up to 1 port before this port.
1630         */
1631        for (i = 0; i < (port_id - 1); i++) {
1632                u8 port_speed = xhci->port_array[i];
1633
1634                /*
1635                 * Skip ports that don't have known speeds, or have duplicate
1636                 * Extended Capabilities port speed entries.
1637                 */
1638                if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
1639                        continue;
1640
1641                /*
1642                 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
1643                 * 1.1 ports are under the USB 2.0 hub.  If the port speed
1644                 * matches the device speed, it's a similar speed port.
1645                 */
1646                if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
1647                        num_similar_speed_ports++;
1648        }
1649        return num_similar_speed_ports;
1650}
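/*
 * For example (hypothetical layout): if port_array[] holds { 0x03, 0x02,
 * 0x02, 0x03 }, hardware port 4 is the second USB 3.0 port and maps to faked
 * index 1 on the USB 3.0 roothub, while hardware port 3 is the second USB 2.0
 * port and maps to faked index 1 on the USB 2.0 roothub.
 */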
1651
1652static void handle_device_notification(struct xhci_hcd *xhci,
1653                union xhci_trb *event)
1654{
1655        u32 slot_id;
1656        struct usb_device *udev;
1657
1658        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
1659        if (!xhci->devs[slot_id]) {
1660                xhci_warn(xhci, "Device Notification event for "
1661                                "unused slot %u\n", slot_id);
1662                return;
1663        }
1664
1665        xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
1666                        slot_id);
1667        udev = xhci->devs[slot_id]->udev;
1668        if (udev && udev->parent)
1669                usb_wakeup_notification(udev->parent, udev->portnum);
1670}
1671
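/*
 * Port Status Change Event handler.  The hardware port number is translated
 * into a faked index on the correct (USB 2.0 or USB 3.0) roothub, resume
 * signalling is handled (SuperSpeed ports are moved back to U0 here, while
 * USB 2.0 ports get a 20 ms resume timer and finish in GetPortStatus), and
 * roothub polling is kicked so the hub driver picks up the change.
 */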
1672static void handle_port_status(struct xhci_hcd *xhci,
1673                union xhci_trb *event)
1674{
1675        struct usb_hcd *hcd;
1676        u32 port_id;
1677        u32 temp, temp1;
1678        int max_ports;
1679        int slot_id;
1680        unsigned int faked_port_index;
1681        u8 major_revision;
1682        struct xhci_bus_state *bus_state;
1683        __le32 __iomem **port_array;
1684        bool bogus_port_status = false;
1685
1686        /* Port status change events always have a successful completion code */
1687        if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
1688                xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
1689                xhci->error_bitmask |= 1 << 8;
1690        }
1691        port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
1692        xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
1693
1694        max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1695        if ((port_id <= 0) || (port_id > max_ports)) {
1696                xhci_warn(xhci, "Invalid port id %d\n", port_id);
1697                inc_deq(xhci, xhci->event_ring);
1698                return;
1699        }
1700
1701        /* Figure out which usb_hcd this port is attached to:
1702         * is it a USB 3.0 port or a USB 2.0/1.1 port?
1703         */
1704        major_revision = xhci->port_array[port_id - 1];
1705
1706        /* Find the right roothub. */
1707        hcd = xhci_to_hcd(xhci);
1708        if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
1709                hcd = xhci->shared_hcd;
1710
1711        if (major_revision == 0) {
1712                xhci_warn(xhci, "Event for port %u not in "
1713                                "Extended Capabilities, ignoring.\n",
1714                                port_id);
1715                bogus_port_status = true;
1716                goto cleanup;
1717        }
1718        if (major_revision == DUPLICATE_ENTRY) {
1719                xhci_warn(xhci, "Event for port %u duplicated in "
1720                                "Extended Capabilities, ignoring.\n",
1721                                port_id);
1722                bogus_port_status = true;
1723                goto cleanup;
1724        }
1725
1726        /*
1727         * Hardware port IDs reported by a Port Status Change Event include USB
1728         * 3.0 and USB 2.0 ports.  We want to check if the port has reported a
1729         * resume event, but we first need to translate the hardware port ID
1730         * into the index into the ports on the correct split roothub, and the
1731         * correct bus_state structure.
1732         */
1733        bus_state = &xhci->bus_state[hcd_index(hcd)];
1734        if (hcd->speed == HCD_USB3)
1735                port_array = xhci->usb3_ports;
1736        else
1737                port_array = xhci->usb2_ports;
1738        /* Find the faked port index within the correct roothub */
1739        faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
1740                        port_id);
1741
1742        temp = xhci_readl(xhci, port_array[faked_port_index]);
1743        if (hcd->state == HC_STATE_SUSPENDED) {
1744                xhci_dbg(xhci, "resume root hub\n");
1745                usb_hcd_resume_root_hub(hcd);
1746        }
1747
1748        if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
1749                xhci_dbg(xhci, "port resume event for port %d\n", port_id);
1750
1751                temp1 = xhci_readl(xhci, &xhci->op_regs->command);
1752                if (!(temp1 & CMD_RUN)) {
1753                        xhci_warn(xhci, "xHC is not running.\n");
1754                        goto cleanup;
1755                }
1756
1757                if (DEV_SUPERSPEED(temp)) {
1758                        xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
1759                        /* Set a flag to say the port signaled remote wakeup,
1760                         * so we can tell the difference between the end of
1761                         * device and host initiated resume.
1762                         */
1763                        bus_state->port_remote_wakeup |= 1 << faked_port_index;
1764                        xhci_test_and_clear_bit(xhci, port_array,
1765                                        faked_port_index, PORT_PLC);
1766                        xhci_set_link_state(xhci, port_array, faked_port_index,
1767                                                XDEV_U0);
1768                        /* Need to wait until the next link state change
1769                         * indicates the device is actually in U0.
1770                         */
1771                        bogus_port_status = true;
1772                        goto cleanup;
1773                } else {
1774                        xhci_dbg(xhci, "resume HS port %d\n", port_id);
1775                        bus_state->resume_done[faked_port_index] = jiffies +
1776                                msecs_to_jiffies(20);
1777                        set_bit(faked_port_index, &bus_state->resuming_ports);
1778                        mod_timer(&hcd->rh_timer,
1779                                  bus_state->resume_done[faked_port_index]);
1780                        /* Do the rest in GetPortStatus */
1781                }
1782        }
1783
1784        if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
1785                        DEV_SUPERSPEED(temp)) {
1786                xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
1787                /* We've just brought the device into U0 through either the
1788                 * Resume state after a device remote wakeup, or through the
1789                 * U3Exit state after a host-initiated resume.  If it's a device
1790                 * initiated remote wake, don't pass up the link state change,
1791                 * so the roothub behavior is consistent with external
1792                 * USB 3.0 hub behavior.
1793                 */
1794                slot_id = xhci_find_slot_id_by_port(hcd, xhci,
1795                                faked_port_index + 1);
1796                if (slot_id && xhci->devs[slot_id])
1797                        xhci_ring_device(xhci, slot_id);
1798                if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
1799                        bus_state->port_remote_wakeup &=
1800                                ~(1 << faked_port_index);
1801                        xhci_test_and_clear_bit(xhci, port_array,
1802                                        faked_port_index, PORT_PLC);
1803                        usb_wakeup_notification(hcd->self.root_hub,
1804                                        faked_port_index + 1);
1805                        bogus_port_status = true;
1806                        goto cleanup;
1807                }
1808        }
1809
1810        /*
1811         * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
1812         * RExit to a disconnect state).  If so, let the driver know it's
1813         * out of the RExit state.
1814         */
1815        if (!DEV_SUPERSPEED(temp) &&
1816                        test_and_clear_bit(faked_port_index,
1817                                &bus_state->rexit_ports)) {
1818                complete(&bus_state->rexit_done[faked_port_index]);
1819                bogus_port_status = true;
1820                goto cleanup;
1821        }
1822
1823        if (hcd->speed != HCD_USB3)
1824                xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
1825                                        PORT_PLC);
1826
1827cleanup:
1828        /* Update event ring dequeue pointer before dropping the lock */
1829        inc_deq(xhci, xhci->event_ring);
1830
1831        /* Don't make the USB core poll the roothub if we got a bad port status
1832         * change event.  Besides, at that point we can't tell which roothub
1833         * (USB 2.0 or USB 3.0) to kick.
1834         */
1835        if (bogus_port_status)
1836                return;
1837
1838        /*
1839         * xHCI port-status-change events occur when the "or" of all the
1840         * status-change bits in the portsc register changes from 0 to 1.
1841         * New status changes won't cause an event if any other change
1842         * bits are still set.  When an event occurs, switch over to
1843         * polling to avoid losing status changes.
1844         */
1845        xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1846        set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1847        spin_unlock(&xhci->lock);
1848        /* Pass this up to the core */
1849        usb_hcd_poll_rh_status(hcd);
1850        spin_lock(&xhci->lock);
1851}
1852
1853/*
1854 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
1855 * at end_trb, which may be in another segment.  If the suspect DMA address is a
1856 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
1857 * returns NULL.
1858 */
1859struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
1860                union xhci_trb  *start_trb,
1861                union xhci_trb  *end_trb,
1862                dma_addr_t      suspect_dma)
1863{
1864        dma_addr_t start_dma;
1865        dma_addr_t end_seg_dma;
1866        dma_addr_t end_trb_dma;
1867        struct xhci_segment *cur_seg;
1868
1869        start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
1870        cur_seg = start_seg;
1871
1872        do {
1873                if (start_dma == 0)
1874                        return NULL;
1875                /* We may get an event for a Link TRB in the middle of a TD */
1876                end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
1877                                &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
1878                /* If the end TRB isn't in this segment, this is set to 0 */
1879                end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
1880
1881                if (end_trb_dma > 0) {
1882                        /* The end TRB is in this segment, so suspect should be here */
1883                        if (start_dma <= end_trb_dma) {
1884                                if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
1885                                        return cur_seg;
1886                        } else {
1887                                /* Case for one segment with
1888                                 * a TD wrapped around to the top
1889                                 */
1890                                if ((suspect_dma >= start_dma &&
1891                                                        suspect_dma <= end_seg_dma) ||
1892                                                (suspect_dma >= cur_seg->dma &&
1893                                                 suspect_dma <= end_trb_dma))
1894                                        return cur_seg;
1895                        }
1896                        return NULL;
1897                } else {
1898                        /* Might still be somewhere in this segment */
1899                        if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
1900                                return cur_seg;
1901                }
1902                cur_seg = cur_seg->next;
1903                start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
1904        } while (cur_seg != start_seg);
1905
1906        return NULL;
1907}
1908
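/*
 * Clean up a halted endpoint: queue a Reset Endpoint command, move the ring's
 * dequeue pointer past the failed TD via xhci_cleanup_stalled_ring(), and
 * ring the command doorbell.  The stopped_* fields are only needed while the
 * stalled ring is being cleaned up, so they are cleared again before the
 * doorbell is rung.
 */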
1909static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
1910                unsigned int slot_id, unsigned int ep_index,
1911                unsigned int stream_id,
1912                struct xhci_td *td, union xhci_trb *event_trb)
1913{
1914        struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
1915        ep->ep_state |= EP_HALTED;
1916        ep->stopped_td = td;
1917        ep->stopped_trb = event_trb;
1918        ep->stopped_stream = stream_id;
1919
1920        xhci_queue_reset_ep(xhci, slot_id, ep_index);
1921        xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
1922
1923        ep->stopped_td = NULL;
1924        ep->stopped_trb = NULL;
1925        ep->stopped_stream = 0;
1926
1927        xhci_ring_cmd_db(xhci);
1928}
1929
1930/* Check if an error has halted the endpoint ring.  The class driver will
1931 * clean up the halt for a non-default control endpoint if we indicate a stall.
1932 * However, babble and other errors also halt the endpoint ring, and the class
1933 * driver won't clear the halt in that case, so we need to issue a Set Transfer
1934 * Ring Dequeue Pointer command manually.
1935 */
1936static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
1937                struct xhci_ep_ctx *ep_ctx,
1938                unsigned int trb_comp_code)
1939{
1940        /* TRB completion codes that may require a manual halt cleanup */
1941        if (trb_comp_code == COMP_TX_ERR ||
1942                        trb_comp_code == COMP_BABBLE ||
1943                        trb_comp_code == COMP_SPLIT_ERR)
1944                /* The 0.95 spec says a babbling control endpoint
1945                 * is not halted. The 0.96 spec says it is.  Some HW
1946                 * claims to be 0.95 compliant, but it halts the control
1947                 * endpoint anyway.  Check if a babble halted the
1948                 * endpoint.
1949                 */
1950                if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
1951                    cpu_to_le32(EP_STATE_HALTED))
1952                        return 1;
1953
1954        return 0;
1955}
1956
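/*
 * Completion codes 224-255 are vendor defined "informational" codes; the
 * helper below treats them as success rather than as transfer errors.
 */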
1957int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
1958{
1959        if (trb_comp_code >= 224 && trb_comp_code <= 255) {
1960                /* Vendor defined "informational" completion code,
1961                 * treat as not-an-error.
1962                 */
1963                xhci_dbg(xhci, "Vendor defined info completion code %u\n",
1964                                trb_comp_code);
1965                xhci_dbg(xhci, "Treating code as success.\n");
1966                return 1;
1967        }
1968        return 0;
1969}
1970
1971/*
1972 * Finish the TD processing and remove the TD from the td_list;
1973 * return 1 if the URB can be given back.
1974 */
1975static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
1976        union xhci_trb *event_trb, struct xhci_transfer_event *event,
1977        struct xhci_virt_ep *ep, int *status, bool skip)
1978{
1979        struct xhci_virt_device *xdev;
1980        struct xhci_ring *ep_ring;
1981        unsigned int slot_id;
1982        int ep_index;
1983        struct urb *urb = NULL;
1984        struct xhci_ep_ctx *ep_ctx;
1985        int ret = 0;
1986        struct urb_priv *urb_priv;
1987        u32 trb_comp_code;
1988
1989        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1990        xdev = xhci->devs[slot_id];
1991        ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1992        ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1993        ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1994        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1995
1996        if (skip)
1997                goto td_cleanup;
1998
1999        if (trb_comp_code == COMP_STOP_INVAL ||
2000                        trb_comp_code == COMP_STOP) {
2001                /* The Endpoint Stop Command completion will take care of any
2002                 * stopped TDs.  A stopped TD may be restarted, so don't update
2003                 * the ring dequeue pointer or take this TD off any lists yet.
2004                 */
2005                ep->stopped_td = td;
2006                ep->stopped_trb = event_trb;
2007                return 0;
2008        } else {
2009                if (trb_comp_code == COMP_STALL) {
2010                        /* The transfer is completed from the driver's
2011                         * perspective, but we need to issue a set dequeue
2012                         * command for this stalled endpoint to move the dequeue
2013                         * pointer past the TD.  We can't do that here because
2014                         * the halt condition must be cleared first.  Let the
2015                         * USB class driver clear the stall later.
2016                         */
2017                        ep->stopped_td = td;
2018                        ep->stopped_trb = event_trb;
2019                        ep->stopped_stream = ep_ring->stream_id;
2020                } else if (xhci_requires_manual_halt_cleanup(xhci,
2021                                        ep_ctx, trb_comp_code)) {
2022                        /* Other types of errors halt the endpoint, but the
2023                         * class driver doesn't call usb_reset_endpoint() unless
2024                         * the error is -EPIPE.  Clear the halted status in the
2025                         * xHCI hardware manually.
2026                         */
2027                        xhci_cleanup_halted_endpoint(xhci,
2028                                        slot_id, ep_index, ep_ring->stream_id,
2029                                        td, event_trb);
2030                } else {
2031                        /* Update ring dequeue pointer */
2032                        while (ep_ring->dequeue != td->last_trb)
2033                                inc_deq(xhci, ep_ring);
2034                        inc_deq(xhci, ep_ring);
2035                }
2036
2037td_cleanup:
2038                /* Clean up the endpoint's TD list */
2039                urb = td->urb;
2040                urb_priv = urb->hcpriv;
2041
2042                /* Do one last check of the actual transfer length.
2043                 * If the host controller said we transferred more data than
2044                 * the buffer length, urb->actual_length will be a very big
2045                 * number (since it's unsigned).  Play it safe and say we didn't
2046                 * transfer anything.
2047                 */
2048                if (urb->actual_length > urb->transfer_buffer_length) {
2049                        xhci_warn(xhci, "URB transfer length is wrong, "
2050                                        "xHC issue? req. len = %u, "
2051                                        "act. len = %u\n",
2052                                        urb->transfer_buffer_length,
2053                                        urb->actual_length);
2054                        urb->actual_length = 0;
2055                        if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2056                                *status = -EREMOTEIO;
2057                        else
2058                                *status = 0;
2059                }
2060                list_del_init(&td->td_list);
2061                /* Was this TD slated to be cancelled but completed anyway? */
2062                if (!list_empty(&td->cancelled_td_list))
2063                        list_del_init(&td->cancelled_td_list);
2064
2065                urb_priv->td_cnt++;
2066                /* Giveback the urb when all the tds are completed */
2067                if (urb_priv->td_cnt == urb_priv->length) {
2068                        ret = 1;
2069                        if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
2070                                xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
2071                                if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
2072                                        == 0) {
2073                                        if (xhci->quirks & XHCI_AMD_PLL_FIX)
2074                                                usb_amd_quirk_pll_enable();
2075                                }
2076                        }
2077                }
2078        }
2079
2080        return ret;
2081}
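/*
 * Note on the skip flag used above: when finish_td() is called with skip set,
 * the TD is torn down without touching the ring dequeue pointer.  That is
 * used when a halted endpoint has already been handed to
 * xhci_cleanup_halted_endpoint(), which will move the dequeue pointer itself,
 * and when missed isochronous TDs are skipped.
 */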
2082
2083/*
2084 * Process control tds, update urb status and actual_length.
2085 */
2086static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
2087        union xhci_trb *event_trb, struct xhci_transfer_event *event,
2088        struct xhci_virt_ep *ep, int *status)
2089{
2090        struct xhci_virt_device *xdev;
2091        struct xhci_ring *ep_ring;
2092        unsigned int slot_id;
2093        int ep_index;
2094        struct xhci_ep_ctx *ep_ctx;
2095        u32 trb_comp_code;
2096
2097        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
2098        xdev = xhci->devs[slot_id];
2099        ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
2100        ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2101        ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
2102        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2103
2104        switch (trb_comp_code) {
2105        case COMP_SUCCESS:
2106                if (event_trb == ep_ring->dequeue) {
2107                        xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
2108                                        "without IOC set??\n");
2109                        *status = -ESHUTDOWN;
2110                } else if (event_trb != td->last_trb) {
2111                        xhci_warn(xhci, "WARN: Success on ctrl data TRB "
2112                                        "without IOC set??\n");
2113                        *status = -ESHUTDOWN;
2114                } else {
2115                        *status = 0;
2116                }
2117                break;
2118        case COMP_SHORT_TX:
2119                if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2120                        *status = -EREMOTEIO;
2121                else
2122                        *status = 0;
2123                break;
2124        case COMP_STOP_INVAL:
2125        case COMP_STOP:
2126                return finish_td(xhci, td, event_trb, event, ep, status, false);
2127        default:
2128                if (!xhci_requires_manual_halt_cleanup(xhci,
2129                                        ep_ctx, trb_comp_code))
2130                        break;
2131                xhci_dbg(xhci, "TRB error code %u, "
2132                                "halted endpoint index = %u\n",
2133                                trb_comp_code, ep_index);
2134                /* else fall through */
2135        case COMP_STALL:
2136                /* Did we transfer part of the data (middle) phase? */
2137                if (event_trb != ep_ring->dequeue &&
2138                                event_trb != td->last_trb)
2139                        td->urb->actual_length =
2140                                td->urb->transfer_buffer_length -
2141                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2142                else
2143                        td->urb->actual_length = 0;
2144
2145                xhci_cleanup_halted_endpoint(xhci,
2146                        slot_id, ep_index, 0, td, event_trb);
2147                return finish_td(xhci, td, event_trb, event, ep, status, true);
2148        }
2149        /*
2150         * Did we transfer any data, despite the errors that might have
2151         * happened?  I.e. did we get past the setup stage?
2152         */
2153        if (event_trb != ep_ring->dequeue) {
2154                /* The event was for the status stage */
2155                if (event_trb == td->last_trb) {
2156                        if (td->urb->actual_length != 0) {
2157                                /* Don't overwrite a previously set error code
2158                                 */
2159                                if ((*status == -EINPROGRESS || *status == 0) &&
2160                                                (td->urb->transfer_flags
2161                                                 & URB_SHORT_NOT_OK))
2162                                        /* Did we already see a short data
2163                                         * stage? */
2164                                        *status = -EREMOTEIO;
2165                        } else {
2166                                td->urb->actual_length =
2167                                        td->urb->transfer_buffer_length;
2168                        }
2169                } else {
2170                /* Maybe the event was for the data stage? */
2171                        td->urb->actual_length =
2172                                td->urb->transfer_buffer_length -
2173                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2174                        xhci_dbg(xhci, "Waiting for status "
2175                                        "stage event\n");
2176                        return 0;
2177                }
2178        }
2179
2180        return finish_td(xhci, td, event_trb, event, ep, status, false);
2181}
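/*
 * In the control TD handling above, the stage is identified purely by TRB
 * position: an event on ep_ring->dequeue is for the setup stage, an event on
 * td->last_trb is for the status stage, and anything in between is the data
 * stage.
 */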
2182
2183/*
2184 * Process isochronous tds, update urb packet status and actual_length.
2185 */
2186static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2187        union xhci_trb *event_trb, struct xhci_transfer_event *event,
2188        struct xhci_virt_ep *ep, int *status)
2189{
2190        struct xhci_ring *ep_ring;
2191        struct urb_priv *urb_priv;
2192        int idx;
2193        int len = 0;
2194        union xhci_trb *cur_trb;
2195        struct xhci_segment *cur_seg;
2196        struct usb_iso_packet_descriptor *frame;
2197        u32 trb_comp_code;
2198        bool skip_td = false;
2199
2200        ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2201        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2202        urb_priv = td->urb->hcpriv;
2203        idx = urb_priv->td_cnt;
2204        frame = &td->urb->iso_frame_desc[idx];
2205
2206        /* handle completion code */
2207        switch (trb_comp_code) {
2208        case COMP_SUCCESS:
2209                if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
2210                        frame->status = 0;
2211                        break;
2212                }
2213                if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
2214                        trb_comp_code = COMP_SHORT_TX;
2215        case COMP_SHORT_TX:
2216                frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
2217                                -EREMOTEIO : 0;
2218                break;
2219        case COMP_BW_OVER:
2220                frame->status = -ECOMM;
2221                skip_td = true;
2222                break;
2223        case COMP_BUFF_OVER:
2224        case COMP_BABBLE:
2225                frame->status = -EOVERFLOW;
2226                skip_td = true;
2227                break;
2228        case COMP_DEV_ERR:
2229        case COMP_STALL:
2230        case COMP_TX_ERR:
2231                frame->status = -EPROTO;
2232                skip_td = true;
2233                break;
2234        case COMP_STOP:
2235        case COMP_STOP_INVAL:
2236                break;
2237        default:
2238                frame->status = -1;
2239                break;
2240        }
2241
2242        if (trb_comp_code == COMP_SUCCESS || skip_td) {
2243                frame->actual_length = frame->length;
2244                td->urb->actual_length += frame->length;
2245        } else {
2246                for (cur_trb = ep_ring->dequeue,
2247                     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
2248                     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
2249                        if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
2250                            !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
2251                                len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
2252                }
2253                len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
2254                        EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2255
2256                if (trb_comp_code != COMP_STOP_INVAL) {
2257                        frame->actual_length = len;
2258                        td->urb->actual_length += len;
2259                }
2260        }
2261
2262        return finish_td(xhci, td, event_trb, event, ep, status, false);
2263}
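/*
 * For short or errored isochronous TDs above, the actual length is rebuilt by
 * summing the TRB lengths already consumed between the ring dequeue pointer
 * and the event TRB, plus the part of the event TRB that did transfer
 * (its TRB length minus the untransferred length reported in the event).
 */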
2264
2265static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2266                        struct xhci_transfer_event *event,
2267                        struct xhci_virt_ep *ep, int *status)
2268{
2269        struct xhci_ring *ep_ring;
2270        struct urb_priv *urb_priv;
2271        struct usb_iso_packet_descriptor *frame;
2272        int idx;
2273
2274        ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2275        urb_priv = td->urb->hcpriv;
2276        idx = urb_priv->td_cnt;
2277        frame = &td->urb->iso_frame_desc[idx];
2278
2279        /* The transfer is partly done. */
2280        frame->status = -EXDEV;
2281
2282        /* calc actual length */
2283        frame->actual_length = 0;
2284
2285        /* Update ring dequeue pointer */
2286        while (ep_ring->dequeue != td->last_trb)
2287                inc_deq(xhci, ep_ring);
2288        inc_deq(xhci, ep_ring);
2289
2290        return finish_td(xhci, td, NULL, event, ep, status, true);
2291}
2292
2293/*
2294 * Process bulk and interrupt tds, update urb status and actual_length.
2295 */
2296static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
2297        union xhci_trb *event_trb, struct xhci_transfer_event *event,
2298        struct xhci_virt_ep *ep, int *status)
2299{
2300        struct xhci_ring *ep_ring;
2301        union xhci_trb *cur_trb;
2302        struct xhci_segment *cur_seg;
2303        u32 trb_comp_code;
2304
2305        ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2306        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2307
2308        switch (trb_comp_code) {
2309        case COMP_SUCCESS:
2310                /* Double check that the HW transferred everything. */
2311                if (event_trb != td->last_trb ||
2312                    EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
2313                        xhci_warn(xhci, "WARN Successful completion "
2314                                        "on short TX\n");
2315                        if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2316                                *status = -EREMOTEIO;
2317                        else
2318                                *status = 0;
2319                        if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
2320                                trb_comp_code = COMP_SHORT_TX;
2321                } else {
2322                        *status = 0;
2323                }
2324                break;
2325        case COMP_SHORT_TX:
2326                if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2327                        *status = -EREMOTEIO;
2328                else
2329                        *status = 0;
2330                break;
2331        default:
2332                /* Others already handled above */
2333                break;
2334        }
2335        if (trb_comp_code == COMP_SHORT_TX)
2336                xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
2337                                "%d bytes untransferred\n",
2338                                td->urb->ep->desc.bEndpointAddress,
2339                                td->urb->transfer_buffer_length,
2340                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
2341        /* Fast path - was this the last TRB in the TD for this URB? */
2342        if (event_trb == td->last_trb) {
2343                if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
2344                        td->urb->actual_length =
2345                                td->urb->transfer_buffer_length -
2346                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2347                        if (td->urb->transfer_buffer_length <
2348                                        td->urb->actual_length) {
2349                                xhci_warn(xhci, "HC gave bad length "
2350                                                "of %d bytes left\n",
2351                                          EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
2352                                td->urb->actual_length = 0;
2353                                if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2354                                        *status = -EREMOTEIO;
2355                                else
2356                                        *status = 0;
2357                        }
2358                        /* Don't overwrite a previously set error code */
2359                        if (*status == -EINPROGRESS) {
2360                                if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2361                                        *status = -EREMOTEIO;
2362                                else
2363                                        *status = 0;
2364                        }
2365                } else {
2366                        td->urb->actual_length =
2367                                td->urb->transfer_buffer_length;
2368                        /* Ignore a short packet completion if the
2369                         * untransferred length was zero.
2370                         */
2371                        if (*status == -EREMOTEIO)
2372                                *status = 0;
2373                }
2374        } else {
2375                /* Slow path - walk the list, starting from the dequeue
2376                 * pointer, to get the actual length transferred.
2377                 */
2378                td->urb->actual_length = 0;
2379                for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
2380                                cur_trb != event_trb;
2381                                next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
2382                        if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
2383                            !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
2384                                td->urb->actual_length +=
2385                                        TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
2386                }
2387                /* If the ring didn't stop on a Link or No-op TRB, add
2388                 * in the actual bytes transferred from the Normal TRB
2389                 */
2390                if (trb_comp_code != COMP_STOP_INVAL)
2391                        td->urb->actual_length +=
2392                                TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
2393                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2394        }
2395
2396        return finish_td(xhci, td, event_trb, event, ep, status, false);
2397}
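/*
 * Worked example with made-up numbers: a 768-byte bulk TD queued as three
 * 256-byte TRBs whose event lands on the second TRB with 100 bytes of it
 * untransferred takes the slow path, which sums 256 for the first TRB plus
 * 256 - 100 = 156 for the event TRB, giving actual_length = 412.  Had the
 * event landed on the last TRB, the fast path would compute the same value
 * directly from the total length minus the untransferred length.
 */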
2398
2399/*
2400 * If this function returns an error condition, it means it got a Transfer
2401 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
2402 * At this point, the host controller is probably hosed and should be reset.
2403 */
2404static int handle_tx_event(struct xhci_hcd *xhci,
2405                struct xhci_transfer_event *event)
2406        __releases(&xhci->lock)
2407        __acquires(&xhci->lock)
2408{
2409        struct xhci_virt_device *xdev;
2410        struct xhci_virt_ep *ep;
2411        struct xhci_ring *ep_ring;
2412        unsigned int slot_id;
2413        int ep_index;
2414        struct xhci_td *td = NULL;
2415        dma_addr_t event_dma;
2416        struct xhci_segment *event_seg;
2417        union xhci_trb *event_trb;
2418        struct urb *urb = NULL;
2419        int status = -EINPROGRESS;
2420        struct urb_priv *urb_priv;
2421        struct xhci_ep_ctx *ep_ctx;
2422        struct list_head *tmp;
2423        u32 trb_comp_code;
2424        int ret = 0;
2425        int td_num = 0;
2426
2427        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
2428        xdev = xhci->devs[slot_id];
2429        if (!xdev) {
2430                xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
2431                xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
2432                         (unsigned long long) xhci_trb_virt_to_dma(
2433                                 xhci->event_ring->deq_seg,
2434                                 xhci->event_ring->dequeue),
2435                         lower_32_bits(le64_to_cpu(event->buffer)),
2436                         upper_32_bits(le64_to_cpu(event->buffer)),
2437                         le32_to_cpu(event->transfer_len),
2438                         le32_to_cpu(event->flags));
2439                xhci_dbg(xhci, "Event ring:\n");
2440                xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
2441                return -ENODEV;
2442        }
2443
2444        /* Endpoint ID is 1 based, our index is zero based */
2445        ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
2446        ep = &xdev->eps[ep_index];
2447        ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2448        ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
2449        if (!ep_ring ||
2450            (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
2451            EP_STATE_DISABLED) {
2452                xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
2453                                "or incorrect stream ring\n");
2454                xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
2455                         (unsigned long long) xhci_trb_virt_to_dma(
2456                                 xhci->event_ring->deq_seg,
2457                                 xhci->event_ring->dequeue),
2458                         lower_32_bits(le64_to_cpu(event->buffer)),
2459                         upper_32_bits(le64_to_cpu(event->buffer)),
2460                         le32_to_cpu(event->transfer_len),
2461                         le32_to_cpu(event->flags));
2462                xhci_dbg(xhci, "Event ring:\n");
2463                xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
2464                return -ENODEV;
2465        }
2466
2467        /* Count the TDs currently queued on the ring if ep->skip is set */
2468        if (ep->skip) {
2469                list_for_each(tmp, &ep_ring->td_list)
2470                        td_num++;
2471        }
2472
2473        event_dma = le64_to_cpu(event->buffer);
2474        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2475        /* Look for common error cases */
2476        switch (trb_comp_code) {
2477        /* Skip codes that require special handling depending on
2478         * transfer type
2479         */
2480        case COMP_SUCCESS:
2481                if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
2482                        break;
2483                if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
2484                        trb_comp_code = COMP_SHORT_TX;
2485                else
2486                        xhci_warn_ratelimited(xhci,
2487                                        "WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");
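                /* Fall through to the short-transfer handling below */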
2488        case COMP_SHORT_TX:
2489                break;
2490        case COMP_STOP:
2491                xhci_dbg(xhci, "Stopped on Transfer TRB\n");
2492                break;
2493        case COMP_STOP_INVAL:
2494                xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
2495                break;
2496        case COMP_STALL:
2497                xhci_dbg(xhci, "Stalled endpoint\n");
2498                ep->ep_state |= EP_HALTED;
2499                status = -EPIPE;
2500                break;
2501        case COMP_TRB_ERR:
2502                xhci_warn(xhci, "WARN: TRB error on endpoint\n");
2503                status = -EILSEQ;
2504                break;
2505        case COMP_SPLIT_ERR:
2506        case COMP_TX_ERR:
2507                xhci_dbg(xhci, "Transfer error on endpoint\n");
2508                status = -EPROTO;
2509                break;
2510        case COMP_BABBLE:
2511                xhci_dbg(xhci, "Babble error on endpoint\n");
2512                status = -EOVERFLOW;
2513                break;
2514        case COMP_DB_ERR:
2515                xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
2516                status = -ENOSR;
2517                break;
2518        case COMP_BW_OVER:
2519                xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
2520                break;
2521        case COMP_BUFF_OVER:
2522                xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
2523                break;
2524        case COMP_UNDERRUN:
2525                /*
2526                 * When the Isoch ring is empty, the xHC will generate
2527                 * a Ring Overrun Event for IN Isoch endpoint or Ring
2528                 * Underrun Event for OUT Isoch endpoint.
2529                 */
2530                xhci_dbg(xhci, "underrun event on endpoint\n");
2531                if (!list_empty(&ep_ring->td_list))
2532                        xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
2533                                        "still with TDs queued?\n",
2534                                 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2535                                 ep_index);
2536                goto cleanup;
2537        case COMP_OVERRUN:
2538                xhci_dbg(xhci, "overrun event on endpoint\n");
2539                if (!list_empty(&ep_ring->td_list))
2540                        xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
2541                                        "still with TDs queued?\n",
2542                                 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2543                                 ep_index);
2544                goto cleanup;
2545        case COMP_DEV_ERR:
2546                xhci_warn(xhci, "WARN: detected an incompatible device\n");
2547                status = -EPROTO;
2548                break;
2549        case COMP_MISSED_INT:
2550                /*
2551                 * When a missed service error is encountered, the xHC may
2552                 * have skipped one or more isoc TDs.
2553                 * Set the endpoint's skip flag; the missed TDs are completed
2554                 * as short transfers the next time the ring is processed.
2555                 */
2556                ep->skip = true;
2557                xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
2558                goto cleanup;
2559        default:
2560                if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
2561                        status = 0;
2562                        break;
2563                }
2564                xhci_warn(xhci, "ERROR Unknown event condition, HC probably "
2565                                "busted\n");
2566                goto cleanup;
2567        }
2568
2569        do {
2570                /* This TRB should be in the TD at the head of this ring's
2571                 * TD list.
2572                 */
2573                if (list_empty(&ep_ring->td_list)) {
2574                        /*
2575                         * A stopped endpoint may generate an extra completion
2576                         * event if the device was suspended.  Don't print
2577                         * warnings.
2578                         */
2579                        if (!(trb_comp_code == COMP_STOP ||
2580                                                trb_comp_code == COMP_STOP_INVAL)) {
2581                                xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
2582                                                TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2583                                                ep_index);
2584                                xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
2585                                                (le32_to_cpu(event->flags) &
2586                                                 TRB_TYPE_BITMASK)>>10);
2587                                xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
2588                        }
2589                        if (ep->skip) {
2590                                ep->skip = false;
2591                                xhci_dbg(xhci, "td_list is empty while skip "
2592                                                "flag set. Clear skip flag.\n");
2593                        }
2594                        ret = 0;
2595                        goto cleanup;
2596                }
2597
2598                /* We've skipped all the TDs on the ep ring when ep->skip is set */
2599                if (ep->skip && td_num == 0) {
2600                        ep->skip = false;
2601                        xhci_dbg(xhci, "All tds on the ep_ring skipped. "
2602                                                "Clear skip flag.\n");
2603                        ret = 0;
2604                        goto cleanup;
2605                }
2606
2607                td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
2608                if (ep->skip)
2609                        td_num--;
2610
2611                /* Is this a TRB in the currently executing TD? */
2612                event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
2613                                td->last_trb, event_dma);
2614
2615                /*
2616                 * Skip the Force Stopped Event. The event_trb (event_dma) of the
2617                 * FSE is not in the current TD pointed to by ep_ring->dequeue,
2618                 * because the hardware dequeue pointer is still at the TRB
2619                 * preceding the current TD. That TRB may be a Link TRB or the
2620                 * last TRB of the previous TD. The command completion handler
2621                 * will take care of the rest.
2622                 */
2623                if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
2624                        ret = 0;
2625                        goto cleanup;
2626                }
2627
2628                if (!event_seg) {
2629                        if (!ep->skip ||
2630                            !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
2631                                /* Some host controllers give a spurious
2632                                 * successful event after a short transfer.
2633                                 * Ignore it.
2634                                 */
2635                                if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
2636                                                ep_ring->last_td_was_short) {
2637                                        ep_ring->last_td_was_short = false;
2638                                        ret = 0;
2639                                        goto cleanup;
2640                                }
2641                                /* HC is busted, give up! */
2642                                xhci_err(xhci,
2643                                        "ERROR Transfer event TRB DMA ptr not "
2644                                        "part of current TD\n");
2645                                return -ESHUTDOWN;
2646                        }
2647
2648                        ret = skip_isoc_td(xhci, td, event, ep, &status);
2649                        goto cleanup;
2650                }
2651                if (trb_comp_code == COMP_SHORT_TX)
2652                        ep_ring->last_td_was_short = true;
2653                else
2654                        ep_ring->last_td_was_short = false;
2655
2656                if (ep->skip) {
2657                        xhci_dbg(xhci, "Found td. Clear skip flag.\n");
2658                        ep->skip = false;
2659                }
2660
2661                event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
2662                                                sizeof(*event_trb)];
2663                /*
2664                 * No-op TRB should not trigger interrupts.
2665                 * If event_trb is a no-op TRB, it means the
2666                 * corresponding TD has been cancelled. Just ignore
2667                 * the TD.
2668                 */
2669                if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
2670                        xhci_dbg(xhci,
2671                                 "event_trb is a no-op TRB. Skip it\n");
2672                        goto cleanup;
2673                }
2674
2675                /* Now update the urb's actual_length and give back to
2676                 * the core
2677                 */
2678                if (usb_endpoint_xfer_control(&td->urb->ep->desc))
2679                        ret = process_ctrl_td(xhci, td, event_trb, event, ep,
2680                                                 &status);
2681                else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
2682                        ret = process_isoc_td(xhci, td, event_trb, event, ep,
2683                                                 &status);
2684                else
2685                        ret = process_bulk_intr_td(xhci, td, event_trb, event,
2686                                                 ep, &status);
2687
2688cleanup:
2689                /*
2690                 * Do not update event ring dequeue pointer if ep->skip is set.
2691                 * We will roll back and continue processing the missed TDs.
2692                 */
2693                if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
2694                        inc_deq(xhci, xhci->event_ring);
2695                }
2696
2697                if (ret) {
2698                        urb = td->urb;
2699                        urb_priv = urb->hcpriv;
2700                        /* Leave the TD around for the reset endpoint function
2701                         * to use (but only if it's not a control endpoint,
2702                         * since we already queued the Set TR dequeue pointer
2703                         * command for stalled control endpoints).
2704                         */
2705                        if (usb_endpoint_xfer_control(&urb->ep->desc) ||
2706                                (trb_comp_code != COMP_STALL &&
2707                                        trb_comp_code != COMP_BABBLE))
2708                                xhci_urb_free_priv(xhci, urb_priv);
2709                        else
2710                                kfree(urb_priv);
2711
2712                        usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
2713                        if ((urb->actual_length != urb->transfer_buffer_length &&
2714                                                (urb->transfer_flags &
2715                                                 URB_SHORT_NOT_OK)) ||
2716                                        (status != 0 &&
2717                                         !usb_endpoint_xfer_isoc(&urb->ep->desc)))
2718                                xhci_dbg(xhci, "Giveback URB %p, len = %d, "
2719                                                "expected = %d, status = %d\n",
2720                                                urb, urb->actual_length,
2721                                                urb->transfer_buffer_length,
2722                                                status);
2723                        spin_unlock(&xhci->lock);
2724                        /* EHCI, UHCI, and OHCI always unconditionally set the
2725                         * urb->status of an isochronous endpoint to 0.
2726                         */
2727                        if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
2728                                status = 0;
2729                        usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
2730                        spin_lock(&xhci->lock);
2731                }
2732
2733        /*
2734         * If ep->skip is set, there are missed TDs on the endpoint ring
2735         * that still need to be taken care of.
2736         * Process them as short transfers until we reach the TD pointed
2737         * to by the event.
2738         */
2739        } while (ep->skip && trb_comp_code != COMP_MISSED_INT);
2740
2741        return 0;
2742}
2743
2744/*
2745 * This function handles all OS-owned events on the event ring.  It may drop
2746 * xhci->lock between event processing (e.g. to pass up port status changes).
2747 * Returns >0 for "possibly more events to process" (caller should call again),
2748 * otherwise 0 if done.  In future, <0 returns should indicate error code.
2749 */
2750static int xhci_handle_event(struct xhci_hcd *xhci)
2751{
2752        union xhci_trb *event;
2753        int update_ptrs = 1;
2754        int ret;
2755
2756        if (!xhci->event_ring || !xhci->event_ring->dequeue) {
2757                xhci->error_bitmask |= 1 << 1;
2758                return 0;
2759        }
2760
2761        event = xhci->event_ring->dequeue;
2762        /* Does the HC or OS own the TRB? */
2763        if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
2764            xhci->event_ring->cycle_state) {
2765                xhci->error_bitmask |= 1 << 2;
2766                return 0;
2767        }
2768
2769        /*
2770         * Barrier between reading the TRB_CYCLE (valid) flag above and any
2771         * speculative reads of the event's flags/data below.
2772         */
2773        rmb();
2774        /* FIXME: Handle more event types. */
2775        switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
2776        case TRB_TYPE(TRB_COMPLETION):
2777                handle_cmd_completion(xhci, &event->event_cmd);
2778                break;
2779        case TRB_TYPE(TRB_PORT_STATUS):
2780                handle_port_status(xhci, event);
2781                update_ptrs = 0;
2782                break;
2783        case TRB_TYPE(TRB_TRANSFER):
2784                ret = handle_tx_event(xhci, &event->trans_event);
2785                if (ret < 0)
2786                        xhci->error_bitmask |= 1 << 9;
2787                else
2788                        update_ptrs = 0;
2789                break;
2790        case TRB_TYPE(TRB_DEV_NOTE):
2791                handle_device_notification(xhci, event);
2792                break;
2793        default:
2794                if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
2795                    TRB_TYPE(48))
2796                        handle_vendor_event(xhci, event);
2797                else
2798                        xhci->error_bitmask |= 1 << 3;
2799        }
2800        /* Any of the above functions may drop and re-acquire the lock, so check
2801         * to make sure a watchdog timer didn't mark the host as non-responsive.
2802         */
2803        if (xhci->xhc_state & XHCI_STATE_DYING) {
2804                xhci_dbg(xhci, "xHCI host dying, returning from "
2805                                "event handler.\n");
2806                return 0;
2807        }
2808
2809        if (update_ptrs)
2810                /* Update SW event ring dequeue pointer */
2811                inc_deq(xhci, xhci->event_ring);
2812
2813        /* Are there more items on the event ring?  Caller will call us again to
2814         * check.
2815         */
2816        return 1;
2817}
2818
2819/*
2820 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
2821 * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
2822 * indicators of an event TRB error, but we check the status *first* to be safe.
2823 */
2824irqreturn_t xhci_irq(struct usb_hcd *hcd)
2825{
2826        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2827        u32 status;
2828        u64 temp_64;
2829        union xhci_trb *event_ring_deq;
2830        dma_addr_t deq;
2831
2832        spin_lock(&xhci->lock);
2833        /* Check if the xHC generated the interrupt, or the irq is shared */
2834        status = xhci_readl(xhci, &xhci->op_regs->status);
2835        if (status == 0xffffffff)
2836                goto hw_died;
2837
2838        if (!(status & STS_EINT)) {
2839                spin_unlock(&xhci->lock);
2840                return IRQ_NONE;
2841        }
2842        if (status & STS_FATAL) {
2843                xhci_warn(xhci, "WARNING: Host System Error\n");
2844                xhci_halt(xhci);
2845hw_died:
2846                spin_unlock(&xhci->lock);
2847                return -ESHUTDOWN;
2848        }
2849
2850        /*
2851         * Clear the op reg interrupt status first,
2852         * so we can receive interrupts from other MSI-X interrupters.
2853         * Write 1 to clear the interrupt status.
2854         */
2855        status |= STS_EINT;
2856        xhci_writel(xhci, status, &xhci->op_regs->status);
2857        /* FIXME when MSI-X is supported and there are multiple vectors */
2858        /* Clear the MSI-X event interrupt status */
2859
2860        if (hcd->irq) {
2861                u32 irq_pending;
2862                /* Acknowledge the PCI interrupt */
2863                irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
2864                irq_pending |= IMAN_IP;
2865                xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
2866        }
2867
2868        if (xhci->xhc_state & XHCI_STATE_DYING) {
2869                xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
2870                                "Shouldn't IRQs be disabled?\n");
2871                /* Clear the event handler busy flag (RW1C);
2872                 * the event ring should be empty.
2873                 */
2874                temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2875                xhci_write_64(xhci, temp_64 | ERST_EHB,
2876                                &xhci->ir_set->erst_dequeue);
2877                spin_unlock(&xhci->lock);
2878
2879                return IRQ_HANDLED;
2880        }
2881
2882        event_ring_deq = xhci->event_ring->dequeue;
2883        /* FIXME this should be a delayed service routine
2884         * that clears the EHB.
2885         */
2886        while (xhci_handle_event(xhci) > 0) {}
2887
2888        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2889        /* If necessary, update the HW's version of the event ring deq ptr. */
2890        if (event_ring_deq != xhci->event_ring->dequeue) {
2891                deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2892                                xhci->event_ring->dequeue);
2893                if (deq == 0)
2894                        xhci_warn(xhci, "WARN something wrong with SW event "
2895                                        "ring dequeue ptr.\n");
2896                /* Update HC event ring dequeue pointer */
2897                temp_64 &= ERST_PTR_MASK;
2898                temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
2899        }
2900
2901        /* Clear the event handler busy flag (RW1C); event ring is empty. */
2902        temp_64 |= ERST_EHB;
2903        xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
2904
2905        spin_unlock(&xhci->lock);
2906
2907        return IRQ_HANDLED;
2908}
2909
2910irqreturn_t xhci_msi_irq(int irq, void *hcd)
2911{
2912        return xhci_irq(hcd);
2913}
2914
2915/****           Endpoint Ring Operations        ****/
2916
2917/*
2918 * Generic function for queueing a TRB on a ring.
2919 * The caller must have checked to make sure there's room on the ring.
2920 *
2921 * @more_trbs_coming:   Will you enqueue more TRBs before calling
2922 *                      prepare_transfer()?
2923 */
2924static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
2925                bool more_trbs_coming,
2926                u32 field1, u32 field2, u32 field3, u32 field4)
2927{
2928        struct xhci_generic_trb *trb;
2929
2930        trb = &ring->enqueue->generic;
2931        trb->field[0] = cpu_to_le32(field1);
2932        trb->field[1] = cpu_to_le32(field2);
2933        trb->field[2] = cpu_to_le32(field3);
2934        trb->field[3] = cpu_to_le32(field4);
2935        inc_enq(xhci, ring, more_trbs_coming);
2936}
2937
2938/*
2939 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
2940 * FIXME allocate segments if the ring is full.
2941 */
2942static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
2943                u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
2944{
2945        unsigned int num_trbs_needed;
2946
2947        /* Make sure the endpoint has been added to xHC schedule */
2948        switch (ep_state) {
2949        case EP_STATE_DISABLED:
2950                /*
2951                 * USB core changed config/interfaces without notifying us,
2952                 * or hardware is reporting the wrong state.
2953                 */
2954                xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
2955                return -ENOENT;
2956        case EP_STATE_ERROR:
2957                xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
2958                /* FIXME event handling code for error needs to clear it */
2959                /* XXX not sure if this should be -ENOENT or not */
2960                return -EINVAL;
2961        case EP_STATE_HALTED:
2962                xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
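                /* Fall through */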
2963        case EP_STATE_STOPPED:
2964        case EP_STATE_RUNNING:
2965                break;
2966        default:
2967                xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
2968                /*
2969                 * FIXME issue Configure Endpoint command to try to get the HC
2970                 * back into a known state.
2971                 */
2972                return -EINVAL;
2973        }
2974
2975        while (1) {
2976                if (room_on_ring(xhci, ep_ring, num_trbs)) {
2977                        union xhci_trb *trb = ep_ring->enqueue;
2978                        unsigned int usable = ep_ring->enq_seg->trbs +
2979                                        TRBS_PER_SEGMENT - 1 - trb;
2980                        u32 nop_cmd;
2981
2982                        /*
2983                         * Section 4.11.7.1 TD Fragments states that a link
2984                         * TRB must only occur at the boundary between
2985                         * data bursts (e.g. 512 bytes for 480M).
2986                         * While it is possible to split a large fragment,
2987                         * we don't know the size yet.
2988                         * The simplest solution is to fill the TRBs before
2989                         * the Link TRB with No-op commands.
2990                         */
2991                        if (num_trbs == 1 || num_trbs <= usable || usable == 0)
2992                                break;
2993
2994                        if (ep_ring->type != TYPE_BULK)
2995                                /*
2996                                 * While isoc transfers might have a buffer that
2997                                 * crosses a 64k boundary it is unlikely.
2998                                 * Since we can't add NOPs without generating
2999                                 * gaps in the traffic just hope it never
3000                                 * happens at the end of the ring.
3001                                 * This could be fixed by writing a LINK TRB
3002                                 * instead of the first NOP - however the
3003                                 * TRB_TYPE_LINK_LE32() calls would all need
3004                                 * changing to check the ring length.
3005                                 */
3006                                break;
3007
3008                        if (num_trbs >= TRBS_PER_SEGMENT) {
3009                                xhci_err(xhci, "Too many fragments %d, max %d\n",
3010                                                num_trbs, TRBS_PER_SEGMENT - 1);
3011                                return -ENOMEM;
3012                        }
3013
3014                        nop_cmd = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) |
3015                                        ep_ring->cycle_state);
3016                        ep_ring->num_trbs_free -= usable;
3017                        do {
3018                                trb->generic.field[0] = 0;
3019                                trb->generic.field[1] = 0;
3020                                trb->generic.field[2] = 0;
3021                                trb->generic.field[3] = nop_cmd;
3022                                trb++;
3023                        } while (--usable);
3024                        ep_ring->enqueue = trb;
3025                        if (room_on_ring(xhci, ep_ring, num_trbs))
3026                                break;
3027                }
3028
3029                if (ep_ring == xhci->cmd_ring) {
3030                        xhci_err(xhci, "Command ring expansion is not supported\n");
3031                        return -ENOMEM;
3032                }
3033
3034                xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
3035                                "ERROR no room on ep ring, try ring expansion");
3036                num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
3037                if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
3038                                        mem_flags)) {
3039                        xhci_err(xhci, "Ring expansion failed\n");
3040                        return -ENOMEM;
3041                }
3042        }
3043
3044        if (enqueue_is_link_trb(ep_ring)) {
3045                struct xhci_ring *ring = ep_ring;
3046                union xhci_trb *next;
3047
3048                next = ring->enqueue;
3049
3050                while (last_trb(xhci, ring, ring->enq_seg, next)) {
3051                        /* If we're not dealing with 0.95 hardware or isoc rings
3052                         * on AMD 0.96 host, clear the chain bit.
3053                         */
3054                        if (!xhci_link_trb_quirk(xhci) &&
3055                                        !(ring->type == TYPE_ISOC &&
3056                                         (xhci->quirks & XHCI_AMD_0x96_HOST)))
3057                                next->link.control &= cpu_to_le32(~TRB_CHAIN);
3058                        else
3059                                next->link.control |= cpu_to_le32(TRB_CHAIN);
3060
3061                        wmb();
3062                        next->link.control ^= cpu_to_le32(TRB_CYCLE);
3063
3064                        /* Toggle the cycle bit after the last ring segment. */
3065                        if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
3066                                ring->cycle_state = (ring->cycle_state ? 0 : 1);
3067                        }
3068                        ring->enq_seg = ring->enq_seg->next;
3069                        ring->enqueue = ring->enq_seg->trbs;
3070                        next = ring->enqueue;
3071                }
3072        }
3073
3074        return 0;
3075}
3076
3077static int prepare_transfer(struct xhci_hcd *xhci,
3078                struct xhci_virt_device *xdev,
3079                unsigned int ep_index,
3080                unsigned int stream_id,
3081                unsigned int num_trbs,
3082                struct urb *urb,
3083                unsigned int td_index,
3084                gfp_t mem_flags)
3085{
3086        int ret;
3087        struct urb_priv *urb_priv;
3088        struct xhci_td  *td;
3089        struct xhci_ring *ep_ring;
3090        struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3091
3092        ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
3093        if (!ep_ring) {
3094                xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
3095                                stream_id);
3096                return -EINVAL;
3097        }
3098
3099        ret = prepare_ring(xhci, ep_ring,
3100                           le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
3101                           num_trbs, mem_flags);
3102        if (ret)
3103                return ret;
3104
3105        urb_priv = urb->hcpriv;
3106        td = urb_priv->td[td_index];
3107
3108        INIT_LIST_HEAD(&td->td_list);
3109        INIT_LIST_HEAD(&td->cancelled_td_list);
3110
3111        if (td_index == 0) {
3112                ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
3113                if (unlikely(ret))
3114                        return ret;
3115        }
3116
3117        td->urb = urb;
3118        /* Add this TD to the tail of the endpoint ring's TD list */
3119        list_add_tail(&td->td_list, &ep_ring->td_list);
3120        td->start_seg = ep_ring->enq_seg;
3121        td->first_trb = ep_ring->enqueue;
3122
3123        urb_priv->td[td_index] = td;
3124
3125        return 0;
3126}
3127
3128static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
3129{
3130        int num_sgs, num_trbs, running_total, temp, i;
3131        struct scatterlist *sg;
3132
3133        sg = NULL;
3134        num_sgs = urb->num_mapped_sgs;
3135        temp = urb->transfer_buffer_length;
3136
3137        num_trbs = 0;
3138        for_each_sg(urb->sg, sg, num_sgs, i) {
3139                unsigned int len = sg_dma_len(sg);
3140
3141                /* Scatter gather list entries may cross 64KB boundaries */
3142                running_total = TRB_MAX_BUFF_SIZE -
3143                        (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
3144                running_total &= TRB_MAX_BUFF_SIZE - 1;
3145                if (running_total != 0)
3146                        num_trbs++;
3147
3148                /* How many more 64KB chunks to transfer, how many more TRBs? */
3149                while (running_total < sg_dma_len(sg) && running_total < temp) {
3150                        num_trbs++;
3151                        running_total += TRB_MAX_BUFF_SIZE;
3152                }
3153                len = min_t(int, len, temp);
3154                temp -= len;
3155                if (temp == 0)
3156                        break;
3157        }
3158        return num_trbs;
3159}
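
/*
 * Rough example of the count above (numbers are illustrative only): a
 * single sg entry of 100000 bytes whose DMA address sits 4096 bytes below
 * a 64KB boundary is counted as three TRBs - 4096 bytes up to the
 * boundary, one full 65536-byte TRB, and a final 30368-byte TRB - since a
 * TRB buffer must not cross a 64KB boundary.
 */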
3160
3161static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
3162{
3163        if (num_trbs != 0)
3164                dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
3165                                "TRBs, %d left\n", __func__,
3166                                urb->ep->desc.bEndpointAddress, num_trbs);
3167        if (running_total != urb->transfer_buffer_length)
3168                dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
3169                                "queued %#x (%d), asked for %#x (%d)\n",
3170                                __func__,
3171                                urb->ep->desc.bEndpointAddress,
3172                                running_total, running_total,
3173                                urb->transfer_buffer_length,
3174                                urb->transfer_buffer_length);
3175}
3176
3177static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
3178                unsigned int ep_index, unsigned int stream_id, int start_cycle,
3179                struct xhci_generic_trb *start_trb)
3180{
3181        /*
3182         * Pass all the TRBs to the hardware at once and make sure this write
3183         * isn't reordered.
3184         */
3185        wmb();
3186        if (start_cycle)
3187                start_trb->field[3] |= cpu_to_le32(start_cycle);
3188        else
3189                start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
3190        xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
3191}
3192
3193/*
3194 * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
3195 * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
3196 * (comprised of sg list entries) can take several service intervals to
3197 * transmit.
3198 */
3199int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3200                struct urb *urb, int slot_id, unsigned int ep_index)
3201{
3202        struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
3203                        xhci->devs[slot_id]->out_ctx, ep_index);
3204        int xhci_interval;
3205        int ep_interval;
3206
3207        xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
3208        ep_interval = urb->interval;
3209        /* Convert to microframes */
3210        if (urb->dev->speed == USB_SPEED_LOW ||
3211                        urb->dev->speed == USB_SPEED_FULL)
3212                ep_interval *= 8;
3213        /* FIXME change this to a warning and a suggestion to use the new API
3214         * to set the polling interval (once the API is added).
3215         */
3216        if (xhci_interval != ep_interval) {
3217                dev_dbg_ratelimited(&urb->dev->dev,
3218                                "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
3219                                ep_interval, ep_interval == 1 ? "" : "s",
3220                                xhci_interval, xhci_interval == 1 ? "" : "s");
3221                urb->interval = xhci_interval;
3222                /* Convert back to frames for LS/FS devices */
3223                if (urb->dev->speed == USB_SPEED_LOW ||
3224                                urb->dev->speed == USB_SPEED_FULL)
3225                        urb->interval /= 8;
3226        }
3227        return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
3228}
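
/*
 * Interval bookkeeping example (values are illustrative only): for a
 * full-speed interrupt endpoint, urb->interval is given in frames, so a
 * driver-requested interval of 4 frames is 32 microframes.  If the xHC
 * endpoint context was programmed with 16 microframes instead, the code
 * above overrides urb->interval with 16 and converts it back to 2 frames
 * for the LS/FS case.
 */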
3229
3230/*
3231 * The TD size is the number of bytes remaining in the TD (including this TRB),
3232 * right shifted by 10.
3233 * It must fit in bits 21:17, so it can't be bigger than 31.
3234 */
3235static u32 xhci_td_remainder(unsigned int remainder)
3236{
3237        u32 max = (1 << (21 - 17 + 1)) - 1;
3238
3239        if ((remainder >> 10) >= max)
3240                return max << 17;
3241        else
3242                return (remainder >> 10) << 17;
3243}
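
/*
 * Example of the pre-1.0 TD size encoding above (illustrative values):
 * 20480 bytes remaining >> 10 = 20, which fits in bits 21:17 as-is, while
 * 65536 bytes remaining >> 10 = 64 exceeds the field and is clamped to 31.
 */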
3244
3245/*
3246 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
3247 * packets remaining in the TD (*not* including this TRB).
3248 *
3249 * Total TD packet count = total_packet_count =
3250 *     DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
3251 *
3252 * Packets transferred up to and including this TRB = packets_transferred =
3253 *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
3254 *
3255 * TD size = total_packet_count - packets_transferred
3256 *
3257 * It must fit in bits 21:17, so it can't be bigger than 31.
3258 * The last TRB in a TD must have the TD size set to zero.
3259 */
3260static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
3261                unsigned int total_packet_count, struct urb *urb,
3262                unsigned int num_trbs_left)
3263{
3264        int packets_transferred;
3265
3266        /* Last TRB of the TD, or one TRB with a zero-length data packet. */
3267        if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0))
3268                return 0;
3269
3270        /* All the TRB queueing functions don't count the current TRB in
3271         * running_total.
3272         */
3273        packets_transferred = (running_total + trb_buff_len) /
3274                GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
3275
3276        if ((total_packet_count - packets_transferred) > 31)
3277                return 31 << 17;
3278        return (total_packet_count - packets_transferred) << 17;
3279}
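
/*
 * Example of the xHCI 1.0 TD size calculation above (illustrative values):
 * a 3072-byte TD on an endpoint with wMaxPacketSize 512 has
 * total_packet_count = 6.  For the first 1024-byte TRB,
 * packets_transferred = (0 + 1024) / 512 = 2, so a TD size of 4 is written
 * into that TRB; the last TRB of the TD always gets a TD size of zero.
 */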
3280
3281static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3282                struct urb *urb, int slot_id, unsigned int ep_index)
3283{
3284        struct xhci_ring *ep_ring;
3285        unsigned int num_trbs;
3286        struct urb_priv *urb_priv;
3287        struct xhci_td *td;
3288        struct scatterlist *sg;
3289        int num_sgs;
3290        int trb_buff_len, this_sg_len, running_total;
3291        unsigned int total_packet_count;
3292        bool first_trb;
3293        u64 addr;
3294        bool more_trbs_coming;
3295
3296        struct xhci_generic_trb *start_trb;
3297        int start_cycle;
3298
3299        ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3300        if (!ep_ring)
3301                return -EINVAL;
3302
3303        num_trbs = count_sg_trbs_needed(xhci, urb);
3304        num_sgs = urb->num_mapped_sgs;
3305        total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
3306                        usb_endpoint_maxp(&urb->ep->desc));
3307
3308        trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
3309                        ep_index, urb->stream_id,
3310                        num_trbs, urb, 0, mem_flags);
3311        if (trb_buff_len < 0)
3312                return trb_buff_len;
3313
3314        urb_priv = urb->hcpriv;
3315        td = urb_priv->td[0];
3316
3317        /*
3318         * Don't give the first TRB to the hardware (by toggling the cycle bit)
3319         * until we've finished creating all the other TRBs.  The ring's cycle
3320         * state may change as we enqueue the other TRBs, so save it too.
3321         */
3322        start_trb = &ep_ring->enqueue->generic;
3323        start_cycle = ep_ring->cycle_state;
3324
3325        running_total = 0;
3326        /*
3327         * How much data is in the first TRB?
3328         *
3329         * There are three forces at work for TRB buffer pointers and lengths:
3330         * 1. We don't want to walk off the end of this sg-list entry buffer.
3331         * 2. The transfer length that the driver requested may be smaller than
3332         *    the amount of memory allocated for this scatter-gather list.
3333         * 3. TRBs buffers can't cross 64KB boundaries.
3334         * 3. TRB buffers can't cross 64KB boundaries.
3335        sg = urb->sg;
3336        addr = (u64) sg_dma_address(sg);
3337        this_sg_len = sg_dma_len(sg);
3338        trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
3339        trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
3340        if (trb_buff_len > urb->transfer_buffer_length)
3341                trb_buff_len = urb->transfer_buffer_length;
3342
3343        first_trb = true;
3344        /* Queue the first TRB, even if it's zero-length */
3345        do {
3346                u32 field = 0;
3347                u32 length_field = 0;
3348                u32 remainder = 0;
3349
3350                /* Don't change the cycle bit of the first TRB until later */
3351                if (first_trb) {
3352                        first_trb = false;
3353                        if (start_cycle == 0)
3354                                field |= 0x1;
3355                } else
3356                        field |= ep_ring->cycle_state;
3357
3358                /* Chain all the TRBs together; clear the chain bit in the last
3359                 * TRB to indicate it's the last TRB in the chain.
3360                 */
3361                if (num_trbs > 1) {
3362                        field |= TRB_CHAIN;
3363                } else {
3364                        /* FIXME - add check for ZERO_PACKET flag before this */
3365                        td->last_trb = ep_ring->enqueue;
3366                        field |= TRB_IOC;
3367                }
3368
3369                /* Only set interrupt on short packet for IN endpoints */
3370                if (usb_urb_dir_in(urb))
3371                        field |= TRB_ISP;
3372
3373                if (TRB_MAX_BUFF_SIZE -
3374                                (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
3375                        xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
3376                        xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
3377                                        (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
3378                                        (unsigned int) addr + trb_buff_len);
3379                }
3380
3381                /* Set the TRB length, TD size, and interrupter fields. */
3382                if (xhci->hci_version < 0x100) {
3383                        remainder = xhci_td_remainder(
3384                                        urb->transfer_buffer_length -
3385                                        running_total);
3386                } else {
3387                        remainder = xhci_v1_0_td_remainder(running_total,
3388                                        trb_buff_len, total_packet_count, urb,
3389                                        num_trbs - 1);
3390                }
3391                length_field = TRB_LEN(trb_buff_len) |
3392                        remainder |
3393                        TRB_INTR_TARGET(0);
3394
3395                if (num_trbs > 1)
3396                        more_trbs_coming = true;
3397                else
3398                        more_trbs_coming = false;
3399                queue_trb(xhci, ep_ring, more_trbs_coming,
3400                                lower_32_bits(addr),
3401                                upper_32_bits(addr),
3402                                length_field,
3403                                field | TRB_TYPE(TRB_NORMAL));
3404                --num_trbs;
3405                running_total += trb_buff_len;
3406
3407                /* Calculate length for next transfer --
3408                 * Are we done queueing all the TRBs for this sg entry?
3409                 */
3410                this_sg_len -= trb_buff_len;
3411                if (this_sg_len == 0) {
3412                        --num_sgs;
3413                        if (num_sgs == 0)
3414                                break;
3415                        sg = sg_next(sg);
3416                        addr = (u64) sg_dma_address(sg);
3417                        this_sg_len = sg_dma_len(sg);
3418                } else {
3419                        addr += trb_buff_len;
3420                }
3421
3422                trb_buff_len = TRB_MAX_BUFF_SIZE -
3423                        (addr & (TRB_MAX_BUFF_SIZE - 1));
3424                trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
3425                if (running_total + trb_buff_len > urb->transfer_buffer_length)
3426                        trb_buff_len =
3427                                urb->transfer_buffer_length - running_total;
3428        } while (running_total < urb->transfer_buffer_length);
3429
3430        check_trb_math(urb, num_trbs, running_total);
3431        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3432                        start_cycle, start_trb);
3433        return 0;
3434}
3435
3436/* This is very similar to what ehci-q.c qtd_fill() does */
3437int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3438                struct urb *urb, int slot_id, unsigned int ep_index)
3439{
3440        struct xhci_ring *ep_ring;
3441        struct urb_priv *urb_priv;
3442        struct xhci_td *td;
3443        int num_trbs;
3444        struct xhci_generic_trb *start_trb;
3445        bool first_trb;
3446        bool more_trbs_coming;
3447        int start_cycle;
3448        u32 field, length_field;
3449
3450        int running_total, trb_buff_len, ret;
3451        unsigned int total_packet_count;
3452        u64 addr;
3453
3454        if (urb->num_sgs)
3455                return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
3456
3457        ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3458        if (!ep_ring)
3459                return -EINVAL;
3460
3461        num_trbs = 0;
3462        /* How much data is (potentially) left before the 64KB boundary? */
3463        running_total = TRB_MAX_BUFF_SIZE -
3464                (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
3465        running_total &= TRB_MAX_BUFF_SIZE - 1;
3466
3467        /* If there's some data on this 64KB chunk, or we have to send a
3468         * zero-length transfer, we need at least one TRB
3469         */
3470        if (running_total != 0 || urb->transfer_buffer_length == 0)
3471                num_trbs++;
3472        /* How many more 64KB chunks to transfer, how many more TRBs? */
3473        while (running_total < urb->transfer_buffer_length) {
3474                num_trbs++;
3475                running_total += TRB_MAX_BUFF_SIZE;
3476        }
3477        /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
3478
3479        ret = prepare_transfer(xhci, xhci->devs[slot_id],
3480                        ep_index, urb->stream_id,
3481                        num_trbs, urb, 0, mem_flags);
3482        if (ret < 0)
3483                return ret;
3484
3485        urb_priv = urb->hcpriv;
3486        td = urb_priv->td[0];
3487
3488        /*
3489         * Don't give the first TRB to the hardware (by toggling the cycle bit)
3490         * until we've finished creating all the other TRBs.  The ring's cycle
3491         * state may change as we enqueue the other TRBs, so save it too.
3492         */
3493        start_trb = &ep_ring->enqueue->generic;
3494        start_cycle = ep_ring->cycle_state;
3495
3496        running_total = 0;
3497        total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
3498                        usb_endpoint_maxp(&urb->ep->desc));
3499        /* How much data is in the first TRB? */
3500        addr = (u64) urb->transfer_dma;
3501        trb_buff_len = TRB_MAX_BUFF_SIZE -
3502                (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
3503        if (trb_buff_len > urb->transfer_buffer_length)
3504                trb_buff_len = urb->transfer_buffer_length;
3505
3506        first_trb = true;
3507
3508        /* Queue the first TRB, even if it's zero-length */
3509        do {
3510                u32 remainder = 0;
3511                field = 0;
3512
3513                /* Don't change the cycle bit of the first TRB until later */
3514                if (first_trb) {
3515                        first_trb = false;
3516                        if (start_cycle == 0)
3517                                field |= 0x1;
3518                } else
3519                        field |= ep_ring->cycle_state;
3520
3521                /* Chain all the TRBs together; clear the chain bit in the last
3522                 * TRB to indicate it's the last TRB in the chain.
3523                 */
3524                if (num_trbs > 1) {
3525                        field |= TRB_CHAIN;
3526                } else {
3527                        /* FIXME - add check for ZERO_PACKET flag before this */
3528                        td->last_trb = ep_ring->enqueue;
3529                        field |= TRB_IOC;
3530                }
3531
3532                /* Only set interrupt on short packet for IN endpoints */
3533                if (usb_urb_dir_in(urb))
3534                        field |= TRB_ISP;
3535
3536                /* Set the TRB length, TD size, and interrupter fields. */
3537                if (xhci->hci_version < 0x100) {
3538                        remainder = xhci_td_remainder(
3539                                        urb->transfer_buffer_length -
3540                                        running_total);
3541                } else {
3542                        remainder = xhci_v1_0_td_remainder(running_total,
3543                                        trb_buff_len, total_packet_count, urb,
3544                                        num_trbs - 1);
3545                }
3546                length_field = TRB_LEN(trb_buff_len) |
3547                        remainder |
3548                        TRB_INTR_TARGET(0);
3549
3550                if (num_trbs > 1)
3551                        more_trbs_coming = true;
3552                else
3553                        more_trbs_coming = false;
3554                queue_trb(xhci, ep_ring, more_trbs_coming,
3555                                lower_32_bits(addr),
3556                                upper_32_bits(addr),
3557                                length_field,
3558                                field | TRB_TYPE(TRB_NORMAL));
3559                --num_trbs;
3560                running_total += trb_buff_len;
3561
3562                /* Calculate length for next transfer */
3563                addr += trb_buff_len;
3564                trb_buff_len = urb->transfer_buffer_length - running_total;
3565                if (trb_buff_len > TRB_MAX_BUFF_SIZE)
3566                        trb_buff_len = TRB_MAX_BUFF_SIZE;
3567        } while (running_total < urb->transfer_buffer_length);
3568
3569        check_trb_math(urb, num_trbs, running_total);
3570        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3571                        start_cycle, start_trb);
3572        return 0;
3573}
3574
3575/* Caller must have locked xhci->lock */
3576int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3577                struct urb *urb, int slot_id, unsigned int ep_index)
3578{
3579        struct xhci_ring *ep_ring;
3580        int num_trbs;
3581        int ret;
3582        struct usb_ctrlrequest *setup;
3583        struct xhci_generic_trb *start_trb;
3584        int start_cycle;
3585        u32 field, length_field;
3586        struct urb_priv *urb_priv;
3587        struct xhci_td *td;
3588
3589        ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3590        if (!ep_ring)
3591                return -EINVAL;
3592
3593        /*
3594         * Need to copy setup packet into setup TRB, so we can't use the setup
3595         * DMA address.
3596         */
3597        if (!urb->setup_packet)
3598                return -EINVAL;
3599
3600        /* 1 TRB for setup, 1 for status */
3601        num_trbs = 2;
3602        /*
3603         * Don't need to check if we need additional event data and normal TRBs,
3604         * since data in control transfers will never get bigger than 16MB
3605         * XXX: can we get a buffer that crosses 64KB boundaries?
3606         */
3607        if (urb->transfer_buffer_length > 0)
3608                num_trbs++;
3609        ret = prepare_transfer(xhci, xhci->devs[slot_id],
3610                        ep_index, urb->stream_id,
3611                        num_trbs, urb, 0, mem_flags);
3612        if (ret < 0)
3613                return ret;
3614
3615        urb_priv = urb->hcpriv;
3616        td = urb_priv->td[0];
3617
3618        /*
3619         * Don't give the first TRB to the hardware (by toggling the cycle bit)
3620         * until we've finished creating all the other TRBs.  The ring's cycle
3621         * state may change as we enqueue the other TRBs, so save it too.
3622         */
3623        start_trb = &ep_ring->enqueue->generic;
3624        start_cycle = ep_ring->cycle_state;
3625
3626        /* Queue setup TRB - see section 6.4.1.2.1 */
3627        /* FIXME better way to translate setup_packet into two u32 fields? */
3628        setup = (struct usb_ctrlrequest *) urb->setup_packet;
3629        field = 0;
3630        field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
3631        if (start_cycle == 0)
3632                field |= 0x1;
3633
3634        /* xHCI 1.0 6.4.1.2.1: Transfer Type field */
3635        if (xhci->hci_version == 0x100) {
3636                if (urb->transfer_buffer_length > 0) {
3637                        if (setup->bRequestType & USB_DIR_IN)
3638                                field |= TRB_TX_TYPE(TRB_DATA_IN);
3639                        else
3640                                field |= TRB_TX_TYPE(TRB_DATA_OUT);
3641                }
3642        }
3643
3644        queue_trb(xhci, ep_ring, true,
3645                  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
3646                  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
3647                  TRB_LEN(8) | TRB_INTR_TARGET(0),
3648                  /* Immediate data in pointer */
3649                  field);
3650
3651        /* If there's data, queue data TRBs */
3652        /* Only set interrupt on short packet for IN endpoints */
3653        if (usb_urb_dir_in(urb))
3654                field = TRB_ISP | TRB_TYPE(TRB_DATA);
3655        else
3656                field = TRB_TYPE(TRB_DATA);
3657
3658        length_field = TRB_LEN(urb->transfer_buffer_length) |
3659                xhci_td_remainder(urb->transfer_buffer_length) |
3660                TRB_INTR_TARGET(0);
3661        if (urb->transfer_buffer_length > 0) {
3662                if (setup->bRequestType & USB_DIR_IN)
3663                        field |= TRB_DIR_IN;
3664                queue_trb(xhci, ep_ring, true,
3665                                lower_32_bits(urb->transfer_dma),
3666                                upper_32_bits(urb->transfer_dma),
3667                                length_field,
3668                                field | ep_ring->cycle_state);
3669        }
3670
3671        /* Save the DMA address of the last TRB in the TD */
3672        td->last_trb = ep_ring->enqueue;
3673
3674        /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
3675        /* If the device sent data, the status stage is an OUT transfer */
3676        if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
3677                field = 0;
3678        else
3679                field = TRB_DIR_IN;
3680        queue_trb(xhci, ep_ring, false,
3681                        0,
3682                        0,
3683                        TRB_INTR_TARGET(0),
3684                        /* Event on completion */
3685                        field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
3686
3687        giveback_first_trb(xhci, slot_id, ep_index, 0,
3688                        start_cycle, start_trb);
3689        return 0;
3690}
3691
3692static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
3693                struct urb *urb, int i)
3694{
3695        int num_trbs = 0;
3696        u64 addr, td_len;
3697
3698        addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
3699        td_len = urb->iso_frame_desc[i].length;
3700
3701        num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
3702                        TRB_MAX_BUFF_SIZE);
3703        if (num_trbs == 0)
3704                num_trbs++;
3705
3706        return num_trbs;
3707}
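/*
 * Worked example for the calculation above (hypothetical numbers, assuming
 * TRB_MAX_BUFF_SIZE is 64K): a frame whose buffer starts at a DMA address with
 * (addr & 0xffff) == 0xf000 and td_len == 8192 crosses one 64K boundary, so
 * DIV_ROUND_UP(8192 + 0xf000, 0x10000) = 2 TRBs are needed.  A zero-length
 * frame still consumes one TRB.
 */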
3708
3709/*
3710 * The transfer burst count field of the isochronous TRB defines the number of
3711 * bursts that are required to move all packets in this TD.  Only SuperSpeed
3712 * devices can burst up to (bMaxBurst + 1) packets per service interval.
3713 * This field is zero based, meaning a value of zero in the field means one
3714 * burst.  Basically, for everything but SuperSpeed devices, this field will be
3715 * zero.  Only xHCI 1.0 host controllers support this field.
3716 */
3717static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
3718                struct usb_device *udev,
3719                struct urb *urb, unsigned int total_packet_count)
3720{
3721        unsigned int max_burst;
3722
3723        if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
3724                return 0;
3725
3726        max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3727        return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
3728}
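/*
 * Worked example (hypothetical values): a SuperSpeed isoc endpoint with
 * bMaxBurst = 3 (up to 4 packets per burst) moving 11 packets in one TD needs
 * DIV_ROUND_UP(11, 4) = 3 bursts, so the zero-based TBC value returned is 2.
 */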
3729
3730/*
3731 * Returns the number of packets in the last "burst" of packets.  This field is
3732 * valid for all speeds of devices.  USB 2.0 devices can only do one "burst", so
3733 * the last burst packet count is equal to the total number of packets in the
3734 * TD.  SuperSpeed endpoints can have up to 3 bursts.  All but the last burst
3735 * must contain (bMaxBurst + 1) number of packets, but the last burst can
3736 * contain 1 to (bMaxBurst + 1) packets.
3737 */
3738static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
3739                struct usb_device *udev,
3740                struct urb *urb, unsigned int total_packet_count)
3741{
3742        unsigned int max_burst;
3743        unsigned int residue;
3744
3745        if (xhci->hci_version < 0x100)
3746                return 0;
3747
3748        switch (udev->speed) {
3749        case USB_SPEED_SUPER:
3750                /* bMaxBurst is zero based: 0 means 1 packet per burst */
3751                max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3752                residue = total_packet_count % (max_burst + 1);
3753                /* If residue is zero, the last burst contains (max_burst + 1)
3754                 * number of packets, but the TLBPC field is zero-based.
3755                 */
3756                if (residue == 0)
3757                        return max_burst;
3758                return residue - 1;
3759        default:
3760                if (total_packet_count == 0)
3761                        return 0;
3762                return total_packet_count - 1;
3763        }
3764}
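/*
 * Worked example (hypothetical values): continuing the 11-packet, bMaxBurst = 3
 * case above, 11 % 4 = 3, so the last burst carries 3 packets and the
 * zero-based TLBPC returned is 2.  With 12 packets the residue is 0, the last
 * burst is full (4 packets) and TLBPC is bMaxBurst = 3.  For non-SuperSpeed
 * devices there is a single "burst", so TLBPC is total_packet_count - 1.
 */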
3765
3766/* Queue the TRBs for an isochronous transfer */
3767static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3768                struct urb *urb, int slot_id, unsigned int ep_index)
3769{
3770        struct xhci_ring *ep_ring;
3771        struct urb_priv *urb_priv;
3772        struct xhci_td *td;
3773        int num_tds, trbs_per_td;
3774        struct xhci_generic_trb *start_trb;
3775        bool first_trb;
3776        int start_cycle;
3777        u32 field, length_field;
3778        int running_total, trb_buff_len, td_len, td_remain_len, ret;
3779        u64 start_addr, addr;
3780        int i, j;
3781        bool more_trbs_coming;
3782
3783        ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
3784
3785        num_tds = urb->number_of_packets;
3786        if (num_tds < 1) {
3787                xhci_dbg(xhci, "Isoc URB with zero packets?\n");
3788                return -EINVAL;
3789        }
3790
3791        start_addr = (u64) urb->transfer_dma;
3792        start_trb = &ep_ring->enqueue->generic;
3793        start_cycle = ep_ring->cycle_state;
3794
3795        urb_priv = urb->hcpriv;
3796        /* Queue the first TRB, even if it's zero-length */
3797        for (i = 0; i < num_tds; i++) {
3798                unsigned int total_packet_count;
3799                unsigned int burst_count;
3800                unsigned int residue;
3801
3802                first_trb = true;
3803                running_total = 0;
3804                addr = start_addr + urb->iso_frame_desc[i].offset;
3805                td_len = urb->iso_frame_desc[i].length;
3806                td_remain_len = td_len;
3807                total_packet_count = DIV_ROUND_UP(td_len,
3808                                GET_MAX_PACKET(
3809                                        usb_endpoint_maxp(&urb->ep->desc)));
3810                /* A zero-length transfer still involves at least one packet. */
3811                if (total_packet_count == 0)
3812                        total_packet_count++;
3813                burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
3814                                total_packet_count);
3815                residue = xhci_get_last_burst_packet_count(xhci,
3816                                urb->dev, urb, total_packet_count);
3817
3818                trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
3819
3820                ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
3821                                urb->stream_id, trbs_per_td, urb, i, mem_flags);
3822                if (ret < 0) {
3823                        if (i == 0)
3824                                return ret;
3825                        goto cleanup;
3826                }
3827
3828                td = urb_priv->td[i];
3829                for (j = 0; j < trbs_per_td; j++) {
3830                        u32 remainder = 0;
3831                        field = 0;
3832
3833                        if (first_trb) {
3834                                field = TRB_TBC(burst_count) |
3835                                        TRB_TLBPC(residue);
3836                                /* Queue the isoc TRB */
3837                                field |= TRB_TYPE(TRB_ISOC);
3838                                /* Assume URB_ISO_ASAP is set */
3839                                field |= TRB_SIA;
3840                                if (i == 0) {
3841                                        if (start_cycle == 0)
3842                                                field |= 0x1;
3843                                } else
3844                                        field |= ep_ring->cycle_state;
3845                                first_trb = false;
3846                        } else {
3847                                /* Queue other normal TRBs */
3848                                field |= TRB_TYPE(TRB_NORMAL);
3849                                field |= ep_ring->cycle_state;
3850                        }
3851
3852                        /* Only set interrupt on short packet for IN EPs */
3853                        if (usb_urb_dir_in(urb))
3854                                field |= TRB_ISP;
3855
3856                        /* Chain all the TRBs together; clear the chain bit in
3857                         * the last TRB to indicate it's the last TRB in the
3858                         * chain.
3859                         */
3860                        if (j < trbs_per_td - 1) {
3861                                field |= TRB_CHAIN;
3862                                more_trbs_coming = true;
3863                        } else {
3864                                td->last_trb = ep_ring->enqueue;
3865                                field |= TRB_IOC;
3866                                if (xhci->hci_version == 0x100 &&
3867                                                !(xhci->quirks &
3868                                                        XHCI_AVOID_BEI)) {
3869                                        /* Set BEI bit except for the last td */
3870                                        if (i < num_tds - 1)
3871                                                field |= TRB_BEI;
3872                                }
3873                                more_trbs_coming = false;
3874                        }
3875
3876                        /* Calculate TRB length */
3877                        trb_buff_len = TRB_MAX_BUFF_SIZE -
3878                                (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
3879                        if (trb_buff_len > td_remain_len)
3880                                trb_buff_len = td_remain_len;
3881
3882                        /* Set the TRB length, TD size, & interrupter fields. */
3883                        if (xhci->hci_version < 0x100) {
3884                                remainder = xhci_td_remainder(
3885                                                td_len - running_total);
3886                        } else {
3887                                remainder = xhci_v1_0_td_remainder(
3888                                                running_total, trb_buff_len,
3889                                                total_packet_count, urb,
3890                                                (trbs_per_td - j - 1));
3891                        }
3892                        length_field = TRB_LEN(trb_buff_len) |
3893                                remainder |
3894                                TRB_INTR_TARGET(0);
3895
3896                        queue_trb(xhci, ep_ring, more_trbs_coming,
3897                                lower_32_bits(addr),
3898                                upper_32_bits(addr),
3899                                length_field,
3900                                field);
3901                        running_total += trb_buff_len;
3902
3903                        addr += trb_buff_len;
3904                        td_remain_len -= trb_buff_len;
3905                }
3906
3907                /* Check TD length */
3908                if (running_total != td_len) {
3909                        xhci_err(xhci, "ISOC TD length mismatch\n");
3910                        ret = -EINVAL;
3911                        goto cleanup;
3912                }
3913        }
3914
3915        if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
3916                if (xhci->quirks & XHCI_AMD_PLL_FIX)
3917                        usb_amd_quirk_pll_disable();
3918        }
3919        xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
3920
3921        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3922                        start_cycle, start_trb);
3923        return 0;
3924cleanup:
3925        /* Clean up a partially enqueued isoc transfer. */
3926
3927        for (i--; i >= 0; i--)
3928                list_del_init(&urb_priv->td[i]->td_list);
3929
3930        /* Use the first TD as a temporary variable to turn the TDs we've queued
3931         * into No-ops with a software-owned cycle bit. That way the hardware
3932         * won't accidentally start executing bogus TDs when we partially
3933         * overwrite them.  td->first_trb and td->start_seg are already set.
3934         */
3935        urb_priv->td[0]->last_trb = ep_ring->enqueue;
3936        /* Every TRB except the first & last will have its cycle bit flipped. */
3937        td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
3938
3939        /* Reset the ring enqueue back to the first TRB and its cycle bit. */
3940        ep_ring->enqueue = urb_priv->td[0]->first_trb;
3941        ep_ring->enq_seg = urb_priv->td[0]->start_seg;
3942        ep_ring->cycle_state = start_cycle;
3943        ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
3944        usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
3945        return ret;
3946}
3947
3948/*
3949 * Check the transfer ring to guarantee there is enough room for the URB.
3950 * Update the ISO URB's start_frame and interval.
3951 * The interval is updated the same way xhci_queue_intr_tx does it; for now,
3952 * just use the xhci frame_index to set urb->start_frame.
3953 * URB_ISO_ASAP is always assumed, and urb->start_frame is NEVER used as input.
3954 */
3955int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
3956                struct urb *urb, int slot_id, unsigned int ep_index)
3957{
3958        struct xhci_virt_device *xdev;
3959        struct xhci_ring *ep_ring;
3960        struct xhci_ep_ctx *ep_ctx;
3961        int start_frame;
3962        int xhci_interval;
3963        int ep_interval;
3964        int num_tds, num_trbs, i;
3965        int ret;
3966
3967        xdev = xhci->devs[slot_id];
3968        ep_ring = xdev->eps[ep_index].ring;
3969        ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3970
3971        num_trbs = 0;
3972        num_tds = urb->number_of_packets;
3973        for (i = 0; i < num_tds; i++)
3974                num_trbs += count_isoc_trbs_needed(xhci, urb, i);
3975
3976        /* Check the ring to guarantee there is enough room for the whole urb.
3977         * Do not insert any TDs of the URB onto the ring if the check fails.
3978         */
3979        ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
3980                           num_trbs, mem_flags);
3981        if (ret)
3982                return ret;
3983
3984        start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
3985        start_frame &= 0x3fff;
3986
3987        urb->start_frame = start_frame;
3988        if (urb->dev->speed == USB_SPEED_LOW ||
3989                        urb->dev->speed == USB_SPEED_FULL)
3990                urb->start_frame >>= 3;
3991
3992        xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
3993        ep_interval = urb->interval;
3994        /* Convert to microframes */
3995        if (urb->dev->speed == USB_SPEED_LOW ||
3996                        urb->dev->speed == USB_SPEED_FULL)
3997                ep_interval *= 8;
3998        /* FIXME change this to a warning and a suggestion to use the new API
3999         * to set the polling interval (once the API is added).
4000         */
4001        if (xhci_interval != ep_interval) {
4002                dev_dbg_ratelimited(&urb->dev->dev,
4003                                "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
4004                                ep_interval, ep_interval == 1 ? "" : "s",
4005                                xhci_interval, xhci_interval == 1 ? "" : "s");
4006                urb->interval = xhci_interval;
4007                /* Convert back to frames for LS/FS devices */
4008                if (urb->dev->speed == USB_SPEED_LOW ||
4009                                urb->dev->speed == USB_SPEED_FULL)
4010                        urb->interval /= 8;
4011        }
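        /*
         * Illustrative example (hypothetical values): a full-speed driver that
         * requested urb->interval = 4 frames against an endpoint context
         * programmed for 64 microframes compares 4 * 8 = 32 != 64, so the
         * debug message above fires and urb->interval is rewritten to
         * 64 / 8 = 8 frames to match what the controller will actually do.
         */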
4012        ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
4013
4014        return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
4015}
4016
4017/****           Command Ring Operations         ****/
4018
4019/* Generic function for queueing a command TRB on the command ring.
4020 * Check to make sure there's room on the command ring for one command TRB.
4021 * Also check that there's room reserved for commands that must not fail.
4022 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
4023 * then only check for the number of reserved spots.
4024 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
4025 * because the command event handler may want to resubmit a failed command.
4026 */
4027static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
4028                u32 field3, u32 field4, bool command_must_succeed)
4029{
4030        int reserved_trbs = xhci->cmd_ring_reserved_trbs;
4031        int ret;
4032
4033        if (!command_must_succeed)
4034                reserved_trbs++;
4035
4036        ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
4037                        reserved_trbs, GFP_ATOMIC);
4038        if (ret < 0) {
4039                xhci_err(xhci, "ERR: No room for command on command ring\n");
4040                if (command_must_succeed)
4041                        xhci_err(xhci, "ERR: Reserved TRB counting for "
4042                                        "must-succeed commands failed.\n");
4043                return ret;
4044        }
4045        queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
4046                        field4 | xhci->cmd_ring->cycle_state);
4047        return 0;
4048}
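/*
 * Sketch of the reservation rule above (hypothetical numbers): with
 * cmd_ring_reserved_trbs = 2, an ordinary command asks prepare_ring() for room
 * for 3 TRBs (itself plus the two reserved slots), while a must-succeed
 * command asks only for the 2 slots already reserved for it.
 */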
4049
4050/* Queue a slot enable or disable request on the command ring */
4051int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
4052{
4053        return queue_command(xhci, 0, 0, 0,
4054                        TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
4055}
4056
4057/* Queue an address device command TRB */
4058int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
4059                u32 slot_id)
4060{
4061        return queue_command(xhci, lower_32_bits(in_ctx_ptr),
4062                        upper_32_bits(in_ctx_ptr), 0,
4063                        TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
4064                        false);
4065}
4066
4067int xhci_queue_vendor_command(struct xhci_hcd *xhci,
4068                u32 field1, u32 field2, u32 field3, u32 field4)
4069{
4070        return queue_command(xhci, field1, field2, field3, field4, false);
4071}
4072
4073/* Queue a reset device command TRB */
4074int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
4075{
4076        return queue_command(xhci, 0, 0, 0,
4077                        TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
4078                        false);
4079}
4080
4081/* Queue a configure endpoint command TRB */
4082int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
4083                u32 slot_id, bool command_must_succeed)
4084{
4085        return queue_command(xhci, lower_32_bits(in_ctx_ptr),
4086                        upper_32_bits(in_ctx_ptr), 0,
4087                        TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
4088                        command_must_succeed);
4089}
4090
4091/* Queue an evaluate context command TRB */
4092int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
4093                u32 slot_id, bool command_must_succeed)
4094{
4095        return queue_command(xhci, lower_32_bits(in_ctx_ptr),
4096                        upper_32_bits(in_ctx_ptr), 0,
4097                        TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
4098                        command_must_succeed);
4099}
4100
4101/*
4102 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
4103 * activity on an endpoint that is about to be suspended.
4104 */
4105int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
4106                unsigned int ep_index, int suspend)
4107{
4108        u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4109        u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4110        u32 type = TRB_TYPE(TRB_STOP_RING);
4111        u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
4112
4113        return queue_command(xhci, 0, 0, 0,
4114                        trb_slot_id | trb_ep_index | type | trb_suspend, false);
4115}
4116
4117/* Set Transfer Ring Dequeue Pointer command.
4118 * This should not be used for endpoints that have streams enabled.
4119 */
4120static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
4121                unsigned int ep_index, unsigned int stream_id,
4122                struct xhci_segment *deq_seg,
4123                union xhci_trb *deq_ptr, u32 cycle_state)
4124{
4125        dma_addr_t addr;
4126        u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4127        u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4128        u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
4129        u32 type = TRB_TYPE(TRB_SET_DEQ);
4130        struct xhci_virt_ep *ep;
4131
4132        addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
4133        if (addr == 0) {
4134                xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
4135                xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
4136                                deq_seg, deq_ptr);
4137                return 0;
4138        }
4139        ep = &xhci->devs[slot_id]->eps[ep_index];
4140        if ((ep->ep_state & SET_DEQ_PENDING)) {
4141                xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
4142                xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
4143                return 0;
4144        }
4145        ep->queued_deq_seg = deq_seg;
4146        ep->queued_deq_ptr = deq_ptr;
4147        return queue_command(xhci, lower_32_bits(addr) | cycle_state,
4148                        upper_32_bits(addr), trb_stream_id,
4149                        trb_slot_id | trb_ep_index | type, false);
4150}
4151
4152int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
4153                unsigned int ep_index)
4154{
4155        u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4156        u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4157        u32 type = TRB_TYPE(TRB_RESET_EP);
4158
4159        return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
4160                        false);
4161}
4162