linux/drivers/usb/host/xhci.c
   1/*
   2 * xHCI host controller driver
   3 *
   4 * Copyright (C) 2008 Intel Corp.
   5 *
   6 * Author: Sarah Sharp
   7 * Some code borrowed from the Linux EHCI driver.
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License version 2 as
  11 * published by the Free Software Foundation.
  12 *
  13 * This program is distributed in the hope that it will be useful, but
  14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  15 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  16 * for more details.
  17 *
  18 * You should have received a copy of the GNU General Public License
  19 * along with this program; if not, write to the Free Software Foundation,
  20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  21 */
  22
  23#include <linux/pci.h>
  24#include <linux/irq.h>
  25#include <linux/log2.h>
  26#include <linux/module.h>
  27#include <linux/moduleparam.h>
  28#include <linux/slab.h>
  29
  30#include "xhci.h"
  31
  32#define DRIVER_AUTHOR "Sarah Sharp"
  33#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
  34
  35/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
  36static int link_quirk;
  37module_param(link_quirk, int, S_IRUGO | S_IWUSR);
  38MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
  39
  40/* TODO: copied from ehci-hcd.c - can this be refactored? */
  41/*
  42 * handshake - spin reading hc until handshake completes or fails
  43 * @ptr: address of hc register to be read
  44 * @mask: bits to look at in result of read
  45 * @done: value of those bits when handshake succeeds
  46 * @usec: timeout in microseconds
  47 *
  48 * Returns negative errno, or zero on success
  49 *
  50 * Success happens when the "mask" bits have the specified value (hardware
   51 * handshake done).  There are two failure modes:  "usec" has passed (major
  52 * hardware flakeout), or the register reads as all-ones (hardware removed).
  53 */
  54static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
  55                      u32 mask, u32 done, int usec)
  56{
  57        u32     result;
  58
  59        do {
  60                result = xhci_readl(xhci, ptr);
  61                if (result == ~(u32)0)          /* card removed */
  62                        return -ENODEV;
  63                result &= mask;
  64                if (result == done)
  65                        return 0;
  66                udelay(1);
  67                usec--;
  68        } while (usec > 0);
  69        return -ETIMEDOUT;
  70}
  71
  72/*
  73 * Disable interrupts and begin the xHCI halting process.
  74 */
  75void xhci_quiesce(struct xhci_hcd *xhci)
  76{
  77        u32 halted;
  78        u32 cmd;
  79        u32 mask;
  80
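             /*
              * Clear the interrupt enable bits in the command register; if the
              * controller has not halted yet, also clear the Run/Stop bit so it
              * begins to halt.
              */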
  81        mask = ~(XHCI_IRQS);
  82        halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
  83        if (!halted)
  84                mask &= ~CMD_RUN;
  85
  86        cmd = xhci_readl(xhci, &xhci->op_regs->command);
  87        cmd &= mask;
  88        xhci_writel(xhci, cmd, &xhci->op_regs->command);
  89}
  90
  91/*
  92 * Force HC into halt state.
  93 *
  94 * Disable any IRQs and clear the run/stop bit.
  95 * HC will complete any current and actively pipelined transactions, and
  96 * should halt within 16 microframes of the run/stop bit being cleared.
  97 * Read HC Halted bit in the status register to see when the HC is finished.
  98 * XXX: shouldn't we set HC_STATE_HALT here somewhere?
  99 */
 100int xhci_halt(struct xhci_hcd *xhci)
 101{
 102        xhci_dbg(xhci, "// Halt the HC\n");
 103        xhci_quiesce(xhci);
 104
 105        return handshake(xhci, &xhci->op_regs->status,
 106                        STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 107}
 108
 109/*
 110 * Set the run bit and wait for the host to be running.
 111 */
 112static int xhci_start(struct xhci_hcd *xhci)
 113{
 114        u32 temp;
 115        int ret;
 116
 117        temp = xhci_readl(xhci, &xhci->op_regs->command);
 118        temp |= (CMD_RUN);
 119        xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
 120                        temp);
 121        xhci_writel(xhci, temp, &xhci->op_regs->command);
 122
 123        /*
 124         * Wait for the HCHalted Status bit to be 0 to indicate the host is
 125         * running.
 126         */
 127        ret = handshake(xhci, &xhci->op_regs->status,
 128                        STS_HALT, 0, XHCI_MAX_HALT_USEC);
 129        if (ret == -ETIMEDOUT)
 130                xhci_err(xhci, "Host took too long to start, "
 131                                "waited %u microseconds.\n",
 132                                XHCI_MAX_HALT_USEC);
 133        return ret;
 134}
 135
 136/*
 137 * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
 138 *
 139 * This resets pipelines, timers, counters, state machines, etc.
 140 * Transactions will be terminated immediately, and operational registers
 141 * will be set to their defaults.
 142 */
 143int xhci_reset(struct xhci_hcd *xhci)
 144{
 145        u32 command;
 146        u32 state;
 147        int ret;
 148
 149        state = xhci_readl(xhci, &xhci->op_regs->status);
 150        if ((state & STS_HALT) == 0) {
 151                xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
 152                return 0;
 153        }
 154
 155        xhci_dbg(xhci, "// Reset the HC\n");
 156        command = xhci_readl(xhci, &xhci->op_regs->command);
 157        command |= CMD_RESET;
 158        xhci_writel(xhci, command, &xhci->op_regs->command);
 159        /* XXX: Why does EHCI set this here?  Shouldn't other code do this? */
 160        xhci_to_hcd(xhci)->state = HC_STATE_HALT;
 161
 162        ret = handshake(xhci, &xhci->op_regs->command,
 163                        CMD_RESET, 0, 250 * 1000);
 164        if (ret)
 165                return ret;
 166
 167        xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
 168        /*
 169         * xHCI cannot write to any doorbells or operational registers other
 170         * than status until the "Controller Not Ready" flag is cleared.
 171         */
 172        return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
 173}
 174
 175/*
 176 * Free IRQs
  177 * Free all IRQs that the driver has requested.
 178 */
 179static void xhci_free_irq(struct xhci_hcd *xhci)
 180{
 181        int i;
 182        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 183
 184        /* return if using legacy interrupt */
 185        if (xhci_to_hcd(xhci)->irq >= 0)
 186                return;
 187
 188        if (xhci->msix_entries) {
 189                for (i = 0; i < xhci->msix_count; i++)
 190                        if (xhci->msix_entries[i].vector)
 191                                free_irq(xhci->msix_entries[i].vector,
 192                                                xhci_to_hcd(xhci));
 193        } else if (pdev->irq >= 0)
 194                free_irq(pdev->irq, xhci_to_hcd(xhci));
 195
 196        return;
 197}
 198
 199/*
 200 * Set up MSI
 201 */
 202static int xhci_setup_msi(struct xhci_hcd *xhci)
 203{
 204        int ret;
 205        struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 206
 207        ret = pci_enable_msi(pdev);
 208        if (ret) {
 209                xhci_err(xhci, "failed to allocate MSI entry\n");
 210                return ret;
 211        }
 212
 213        ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
 214                                0, "xhci_hcd", xhci_to_hcd(xhci));
 215        if (ret) {
  216                xhci_err(xhci, "disabling MSI interrupt\n");
 217                pci_disable_msi(pdev);
 218        }
 219
 220        return ret;
 221}
 222
 223/*
 224 * Set up MSI-X
 225 */
 226static int xhci_setup_msix(struct xhci_hcd *xhci)
 227{
 228        int i, ret = 0;
 229        struct usb_hcd *hcd = xhci_to_hcd(xhci);
 230        struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
 231
 232        /*
  233         * Calculate the number of MSI-X vectors to request.
  234         * - HCS_MAX_INTRS: the maximum number of interrupters the host
  235         *   supports, as reported in the HCSPARAMS1 register.
  236         * - num_online_cpus: one MSI-X vector per online CPU core, plus
  237         *   one extra vector so an interrupt is always available.
 238         */
 239        xhci->msix_count = min(num_online_cpus() + 1,
 240                                HCS_MAX_INTRS(xhci->hcs_params1));
 241
 242        xhci->msix_entries =
 243                kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
 244                                GFP_KERNEL);
 245        if (!xhci->msix_entries) {
 246                xhci_err(xhci, "Failed to allocate MSI-X entries\n");
 247                return -ENOMEM;
 248        }
 249
 250        for (i = 0; i < xhci->msix_count; i++) {
 251                xhci->msix_entries[i].entry = i;
 252                xhci->msix_entries[i].vector = 0;
 253        }
 254
 255        ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
 256        if (ret) {
 257                xhci_err(xhci, "Failed to enable MSI-X\n");
 258                goto free_entries;
 259        }
 260
 261        for (i = 0; i < xhci->msix_count; i++) {
 262                ret = request_irq(xhci->msix_entries[i].vector,
 263                                (irq_handler_t)xhci_msi_irq,
 264                                0, "xhci_hcd", xhci_to_hcd(xhci));
 265                if (ret)
 266                        goto disable_msix;
 267        }
 268
 269        hcd->msix_enabled = 1;
 270        return ret;
 271
 272disable_msix:
  273        xhci_err(xhci, "disabling MSI-X interrupts\n");
 274        xhci_free_irq(xhci);
 275        pci_disable_msix(pdev);
 276free_entries:
 277        kfree(xhci->msix_entries);
 278        xhci->msix_entries = NULL;
 279        return ret;
 280}
 281
 282/* Free any IRQs and disable MSI-X */
 283static void xhci_cleanup_msix(struct xhci_hcd *xhci)
 284{
 285        struct usb_hcd *hcd = xhci_to_hcd(xhci);
 286        struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
 287
 288        xhci_free_irq(xhci);
 289
 290        if (xhci->msix_entries) {
 291                pci_disable_msix(pdev);
 292                kfree(xhci->msix_entries);
 293                xhci->msix_entries = NULL;
 294        } else {
 295                pci_disable_msi(pdev);
 296        }
 297
 298        hcd->msix_enabled = 0;
 299        return;
 300}
 301
 302/*
 303 * Initialize memory for HCD and xHC (one-time init).
 304 *
 305 * Program the PAGESIZE register, initialize the device context array, create
 306 * device contexts (?), set up a command ring segment (or two?), create event
 307 * ring (one for now).
 308 */
 309int xhci_init(struct usb_hcd *hcd)
 310{
 311        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 312        int retval = 0;
 313
 314        xhci_dbg(xhci, "xhci_init\n");
 315        spin_lock_init(&xhci->lock);
 316        if (link_quirk) {
 317                xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
 318                xhci->quirks |= XHCI_LINK_TRB_QUIRK;
 319        } else {
 320                xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
 321        }
 322        retval = xhci_mem_init(xhci, GFP_KERNEL);
 323        xhci_dbg(xhci, "Finished xhci_init\n");
 324
 325        return retval;
 326}
 327
 328/*-------------------------------------------------------------------------*/
 329
 330
 331#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
 332static void xhci_event_ring_work(unsigned long arg)
 333{
 334        unsigned long flags;
 335        int temp;
 336        u64 temp_64;
 337        struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
 338        int i, j;
 339
 340        xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);
 341
 342        spin_lock_irqsave(&xhci->lock, flags);
 343        temp = xhci_readl(xhci, &xhci->op_regs->status);
 344        xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
 345        if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
 346                xhci_dbg(xhci, "HW died, polling stopped.\n");
 347                spin_unlock_irqrestore(&xhci->lock, flags);
 348                return;
 349        }
 350
 351        temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
 352        xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
 353        xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
 354        xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
 355        xhci->error_bitmask = 0;
 356        xhci_dbg(xhci, "Event ring:\n");
 357        xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
 358        xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
 359        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
 360        temp_64 &= ~ERST_PTR_MASK;
  361        xhci_dbg(xhci, "ERST deq = 64'h%0llx\n", (unsigned long long) temp_64);
 362        xhci_dbg(xhci, "Command ring:\n");
 363        xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
 364        xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
 365        xhci_dbg_cmd_ptrs(xhci);
 366        for (i = 0; i < MAX_HC_SLOTS; ++i) {
 367                if (!xhci->devs[i])
 368                        continue;
 369                for (j = 0; j < 31; ++j) {
 370                        xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
 371                }
 372        }
 373
 374        if (xhci->noops_submitted != NUM_TEST_NOOPS)
 375                if (xhci_setup_one_noop(xhci))
 376                        xhci_ring_cmd_db(xhci);
 377        spin_unlock_irqrestore(&xhci->lock, flags);
 378
 379        if (!xhci->zombie)
 380                mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
 381        else
 382                xhci_dbg(xhci, "Quit polling the event ring.\n");
 383}
 384#endif
 385
 386/*
 387 * Start the HC after it was halted.
 388 *
 389 * This function is called by the USB core when the HC driver is added.
 390 * Its opposite is xhci_stop().
 391 *
 392 * xhci_init() must be called once before this function can be called.
 393 * Reset the HC, enable device slot contexts, program DCBAAP, and
 394 * set command ring pointer and event ring pointer.
 395 *
 396 * Setup MSI-X vectors and enable interrupts.
 397 */
 398int xhci_run(struct usb_hcd *hcd)
 399{
 400        u32 temp;
 401        u64 temp_64;
  402        int ret;
 403        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 404        struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 405        void (*doorbell)(struct xhci_hcd *) = NULL;
 406
 407        hcd->uses_new_polling = 1;
 408
 409        xhci_dbg(xhci, "xhci_run\n");
 410        /* unregister the legacy interrupt */
 411        if (hcd->irq)
 412                free_irq(hcd->irq, hcd);
 413        hcd->irq = -1;
 414
 415        ret = xhci_setup_msix(xhci);
 416        if (ret)
 417                /* fall back to msi*/
 418                ret = xhci_setup_msi(xhci);
 419
 420        if (ret) {
 421                /* fall back to legacy interrupt*/
 422                ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
 423                                        hcd->irq_descr, hcd);
 424                if (ret) {
 425                        xhci_err(xhci, "request interrupt %d failed\n",
 426                                        pdev->irq);
 427                        return ret;
 428                }
 429                hcd->irq = pdev->irq;
 430        }
 431
 432#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
 433        init_timer(&xhci->event_ring_timer);
 434        xhci->event_ring_timer.data = (unsigned long) xhci;
 435        xhci->event_ring_timer.function = xhci_event_ring_work;
 436        /* Poll the event ring */
 437        xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
 438        xhci->zombie = 0;
 439        xhci_dbg(xhci, "Setting event ring polling timer\n");
 440        add_timer(&xhci->event_ring_timer);
 441#endif
 442
 443        xhci_dbg(xhci, "Command ring memory map follows:\n");
 444        xhci_debug_ring(xhci, xhci->cmd_ring);
 445        xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
 446        xhci_dbg_cmd_ptrs(xhci);
 447
 448        xhci_dbg(xhci, "ERST memory map follows:\n");
 449        xhci_dbg_erst(xhci, &xhci->erst);
 450        xhci_dbg(xhci, "Event ring:\n");
 451        xhci_debug_ring(xhci, xhci->event_ring);
 452        xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
 453        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
 454        temp_64 &= ~ERST_PTR_MASK;
  455        xhci_dbg(xhci, "ERST deq = 64'h%0llx\n", (unsigned long long) temp_64);
 456
 457        xhci_dbg(xhci, "// Set the interrupt modulation register\n");
 458        temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
 459        temp &= ~ER_IRQ_INTERVAL_MASK;
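             /*
              * The IMOD interval field is in 250 ns units, so 160 gives at most
              * one interrupt every 160 * 250 ns = 40 us.
              */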
 460        temp |= (u32) 160;
 461        xhci_writel(xhci, temp, &xhci->ir_set->irq_control);
 462
 463        /* Set the HCD state before we enable the irqs */
 464        hcd->state = HC_STATE_RUNNING;
 465        temp = xhci_readl(xhci, &xhci->op_regs->command);
 466        temp |= (CMD_EIE);
 467        xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
 468                        temp);
 469        xhci_writel(xhci, temp, &xhci->op_regs->command);
 470
 471        temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
 472        xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
 473                        xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
 474        xhci_writel(xhci, ER_IRQ_ENABLE(temp),
 475                        &xhci->ir_set->irq_pending);
 476        xhci_print_ir_set(xhci, 0);
 477
 478        if (NUM_TEST_NOOPS > 0)
 479                doorbell = xhci_setup_one_noop(xhci);
 480        if (xhci->quirks & XHCI_NEC_HOST)
 481                xhci_queue_vendor_command(xhci, 0, 0, 0,
 482                                TRB_TYPE(TRB_NEC_GET_FW));
 483
 484        if (xhci_start(xhci)) {
 485                xhci_halt(xhci);
 486                return -ENODEV;
 487        }
 488
 489        if (doorbell)
 490                (*doorbell)(xhci);
 491        if (xhci->quirks & XHCI_NEC_HOST)
 492                xhci_ring_cmd_db(xhci);
 493
 494        xhci_dbg(xhci, "Finished xhci_run\n");
 495        return 0;
 496}
 497
 498/*
 499 * Stop xHCI driver.
 500 *
 501 * This function is called by the USB core when the HC driver is removed.
 502 * Its opposite is xhci_run().
 503 *
 504 * Disable device contexts, disable IRQs, and quiesce the HC.
 505 * Reset the HC, finish any completed transactions, and cleanup memory.
 506 */
 507void xhci_stop(struct usb_hcd *hcd)
 508{
 509        u32 temp;
 510        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 511
 512        spin_lock_irq(&xhci->lock);
 513        xhci_halt(xhci);
 514        xhci_reset(xhci);
 515        spin_unlock_irq(&xhci->lock);
 516
 517        xhci_cleanup_msix(xhci);
 518
 519#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
 520        /* Tell the event ring poll function not to reschedule */
 521        xhci->zombie = 1;
 522        del_timer_sync(&xhci->event_ring_timer);
 523#endif
 524
 525        xhci_dbg(xhci, "// Disabling event ring interrupts\n");
 526        temp = xhci_readl(xhci, &xhci->op_regs->status);
 527        xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
 528        temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
 529        xhci_writel(xhci, ER_IRQ_DISABLE(temp),
 530                        &xhci->ir_set->irq_pending);
 531        xhci_print_ir_set(xhci, 0);
 532
 533        xhci_dbg(xhci, "cleaning up memory\n");
 534        xhci_mem_cleanup(xhci);
 535        xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
 536                    xhci_readl(xhci, &xhci->op_regs->status));
 537}
 538
 539/*
 540 * Shutdown HC (not bus-specific)
 541 *
 542 * This is called when the machine is rebooting or halting.  We assume that the
 543 * machine will be powered off, and the HC's internal state will be reset.
 544 * Don't bother to free memory.
 545 */
 546void xhci_shutdown(struct usb_hcd *hcd)
 547{
 548        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 549
 550        spin_lock_irq(&xhci->lock);
 551        xhci_halt(xhci);
 552        spin_unlock_irq(&xhci->lock);
 553
 554        xhci_cleanup_msix(xhci);
 555
 556        xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
 557                    xhci_readl(xhci, &xhci->op_regs->status));
 558}
 559
 560#ifdef CONFIG_PM
 561static void xhci_save_registers(struct xhci_hcd *xhci)
 562{
 563        xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
 564        xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
 565        xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
 566        xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
 567        xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
 568        xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
 569        xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
 570        xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
 571        xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
 572}
 573
 574static void xhci_restore_registers(struct xhci_hcd *xhci)
 575{
 576        xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
 577        xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
 578        xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
 579        xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
 580        xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
 581        xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
 582        xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
 583        xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
 584}
 585
 586static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
 587{
 588        u64     val_64;
 589
 590        /* step 2: initialize command ring buffer */
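             /*
              * Preserve the reserved bits of the CRCR register, OR in the DMA
              * address of the current dequeue TRB, and set the ring cycle state.
              */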
 591        val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
 592        val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
 593                (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
 594                                      xhci->cmd_ring->dequeue) &
 595                 (u64) ~CMD_RING_RSVD_BITS) |
 596                xhci->cmd_ring->cycle_state;
 597        xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
  598                        (unsigned long long) val_64);
 599        xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
 600}
 601
 602/*
 603 * The whole command ring must be cleared to zero when we suspend the host.
 604 *
 605 * The host doesn't save the command ring pointer in the suspend well, so we
 606 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 607 * aligned, because of the reserved bits in the command ring dequeue pointer
 608 * register.  Therefore, we can't just set the dequeue pointer back in the
 609 * middle of the ring (TRBs are 16-byte aligned).
 610 */
 611static void xhci_clear_command_ring(struct xhci_hcd *xhci)
 612{
 613        struct xhci_ring *ring;
 614        struct xhci_segment *seg;
 615
 616        ring = xhci->cmd_ring;
 617        seg = ring->deq_seg;
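             /* Zero every segment of the command ring, starting at the dequeue segment. */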
 618        do {
 619                memset(seg->trbs, 0, SEGMENT_SIZE);
 620                seg = seg->next;
 621        } while (seg != ring->deq_seg);
 622
 623        /* Reset the software enqueue and dequeue pointers */
 624        ring->deq_seg = ring->first_seg;
 625        ring->dequeue = ring->first_seg->trbs;
 626        ring->enq_seg = ring->deq_seg;
 627        ring->enqueue = ring->dequeue;
 628
 629        /*
 630         * Ring is now zeroed, so the HW should look for change of ownership
 631         * when the cycle bit is set to 1.
 632         */
 633        ring->cycle_state = 1;
 634
 635        /*
 636         * Reset the hardware dequeue pointer.
 637         * Yes, this will need to be re-written after resume, but we're paranoid
 638         * and want to make sure the hardware doesn't access bogus memory
 639         * because, say, the BIOS or an SMI started the host without changing
 640         * the command ring pointers.
 641         */
 642        xhci_set_cmd_ring_deq(xhci);
 643}
 644
 645/*
 646 * Stop HC (not bus-specific)
 647 *
  648 * This is called when the machine transitions into S3/S4 mode.
 649 *
 650 */
 651int xhci_suspend(struct xhci_hcd *xhci)
 652{
 653        int                     rc = 0;
 654        struct usb_hcd          *hcd = xhci_to_hcd(xhci);
 655        u32                     command;
 656        int                     i;
 657
 658        spin_lock_irq(&xhci->lock);
 659        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
 660        /* step 1: stop endpoint */
  661        /* skipped; we assume port suspend has already been done */
 662
 663        /* step 2: clear Run/Stop bit */
 664        command = xhci_readl(xhci, &xhci->op_regs->command);
 665        command &= ~CMD_RUN;
 666        xhci_writel(xhci, command, &xhci->op_regs->command);
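             /* Wait up to 100 * 100 us = 10 ms for the controller to halt. */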
 667        if (handshake(xhci, &xhci->op_regs->status,
 668                      STS_HALT, STS_HALT, 100*100)) {
 669                xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
 670                spin_unlock_irq(&xhci->lock);
 671                return -ETIMEDOUT;
 672        }
 673        xhci_clear_command_ring(xhci);
 674
 675        /* step 3: save registers */
 676        xhci_save_registers(xhci);
 677
 678        /* step 4: set CSS flag */
 679        command = xhci_readl(xhci, &xhci->op_regs->command);
 680        command |= CMD_CSS;
 681        xhci_writel(xhci, command, &xhci->op_regs->command);
 682        if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
 683                xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
 684                spin_unlock_irq(&xhci->lock);
 685                return -ETIMEDOUT;
 686        }
 687        spin_unlock_irq(&xhci->lock);
 688
 689        /* step 5: remove core well power */
 690        /* synchronize irq when using MSI-X */
 691        if (xhci->msix_entries) {
 692                for (i = 0; i < xhci->msix_count; i++)
 693                        synchronize_irq(xhci->msix_entries[i].vector);
 694        }
 695
 696        return rc;
 697}
 698
 699/*
  700 * Start the xHC (not bus-specific)
  701 *
  702 * This is called when the machine transitions out of S3/S4 mode.
 703 *
 704 */
 705int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 706{
 707        u32                     command, temp = 0;
 708        struct usb_hcd          *hcd = xhci_to_hcd(xhci);
 709        int     old_state, retval;
 710
 711        old_state = hcd->state;
 712        if (time_before(jiffies, xhci->next_statechange))
 713                msleep(100);
 714
 715        spin_lock_irq(&xhci->lock);
 716
 717        if (!hibernated) {
  718                /* step 1: restore registers */
 719                xhci_restore_registers(xhci);
 720                /* step 2: initialize command ring buffer */
 721                xhci_set_cmd_ring_deq(xhci);
  722                /* step 3: restore the saved HC state */
  723                /* set the CRS flag to start the restore operation */
 724                command = xhci_readl(xhci, &xhci->op_regs->command);
 725                command |= CMD_CRS;
 726                xhci_writel(xhci, command, &xhci->op_regs->command);
 727                if (handshake(xhci, &xhci->op_regs->status,
 728                              STS_RESTORE, 0, 10*100)) {
  729                        xhci_dbg(xhci, "WARN: xHC CMD_CRS timeout\n");
 730                        spin_unlock_irq(&xhci->lock);
 731                        return -ETIMEDOUT;
 732                }
 733                temp = xhci_readl(xhci, &xhci->op_regs->status);
 734        }
 735
 736        /* If restore operation fails, re-initialize the HC during resume */
 737        if ((temp & STS_SRE) || hibernated) {
 738                usb_root_hub_lost_power(hcd->self.root_hub);
 739
 740                xhci_dbg(xhci, "Stop HCD\n");
 741                xhci_halt(xhci);
 742                xhci_reset(xhci);
 743                spin_unlock_irq(&xhci->lock);
 744                xhci_cleanup_msix(xhci);
 745
 746#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
 747                /* Tell the event ring poll function not to reschedule */
 748                xhci->zombie = 1;
 749                del_timer_sync(&xhci->event_ring_timer);
 750#endif
 751
 752                xhci_dbg(xhci, "// Disabling event ring interrupts\n");
 753                temp = xhci_readl(xhci, &xhci->op_regs->status);
 754                xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
 755                temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
 756                xhci_writel(xhci, ER_IRQ_DISABLE(temp),
 757                                &xhci->ir_set->irq_pending);
 758                xhci_print_ir_set(xhci, 0);
 759
 760                xhci_dbg(xhci, "cleaning up memory\n");
 761                xhci_mem_cleanup(xhci);
 762                xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
 763                            xhci_readl(xhci, &xhci->op_regs->status));
 764
 765                xhci_dbg(xhci, "Initialize the HCD\n");
 766                retval = xhci_init(hcd);
 767                if (retval)
 768                        return retval;
 769
 770                xhci_dbg(xhci, "Start the HCD\n");
 771                retval = xhci_run(hcd);
 772                if (!retval)
 773                        set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
 774                hcd->state = HC_STATE_SUSPENDED;
 775                return retval;
 776        }
 777
 778        /* step 4: set Run/Stop bit */
 779        command = xhci_readl(xhci, &xhci->op_regs->command);
 780        command |= CMD_RUN;
 781        xhci_writel(xhci, command, &xhci->op_regs->command);
 782        handshake(xhci, &xhci->op_regs->status, STS_HALT,
 783                  0, 250 * 1000);
 784
 785        /* step 5: walk topology and initialize portsc,
 786         * portpmsc and portli
 787         */
 788        /* this is done in bus_resume */
 789
 790        /* step 6: restart each of the previously
  791         * running endpoints by ringing their doorbells
 792         */
 793
 794        set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
 795        if (!hibernated)
 796                hcd->state = old_state;
 797        else
 798                hcd->state = HC_STATE_SUSPENDED;
 799
 800        spin_unlock_irq(&xhci->lock);
 801        return 0;
 802}
 803#endif  /* CONFIG_PM */
 804
 805/*-------------------------------------------------------------------------*/
 806
 807/**
 808 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 809 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 810 * value to right shift 1 for the bitmask.
 811 *
 812 * Index  = (epnum * 2) + direction - 1,
 813 * where direction = 0 for OUT, 1 for IN.
 814 * For control endpoints, the IN index is used (OUT index is unused), so
 815 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
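      * For example, bulk ep 1 IN (address 0x81) gives index (1 * 2) + 1 - 1 = 2.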
 816 */
 817unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
 818{
 819        unsigned int index;
 820        if (usb_endpoint_xfer_control(desc))
 821                index = (unsigned int) (usb_endpoint_num(desc)*2);
 822        else
 823                index = (unsigned int) (usb_endpoint_num(desc)*2) +
 824                        (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
 825        return index;
 826}
 827
 828/* Find the flag for this endpoint (for use in the control context).  Use the
 829 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 830 * bit 1, etc.
 831 */
 832unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
 833{
 834        return 1 << (xhci_get_endpoint_index(desc) + 1);
 835}
 836
 837/* Find the flag for this endpoint (for use in the control context).  Use the
 838 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 839 * bit 1, etc.
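      * For example, endpoint index 2 (ep 1 IN) yields the flag 1 << 3 = 0x8.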
 840 */
 841unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
 842{
 843        return 1 << (ep_index + 1);
 844}
 845
 846/* Compute the last valid endpoint context index.  Basically, this is the
  847 * endpoint index plus one.  For slot contexts with more than one valid endpoint,
 848 * we find the most significant bit set in the added contexts flags.
 849 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 850 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 851 */
 852unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
 853{
 854        return fls(added_ctxs) - 1;
 855}
 856
 857/* Returns 1 if the arguments are OK;
  858 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 859 */
 860static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
 861                struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
 862                const char *func) {
 863        struct xhci_hcd *xhci;
 864        struct xhci_virt_device *virt_dev;
 865
 866        if (!hcd || (check_ep && !ep) || !udev) {
 867                printk(KERN_DEBUG "xHCI %s called with invalid args\n",
 868                                func);
 869                return -EINVAL;
 870        }
 871        if (!udev->parent) {
 872                printk(KERN_DEBUG "xHCI %s called for root hub\n",
 873                                func);
 874                return 0;
 875        }
 876
 877        if (check_virt_dev) {
 878                xhci = hcd_to_xhci(hcd);
 879                if (!udev->slot_id || !xhci->devs
 880                        || !xhci->devs[udev->slot_id]) {
 881                        printk(KERN_DEBUG "xHCI %s called with unaddressed "
 882                                                "device\n", func);
 883                        return -EINVAL;
 884                }
 885
 886                virt_dev = xhci->devs[udev->slot_id];
 887                if (virt_dev->udev != udev) {
  888                        printk(KERN_DEBUG "xHCI %s called with udev that "
  889                                          "does not match virt_dev\n", func);
 890                        return -EINVAL;
 891                }
 892        }
 893
 894        return 1;
 895}
 896
 897static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 898                struct usb_device *udev, struct xhci_command *command,
 899                bool ctx_change, bool must_succeed);
 900
 901/*
 902 * Full speed devices may have a max packet size greater than 8 bytes, but the
 903 * USB core doesn't know that until it reads the first 8 bytes of the
 904 * descriptor.  If the usb_device's max packet size changes after that point,
 905 * we need to issue an evaluate context command and wait on it.
 906 */
 907static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
 908                unsigned int ep_index, struct urb *urb)
 909{
 910        struct xhci_container_ctx *in_ctx;
 911        struct xhci_container_ctx *out_ctx;
 912        struct xhci_input_control_ctx *ctrl_ctx;
 913        struct xhci_ep_ctx *ep_ctx;
 914        int max_packet_size;
 915        int hw_max_packet_size;
 916        int ret = 0;
 917
 918        out_ctx = xhci->devs[slot_id]->out_ctx;
 919        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
 920        hw_max_packet_size = MAX_PACKET_DECODED(ep_ctx->ep_info2);
 921        max_packet_size = urb->dev->ep0.desc.wMaxPacketSize;
 922        if (hw_max_packet_size != max_packet_size) {
 923                xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
 924                xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
 925                                max_packet_size);
 926                xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
 927                                hw_max_packet_size);
 928                xhci_dbg(xhci, "Issuing evaluate context command.\n");
 929
 930                /* Set up the modified control endpoint 0 */
 931                xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
 932                                xhci->devs[slot_id]->out_ctx, ep_index);
 933                in_ctx = xhci->devs[slot_id]->in_ctx;
 934                ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
 935                ep_ctx->ep_info2 &= ~MAX_PACKET_MASK;
 936                ep_ctx->ep_info2 |= MAX_PACKET(max_packet_size);
 937
 938                /* Set up the input context flags for the command */
 939                /* FIXME: This won't work if a non-default control endpoint
 940                 * changes max packet sizes.
 941                 */
 942                ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
 943                ctrl_ctx->add_flags = EP0_FLAG;
 944                ctrl_ctx->drop_flags = 0;
 945
 946                xhci_dbg(xhci, "Slot %d input context\n", slot_id);
 947                xhci_dbg_ctx(xhci, in_ctx, ep_index);
 948                xhci_dbg(xhci, "Slot %d output context\n", slot_id);
 949                xhci_dbg_ctx(xhci, out_ctx, ep_index);
 950
 951                ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
 952                                true, false);
 953
 954                /* Clean up the input context for later use by bandwidth
 955                 * functions.
 956                 */
 957                ctrl_ctx->add_flags = SLOT_FLAG;
 958        }
 959        return ret;
 960}
 961
 962/*
 963 * non-error returns are a promise to giveback() the urb later
 964 * we drop ownership so next owner (or urb unlink) can get it
 965 */
 966int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 967{
 968        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 969        unsigned long flags;
 970        int ret = 0;
 971        unsigned int slot_id, ep_index;
 972        struct urb_priv *urb_priv;
 973        int size, i;
 974
 975        if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
 976                                        true, true, __func__) <= 0)
 977                return -EINVAL;
 978
 979        slot_id = urb->dev->slot_id;
 980        ep_index = xhci_get_endpoint_index(&urb->ep->desc);
 981
 982        if (!HCD_HW_ACCESSIBLE(hcd)) {
 983                if (!in_interrupt())
 984                        xhci_dbg(xhci, "urb submitted during PCI suspend\n");
 985                ret = -ESHUTDOWN;
 986                goto exit;
 987        }
 988
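             /* Isochronous URBs need one TD per packet; other transfers use a single TD. */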
 989        if (usb_endpoint_xfer_isoc(&urb->ep->desc))
 990                size = urb->number_of_packets;
 991        else
 992                size = 1;
 993
 994        urb_priv = kzalloc(sizeof(struct urb_priv) +
 995                                  size * sizeof(struct xhci_td *), mem_flags);
 996        if (!urb_priv)
 997                return -ENOMEM;
 998
 999        for (i = 0; i < size; i++) {
1000                urb_priv->td[i] = kzalloc(sizeof(struct xhci_td), mem_flags);
1001                if (!urb_priv->td[i]) {
1002                        urb_priv->length = i;
1003                        xhci_urb_free_priv(xhci, urb_priv);
1004                        return -ENOMEM;
1005                }
1006        }
1007
1008        urb_priv->length = size;
1009        urb_priv->td_cnt = 0;
1010        urb->hcpriv = urb_priv;
1011
1012        if (usb_endpoint_xfer_control(&urb->ep->desc)) {
1013                /* Check to see if the max packet size for the default control
1014                 * endpoint changed during FS device enumeration
1015                 */
1016                if (urb->dev->speed == USB_SPEED_FULL) {
1017                        ret = xhci_check_maxpacket(xhci, slot_id,
1018                                        ep_index, urb);
 1019                        if (ret < 0) {
                                     /* free urb_priv so it isn't leaked on this error path */
                                     xhci_urb_free_priv(xhci, urb_priv);
                                     urb->hcpriv = NULL;
 1020                                return ret;
                             }
1021                }
1022
1023                /* We have a spinlock and interrupts disabled, so we must pass
1024                 * atomic context to this function, which may allocate memory.
1025                 */
1026                spin_lock_irqsave(&xhci->lock, flags);
1027                if (xhci->xhc_state & XHCI_STATE_DYING)
1028                        goto dying;
1029                ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
1030                                slot_id, ep_index);
1031                spin_unlock_irqrestore(&xhci->lock, flags);
1032        } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
1033                spin_lock_irqsave(&xhci->lock, flags);
1034                if (xhci->xhc_state & XHCI_STATE_DYING)
1035                        goto dying;
1036                if (xhci->devs[slot_id]->eps[ep_index].ep_state &
1037                                EP_GETTING_STREAMS) {
1038                        xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
1039                                        "is transitioning to using streams.\n");
1040                        ret = -EINVAL;
1041                } else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
1042                                EP_GETTING_NO_STREAMS) {
1043                        xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
1044                                        "is transitioning to "
1045                                        "not having streams.\n");
1046                        ret = -EINVAL;
1047                } else {
1048                        ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
1049                                        slot_id, ep_index);
1050                }
1051                spin_unlock_irqrestore(&xhci->lock, flags);
1052        } else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
1053                spin_lock_irqsave(&xhci->lock, flags);
1054                if (xhci->xhc_state & XHCI_STATE_DYING)
1055                        goto dying;
1056                ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
1057                                slot_id, ep_index);
1058                spin_unlock_irqrestore(&xhci->lock, flags);
1059        } else {
1060                spin_lock_irqsave(&xhci->lock, flags);
1061                if (xhci->xhc_state & XHCI_STATE_DYING)
1062                        goto dying;
1063                ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
1064                                slot_id, ep_index);
1065                spin_unlock_irqrestore(&xhci->lock, flags);
1066        }
1067exit:
1068        return ret;
1069dying:
1070        xhci_urb_free_priv(xhci, urb_priv);
1071        urb->hcpriv = NULL;
1072        xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
1073                        "non-responsive xHCI host.\n",
1074                        urb->ep->desc.bEndpointAddress, urb);
1075        spin_unlock_irqrestore(&xhci->lock, flags);
1076        return -ESHUTDOWN;
1077}
1078
1079/* Get the right ring for the given URB.
1080 * If the endpoint supports streams, boundary check the URB's stream ID.
1081 * If the endpoint doesn't support streams, return the singular endpoint ring.
1082 */
1083static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
1084                struct urb *urb)
1085{
1086        unsigned int slot_id;
1087        unsigned int ep_index;
1088        unsigned int stream_id;
1089        struct xhci_virt_ep *ep;
1090
1091        slot_id = urb->dev->slot_id;
1092        ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1093        stream_id = urb->stream_id;
1094        ep = &xhci->devs[slot_id]->eps[ep_index];
1095        /* Common case: no streams */
1096        if (!(ep->ep_state & EP_HAS_STREAMS))
1097                return ep->ring;
1098
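             /*
              * Stream ID 0 is reserved, so an endpoint with streams enabled must
              * be addressed with a non-zero stream ID.
              */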
1099        if (stream_id == 0) {
1100                xhci_warn(xhci,
1101                                "WARN: Slot ID %u, ep index %u has streams, "
1102                                "but URB has no stream ID.\n",
1103                                slot_id, ep_index);
1104                return NULL;
1105        }
1106
1107        if (stream_id < ep->stream_info->num_streams)
1108                return ep->stream_info->stream_rings[stream_id];
1109
1110        xhci_warn(xhci,
1111                        "WARN: Slot ID %u, ep index %u has "
1112                        "stream IDs 1 to %u allocated, "
1113                        "but stream ID %u is requested.\n",
1114                        slot_id, ep_index,
1115                        ep->stream_info->num_streams - 1,
1116                        stream_id);
1117        return NULL;
1118}
1119
1120/*
1121 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
1122 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
1123 * should pick up where it left off in the TD, unless a Set Transfer Ring
1124 * Dequeue Pointer is issued.
1125 *
1126 * The TRBs that make up the buffers for the canceled URB will be "removed" from
1127 * the ring.  Since the ring is a contiguous structure, they can't be physically
1128 * removed.  Instead, there are two options:
1129 *
1130 *  1) If the HC is in the middle of processing the URB to be canceled, we
1131 *     simply move the ring's dequeue pointer past those TRBs using the Set
1132 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
1133 *     when drivers timeout on the last submitted URB and attempt to cancel.
1134 *
1135 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
1136 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 1137 *     HC will need to invalidate any TRBs it has cached after the stop
1138 *     endpoint command, as noted in the xHCI 0.95 errata.
1139 *
1140 *  3) The TD may have completed by the time the Stop Endpoint Command
1141 *     completes, so software needs to handle that case too.
1142 *
1143 * This function should protect against the TD enqueueing code ringing the
1144 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 1145 * It also needs to account for multiple cancellations happening at the same
1146 * time for the same endpoint.
1147 *
1148 * Note that this function can be called in any context, or so says
1149 * usb_hcd_unlink_urb()
1150 */
1151int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1152{
1153        unsigned long flags;
1154        int ret, i;
1155        u32 temp;
1156        struct xhci_hcd *xhci;
1157        struct urb_priv *urb_priv;
1158        struct xhci_td *td;
1159        unsigned int ep_index;
1160        struct xhci_ring *ep_ring;
1161        struct xhci_virt_ep *ep;
1162
1163        xhci = hcd_to_xhci(hcd);
1164        spin_lock_irqsave(&xhci->lock, flags);
1165        /* Make sure the URB hasn't completed or been unlinked already */
1166        ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1167        if (ret || !urb->hcpriv)
1168                goto done;
1169        temp = xhci_readl(xhci, &xhci->op_regs->status);
1170        if (temp == 0xffffffff) {
1171                xhci_dbg(xhci, "HW died, freeing TD.\n");
1172                urb_priv = urb->hcpriv;
1173
1174                usb_hcd_unlink_urb_from_ep(hcd, urb);
1175                spin_unlock_irqrestore(&xhci->lock, flags);
1176                usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN);
1177                xhci_urb_free_priv(xhci, urb_priv);
1178                return ret;
1179        }
1180        if (xhci->xhc_state & XHCI_STATE_DYING) {
1181                xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
1182                                "non-responsive xHCI host.\n",
1183                                urb->ep->desc.bEndpointAddress, urb);
1184                /* Let the stop endpoint command watchdog timer (which set this
1185                 * state) finish cleaning up the endpoint TD lists.  We must
1186                 * have caught it in the middle of dropping a lock and giving
1187                 * back an URB.
1188                 */
1189                goto done;
1190        }
1191
1192        xhci_dbg(xhci, "Cancel URB %p\n", urb);
1193        xhci_dbg(xhci, "Event ring:\n");
1194        xhci_debug_ring(xhci, xhci->event_ring);
1195        ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1196        ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
1197        ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1198        if (!ep_ring) {
1199                ret = -EINVAL;
1200                goto done;
1201        }
1202
1203        xhci_dbg(xhci, "Endpoint ring:\n");
1204        xhci_debug_ring(xhci, ep_ring);
1205
1206        urb_priv = urb->hcpriv;
1207
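             /*
              * The first td_cnt TDs have already completed; add the remaining TDs
              * to this endpoint's cancelled list.
              */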
1208        for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
1209                td = urb_priv->td[i];
1210                list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
1211        }
1212
1213        /* Queue a stop endpoint command, but only if this is
1214         * the first cancellation to be handled.
1215         */
1216        if (!(ep->ep_state & EP_HALT_PENDING)) {
1217                ep->ep_state |= EP_HALT_PENDING;
1218                ep->stop_cmds_pending++;
1219                ep->stop_cmd_timer.expires = jiffies +
1220                        XHCI_STOP_EP_CMD_TIMEOUT * HZ;
1221                add_timer(&ep->stop_cmd_timer);
1222                xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
1223                xhci_ring_cmd_db(xhci);
1224        }
1225done:
1226        spin_unlock_irqrestore(&xhci->lock, flags);
1227        return ret;
1228}
1229
1230/* Drop an endpoint from a new bandwidth configuration for this device.
1231 * Only one call to this function is allowed per endpoint before
1232 * check_bandwidth() or reset_bandwidth() must be called.
1233 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1234 * add the endpoint to the schedule with possibly new parameters denoted by a
1235 * different endpoint descriptor in usb_host_endpoint.
1236 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1237 * not allowed.
1238 *
1239 * The USB core will not allow URBs to be queued to an endpoint that is being
1240 * disabled, so there's no need for mutual exclusion to protect
1241 * the xhci->devs[slot_id] structure.
1242 */
1243int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1244                struct usb_host_endpoint *ep)
1245{
1246        struct xhci_hcd *xhci;
1247        struct xhci_container_ctx *in_ctx, *out_ctx;
1248        struct xhci_input_control_ctx *ctrl_ctx;
1249        struct xhci_slot_ctx *slot_ctx;
1250        unsigned int last_ctx;
1251        unsigned int ep_index;
1252        struct xhci_ep_ctx *ep_ctx;
1253        u32 drop_flag;
1254        u32 new_add_flags, new_drop_flags, new_slot_info;
1255        int ret;
1256
1257        ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1258        if (ret <= 0)
1259                return ret;
1260        xhci = hcd_to_xhci(hcd);
1261        xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1262
1263        drop_flag = xhci_get_endpoint_flag(&ep->desc);
1264        if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
1265                xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1266                                __func__, drop_flag);
1267                return 0;
1268        }
1269
1270        in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1271        out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1272        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1273        ep_index = xhci_get_endpoint_index(&ep->desc);
1274        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1275        /* If the HC already knows the endpoint is disabled,
1276         * or the HCD has noted it is disabled, ignore this request
1277         */
1278        if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
1279                        ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
1280                xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1281                                __func__, ep);
1282                return 0;
1283        }
1284
1285        ctrl_ctx->drop_flags |= drop_flag;
1286        new_drop_flags = ctrl_ctx->drop_flags;
1287
1288        ctrl_ctx->add_flags &= ~drop_flag;
1289        new_add_flags = ctrl_ctx->add_flags;
1290
1291        last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
1292        slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1293        /* Update the last valid endpoint context, if we deleted the last one */
1294        if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
1295                slot_ctx->dev_info &= ~LAST_CTX_MASK;
1296                slot_ctx->dev_info |= LAST_CTX(last_ctx);
1297        }
1298        new_slot_info = slot_ctx->dev_info;
1299
1300        xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1301
1302        xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
1303                        (unsigned int) ep->desc.bEndpointAddress,
1304                        udev->slot_id,
1305                        (unsigned int) new_drop_flags,
1306                        (unsigned int) new_add_flags,
1307                        (unsigned int) new_slot_info);
1308        return 0;
1309}
1310
1311/* Add an endpoint to a new possible bandwidth configuration for this device.
1312 * Only one call to this function is allowed per endpoint before
1313 * check_bandwidth() or reset_bandwidth() must be called.
1314 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1315 * add the endpoint to the schedule with possibly new parameters denoted by a
1316 * different endpoint descriptor in usb_host_endpoint.
1317 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1318 * not allowed.
1319 *
1320 * The USB core will not allow URBs to be queued to an endpoint until the
1321 * configuration or alt setting is installed in the device, so there's no need
1322 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
1323 */
1324int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1325                struct usb_host_endpoint *ep)
1326{
1327        struct xhci_hcd *xhci;
1328        struct xhci_container_ctx *in_ctx, *out_ctx;
1329        unsigned int ep_index;
1330        struct xhci_ep_ctx *ep_ctx;
1331        struct xhci_slot_ctx *slot_ctx;
1332        struct xhci_input_control_ctx *ctrl_ctx;
1333        u32 added_ctxs;
1334        unsigned int last_ctx;
1335        u32 new_add_flags, new_drop_flags, new_slot_info;
1336        int ret = 0;
1337
1338        ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1339        if (ret <= 0) {
1340                /* So we won't queue a reset ep command for a root hub */
1341                ep->hcpriv = NULL;
1342                return ret;
1343        }
1344        xhci = hcd_to_xhci(hcd);
1345
1346        added_ctxs = xhci_get_endpoint_flag(&ep->desc);
1347        last_ctx = xhci_last_valid_endpoint(added_ctxs);
1348        if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
1349                /* FIXME when we have to issue an evaluate endpoint command to
1350                 * deal with ep0 max packet size changing once we get the
1351                 * descriptors
1352                 */
1353                xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1354                                __func__, added_ctxs);
1355                return 0;
1356        }
1357
1358        in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1359        out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1360        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1361        ep_index = xhci_get_endpoint_index(&ep->desc);
1362        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1363        /* If the HCD has already noted the endpoint is enabled,
1364         * ignore this request.
1365         */
1366        if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
1367                xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1368                                __func__, ep);
1369                return 0;
1370        }
1371
1372        /*
1373         * Configuration and alternate setting changes must be done in
1374         * process context, not interrupt context (or so the documentation
1375         * for usb_set_interface() and usb_set_configuration() claims).
1376         */
1377        if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
1378                                udev, ep, GFP_NOIO) < 0) {
1379                dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1380                                __func__, ep->desc.bEndpointAddress);
1381                return -ENOMEM;
1382        }
1383
1384        ctrl_ctx->add_flags |= added_ctxs;
1385        new_add_flags = ctrl_ctx->add_flags;
1386
1387        /* If xhci_endpoint_disable() was called for this endpoint, but the
1388         * xHC hasn't been notified yet through the check_bandwidth() call,
1389         * this re-adds a new state for the endpoint from the new endpoint
1390         * descriptors.  We must drop and re-add this endpoint, so we leave the
1391         * drop flags alone.
1392         */
1393        new_drop_flags = ctrl_ctx->drop_flags;
1394
1395        slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1396        /* Update the last valid endpoint context, if we just added one past it */
1397        if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
1398                slot_ctx->dev_info &= ~LAST_CTX_MASK;
1399                slot_ctx->dev_info |= LAST_CTX(last_ctx);
1400        }
1401        new_slot_info = slot_ctx->dev_info;
1402
1403        /* Store the usb_device pointer for later use */
1404        ep->hcpriv = udev;
1405
1406        xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
1407                        (unsigned int) ep->desc.bEndpointAddress,
1408                        udev->slot_id,
1409                        (unsigned int) new_drop_flags,
1410                        (unsigned int) new_add_flags,
1411                        (unsigned int) new_slot_info);
1412        return 0;
1413}
1414
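/*
 * Editor's sketch (not driver code): the ordering rules documented above
 * xhci_add_endpoint(), roughly as the USB core exercises them when it
 * installs a new alternate setting.  "old_ep" and "new_ep" are illustrative
 * placeholders, not variables defined in this file.
 *
 *	xhci_drop_endpoint(hcd, udev, old_ep);	// drop the old altsetting's ep
 *	xhci_add_endpoint(hcd, udev, new_ep);	// re-add with the new descriptor
 *	if (xhci_check_bandwidth(hcd, udev))	// commit both changes at once
 *		xhci_reset_bandwidth(hcd, udev);	// roll back on failure
 */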
1415static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
1416{
1417        struct xhci_input_control_ctx *ctrl_ctx;
1418        struct xhci_ep_ctx *ep_ctx;
1419        struct xhci_slot_ctx *slot_ctx;
1420        int i;
1421
1422        /* When a device's add flag and drop flag are zero, any subsequent
1423         * configure endpoint command will leave that endpoint's state
1424         * untouched.  Make sure we don't leave any old state in the input
1425         * endpoint contexts.
1426         */
1427        ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
1428        ctrl_ctx->drop_flags = 0;
1429        ctrl_ctx->add_flags = 0;
1430        slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1431        slot_ctx->dev_info &= ~LAST_CTX_MASK;
1432        /* Endpoint 0 is always valid */
1433        slot_ctx->dev_info |= LAST_CTX(1);
1434        for (i = 1; i < 31; ++i) {
1435                ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
1436                ep_ctx->ep_info = 0;
1437                ep_ctx->ep_info2 = 0;
1438                ep_ctx->deq = 0;
1439                ep_ctx->tx_info = 0;
1440        }
1441}
1442
1443static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1444                struct usb_device *udev, int *cmd_status)
1445{
1446        int ret;
1447
1448        switch (*cmd_status) {
1449        case COMP_ENOMEM:
1450                dev_warn(&udev->dev, "Not enough host controller resources "
1451                                "for new device state.\n");
1452                ret = -ENOMEM;
1453                /* FIXME: can we allocate more resources for the HC? */
1454                break;
1455        case COMP_BW_ERR:
1456                dev_warn(&udev->dev, "Not enough bandwidth "
1457                                "for new device state.\n");
1458                ret = -ENOSPC;
1459                /* FIXME: can we go back to the old state? */
1460                break;
1461        case COMP_TRB_ERR:
1462                /* the HCD set up something wrong */
1463                dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
1464                                "add flag = 1, "
1465                                "and endpoint is not disabled.\n");
1466                ret = -EINVAL;
1467                break;
1468        case COMP_SUCCESS:
1469                dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
1470                ret = 0;
1471                break;
1472        default:
1473                xhci_err(xhci, "ERROR: unexpected command completion "
1474                                "code 0x%x.\n", *cmd_status);
1475                ret = -EINVAL;
1476                break;
1477        }
1478        return ret;
1479}
1480
1481static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
1482                struct usb_device *udev, int *cmd_status)
1483{
1484        int ret;
1485        struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
1486
1487        switch (*cmd_status) {
1488        case COMP_EINVAL:
1489                dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
1490                                "context command.\n");
1491                ret = -EINVAL;
1492                break;
1493        case COMP_EBADSLT:
1494                dev_warn(&udev->dev, "WARN: slot not enabled for "
1495                                "evaluate context command.\n");
1496        case COMP_CTX_STATE:
1497                dev_warn(&udev->dev, "WARN: invalid context state for "
1498                                "evaluate context command.\n");
1499                xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
1500                ret = -EINVAL;
1501                break;
1502        case COMP_SUCCESS:
1503                dev_dbg(&udev->dev, "Successful evaluate context command\n");
1504                ret = 0;
1505                break;
1506        default:
1507                xhci_err(xhci, "ERROR: unexpected command completion "
1508                                "code 0x%x.\n", *cmd_status);
1509                ret = -EINVAL;
1510                break;
1511        }
1512        return ret;
1513}
1514
1515/* Issue a configure endpoint command or evaluate context command
1516 * and wait for it to finish.
1517 */
1518static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1519                struct usb_device *udev,
1520                struct xhci_command *command,
1521                bool ctx_change, bool must_succeed)
1522{
1523        int ret;
1524        int timeleft;
1525        unsigned long flags;
1526        struct xhci_container_ctx *in_ctx;
1527        struct completion *cmd_completion;
1528        int *cmd_status;
1529        struct xhci_virt_device *virt_dev;
1530
1531        spin_lock_irqsave(&xhci->lock, flags);
1532        virt_dev = xhci->devs[udev->slot_id];
1533        if (command) {
1534                in_ctx = command->in_ctx;
1535                cmd_completion = command->completion;
1536                cmd_status = &command->status;
1537                command->command_trb = xhci->cmd_ring->enqueue;
1538
1539                /* Enqueue pointer can be left pointing to the link TRB,
1540                 * we must handle that
1541                 */
1542                if ((command->command_trb->link.control & TRB_TYPE_BITMASK)
1543                                == TRB_TYPE(TRB_LINK))
1544                        command->command_trb =
1545                                xhci->cmd_ring->enq_seg->next->trbs;
1546
1547                list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
1548        } else {
1549                in_ctx = virt_dev->in_ctx;
1550                cmd_completion = &virt_dev->cmd_completion;
1551                cmd_status = &virt_dev->cmd_status;
1552        }
1553        init_completion(cmd_completion);
1554
1555        if (!ctx_change)
1556                ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
1557                                udev->slot_id, must_succeed);
1558        else
1559                ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
1560                                udev->slot_id);
1561        if (ret < 0) {
1562                if (command)
1563                        list_del(&command->cmd_list);
1564                spin_unlock_irqrestore(&xhci->lock, flags);
1565                xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
1566                return -ENOMEM;
1567        }
1568        xhci_ring_cmd_db(xhci);
1569        spin_unlock_irqrestore(&xhci->lock, flags);
1570
1571        /* Wait for the configure endpoint command to complete */
1572        timeleft = wait_for_completion_interruptible_timeout(
1573                        cmd_completion,
1574                        USB_CTRL_SET_TIMEOUT);
1575        if (timeleft <= 0) {
1576                xhci_warn(xhci, "%s while waiting for %s command\n",
1577                                timeleft == 0 ? "Timeout" : "Signal",
1578                                ctx_change == 0 ?
1579                                        "configure endpoint" :
1580                                        "evaluate context");
1581                /* FIXME cancel the configure endpoint command */
1582                return -ETIME;
1583        }
1584
1585        if (!ctx_change)
1586                return xhci_configure_endpoint_result(xhci, udev, cmd_status);
1587        return xhci_evaluate_context_result(xhci, udev, cmd_status);
1588}
1589
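/*
 * Editor's note, derived from the call sites later in this file: the pair of
 * flags selects which command is queued and how failure is treated.
 *
 *	xhci_check_bandwidth():    command == NULL,       ctx_change == false
 *	xhci_alloc_streams():      private config_cmd,    ctx_change == false
 *	xhci_free_streams():       free_streams_command,  must_succeed == true
 *	xhci_update_hub_device():  ctx_change == true on 0.95 hosts, i.e. an
 *	                           Evaluate Context command is issued instead
 */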
1590/* Called after one or more calls to xhci_add_endpoint() or
1591 * xhci_drop_endpoint().  If this call fails, the USB core is expected
1592 * to call xhci_reset_bandwidth().
1593 *
1594 * Since we are in the middle of changing either configuration or
1595 * installing a new alt setting, the USB core won't allow URBs to be
1596 * enqueued for any endpoint on the old config or interface.  Nothing
1597 * else should be touching the xhci->devs[slot_id] structure, so we
1598 * don't need to take the xhci->lock for manipulating that.
1599 */
1600int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1601{
1602        int i;
1603        int ret = 0;
1604        struct xhci_hcd *xhci;
1605        struct xhci_virt_device *virt_dev;
1606        struct xhci_input_control_ctx *ctrl_ctx;
1607        struct xhci_slot_ctx *slot_ctx;
1608
1609        ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
1610        if (ret <= 0)
1611                return ret;
1612        xhci = hcd_to_xhci(hcd);
1613
1614        xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1615        virt_dev = xhci->devs[udev->slot_id];
1616
1617        /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
1618        ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
1619        ctrl_ctx->add_flags |= SLOT_FLAG;
1620        ctrl_ctx->add_flags &= ~EP0_FLAG;
1621        ctrl_ctx->drop_flags &= ~SLOT_FLAG;
1622        ctrl_ctx->drop_flags &= ~EP0_FLAG;
1623        xhci_dbg(xhci, "New Input Control Context:\n");
1624        slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1625        xhci_dbg_ctx(xhci, virt_dev->in_ctx,
1626                        LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
1627
1628        ret = xhci_configure_endpoint(xhci, udev, NULL,
1629                        false, false);
1630        if (ret) {
1631                /* Caller should call reset_bandwidth() */
1632                return ret;
1633        }
1634
1635        xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
1636        xhci_dbg_ctx(xhci, virt_dev->out_ctx,
1637                        LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
1638
1639        xhci_zero_in_ctx(xhci, virt_dev);
1640        /* Install new rings and free or cache any old rings */
1641        for (i = 1; i < 31; ++i) {
1642                if (!virt_dev->eps[i].new_ring)
1643                        continue;
1644                /* Only cache or free the old ring if it exists.
1645                 * It may not if this is the first add of an endpoint.
1646                 */
1647                if (virt_dev->eps[i].ring) {
1648                        xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
1649                }
1650                virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
1651                virt_dev->eps[i].new_ring = NULL;
1652        }
1653
1654        return ret;
1655}
1656
1657void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1658{
1659        struct xhci_hcd *xhci;
1660        struct xhci_virt_device *virt_dev;
1661        int i, ret;
1662
1663        ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
1664        if (ret <= 0)
1665                return;
1666        xhci = hcd_to_xhci(hcd);
1667
1668        xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1669        virt_dev = xhci->devs[udev->slot_id];
1670        /* Free any rings allocated for added endpoints */
1671        for (i = 0; i < 31; ++i) {
1672                if (virt_dev->eps[i].new_ring) {
1673                        xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
1674                        virt_dev->eps[i].new_ring = NULL;
1675                }
1676        }
1677        xhci_zero_in_ctx(xhci, virt_dev);
1678}
1679
1680static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
1681                struct xhci_container_ctx *in_ctx,
1682                struct xhci_container_ctx *out_ctx,
1683                u32 add_flags, u32 drop_flags)
1684{
1685        struct xhci_input_control_ctx *ctrl_ctx;
1686        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1687        ctrl_ctx->add_flags = add_flags;
1688        ctrl_ctx->drop_flags = drop_flags;
1689        xhci_slot_copy(xhci, in_ctx, out_ctx);
1690        ctrl_ctx->add_flags |= SLOT_FLAG;
1691
1692        xhci_dbg(xhci, "Input Context:\n");
1693        xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
1694}
1695
1696static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
1697                unsigned int slot_id, unsigned int ep_index,
1698                struct xhci_dequeue_state *deq_state)
1699{
1700        struct xhci_container_ctx *in_ctx;
1701        struct xhci_ep_ctx *ep_ctx;
1702        u32 added_ctxs;
1703        dma_addr_t addr;
1704
1705        xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
1706                        xhci->devs[slot_id]->out_ctx, ep_index);
1707        in_ctx = xhci->devs[slot_id]->in_ctx;
1708        ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
1709        addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
1710                        deq_state->new_deq_ptr);
1711        if (addr == 0) {
1712                xhci_warn(xhci, "WARN Cannot submit config ep after "
1713                                "reset ep command\n");
1714                xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
1715                                deq_state->new_deq_seg,
1716                                deq_state->new_deq_ptr);
1717                return;
1718        }
1719        ep_ctx->deq = addr | deq_state->new_cycle_state;
1720
1721        added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
1722        xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
1723                        xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
1724}
1725
1726void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
1727                struct usb_device *udev, unsigned int ep_index)
1728{
1729        struct xhci_dequeue_state deq_state;
1730        struct xhci_virt_ep *ep;
1731
1732        xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
1733        ep = &xhci->devs[udev->slot_id]->eps[ep_index];
1734        /* We need to move the HW's dequeue pointer past this TD,
1735         * or it will attempt to resend it on the next doorbell ring.
1736         */
1737        xhci_find_new_dequeue_state(xhci, udev->slot_id,
1738                        ep_index, ep->stopped_stream, ep->stopped_td,
1739                        &deq_state);
1740
1741        /* HW with the reset endpoint quirk will use the saved dequeue state to
1742         * issue a configure endpoint command later.
1743         */
1744        if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
1745                xhci_dbg(xhci, "Queueing new dequeue state\n");
1746                xhci_queue_new_dequeue_state(xhci, udev->slot_id,
1747                                ep_index, ep->stopped_stream, &deq_state);
1748        } else {
1749                /* Better hope no one uses the input context between now and the
1750                 * reset endpoint completion!
1751                 * XXX: No idea how this hardware will react when stream rings
1752                 * are enabled.
1753                 */
1754                xhci_dbg(xhci, "Setting up input context for "
1755                                "configure endpoint command\n");
1756                xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
1757                                ep_index, &deq_state);
1758        }
1759}
1760
1761/* Deal with stalled endpoints.  The core should have sent the control message
1762 * to clear the halt condition.  However, we need to make the xHCI hardware
1763 * reset its sequence number, since a device will expect a sequence number of
1764 * zero after the halt condition is cleared.
1765 * Context: in_interrupt
1766 */
1767void xhci_endpoint_reset(struct usb_hcd *hcd,
1768                struct usb_host_endpoint *ep)
1769{
1770        struct xhci_hcd *xhci;
1771        struct usb_device *udev;
1772        unsigned int ep_index;
1773        unsigned long flags;
1774        int ret;
1775        struct xhci_virt_ep *virt_ep;
1776
1777        xhci = hcd_to_xhci(hcd);
1778        udev = (struct usb_device *) ep->hcpriv;
1779        /* Called with a root hub endpoint (or an endpoint that wasn't added
1780         * with xhci_add_endpoint()).
1781         */
1782        if (!ep->hcpriv)
1783                return;
1784        ep_index = xhci_get_endpoint_index(&ep->desc);
1785        virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
1786        if (!virt_ep->stopped_td) {
1787                xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
1788                                ep->desc.bEndpointAddress);
1789                return;
1790        }
1791        if (usb_endpoint_xfer_control(&ep->desc)) {
1792                xhci_dbg(xhci, "Control endpoint stall already handled.\n");
1793                return;
1794        }
1795
1796        xhci_dbg(xhci, "Queueing reset endpoint command\n");
1797        spin_lock_irqsave(&xhci->lock, flags);
1798        ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
1799        /*
1800         * Can't change the ring dequeue pointer until it's transitioned to the
1801         * stopped state, which is only upon a successful reset endpoint
1802         * command.  Better hope that last command worked!
1803         */
1804        if (!ret) {
1805                xhci_cleanup_stalled_ring(xhci, udev, ep_index);
1806                kfree(virt_ep->stopped_td);
1807                xhci_ring_cmd_db(xhci);
1808        }
1809        virt_ep->stopped_td = NULL;
1810        virt_ep->stopped_trb = NULL;
1811        virt_ep->stopped_stream = 0;
1812        spin_unlock_irqrestore(&xhci->lock, flags);
1813
1814        if (ret)
1815                xhci_warn(xhci, "FIXME allocate a new ring segment\n");
1816}
1817
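/*
 * Editor's sketch (an assumption about the usbcore path, not code from this
 * driver): class drivers normally reach xhci_endpoint_reset() indirectly.
 * After a bulk URB completes with -EPIPE they clear the stall, e.g.
 *
 *	ret = usb_clear_halt(udev, usb_rcvbulkpipe(udev, ep_num));
 *
 * usbcore sends CLEAR_FEATURE(ENDPOINT_HALT) to the device and then asks the
 * HCD to reset its endpoint state; for xHCI that is the Reset Endpoint
 * command queued above, so the host restarts its data sequence at zero to
 * match the device.
 */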
1818static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
1819                struct usb_device *udev, struct usb_host_endpoint *ep,
1820                unsigned int slot_id)
1821{
1822        int ret;
1823        unsigned int ep_index;
1824        unsigned int ep_state;
1825
1826        if (!ep)
1827                return -EINVAL;
1828        ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
1829        if (ret <= 0)
1830                return -EINVAL;
1831        if (ep->ss_ep_comp.bmAttributes == 0) {
1832                xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
1833                                " descriptor for ep 0x%x does not support streams\n",
1834                                ep->desc.bEndpointAddress);
1835                return -EINVAL;
1836        }
1837
1838        ep_index = xhci_get_endpoint_index(&ep->desc);
1839        ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
1840        if (ep_state & EP_HAS_STREAMS ||
1841                        ep_state & EP_GETTING_STREAMS) {
1842                xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
1843                                "already has streams set up.\n",
1844                                ep->desc.bEndpointAddress);
1845                xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
1846                                "dynamic stream context array reallocation.\n");
1847                return -EINVAL;
1848        }
1849        if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
1850                xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
1851                                "endpoint 0x%x; URBs are pending.\n",
1852                                ep->desc.bEndpointAddress);
1853                return -EINVAL;
1854        }
1855        return 0;
1856}
1857
1858static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
1859                unsigned int *num_streams, unsigned int *num_stream_ctxs)
1860{
1861        unsigned int max_streams;
1862
1863        /* The stream context array size must be a power of two */
1864        *num_stream_ctxs = roundup_pow_of_two(*num_streams);
1865        /*
1866         * Find out how many primary stream array entries the host controller
1867         * supports.  Later we may use secondary stream arrays (similar to 2nd
1868         * level page entries), but that's an optional feature for xHCI host
1869         * controllers. xHCs must support at least 4 stream IDs.
1870         */
1871        max_streams = HCC_MAX_PSA(xhci->hcc_params);
1872        if (*num_stream_ctxs > max_streams) {
1873                xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
1874                                max_streams);
1875                *num_stream_ctxs = max_streams;
1876                *num_streams = max_streams;
1877        }
1878}
1879
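/*
 * Editor's worked example of the sizing above (numbers are hypothetical):
 * a driver asking for 23 stream IDs becomes num_streams = 24 once stream 0 is
 * added in xhci_alloc_streams(); roundup_pow_of_two(24) then asks for a
 * 32-entry stream context array.  If HCC_MAX_PSA() says the host supports
 * only 16 primary stream array entries, both values are clamped to 16 and the
 * caller is ultimately told it got 15 usable streams.
 */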
1880/* Returns an error code if one of the endpoints already has streams.
1881 * This does not change any data structures, it only checks and gathers
1882 * information.
1883 */
1884static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
1885                struct usb_device *udev,
1886                struct usb_host_endpoint **eps, unsigned int num_eps,
1887                unsigned int *num_streams, u32 *changed_ep_bitmask)
1888{
1889        unsigned int max_streams;
1890        unsigned int endpoint_flag;
1891        int i;
1892        int ret;
1893
1894        for (i = 0; i < num_eps; i++) {
1895                ret = xhci_check_streams_endpoint(xhci, udev,
1896                                eps[i], udev->slot_id);
1897                if (ret < 0)
1898                        return ret;
1899
1900                max_streams = USB_SS_MAX_STREAMS(
1901                                eps[i]->ss_ep_comp.bmAttributes);
1902                if (max_streams < (*num_streams - 1)) {
1903                        xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
1904                                        eps[i]->desc.bEndpointAddress,
1905                                        max_streams);
1906                        *num_streams = max_streams + 1;
1907                }
1908
1909                endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
1910                if (*changed_ep_bitmask & endpoint_flag)
1911                        return -EINVAL;
1912                *changed_ep_bitmask |= endpoint_flag;
1913        }
1914        return 0;
1915}
1916
1917static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
1918                struct usb_device *udev,
1919                struct usb_host_endpoint **eps, unsigned int num_eps)
1920{
1921        u32 changed_ep_bitmask = 0;
1922        unsigned int slot_id;
1923        unsigned int ep_index;
1924        unsigned int ep_state;
1925        int i;
1926
1927        slot_id = udev->slot_id;
1928        if (!xhci->devs[slot_id])
1929                return 0;
1930
1931        for (i = 0; i < num_eps; i++) {
1932                ep_index = xhci_get_endpoint_index(&eps[i]->desc);
1933                ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
1934                /* Are streams already being freed for the endpoint? */
1935                if (ep_state & EP_GETTING_NO_STREAMS) {
1936                        xhci_warn(xhci, "WARN Can't disable streams for "
1937                                        "endpoint 0x%x, "
1938                                        "streams are being disabled already.\n",
1939                                        eps[i]->desc.bEndpointAddress);
1940                        return 0;
1941                }
1942                /* Are there actually any streams to free? */
1943                if (!(ep_state & EP_HAS_STREAMS) &&
1944                                !(ep_state & EP_GETTING_STREAMS)) {
1945                        xhci_warn(xhci, "WARN Can't disable streams for "
1946                                        "endpoint 0x%x, "
1947                                        "streams are already disabled!\n",
1948                                        eps[i]->desc.bEndpointAddress);
1949                        xhci_warn(xhci, "WARN xhci_free_streams() called "
1950                                        "with non-streams endpoint\n");
1951                        return 0;
1952                }
1953                changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
1954        }
1955        return changed_ep_bitmask;
1956}
1957
1958/*
1959 * The USB device drivers use this function (through the HCD interface in USB
1960 * core) to prepare a set of bulk endpoints to use streams.  Streams are used to
1961 * coordinate mass storage command queueing across multiple endpoints (basically
1962 * a stream ID == a task ID).
1963 *
1964 * Setting up streams involves allocating the same size stream context array
1965 * for each endpoint and issuing a configure endpoint command for all endpoints.
1966 *
1967 * Don't allow the call to succeed if one endpoint only supports one stream
1968 * (which means it doesn't support streams at all).
1969 *
1970 * Drivers may get less stream IDs than they asked for, if the host controller
1971 * hardware or endpoints claim they can't support the number of requested
1972 * stream IDs.
1973 */
1974int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
1975                struct usb_host_endpoint **eps, unsigned int num_eps,
1976                unsigned int num_streams, gfp_t mem_flags)
1977{
1978        int i, ret;
1979        struct xhci_hcd *xhci;
1980        struct xhci_virt_device *vdev;
1981        struct xhci_command *config_cmd;
1982        unsigned int ep_index;
1983        unsigned int num_stream_ctxs;
1984        unsigned long flags;
1985        u32 changed_ep_bitmask = 0;
1986
1987        if (!eps)
1988                return -EINVAL;
1989
1990        /* Add one to the number of streams requested to account for
1991         * stream 0 that is reserved for xHCI usage.
1992         */
1993        num_streams += 1;
1994        xhci = hcd_to_xhci(hcd);
1995        xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
1996                        num_streams);
1997
1998        config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
1999        if (!config_cmd) {
2000                xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
2001                return -ENOMEM;
2002        }
2003
2004        /* Check to make sure all endpoints are not already configured for
2005         * streams.  While we're at it, find the maximum number of streams that
2006         * all the endpoints will support and check for duplicate endpoints.
2007         */
2008        spin_lock_irqsave(&xhci->lock, flags);
2009        ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
2010                        num_eps, &num_streams, &changed_ep_bitmask);
2011        if (ret < 0) {
2012                xhci_free_command(xhci, config_cmd);
2013                spin_unlock_irqrestore(&xhci->lock, flags);
2014                return ret;
2015        }
2016        if (num_streams <= 1) {
2017                xhci_warn(xhci, "WARN: endpoints can't handle "
2018                                "more than one stream.\n");
2019                xhci_free_command(xhci, config_cmd);
2020                spin_unlock_irqrestore(&xhci->lock, flags);
2021                return -EINVAL;
2022        }
2023        vdev = xhci->devs[udev->slot_id];
2024        /* Mark each endpoint as being in transition, so
2025         * xhci_urb_enqueue() will reject all URBs.
2026         */
2027        for (i = 0; i < num_eps; i++) {
2028                ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2029                vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
2030        }
2031        spin_unlock_irqrestore(&xhci->lock, flags);
2032
2033        /* Setup internal data structures and allocate HW data structures for
2034         * streams (but don't install the HW structures in the input context
2035         * until we're sure all memory allocation succeeded).
2036         */
2037        xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
2038        xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
2039                        num_stream_ctxs, num_streams);
2040
2041        for (i = 0; i < num_eps; i++) {
2042                ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2043                vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
2044                                num_stream_ctxs,
2045                                num_streams, mem_flags);
2046                if (!vdev->eps[ep_index].stream_info)
2047                        goto cleanup;
2048                /* Set maxPstreams in endpoint context and update deq ptr to
2049                 * point to stream context array. FIXME
2050                 */
2051        }
2052
2053        /* Set up the input context for a configure endpoint command. */
2054        for (i = 0; i < num_eps; i++) {
2055                struct xhci_ep_ctx *ep_ctx;
2056
2057                ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2058                ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
2059
2060                xhci_endpoint_copy(xhci, config_cmd->in_ctx,
2061                                vdev->out_ctx, ep_index);
2062                xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
2063                                vdev->eps[ep_index].stream_info);
2064        }
2065        /* Tell the HW to drop its old copy of the endpoint context info
2066         * and add the updated copy from the input context.
2067         */
2068        xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
2069                        vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
2070
2071        /* Issue and wait for the configure endpoint command */
2072        ret = xhci_configure_endpoint(xhci, udev, config_cmd,
2073                        false, false);
2074
2075        /* xHC rejected the configure endpoint command for some reason, so we
2076         * leave the old ring intact and free our internal streams data
2077         * structure.
2078         */
2079        if (ret < 0)
2080                goto cleanup;
2081
2082        spin_lock_irqsave(&xhci->lock, flags);
2083        for (i = 0; i < num_eps; i++) {
2084                ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2085                vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
2086                xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
2087                         udev->slot_id, ep_index);
2088                vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
2089        }
2090        xhci_free_command(xhci, config_cmd);
2091        spin_unlock_irqrestore(&xhci->lock, flags);
2092
2093        /* Subtract 1 for stream 0, which drivers can't use */
2094        return num_streams - 1;
2095
2096cleanup:
2097        /* If it didn't work, free the streams! */
2098        for (i = 0; i < num_eps; i++) {
2099                ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2100                xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
2101                vdev->eps[ep_index].stream_info = NULL;
2102                /* FIXME Unset maxPstreams in endpoint context and
2103                 * update deq ptr to point to the normal endpoint ring.
2104                 */
2105                vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
2106                vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
2107                xhci_endpoint_zero(xhci, vdev, eps[i]);
2108        }
2109        xhci_free_command(xhci, config_cmd);
2110        return -ENOMEM;
2111}
2112
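/*
 * Editor's sketch of how a class driver is expected to get here.  The usbcore
 * wrapper calls below are assumptions for illustration, not declared in this
 * file:
 *
 *	num = usb_alloc_streams(intf, eps, num_eps, 16, GFP_NOIO);
 *	if (num < 0)
 *		... fall back to ordinary bulk transfers ...
 *	// later, before changing altsettings or disconnecting:
 *	usb_free_streams(intf, eps, num_eps, GFP_NOIO);
 *
 * "num" is the count of usable stream IDs (1..num); stream 0 stays reserved
 * for the xHC, which is why this function returns num_streams - 1.
 */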
2113/* Transition the endpoint from using streams to being a "normal" endpoint
2114 * without streams.
2115 *
2116 * Modify the endpoint context state, submit a configure endpoint command,
2117 * and free all endpoint rings for streams if that completes successfully.
2118 */
2119int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
2120                struct usb_host_endpoint **eps, unsigned int num_eps,
2121                gfp_t mem_flags)
2122{
2123        int i, ret;
2124        struct xhci_hcd *xhci;
2125        struct xhci_virt_device *vdev;
2126        struct xhci_command *command;
2127        unsigned int ep_index;
2128        unsigned long flags;
2129        u32 changed_ep_bitmask;
2130
2131        xhci = hcd_to_xhci(hcd);
2132        vdev = xhci->devs[udev->slot_id];
2133
2134        /* Set up a configure endpoint command to remove the streams rings */
2135        spin_lock_irqsave(&xhci->lock, flags);
2136        changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
2137                        udev, eps, num_eps);
2138        if (changed_ep_bitmask == 0) {
2139                spin_unlock_irqrestore(&xhci->lock, flags);
2140                return -EINVAL;
2141        }
2142
2143        /* Use the xhci_command structure from the first endpoint.  We may have
2144         * allocated too many, but the driver may call xhci_free_streams() for
2145         * each endpoint it grouped into one call to xhci_alloc_streams().
2146         */
2147        ep_index = xhci_get_endpoint_index(&eps[0]->desc);
2148        command = vdev->eps[ep_index].stream_info->free_streams_command;
2149        for (i = 0; i < num_eps; i++) {
2150                struct xhci_ep_ctx *ep_ctx;
2151
2152                ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2153                ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
2154                xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
2155                        EP_GETTING_NO_STREAMS;
2156
2157                xhci_endpoint_copy(xhci, command->in_ctx,
2158                                vdev->out_ctx, ep_index);
2159                xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
2160                                &vdev->eps[ep_index]);
2161        }
2162        xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
2163                        vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
2164        spin_unlock_irqrestore(&xhci->lock, flags);
2165
2166        /* Issue and wait for the configure endpoint command,
2167         * which must succeed.
2168         */
2169        ret = xhci_configure_endpoint(xhci, udev, command,
2170                        false, true);
2171
2172        /* xHC rejected the configure endpoint command for some reason, so we
2173         * leave the streams rings intact.
2174         */
2175        if (ret < 0)
2176                return ret;
2177
2178        spin_lock_irqsave(&xhci->lock, flags);
2179        for (i = 0; i < num_eps; i++) {
2180                ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2181                xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
2182                vdev->eps[ep_index].stream_info = NULL;
2183                /* FIXME Unset maxPstreams in endpoint context and
2184                 * update deq ptr to point to the normal endpoint ring.
2185                 */
2186                vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
2187                vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
2188        }
2189        spin_unlock_irqrestore(&xhci->lock, flags);
2190
2191        return 0;
2192}
2193
2194/*
2195 * This submits a Reset Device Command, which will set the device state to 0,
2196 * set the device address to 0, and disable all the endpoints except the default
2197 * control endpoint.  The USB core should come back and call
2198 * xhci_address_device(), and then re-set up the configuration.  If this is
2199 * called because of a usb_reset_and_verify_device(), then the old alternate
2200 * settings will be re-installed through the normal bandwidth allocation
2201 * functions.
2202 *
2203 * Wait for the Reset Device command to finish.  Remove all structures
2204 * associated with the endpoints that were disabled.  Clear the input device
2205 * structure?  Cache the rings?  Reset the control endpoint 0 max packet size?
2206 *
2207 * If the virt_dev to be reset does not exist or does not match the udev,
2208 * it means the device is lost, possibly due to the xHC restore error and
2209 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
2210 * re-allocate the device.
2211 */
2212int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
2213{
2214        int ret, i;
2215        unsigned long flags;
2216        struct xhci_hcd *xhci;
2217        unsigned int slot_id;
2218        struct xhci_virt_device *virt_dev;
2219        struct xhci_command *reset_device_cmd;
2220        int timeleft;
2221        int last_freed_endpoint;
2222
2223        ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
2224        if (ret <= 0)
2225                return ret;
2226        xhci = hcd_to_xhci(hcd);
2227        slot_id = udev->slot_id;
2228        virt_dev = xhci->devs[slot_id];
2229        if (!virt_dev) {
2230                xhci_dbg(xhci, "The device to be reset with slot ID %u does "
2231                                "not exist. Re-allocate the device\n", slot_id);
2232                ret = xhci_alloc_dev(hcd, udev);
2233                if (ret == 1)
2234                        return 0;
2235                else
2236                        return -EINVAL;
2237        }
2238
2239        if (virt_dev->udev != udev) {
2240                /* If the virt_dev and the udev do not match, this virt_dev
2241                 * may belong to another udev.
2242                 * Re-allocate the device.
2243                 */
2244                xhci_dbg(xhci, "The device to be reset with slot ID %u does "
2245                                "not match the udev. Re-allocate the device\n",
2246                                slot_id);
2247                ret = xhci_alloc_dev(hcd, udev);
2248                if (ret == 1)
2249                        return 0;
2250                else
2251                        return -EINVAL;
2252        }
2253
2254        xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
2255        /* Allocate the command structure that holds the struct completion.
2256         * Assume we're in process context, since the normal device reset
2257         * process has to wait for the device anyway.  Storage devices are
2258         * reset as part of error handling, so use GFP_NOIO instead of
2259         * GFP_KERNEL.
2260         */
2261        reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
2262        if (!reset_device_cmd) {
2263                xhci_dbg(xhci, "Couldn't allocate command structure.\n");
2264                return -ENOMEM;
2265        }
2266
2267        /* Attempt to submit the Reset Device command to the command ring */
2268        spin_lock_irqsave(&xhci->lock, flags);
2269        reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
2270
2271        /* Enqueue pointer can be left pointing to the link TRB,
2272         * we must handle that
2273         */
2274        if ((reset_device_cmd->command_trb->link.control & TRB_TYPE_BITMASK)
2275                        == TRB_TYPE(TRB_LINK))
2276                reset_device_cmd->command_trb =
2277                        xhci->cmd_ring->enq_seg->next->trbs;
2278
2279        list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
2280        ret = xhci_queue_reset_device(xhci, slot_id);
2281        if (ret) {
2282                xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
2283                list_del(&reset_device_cmd->cmd_list);
2284                spin_unlock_irqrestore(&xhci->lock, flags);
2285                goto command_cleanup;
2286        }
2287        xhci_ring_cmd_db(xhci);
2288        spin_unlock_irqrestore(&xhci->lock, flags);
2289
2290        /* Wait for the Reset Device command to finish */
2291        timeleft = wait_for_completion_interruptible_timeout(
2292                        reset_device_cmd->completion,
2293                        USB_CTRL_SET_TIMEOUT);
2294        if (timeleft <= 0) {
2295                xhci_warn(xhci, "%s while waiting for reset device command\n",
2296                                timeleft == 0 ? "Timeout" : "Signal");
2297                spin_lock_irqsave(&xhci->lock, flags);
2298                /* The timeout might have raced with the event ring handler, so
2299                 * only delete from the list if the item isn't poisoned.
2300                 */
2301                if (reset_device_cmd->cmd_list.next != LIST_POISON1)
2302                        list_del(&reset_device_cmd->cmd_list);
2303                spin_unlock_irqrestore(&xhci->lock, flags);
2304                ret = -ETIME;
2305                goto command_cleanup;
2306        }
2307
2308        /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
2309         * unless we tried to reset a slot ID that wasn't enabled,
2310         * or the device wasn't in the addressed or configured state.
2311         */
2312        ret = reset_device_cmd->status;
2313        switch (ret) {
2314        case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
2315        case COMP_CTX_STATE: /* 0.96 completion code for same thing */
2316                xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
2317                                slot_id,
2318                                xhci_get_slot_state(xhci, virt_dev->out_ctx));
2319                xhci_info(xhci, "Not freeing device rings.\n");
2320                /* Don't treat this as an error.  May change my mind later. */
2321                ret = 0;
2322                goto command_cleanup;
2323        case COMP_SUCCESS:
2324                xhci_dbg(xhci, "Successful reset device command.\n");
2325                break;
2326        default:
2327                if (xhci_is_vendor_info_code(xhci, ret))
2328                        break;
2329                xhci_warn(xhci, "Unknown completion code %u for "
2330                                "reset device command.\n", ret);
2331                ret = -EINVAL;
2332                goto command_cleanup;
2333        }
2334
2335        /* Everything but endpoint 0 is disabled, so free or cache the rings. */
2336        last_freed_endpoint = 1;
2337        for (i = 1; i < 31; ++i) {
2338                if (!virt_dev->eps[i].ring)
2339                        continue;
2340                xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2341                last_freed_endpoint = i;
2342        }
2343        xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
2344        xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
2345        ret = 0;
2346
2347command_cleanup:
2348        xhci_free_command(xhci, reset_device_cmd);
2349        return ret;
2350}
2351
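/*
 * Editor's sketch of the recovery sequence described above
 * xhci_discover_or_reset_device().  The USB core drives these steps; this is
 * not literal core code, just the order that comment implies:
 *
 *	xhci_discover_or_reset_device(hcd, udev);	// slot back to Default
 *	xhci_address_device(hcd, udev);			// re-run Address Device
 *	... xhci_add_endpoint() + xhci_check_bandwidth() ...	// reinstall config
 */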
2352/*
2353 * At this point, the struct usb_device is about to go away, the device has
2354 * disconnected, and all traffic has been stopped and the endpoints have been
2355 * disabled.  Free any HC data structures associated with that device.
2356 */
2357void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
2358{
2359        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2360        struct xhci_virt_device *virt_dev;
2361        unsigned long flags;
2362        u32 state;
2363        int i, ret;
2364
2365        ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2366        if (ret <= 0)
2367                return;
2368
2369        virt_dev = xhci->devs[udev->slot_id];
2370
2371        /* Stop any wayward timer functions (which may grab the lock) */
2372        for (i = 0; i < 31; ++i) {
2373                virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
2374                del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
2375        }
2376
2377        spin_lock_irqsave(&xhci->lock, flags);
2378        /* Don't disable the slot if the host controller is dead. */
2379        state = xhci_readl(xhci, &xhci->op_regs->status);
2380        if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
2381                xhci_free_virt_device(xhci, udev->slot_id);
2382                spin_unlock_irqrestore(&xhci->lock, flags);
2383                return;
2384        }
2385
2386        if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
2387                spin_unlock_irqrestore(&xhci->lock, flags);
2388                xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
2389                return;
2390        }
2391        xhci_ring_cmd_db(xhci);
2392        spin_unlock_irqrestore(&xhci->lock, flags);
2393        /*
2394         * Event command completion handler will free any data structures
2395         * associated with the slot.  XXX Can free sleep?
2396         */
2397}
2398
2399/*
2400 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
2401 * timed out, or allocating memory failed.  Returns 1 on success.
2402 */
2403int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
2404{
2405        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2406        unsigned long flags;
2407        int timeleft;
2408        int ret;
2409
2410        spin_lock_irqsave(&xhci->lock, flags);
2411        ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
2412        if (ret) {
2413                spin_unlock_irqrestore(&xhci->lock, flags);
2414                xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
2415                return 0;
2416        }
2417        xhci_ring_cmd_db(xhci);
2418        spin_unlock_irqrestore(&xhci->lock, flags);
2419
2420        /* XXX: how much time for xHC slot assignment? */
2421        timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
2422                        USB_CTRL_SET_TIMEOUT);
2423        if (timeleft <= 0) {
2424                xhci_warn(xhci, "%s while waiting for a slot\n",
2425                                timeleft == 0 ? "Timeout" : "Signal");
2426                /* FIXME cancel the enable slot request */
2427                return 0;
2428        }
2429
2430        if (!xhci->slot_id) {
2431                xhci_err(xhci, "Error while assigning device slot ID\n");
2432                return 0;
2433        }
2434        /* xhci_alloc_virt_device() does not touch rings; no need to lock.
2435         * Use GFP_NOIO, since this function can be called from
2436         * xhci_discover_or_reset_device(), which may be called as part of
2437         * mass storage driver error handling.
2438         */
2439        if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
2440                /* Disable slot, if we can do it without mem alloc */
2441                xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
2442                spin_lock_irqsave(&xhci->lock, flags);
2443                if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
2444                        xhci_ring_cmd_db(xhci);
2445                spin_unlock_irqrestore(&xhci->lock, flags);
2446                return 0;
2447        }
2448        udev->slot_id = xhci->slot_id;
2449        /* Is this a LS or FS device under a HS hub? */
2450        /* Hub or peripheral? */
2451        return 1;
2452}
2453
2454/*
2455 * Issue an Address Device command (which will issue a SetAddress request to
2456 * the device).
2457 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
2458 * we should only issue and wait on one address command at the same time.
2459 *
2460 * We add one to the device address issued by the hardware because the USB core
2461 * uses address 1 for the root hubs (even though they're not really devices).
2462 */
2463int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
2464{
2465        unsigned long flags;
2466        int timeleft;
2467        struct xhci_virt_device *virt_dev;
2468        int ret = 0;
2469        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2470        struct xhci_slot_ctx *slot_ctx;
2471        struct xhci_input_control_ctx *ctrl_ctx;
2472        u64 temp_64;
2473
2474        if (!udev->slot_id) {
2475                xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
2476                return -EINVAL;
2477        }
2478
2479        virt_dev = xhci->devs[udev->slot_id];
2480
2481        slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2482        /*
2483         * If this is the first Set Address since device plug-in or
2484         * virt_device reallocation after a resume with an xHCI power loss,
2485         * then set up the slot context.
2486         */
2487        if (!slot_ctx->dev_info)
2488                xhci_setup_addressable_virt_dev(xhci, udev);
2489        /* Otherwise, update the control endpoint ring enqueue pointer. */
2490        else
2491                xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
2492        xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
2493        xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
2494
2495        spin_lock_irqsave(&xhci->lock, flags);
2496        ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
2497                                        udev->slot_id);
2498        if (ret) {
2499                spin_unlock_irqrestore(&xhci->lock, flags);
2500                xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
2501                return ret;
2502        }
2503        xhci_ring_cmd_db(xhci);
2504        spin_unlock_irqrestore(&xhci->lock, flags);
2505
2506        /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
2507        timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
2508                        USB_CTRL_SET_TIMEOUT);
2509        /* FIXME: From section 4.3.4: "Software shall be responsible for timing
2510         * the SetAddress() "recovery interval" required by USB and aborting the
2511         * command on a timeout."
2512         */
2513        if (timeleft <= 0) {
2514                xhci_warn(xhci, "%s while waiting for address device command\n",
2515                                timeleft == 0 ? "Timeout" : "Signal");
2516                /* FIXME cancel the address device command */
2517                return -ETIME;
2518        }
2519
2520        switch (virt_dev->cmd_status) {
2521        case COMP_CTX_STATE:
2522        case COMP_EBADSLT:
2523                xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
2524                                udev->slot_id);
2525                ret = -EINVAL;
2526                break;
2527        case COMP_TX_ERR:
2528                dev_warn(&udev->dev, "Device not responding to set address.\n");
2529                ret = -EPROTO;
2530                break;
2531        case COMP_SUCCESS:
2532                xhci_dbg(xhci, "Successful Address Device command\n");
2533                break;
2534        default:
2535                xhci_err(xhci, "ERROR: unexpected command completion "
2536                                "code 0x%x.\n", virt_dev->cmd_status);
2537                xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
2538                xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
2539                ret = -EINVAL;
2540                break;
2541        }
2542        if (ret) {
2543                return ret;
2544        }
2545        temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
2546        xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
2547        xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
2548                        udev->slot_id,
2549                        &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
2550                        (unsigned long long)
2551                                xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
2552        xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
2553                        (unsigned long long)virt_dev->out_ctx->dma);
2554        xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
2555        xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
2556        xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
2557        xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
2558        /*
2559         * USB core uses address 1 for the roothubs, so we add one to the
2560         * address given back to us by the HC.
2561         */
2562        slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
2563        /* Use kernel assigned address for devices; store xHC assigned
2564         * address locally. */
2565        virt_dev->address = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1;
2566        /* Zero the input context control for later use */
2567        ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
2568        ctrl_ctx->add_flags = 0;
2569        ctrl_ctx->drop_flags = 0;
2570
2571        xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);
2572
2573        return 0;
2574}
2575
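/*
 * Editor's example of the off-by-one above: if the xHC assigned hardware
 * address 2 to this slot, virt_dev->address is stored as 3, because usbcore
 * reserves address 1 for the root hub; the kernel-assigned udev->devnum is
 * what the rest of the stack continues to use.
 */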
2576/* Once a hub descriptor is fetched for a device, we need to update the xHC's
2577 * internal data structures for the device.
2578 */
2579int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
2580                        struct usb_tt *tt, gfp_t mem_flags)
2581{
2582        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2583        struct xhci_virt_device *vdev;
2584        struct xhci_command *config_cmd;
2585        struct xhci_input_control_ctx *ctrl_ctx;
2586        struct xhci_slot_ctx *slot_ctx;
2587        unsigned long flags;
2588        unsigned think_time;
2589        int ret;
2590
2591        /* Ignore root hubs */
2592        if (!hdev->parent)
2593                return 0;
2594
2595        vdev = xhci->devs[hdev->slot_id];
2596        if (!vdev) {
2597                xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
2598                return -EINVAL;
2599        }
2600        config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
2601        if (!config_cmd) {
2602                xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
2603                return -ENOMEM;
2604        }
2605
2606        spin_lock_irqsave(&xhci->lock, flags);
2607        xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
2608        ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
2609        ctrl_ctx->add_flags |= SLOT_FLAG;
2610        slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
2611        slot_ctx->dev_info |= DEV_HUB;
2612        if (tt->multi)
2613                slot_ctx->dev_info |= DEV_MTT;
2614        if (xhci->hci_version > 0x95) {
2615                xhci_dbg(xhci, "xHCI version %x needs hub "
2616                                "TT think time and number of ports\n",
2617                                (unsigned int) xhci->hci_version);
2618                slot_ctx->dev_info2 |= XHCI_MAX_PORTS(hdev->maxchild);
2619                /* Set TT think time - convert from ns to FS bit times.
2620                 * 0 = 8 FS bit times, 1 = 16 FS bit times,
2621                 * 2 = 24 FS bit times, 3 = 32 FS bit times.
2622                 */
2623                think_time = tt->think_time;
2624                if (think_time != 0)
2625                        think_time = (think_time / 666) - 1;
2626                slot_ctx->tt_info |= TT_THINK_TIME(think_time);
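                /* Editor's example: usbcore stores the hub's TT think time in
                 * nanoseconds, so a 16-bit-time hub arrives here as 1332 ns
                 * and (1332 / 666) - 1 = 1 selects the "16 FS bit times"
                 * encoding described above.
                 */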
2627        } else {
2628                xhci_dbg(xhci, "xHCI version %x doesn't need hub "
2629                                "TT think time or number of ports\n",
2630                                (unsigned int) xhci->hci_version);
2631        }
2632        slot_ctx->dev_state = 0;
2633        spin_unlock_irqrestore(&xhci->lock, flags);
2634
2635        xhci_dbg(xhci, "Set up %s for hub device.\n",
2636                        (xhci->hci_version > 0x95) ?
2637                        "configure endpoint" : "evaluate context");
2638        xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
2639        xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);
2640
2641        /* Issue and wait for the configure endpoint or
2642         * evaluate context command.
2643         */
2644        if (xhci->hci_version > 0x95)
2645                ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
2646                                false, false);
2647        else
2648                ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
2649                                true, false);
2650
2651        xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
2652        xhci_dbg_ctx(xhci, vdev->out_ctx, 0);
2653
2654        xhci_free_command(xhci, config_cmd);
2655        return ret;
2656}
2657
2658int xhci_get_frame(struct usb_hcd *hcd)
2659{
2660        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2661        /* EHCI mods by the periodic size.  Why? */
2662        return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
2663}
2664
2665MODULE_DESCRIPTION(DRIVER_DESC);
2666MODULE_AUTHOR(DRIVER_AUTHOR);
2667MODULE_LICENSE("GPL");
2668
2669static int __init xhci_hcd_init(void)
2670{
2671#ifdef CONFIG_PCI
2672        int retval = 0;
2673
2674        retval = xhci_register_pci();
2675
2676        if (retval < 0) {
2677                printk(KERN_DEBUG "Problem registering PCI driver.\n");
2678                return retval;
2679        }
2680#endif
2681        /*
2682         * Check the compiler generated sizes of structures that must be laid
2683         * out in specific ways for hardware access.
2684         */
2685        BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
2686        BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
2687        BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
2688        /* xhci_device_control has eight fields, and also
2689         * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
2690         */
2691        BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
2692        BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
2693        BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
2694        BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
2695        BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
2696        /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
2697        BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
2698        BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
2699        return 0;
2700}
2701module_init(xhci_hcd_init);
2702
2703static void __exit xhci_hcd_cleanup(void)
2704{
2705#ifdef CONFIG_PCI
2706        xhci_unregister_pci();
2707#endif
2708}
2709module_exit(xhci_hcd_cleanup);
2710