linux/drivers/usb/host/xhci.c
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes:  "usec" has passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 */
static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		      u32 mask, u32 done, int usec)
{
	u32	result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
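
/*
 * A typical use (a sketch; xhci_halt() below does exactly this): spin for up
 * to XHCI_MAX_HALT_USEC waiting for the HCHalted status bit to latch:
 *
 *	ret = handshake(xhci, &xhci->op_regs->status,
 *			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 */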

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

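	/*
	 * Always mask off the interrupt-enable bits; clear the run/stop bit
	 * too, but only if the controller has not already halted (if it has,
	 * run/stop is already 0 and is left alone).
	 */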
	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
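/*
 * (XHCI_MAX_HALT_USEC used below is presumably that same 16 ms bound,
 * expressed in microseconds.)
 */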
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;
	xhci_dbg(xhci, "// Halt the HC\n");
	xhci_quiesce(xhci);

	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state |= XHCI_STATE_HALTED;
	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state &= ~XHCI_STATE_HALTED;
	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret;

	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);

	ret = handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 250 * 1000);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
	/*
	 * Software cannot write to any doorbells or operational registers
	 * other than status until the "Controller Not Ready" flag is cleared.
	 */
	return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
}

/*
 * Free IRQs
 * Free all IRQs that were requested.
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq >= 0)
		return;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			if (xhci->msix_entries[i].vector)
				free_irq(xhci->msix_entries[i].vector,
						xhci_to_hcd(xhci));
	} else if (pdev->irq >= 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));

	return;
}

/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_err(xhci, "failed to allocate MSI entry\n");
		return ret;
	}

	ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_err(xhci, "disable MSI interrupt\n");
		pci_disable_msi(pdev);
	}

	return ret;
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors to use:
	 * - HCS_MAX_INTRS: the maximum number of interrupters the host can
	 *   handle, from the xHCI HCSPARAMS1 register.
	 * - num_online_cpus: one MSI-X vector per CPU core, plus one extra
	 *   vector so an interrupt vector is always available.
	 */
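	/*
	 * Worked example (illustrative numbers only): on a 4-core system
	 * whose HCSPARAMS1 reports 8 interrupters, msix_count becomes
	 * min(4 + 1, 8) = 5.
	 */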
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
				GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_err(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				(irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_err(xhci, "disable MSI-X interrupt\n");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	xhci_free_irq(xhci);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	hcd->msix_enabled = 0;
	return;
}

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	if (link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	return retval;
}

/*-------------------------------------------------------------------------*/


#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
	u64 temp_64;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, polling stopped.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; ++j) {
			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	/* Start the xHCI host controller running only after the USB 2.0
	 * roothub has been set up.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg(xhci, "xhci_run\n");
	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = -1;

	/* Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		goto legacy_irq;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

	if (ret) {
legacy_irq:
		/* fall back to legacy interrupt */
		ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
					hcd->irq_descr, hcd);
		if (ret) {
			xhci_err(xhci, "request interrupt %d failed\n",
					pdev->irq);
			return ret;
		}
		hcd->irq = pdev->irq;
	}

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
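	/*
	 * A moderation interval of 160, in the xHCI spec's 250 ns units,
	 * is 40 us, i.e. at most ~25000 interrupts per second.
	 */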
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_queue_vendor_command(xhci, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));

	xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
	return 0;
}

static void xhci_only_stop_hcd(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/* The shared_hcd is going to be deallocated shortly (the USB core only
	 * calls this function when allocation fails in usb_add_hcd(), or
	 * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
	 */
	xhci->shared_hcd = NULL;
	spin_unlock_irq(&xhci->lock);
}

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (!usb_hcd_is_primary_hcd(hcd)) {
		xhci_only_stop_hcd(xhci->shared_hcd);
		return;
	}

	spin_lock_irq(&xhci->lock);
	/* Make sure the xHC is halted for a USB3 roothub
	 * (xhci_stop() could be called as part of failed init).
	 */
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
		    xhci_readl(xhci, &xhci->op_regs->status));
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
		    xhci_readl(xhci, &xhci->op_regs->status));
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
}
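
/*
 * Note: s3.erst_dequeue is captured in xhci_save_registers() but is not
 * written back in this restore path.
 */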

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64	val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				      xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer across suspend, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register.  Therefore, we can't just set the dequeue pointer back to the
 * middle of the ring (TRBs are 16-byte aligned).
 */
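/*
 * (Sketch of the arithmetic: CMD_RING_RSVD_BITS covers the low 6 bits of the
 * register, so only 64-byte-aligned addresses survive a write-back, while
 * TRBs are only 16 bytes each; an arbitrary dequeue position is therefore
 * not representable, and the ring is reset to its first segment instead.)
 */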
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0, SEGMENT_SIZE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci)
{
	int			rc = 0;
	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
	u32			command;
	int			i;

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped; we assume port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command &= ~CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status,
		      STS_HALT, STS_HALT, 100*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_CSS;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
	}

	return rc;
}

/*
 * Start xHC (not bus-specific)
 *
 * This is called when the machine transitions out of S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32			command, temp = 0;
	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
	struct usb_hcd		*secondary_hcd;
	int			retval;

	/* Wait a bit if either of the roothubs need to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	spin_lock_irq(&xhci->lock);
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		hibernated = true;

	if (!hibernated) {
		/* step 1: restore registers */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state */
		/* step 3: set CRS flag */
		command = xhci_readl(xhci, &xhci->op_regs->command);
		command |= CMD_CRS;
		xhci_writel(xhci, command, &xhci->op_regs->command);
		if (handshake(xhci, &xhci->op_regs->status,
			      STS_RESTORE, 0, 10*100)) {
			xhci_dbg(xhci, "WARN: xHC CMD_CRS timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = xhci_readl(xhci, &xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {
		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
		/* Tell the event ring poll function not to reschedule */
		xhci->zombie = 1;
		del_timer_sync(&xhci->event_ring_timer);
#endif

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = xhci_readl(xhci, &xhci->op_regs->status);
		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
				&xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			    xhci_readl(xhci, &xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (retval)
			goto failed_restart;

		xhci_dbg(xhci, "Start the secondary HCD\n");
		retval = xhci_run(secondary_hcd);
		if (!retval) {
			set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
			set_bit(HCD_FLAG_HW_ACCESSIBLE,
					&xhci->shared_hcd->flags);
		}
failed_restart:
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		return retval;
	}

	/* step 4: set Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	handshake(xhci, &xhci->op_regs->status, STS_HALT,
		  0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * running endpoints by ringing their doorbells
	 */

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_unlock_irq(&xhci->lock);
	return 0;
}
#endif	/* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to left shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;
	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
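
/*
 * Worked examples (illustrative): ep 0x81 (ep 1 IN) -> 1 * 2 + 1 - 1 = 2;
 * ep 0x02 (ep 2 OUT) -> 2 * 2 + 0 - 1 = 3; the default control ep 0 -> 0.
 */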

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}
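
/*
 * E.g. (continuing the example above): ep index 2 yields the bitmask
 * 1 << (2 + 1) = 0x8.
 */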

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
				func);
		return -EINVAL;
	}
	if (!udev->parent) {
		printk(KERN_DEBUG "xHCI %s called for root hub\n",
				func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs
			|| !xhci->devs[udev->slot_id]) {
			printk(KERN_DEBUG "xHCI %s called with unaddressed "
						"device\n", func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			printk(KERN_DEBUG "xHCI %s called with udev that "
					  "does not match virt_dev\n", func);
			return -EINVAL;
		}
	}

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
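/*
 * (E.g., a full-speed device's ep 0 may first be assumed to have an 8-byte
 * max packet size, while its device descriptor later reports 16, 32, or 64
 * bytes; the xHC's endpoint 0 context then has to be brought up to date.)
 */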
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = le16_to_cpu(urb->dev->ep0.desc.wMaxPacketSize);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
				max_packet_size);
		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
				hw_max_packet_size);
		xhci_dbg(xhci, "Issuing evaluate context command.\n");

		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
	}
	return ret;
}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv	*urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else
		size = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
				  size * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = kzalloc(sizeof(struct xhci_td), mem_flags);
		if (!urb_priv->td[i]) {
			urb_priv->length = i;
			xhci_urb_free_priv(xhci, urb_priv);
			return -ENOMEM;
		}
	}

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(xhci, urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
exit:
	return ret;
dying:
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
			"non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	ret = -ESHUTDOWN;
free_priv:
	xhci_urb_free_priv(xhci, urb_priv);
	urb->hcpriv = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
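/*
 * (Stream IDs are 1-based here: ID 0 is reserved, so with num_streams
 * entries allocated the valid IDs are 1 .. num_streams - 1.)
 */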
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	stream_id = urb->stream_id;
	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb().
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv	*urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, freeing TD.\n");
		urb_priv = urb->hcpriv;
		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
			td = urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
		xhci_urb_free_priv(xhci, urb_priv);
		return ret;
	}
	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
				"non-responsive xHCI host.\n",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists.  We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */
		goto done;
	}

	xhci_dbg(xhci, "Cancel URB %p\n", urb);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	xhci_dbg(xhci, "Endpoint ring:\n");
	xhci_debug_ring(xhci, ep_ring);

	urb_priv = urb->hcpriv;

	for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
1333int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1334                struct usb_host_endpoint *ep)
1335{
1336        struct xhci_hcd *xhci;
1337        struct xhci_container_ctx *in_ctx, *out_ctx;
1338        struct xhci_input_control_ctx *ctrl_ctx;
1339        struct xhci_slot_ctx *slot_ctx;
1340        unsigned int last_ctx;
1341        unsigned int ep_index;
1342        struct xhci_ep_ctx *ep_ctx;
1343        u32 drop_flag;
1344        u32 new_add_flags, new_drop_flags, new_slot_info;
1345        int ret;
1346
1347        ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1348        if (ret <= 0)
1349                return ret;
1350        xhci = hcd_to_xhci(hcd);
1351        if (xhci->xhc_state & XHCI_STATE_DYING)
1352                return -ENODEV;
1353
1354        xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1355        drop_flag = xhci_get_endpoint_flag(&ep->desc);
1356        if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
1357                xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1358                                __func__, drop_flag);
1359                return 0;
1360        }
1361
1362        in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1363        out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1364        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1365        ep_index = xhci_get_endpoint_index(&ep->desc);
1366        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1367        /* If the HC already knows the endpoint is disabled,
1368         * or the HCD has noted it is disabled, ignore this request
1369         */
1370        if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
1371             cpu_to_le32(EP_STATE_DISABLED)) ||
1372            le32_to_cpu(ctrl_ctx->drop_flags) &
1373            xhci_get_endpoint_flag(&ep->desc)) {
1374                xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1375                                __func__, ep);
1376                return 0;
1377        }
1378
1379        ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
1380        new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1381
1382        ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
1383        new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1384
1385        last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
1386        slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1387        /* Update the last valid endpoint context, if we deleted the last one */
1388        if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
1389            LAST_CTX(last_ctx)) {
1390                slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1391                slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
1392        }
1393        new_slot_info = le32_to_cpu(slot_ctx->dev_info);
1394
1395        xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1396
1397        xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
1398                        (unsigned int) ep->desc.bEndpointAddress,
1399                        udev->slot_id,
1400                        (unsigned int) new_drop_flags,
1401                        (unsigned int) new_add_flags,
1402                        (unsigned int) new_slot_info);
1403        return 0;
1404}
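
/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * drop/add contract documented above, as a hypothetical caller inside the
 * USB core would exercise it through the hc_driver ops.  Each endpoint may
 * be dropped at most once, optionally re-added with a new descriptor, and
 * the whole batch is then committed with check_bandwidth() or rolled back
 * with reset_bandwidth().
 */
static int __maybe_unused example_change_ep_params(struct usb_hcd *hcd,
		struct usb_device *udev, struct usb_host_endpoint *old_ep,
		struct usb_host_endpoint *new_ep)
{
	int ret;

	/* Legal: drop first, then add with (possibly) new parameters. */
	ret = hcd->driver->drop_endpoint(hcd, udev, old_ep);
	if (ret)
		return ret;
	ret = hcd->driver->add_endpoint(hcd, udev, new_ep);
	if (ret)
		return ret;
	/* Commit both changes in a single configure endpoint command. */
	ret = hcd->driver->check_bandwidth(hcd, udev);
	if (ret)
		hcd->driver->reset_bandwidth(hcd, udev);
	return ret;
}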
1405
1406/* Add an endpoint to a new possible bandwidth configuration for this device.
1407 * Only one call to this function is allowed per endpoint before
1408 * check_bandwidth() or reset_bandwidth() must be called.
1409 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1410 * add the endpoint to the schedule with possibly new parameters denoted by a
1411 * different endpoint descriptor in usb_host_endpoint.
1412 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1413 * not allowed.
1414 *
1415 * The USB core will not allow URBs to be queued to an endpoint until the
1416 * configuration or alt setting is installed in the device, so there's no need
1417 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
1418 */
1419int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1420                struct usb_host_endpoint *ep)
1421{
1422        struct xhci_hcd *xhci;
1423        struct xhci_container_ctx *in_ctx, *out_ctx;
1424        unsigned int ep_index;
1425        struct xhci_ep_ctx *ep_ctx;
1426        struct xhci_slot_ctx *slot_ctx;
1427        struct xhci_input_control_ctx *ctrl_ctx;
1428        u32 added_ctxs;
1429        unsigned int last_ctx;
1430        u32 new_add_flags, new_drop_flags, new_slot_info;
1431        struct xhci_virt_device *virt_dev;
1432        int ret = 0;
1433
1434        ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1435        if (ret <= 0) {
1436                /* So we won't queue a reset ep command for a root hub */
1437                ep->hcpriv = NULL;
1438                return ret;
1439        }
1440        xhci = hcd_to_xhci(hcd);
1441        if (xhci->xhc_state & XHCI_STATE_DYING)
1442                return -ENODEV;
1443
1444        added_ctxs = xhci_get_endpoint_flag(&ep->desc);
1445        last_ctx = xhci_last_valid_endpoint(added_ctxs);
1446        if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
1447                /* FIXME when we have to issue an evaluate context command to
1448                 * deal with ep0 max packet size changing once we get the
1449                 * descriptors
1450                 */
1451                xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1452                                __func__, added_ctxs);
1453                return 0;
1454        }
1455
1456        virt_dev = xhci->devs[udev->slot_id];
1457        in_ctx = virt_dev->in_ctx;
1458        out_ctx = virt_dev->out_ctx;
1459        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1460        ep_index = xhci_get_endpoint_index(&ep->desc);
1461        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1462
1463        /* If this endpoint is already in use, and the upper layers are trying
1464         * to add it again without dropping it, reject the addition.
1465         */
1466        if (virt_dev->eps[ep_index].ring &&
1467                        !(le32_to_cpu(ctrl_ctx->drop_flags) &
1468                                xhci_get_endpoint_flag(&ep->desc))) {
1469                xhci_warn(xhci, "Trying to add endpoint 0x%x "
1470                                "without dropping it.\n",
1471                                (unsigned int) ep->desc.bEndpointAddress);
1472                return -EINVAL;
1473        }
1474
1475        /* If the HCD has already noted the endpoint is enabled,
1476         * ignore this request.
1477         */
1478        if (le32_to_cpu(ctrl_ctx->add_flags) &
1479            xhci_get_endpoint_flag(&ep->desc)) {
1480                xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1481                                __func__, ep);
1482                return 0;
1483        }
1484
1485        /*
1486         * Configuration and alternate setting changes must be done in
1487         * process context, not interrupt context (or so the documentation
1488         * for usb_set_interface() and usb_set_configuration() claims).
1489         */
1490        if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
1491                dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1492                                __func__, ep->desc.bEndpointAddress);
1493                return -ENOMEM;
1494        }
1495
1496        ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
1497        new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1498
1499        /* If xhci_endpoint_disable() was called for this endpoint, but the
1500         * xHC hasn't been notified yet through the check_bandwidth() call,
1501         * this re-adds a new state for the endpoint from the new endpoint
1502         * descriptors.  We must drop and re-add this endpoint, so we leave the
1503         * drop flags alone.
1504         */
1505        new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1506
1507        slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1508        /* Update the last valid endpoint context, if we just added one past it */
1509        if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
1510            LAST_CTX(last_ctx)) {
1511                slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1512                slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
1513        }
1514        new_slot_info = le32_to_cpu(slot_ctx->dev_info);
1515
1516        /* Store the usb_device pointer for later use */
1517        ep->hcpriv = udev;
1518
1519        xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
1520                        (unsigned int) ep->desc.bEndpointAddress,
1521                        udev->slot_id,
1522                        (unsigned int) new_drop_flags,
1523                        (unsigned int) new_add_flags,
1524                        (unsigned int) new_slot_info);
1525        return 0;
1526}
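
/*
 * Worked example (editor's addition): how a descriptor maps to the add/drop
 * flag bits used above.  Bit 0 of the input control context flags is the
 * slot context and bit 1 is endpoint 0, so endpoint context index i uses
 * flag bit i + 1.  A bulk IN endpoint 1 has index 1 * 2 + 1 - 1 = 2 and
 * therefore flag 1 << 3 = 0x8, which is what xhci_get_endpoint_flag()
 * returns for it.  A minimal sketch of that relationship:
 */
static u32 __maybe_unused example_ep_flag(struct usb_host_endpoint *ep)
{
	/* e.g. bulk IN endpoint 1: index 2, so flag = 1 << 3 = 0x8 */
	return 1 << (xhci_get_endpoint_index(&ep->desc) + 1);
}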
1527
1528static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
1529{
1530        struct xhci_input_control_ctx *ctrl_ctx;
1531        struct xhci_ep_ctx *ep_ctx;
1532        struct xhci_slot_ctx *slot_ctx;
1533        int i;
1534
1535        /* When a device's add flag and drop flag are zero, any subsequent
1536         * configure endpoint command will leave that endpoint's state
1537         * untouched.  Make sure we don't leave any old state in the input
1538         * endpoint contexts.
1539         */
1540        ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
1541        ctrl_ctx->drop_flags = 0;
1542        ctrl_ctx->add_flags = 0;
1543        slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1544        slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1545        /* Endpoint 0 is always valid */
1546        slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
1547        for (i = 1; i < 31; ++i) {
1548                ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
1549                ep_ctx->ep_info = 0;
1550                ep_ctx->ep_info2 = 0;
1551                ep_ctx->deq = 0;
1552                ep_ctx->tx_info = 0;
1553        }
1554}
1555
1556static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1557                struct usb_device *udev, u32 *cmd_status)
1558{
1559        int ret;
1560
1561        switch (*cmd_status) {
1562        case COMP_ENOMEM:
1563                dev_warn(&udev->dev, "Not enough host controller resources "
1564                                "for new device state.\n");
1565                ret = -ENOMEM;
1566                /* FIXME: can we allocate more resources for the HC? */
1567                break;
1568        case COMP_BW_ERR:
1569                dev_warn(&udev->dev, "Not enough bandwidth "
1570                                "for new device state.\n");
1571                ret = -ENOSPC;
1572                /* FIXME: can we go back to the old state? */
1573                break;
1574        case COMP_TRB_ERR:
1575                /* the HCD set up something wrong */
1576                dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
1577                                "add flag = 1, "
1578                                "and endpoint is not disabled.\n");
1579                ret = -EINVAL;
1580                break;
1581        case COMP_DEV_ERR:
1582                dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
1583                                "configure command.\n");
1584                ret = -ENODEV;
1585                break;
1586        case COMP_SUCCESS:
1587                dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
1588                ret = 0;
1589                break;
1590        default:
1591                xhci_err(xhci, "ERROR: unexpected command completion "
1592                                "code 0x%x.\n", *cmd_status);
1593                ret = -EINVAL;
1594                break;
1595        }
1596        return ret;
1597}
1598
1599static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
1600                struct usb_device *udev, u32 *cmd_status)
1601{
1602        int ret;
1603        struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
1604
1605        switch (*cmd_status) {
1606        case COMP_EINVAL:
1607                dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
1608                                "context command.\n");
1609                ret = -EINVAL;
1610                break;
1611        case COMP_EBADSLT:
1612                dev_warn(&udev->dev, "WARN: slot not enabled for "
1613                                "evaluate context command.\n");
                    ret = -EINVAL;
                    break;
1614        case COMP_CTX_STATE:
1615                dev_warn(&udev->dev, "WARN: invalid context state for "
1616                                "evaluate context command.\n");
1617                xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
1618                ret = -EINVAL;
1619                break;
1620        case COMP_DEV_ERR:
1621                dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
1622                                "context command.\n");
1623                ret = -ENODEV;
1624                break;
1625        case COMP_MEL_ERR:
1626                /* Max Exit Latency too large error */
1627                dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
1628                ret = -EINVAL;
1629                break;
1630        case COMP_SUCCESS:
1631                dev_dbg(&udev->dev, "Successful evaluate context command\n");
1632                ret = 0;
1633                break;
1634        default:
1635                xhci_err(xhci, "ERROR: unexpected command completion "
1636                                "code 0x%x.\n", *cmd_status);
1637                ret = -EINVAL;
1638                break;
1639        }
1640        return ret;
1641}
1642
1643static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
1644                struct xhci_container_ctx *in_ctx)
1645{
1646        struct xhci_input_control_ctx *ctrl_ctx;
1647        u32 valid_add_flags;
1648        u32 valid_drop_flags;
1649
1650        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1651        /* Ignore the slot flag (bit 0), and the default control endpoint flag
1652         * (bit 1).  The default control endpoint is added during the Address
1653         * Device command and is never removed until the slot is disabled.
1654         */
1655        valid_add_flags = ctrl_ctx->add_flags >> 2;
1656        valid_drop_flags = ctrl_ctx->drop_flags >> 2;
1657
1658        /* Use hweight32 to count the number of ones in the add flags, or
1659         * number of endpoints added.  Don't count endpoints that are changed
1660         * (both added and dropped).
1661         */
1662        return hweight32(valid_add_flags) -
1663                hweight32(valid_add_flags & valid_drop_flags);
1664}
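
/*
 * Worked example (editor's addition): with ctrl_ctx->add_flags = 0x38 and
 * drop_flags = 0x18, the shifts above give valid_add_flags = 0xe (3 bits
 * set) and valid_drop_flags = 0x6 (2 bits set).  Two of the added
 * endpoints were also dropped (0xe & 0x6 = 0x6, hweight32 = 2), i.e. they
 * are merely changed, so only 3 - 2 = 1 genuinely new endpoint context is
 * counted.  The companion function below counts dropped-but-not-re-added
 * endpoints the same way.
 */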
1665
1666static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
1667                struct xhci_container_ctx *in_ctx)
1668{
1669        struct xhci_input_control_ctx *ctrl_ctx;
1670        u32 valid_add_flags;
1671        u32 valid_drop_flags;
1672
1673        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1674        valid_add_flags = ctrl_ctx->add_flags >> 2;
1675        valid_drop_flags = ctrl_ctx->drop_flags >> 2;
1676
1677        return hweight32(valid_drop_flags) -
1678                hweight32(valid_add_flags & valid_drop_flags);
1679}
1680
1681/*
1682 * We need to reserve the new number of endpoints before the configure endpoint
1683 * command completes.  We can't subtract the dropped endpoints from the number
1684 * of active endpoints until the command completes because we can oversubscribe
1685 * the host in this case:
1686 *
1687 *  - the first configure endpoint command drops more endpoints than it adds
1688 *  - a second configure endpoint command that adds more endpoints is queued
1689 *  - the first configure endpoint command fails, so the config is unchanged
1690 *  - the second command may succeed even though there aren't enough resources
1691 *
1692 * Must be called with xhci->lock held.
1693 */
1694static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
1695                struct xhci_container_ctx *in_ctx)
1696{
1697        u32 added_eps;
1698
1699        added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
1700        if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
1701                xhci_dbg(xhci, "Not enough ep ctxs: "
1702                                "%u active, need to add %u, limit is %u.\n",
1703                                xhci->num_active_eps, added_eps,
1704                                xhci->limit_active_eps);
1705                return -ENOMEM;
1706        }
1707        xhci->num_active_eps += added_eps;
1708        xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
1709                        xhci->num_active_eps);
1710        return 0;
1711}
1712
1713/*
1714 * The xHC failed the configure endpoint command for some other reason, so we
1715 * need to revert the resources that the failed configuration would have used.
1716 *
1717 * Must be called with xhci->lock held.
1718 */
1719static void xhci_free_host_resources(struct xhci_hcd *xhci,
1720                struct xhci_container_ctx *in_ctx)
1721{
1722        u32 num_failed_eps;
1723
1724        num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
1725        xhci->num_active_eps -= num_failed_eps;
1726        xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
1727                        num_failed_eps,
1728                        xhci->num_active_eps);
1729}
1730
1731/*
1732 * Now that the command has completed, clean up the active endpoint count by
1733 * subtracting out the endpoints that were dropped (but not changed).
1734 *
1735 * Must be called with xhci->lock held.
1736 */
1737static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
1738                struct xhci_container_ctx *in_ctx)
1739{
1740        u32 num_dropped_eps;
1741
1742        num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
1743        xhci->num_active_eps -= num_dropped_eps;
1744        if (num_dropped_eps)
1745                xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
1746                                num_dropped_eps,
1747                                xhci->num_active_eps);
1748}
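
/*
 * Worked example (editor's addition) of the accounting above, assuming
 * limit_active_eps = 8 and num_active_eps = 6: a command that drops three
 * endpoints and adds two new ones reserves two contexts up front (6 -> 8).
 * If the command fails, xhci_free_host_resources() undoes the reservation
 * (8 -> 6); if it succeeds, xhci_finish_resource_reservation() subtracts
 * the three dropped endpoints (8 -> 5).  Deferring the subtraction is what
 * prevents a second, concurrent command from being granted resources that
 * the first command's drops might never actually release.
 */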
1749
1750/* Issue a configure endpoint command or evaluate context command
1751 * and wait for it to finish.
1752 */
1753static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1754                struct usb_device *udev,
1755                struct xhci_command *command,
1756                bool ctx_change, bool must_succeed)
1757{
1758        int ret;
1759        int timeleft;
1760        unsigned long flags;
1761        struct xhci_container_ctx *in_ctx;
1762        struct completion *cmd_completion;
1763        u32 *cmd_status;
1764        struct xhci_virt_device *virt_dev;
1765
1766        spin_lock_irqsave(&xhci->lock, flags);
1767        virt_dev = xhci->devs[udev->slot_id];
1768        if (command) {
1769                in_ctx = command->in_ctx;
1770                if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
1771                                xhci_reserve_host_resources(xhci, in_ctx)) {
1772                        spin_unlock_irqrestore(&xhci->lock, flags);
1773                        xhci_warn(xhci, "Not enough host resources, "
1774                                        "active endpoint contexts = %u\n",
1775                                        xhci->num_active_eps);
1776                        return -ENOMEM;
1777                }
1778
1779                cmd_completion = command->completion;
1780                cmd_status = &command->status;
1781                command->command_trb = xhci->cmd_ring->enqueue;
1782
1783                /* The enqueue pointer can be left pointing to the link TRB,
1784                 * so we must handle that case.
1785                 */
1786                if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
1787                        command->command_trb =
1788                                xhci->cmd_ring->enq_seg->next->trbs;
1789
1790                list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
1791        } else {
1792                in_ctx = virt_dev->in_ctx;
1793                if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
1794                                xhci_reserve_host_resources(xhci, in_ctx)) {
1795                        spin_unlock_irqrestore(&xhci->lock, flags);
1796                        xhci_warn(xhci, "Not enough host resources, "
1797                                        "active endpoint contexts = %u\n",
1798                                        xhci->num_active_eps);
1799                        return -ENOMEM;
1800                }
1801                cmd_completion = &virt_dev->cmd_completion;
1802                cmd_status = &virt_dev->cmd_status;
1803        }
1804        init_completion(cmd_completion);
1805
1806        if (!ctx_change)
1807                ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
1808                                udev->slot_id, must_succeed);
1809        else
1810                ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
1811                                udev->slot_id);
1812        if (ret < 0) {
1813                if (command)
1814                        list_del(&command->cmd_list);
1815                if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
1816                        xhci_free_host_resources(xhci, in_ctx);
1817                spin_unlock_irqrestore(&xhci->lock, flags);
1818                xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
1819                return -ENOMEM;
1820        }
1821        xhci_ring_cmd_db(xhci);
1822        spin_unlock_irqrestore(&xhci->lock, flags);
1823
1824        /* Wait for the configure endpoint command to complete */
1825        timeleft = wait_for_completion_interruptible_timeout(
1826                        cmd_completion,
1827                        USB_CTRL_SET_TIMEOUT);
1828        if (timeleft <= 0) {
1829                xhci_warn(xhci, "%s while waiting for %s command\n",
1830                                timeleft == 0 ? "Timeout" : "Signal",
1831                                ctx_change == 0 ?
1832                                        "configure endpoint" :
1833                                        "evaluate context");
1834                /* FIXME cancel the configure endpoint command */
1835                return -ETIME;
1836        }
1837
1838        if (!ctx_change)
1839                ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
1840        else
1841                ret = xhci_evaluate_context_result(xhci, udev, cmd_status);
1842
1843        if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
1844                spin_lock_irqsave(&xhci->lock, flags);
1845                /* If the command failed, remove the reserved resources.
1846                 * Otherwise, clean up the estimate to include dropped eps.
1847                 */
1848                if (ret)
1849                        xhci_free_host_resources(xhci, in_ctx);
1850                else
1851                        xhci_finish_resource_reservation(xhci, in_ctx);
1852                spin_unlock_irqrestore(&xhci->lock, flags);
1853        }
1854        return ret;
1855}
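
/*
 * Editor's sketch (not driver API) of the synchronous command pattern that
 * xhci_configure_endpoint() uses, reduced to its skeleton: queue a command
 * TRB under the lock, ring the command doorbell, then sleep on a completion
 * that the command-completion event handler signals.  The "queue" callback
 * here is a hypothetical stand-in for xhci_queue_configure_endpoint() etc.
 */
static int __maybe_unused example_sync_cmd(struct xhci_hcd *xhci,
		struct completion *done, int (*queue)(struct xhci_hcd *xhci))
{
	unsigned long flags;
	int ret;

	init_completion(done);
	spin_lock_irqsave(&xhci->lock, flags);
	ret = queue(xhci);		/* enqueue the command TRB */
	if (ret < 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return ret;
	}
	xhci_ring_cmd_db(xhci);		/* tell the xHC to fetch it */
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* The event handler calls complete() when the command finishes. */
	if (wait_for_completion_interruptible_timeout(done,
				USB_CTRL_SET_TIMEOUT) <= 0)
		return -ETIME;
	return 0;
}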
1856
1857/* Called after one or more calls to xhci_add_endpoint() or
1858 * xhci_drop_endpoint().  If this call fails, the USB core is expected
1859 * to call xhci_reset_bandwidth().
1860 *
1861 * Since we are in the middle of changing either configuration or
1862 * installing a new alt setting, the USB core won't allow URBs to be
1863 * enqueued for any endpoint on the old config or interface.  Nothing
1864 * else should be touching the xhci->devs[slot_id] structure, so we
1865 * don't need to take the xhci->lock for manipulating that.
1866 */
1867int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1868{
1869        int i;
1870        int ret = 0;
1871        struct xhci_hcd *xhci;
1872        struct xhci_virt_device *virt_dev;
1873        struct xhci_input_control_ctx *ctrl_ctx;
1874        struct xhci_slot_ctx *slot_ctx;
1875
1876        ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
1877        if (ret <= 0)
1878                return ret;
1879        xhci = hcd_to_xhci(hcd);
1880        if (xhci->xhc_state & XHCI_STATE_DYING)
1881                return -ENODEV;
1882
1883        xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1884        virt_dev = xhci->devs[udev->slot_id];
1885
1886        /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
1887        ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
1888        ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
1889        ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
1890        ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
1891        xhci_dbg(xhci, "New Input Control Context:\n");
1892        slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1893        xhci_dbg_ctx(xhci, virt_dev->in_ctx,
1894                     LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
1895
1896        ret = xhci_configure_endpoint(xhci, udev, NULL,
1897                        false, false);
1898        if (ret) {
1899                /* Caller should call reset_bandwidth() */
1900                return ret;
1901        }
1902
1903        xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
1904        xhci_dbg_ctx(xhci, virt_dev->out_ctx,
1905                     LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
1906
1907        /* Free any rings that were dropped, but not changed. */
1908        for (i = 1; i < 31; ++i) {
1909                if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
1910                    !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
1911                        xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
1912        }
1913        xhci_zero_in_ctx(xhci, virt_dev);
1914        /*
1915         * Install any rings for completely new endpoints or changed endpoints,
1916         * and free or cache any old rings from changed endpoints.
1917         */
1918        for (i = 1; i < 31; ++i) {
1919                if (!virt_dev->eps[i].new_ring)
1920                        continue;
1921                /* Only cache or free the old ring if it exists.
1922                 * It may not if this is the first add of an endpoint.
1923                 */
1924                if (virt_dev->eps[i].ring) {
1925                        xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
1926                }
1927                virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
1928                virt_dev->eps[i].new_ring = NULL;
1929        }
1930
1931        return ret;
1932}
1933
1934void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1935{
1936        struct xhci_hcd *xhci;
1937        struct xhci_virt_device *virt_dev;
1938        int i, ret;
1939
1940        ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
1941        if (ret <= 0)
1942                return;
1943        xhci = hcd_to_xhci(hcd);
1944
1945        xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1946        virt_dev = xhci->devs[udev->slot_id];
1947        /* Free any rings allocated for added endpoints */
1948        for (i = 0; i < 31; ++i) {
1949                if (virt_dev->eps[i].new_ring) {
1950                        xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
1951                        virt_dev->eps[i].new_ring = NULL;
1952                }
1953        }
1954        xhci_zero_in_ctx(xhci, virt_dev);
1955}
1956
1957static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
1958                struct xhci_container_ctx *in_ctx,
1959                struct xhci_container_ctx *out_ctx,
1960                u32 add_flags, u32 drop_flags)
1961{
1962        struct xhci_input_control_ctx *ctrl_ctx;
1963        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1964        ctrl_ctx->add_flags = cpu_to_le32(add_flags);
1965        ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
1966        xhci_slot_copy(xhci, in_ctx, out_ctx);
1967        ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
1968
1969        xhci_dbg(xhci, "Input Context:\n");
1970        xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
1971}
1972
1973static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
1974                unsigned int slot_id, unsigned int ep_index,
1975                struct xhci_dequeue_state *deq_state)
1976{
1977        struct xhci_container_ctx *in_ctx;
1978        struct xhci_ep_ctx *ep_ctx;
1979        u32 added_ctxs;
1980        dma_addr_t addr;
1981
1982        xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
1983                        xhci->devs[slot_id]->out_ctx, ep_index);
1984        in_ctx = xhci->devs[slot_id]->in_ctx;
1985        ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
1986        addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
1987                        deq_state->new_deq_ptr);
1988        if (addr == 0) {
1989                xhci_warn(xhci, "WARN Cannot submit config ep after "
1990                                "reset ep command\n");
1991                xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
1992                                deq_state->new_deq_seg,
1993                                deq_state->new_deq_ptr);
1994                return;
1995        }
1996        ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
1997
1998        added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
1999        xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
2000                        xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
2001}
2002
2003void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2004                struct usb_device *udev, unsigned int ep_index)
2005{
2006        struct xhci_dequeue_state deq_state;
2007        struct xhci_virt_ep *ep;
2008
2009        xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
2010        ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2011        /* We need to move the HW's dequeue pointer past this TD,
2012         * or it will attempt to resend it on the next doorbell ring.
2013         */
2014        xhci_find_new_dequeue_state(xhci, udev->slot_id,
2015                        ep_index, ep->stopped_stream, ep->stopped_td,
2016                        &deq_state);
2017
2018        /* HW with the reset endpoint quirk will use the saved dequeue state to
2019         * issue a configure endpoint command later.
2020         */
2021        if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
2022                xhci_dbg(xhci, "Queueing new dequeue state\n");
2023                xhci_queue_new_dequeue_state(xhci, udev->slot_id,
2024                                ep_index, ep->stopped_stream, &deq_state);
2025        } else {
2026                /* Better hope no one uses the input context between now and the
2027                 * reset endpoint completion!
2028                 * XXX: No idea how this hardware will react when stream rings
2029                 * are enabled.
2030                 */
2031                xhci_dbg(xhci, "Setting up input context for "
2032                                "configure endpoint command\n");
2033                xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
2034                                ep_index, &deq_state);
2035        }
2036}
2037
2038/* Deal with stalled endpoints.  The core should have sent the control message
2039 * to clear the halt condition.  However, we need to make the xHCI hardware
2040 * reset its sequence number, since a device will expect a sequence number of
2041 * zero after the halt condition is cleared.
2042 * Context: in_interrupt
2043 */
2044void xhci_endpoint_reset(struct usb_hcd *hcd,
2045                struct usb_host_endpoint *ep)
2046{
2047        struct xhci_hcd *xhci;
2048        struct usb_device *udev;
2049        unsigned int ep_index;
2050        unsigned long flags;
2051        int ret;
2052        struct xhci_virt_ep *virt_ep;
2053
2054        xhci = hcd_to_xhci(hcd);
2055        udev = (struct usb_device *) ep->hcpriv;
2056        /* Called with a root hub endpoint (or an endpoint that wasn't added
2057         * with xhci_add_endpoint()), so there is nothing to reset.
2058         */
2059        if (!ep->hcpriv)
2060                return;
2061        ep_index = xhci_get_endpoint_index(&ep->desc);
2062        virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2063        if (!virt_ep->stopped_td) {
2064                xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
2065                                ep->desc.bEndpointAddress);
2066                return;
2067        }
2068        if (usb_endpoint_xfer_control(&ep->desc)) {
2069                xhci_dbg(xhci, "Control endpoint stall already handled.\n");
2070                return;
2071        }
2072
2073        xhci_dbg(xhci, "Queueing reset endpoint command\n");
2074        spin_lock_irqsave(&xhci->lock, flags);
2075        ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
2076        /*
2077         * Can't change the ring dequeue pointer until it's transitioned to the
2078         * stopped state, which is only upon a successful reset endpoint
2079         * command.  Better hope that last command worked!
2080         */
2081        if (!ret) {
2082                xhci_cleanup_stalled_ring(xhci, udev, ep_index);
2083                kfree(virt_ep->stopped_td);
2084                xhci_ring_cmd_db(xhci);
2085        }
2086        virt_ep->stopped_td = NULL;
2087        virt_ep->stopped_trb = NULL;
2088        virt_ep->stopped_stream = 0;
2089        spin_unlock_irqrestore(&xhci->lock, flags);
2090
2091        if (ret)
2092                xhci_warn(xhci, "FIXME allocate a new ring segment\n");
2093}
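
/*
 * Editor's note with an illustrative caller-side sketch: class drivers do
 * not call xhci_endpoint_reset() directly.  They clear a stall through the
 * USB core, which sends CLEAR_FEATURE(ENDPOINT_HALT) to the device and
 * then invokes the HCD's endpoint_reset op (this function) to reset the
 * host-side sequence number state:
 */
static int __maybe_unused example_clear_stall(struct usb_device *udev,
		int pipe)
{
	/* Clears the device-side halt, then resets host-side endpoint
	 * state via hcd->driver->endpoint_reset().
	 */
	return usb_clear_halt(udev, pipe);
}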
2094
2095static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
2096                struct usb_device *udev, struct usb_host_endpoint *ep,
2097                unsigned int slot_id)
2098{
2099        int ret;
2100        unsigned int ep_index;
2101        unsigned int ep_state;
2102
2103        if (!ep)
2104                return -EINVAL;
2105        ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
2106        if (ret <= 0)
2107                return -EINVAL;
2108        if (ep->ss_ep_comp.bmAttributes == 0) {
2109                xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
2110                                " descriptor for ep 0x%x does not support streams\n",
2111                                ep->desc.bEndpointAddress);
2112                return -EINVAL;
2113        }
2114
2115        ep_index = xhci_get_endpoint_index(&ep->desc);
2116        ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
2117        if (ep_state & EP_HAS_STREAMS ||
2118                        ep_state & EP_GETTING_STREAMS) {
2119                xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
2120                                "already has streams set up.\n",
2121                                ep->desc.bEndpointAddress);
2122                xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
2123                                "dynamic stream context array reallocation.\n");
2124                return -EINVAL;
2125        }
2126        if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
2127                xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
2128                                "endpoint 0x%x; URBs are pending.\n",
2129                                ep->desc.bEndpointAddress);
2130                return -EINVAL;
2131        }
2132        return 0;
2133}
2134
2135static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
2136                unsigned int *num_streams, unsigned int *num_stream_ctxs)
2137{
2138        unsigned int max_streams;
2139
2140        /* The stream context array size must be a power of two */
2141        *num_stream_ctxs = roundup_pow_of_two(*num_streams);
2142        /*
2143         * Find out how many primary stream array entries the host controller
2144         * supports.  Later we may use secondary stream arrays (similar to 2nd
2145         * level page entries), but that's an optional feature for xHCI host
2146         * level page tables), but that's an optional feature for xHCI host
2147         */
2148        max_streams = HCC_MAX_PSA(xhci->hcc_params);
2149        if (*num_stream_ctxs > max_streams) {
2150                xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
2151                                max_streams);
2152                *num_stream_ctxs = max_streams;
2153                *num_streams = max_streams;
2154        }
2155}
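
/*
 * Worked example (editor's addition): a driver asking for 5 stream IDs
 * (plus stream 0, so *num_streams == 6 on entry) needs a context array of
 * roundup_pow_of_two(6) = 8 entries.  If HCC_MAX_PSA() said the host only
 * supports 4 primary stream array entries, both values would be clamped
 * to 4, leaving the caller with 3 usable stream IDs.
 */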
2156
2157/* Returns an error code if one of the endpoints already has streams.
2158 * This does not change any data structures; it only checks and gathers
2159 * information.
2160 */
2161static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
2162                struct usb_device *udev,
2163                struct usb_host_endpoint **eps, unsigned int num_eps,
2164                unsigned int *num_streams, u32 *changed_ep_bitmask)
2165{
2166        unsigned int max_streams;
2167        unsigned int endpoint_flag;
2168        int i;
2169        int ret;
2170
2171        for (i = 0; i < num_eps; i++) {
2172                ret = xhci_check_streams_endpoint(xhci, udev,
2173                                eps[i], udev->slot_id);
2174                if (ret < 0)
2175                        return ret;
2176
2177                max_streams = USB_SS_MAX_STREAMS(
2178                                eps[i]->ss_ep_comp.bmAttributes);
2179                if (max_streams < (*num_streams - 1)) {
2180                        xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
2181                                        eps[i]->desc.bEndpointAddress,
2182                                        max_streams);
2183                        *num_streams = max_streams+1;
2184                }
2185
2186                endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
2187                if (*changed_ep_bitmask & endpoint_flag)
2188                        return -EINVAL;
2189                *changed_ep_bitmask |= endpoint_flag;
2190        }
2191        return 0;
2192}
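
/*
 * Worked example (editor's addition): USB_SS_MAX_STREAMS() decodes the
 * MaxStreams field of the SuperSpeed endpoint companion descriptor as a
 * power of two, so bmAttributes with MaxStreams = 4 means the endpoint
 * supports 2^4 = 16 stream IDs.  With *num_streams = 32 on entry, the
 * check above shrinks the request to 16 + 1 = 17 (stream 0 included).
 */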
2193
2194static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
2195                struct usb_device *udev,
2196                struct usb_host_endpoint **eps, unsigned int num_eps)
2197{
2198        u32 changed_ep_bitmask = 0;
2199        unsigned int slot_id;
2200        unsigned int ep_index;
2201        unsigned int ep_state;
2202        int i;
2203
2204        slot_id = udev->slot_id;
2205        if (!xhci->devs[slot_id])
2206                return 0;
2207
2208        for (i = 0; i < num_eps; i++) {
2209                ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2210                ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
2211                /* Are streams already being freed for the endpoint? */
2212                if (ep_state & EP_GETTING_NO_STREAMS) {
2213                        xhci_warn(xhci, "WARN Can't disable streams for "
2214                                        "endpoint 0x%x, "
2215                                        "streams are being disabled already.\n",
2216                                        eps[i]->desc.bEndpointAddress);
2217                        return 0;
2218                }
2219                /* Are there actually any streams to free? */
2220                if (!(ep_state & EP_HAS_STREAMS) &&
2221                                !(ep_state & EP_GETTING_STREAMS)) {
2222                        xhci_warn(xhci, "WARN Can't disable streams for "
2223                                        "endpoint 0x%x, "
2224                                        "streams are already disabled!\n",
2225                                        eps[i]->desc.bEndpointAddress);
2226                        xhci_warn(xhci, "WARN xhci_free_streams() called "
2227                                        "with non-streams endpoint\n");
2228                        return 0;
2229                }
2230                changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
2231        }
2232        return changed_ep_bitmask;
2233}
2234
2235/*
2236 * USB device drivers use this function (through the HCD interface in the USB
2237 * core) to prepare a set of bulk endpoints to use streams.  Streams are used to
2238 * coordinate mass storage command queueing across multiple endpoints (basically
2239 * a stream ID == a task ID).
2240 *
2241 * Setting up streams involves allocating the same size stream context array
2242 * for each endpoint and issuing a configure endpoint command for all endpoints.
2243 *
2244 * Don't allow the call to succeed if one endpoint only supports one stream
2245 * (which means it doesn't support streams at all).
2246 *
2247 * Drivers may get less stream IDs than they asked for, if the host controller
2248 * hardware or endpoints claim they can't support the number of requested
2249 * stream IDs.
2250 */
2251int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
2252                struct usb_host_endpoint **eps, unsigned int num_eps,
2253                unsigned int num_streams, gfp_t mem_flags)
2254{
2255        int i, ret;
2256        struct xhci_hcd *xhci;
2257        struct xhci_virt_device *vdev;
2258        struct xhci_command *config_cmd;
2259        unsigned int ep_index;
2260        unsigned int num_stream_ctxs;
2261        unsigned long flags;
2262        u32 changed_ep_bitmask = 0;
2263
2264        if (!eps)
2265                return -EINVAL;
2266
2267        /* Add one to the number of streams requested to account for
2268         * stream 0 that is reserved for xHCI usage.
2269         */
2270        num_streams += 1;
2271        xhci = hcd_to_xhci(hcd);
2272        xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
2273                        num_streams);
2274
2275        config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
2276        if (!config_cmd) {
2277                xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
2278                return -ENOMEM;
2279        }
2280
2281        /* Check to make sure all endpoints are not already configured for
2282         * streams.  While we're at it, find the maximum number of streams that
2283         * all the endpoints will support and check for duplicate endpoints.
2284         */
2285        spin_lock_irqsave(&xhci->lock, flags);
2286        ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
2287                        num_eps, &num_streams, &changed_ep_bitmask);
2288        if (ret < 0) {
2289                xhci_free_command(xhci, config_cmd);
2290                spin_unlock_irqrestore(&xhci->lock, flags);
2291                return ret;
2292        }
2293        if (num_streams <= 1) {
2294                xhci_warn(xhci, "WARN: endpoints can't handle "
2295                                "more than one stream.\n");
2296                xhci_free_command(xhci, config_cmd);
2297                spin_unlock_irqrestore(&xhci->lock, flags);
2298                return -EINVAL;
2299        }
2300        vdev = xhci->devs[udev->slot_id];
2301        /* Mark each endpoint as being in transition, so
2302         * xhci_urb_enqueue() will reject all URBs.
2303         */
2304        for (i = 0; i < num_eps; i++) {
2305                ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2306                vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
2307        }
2308        spin_unlock_irqrestore(&xhci->lock, flags);
2309
2310        /* Setup internal data structures and allocate HW data structures for
2311         * streams (but don't install the HW structures in the input context
2312         * until we're sure all memory allocation succeeded).
2313         */
2314        xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
2315        xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
2316                        num_stream_ctxs, num_streams);
2317
2318        for (i = 0; i < num_eps; i++) {
2319                ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2320                vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
2321                                num_stream_ctxs,
2322                                num_streams, mem_flags);
2323                if (!vdev->eps[ep_index].stream_info)
2324                        goto cleanup;
2325                /* Set maxPstreams in endpoint context and update deq ptr to
2326                 * point to stream context array. FIXME
2327                 */
2328        }
2329
2330        /* Set up the input context for a configure endpoint command. */
2331        for (i = 0; i < num_eps; i++) {
2332                struct xhci_ep_ctx *ep_ctx;
2333
2334                ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2335                ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
2336
2337                xhci_endpoint_copy(xhci, config_cmd->in_ctx,
2338                                vdev->out_ctx, ep_index);
2339                xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
2340                                vdev->eps[ep_index].stream_info);
2341        }
2342        /* Tell the HW to drop its old copy of the endpoint context info
2343         * and add the updated copy from the input context.
2344         */
2345        xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
2346                        vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
2347
2348        /* Issue and wait for the configure endpoint command */
2349        ret = xhci_configure_endpoint(xhci, udev, config_cmd,
2350                        false, false);
2351
2352        /* xHC rejected the configure endpoint command for some reason, so we
2353         * leave the old ring intact and free our internal streams data
2354         * structure.
2355         */
2356        if (ret < 0)
2357                goto cleanup;
2358
2359        spin_lock_irqsave(&xhci->lock, flags);
2360        for (i = 0; i < num_eps; i++) {
2361                ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2362                vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
2363                xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
2364                         udev->slot_id, ep_index);
2365                vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
2366        }
2367        xhci_free_command(xhci, config_cmd);
2368        spin_unlock_irqrestore(&xhci->lock, flags);
2369
2370        /* Subtract 1 for stream 0, which drivers can't use */
2371        return num_streams - 1;
2372
2373cleanup:
2374        /* If it didn't work, free the streams! */
2375        for (i = 0; i < num_eps; i++) {
2376                ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2377                xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
2378                vdev->eps[ep_index].stream_info = NULL;
2379                /* FIXME Unset maxPstreams in endpoint context and
2380                 * update deq ptr to point to the endpoint's normal ring.
2381                 */
2382                vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
2383                vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
2384                xhci_endpoint_zero(xhci, vdev, eps[i]);
2385        }
2386        xhci_free_command(xhci, config_cmd);
2387        return -ENOMEM;
2388}
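
/*
 * Illustrative caller-side sketch (editor's addition): a class driver such
 * as UAS reaches xhci_alloc_streams() through usb_alloc_streams() in the
 * USB core.  The returned count is the number of usable stream IDs, which
 * may be less than requested; the driver is expected to release them again
 * with usb_free_streams() on the same set of endpoints.
 */
static int __maybe_unused example_setup_streams(struct usb_interface *intf,
		struct usb_host_endpoint **eps, unsigned int num_eps)
{
	/* Ask for 16 task IDs; the core may grant fewer. */
	return usb_alloc_streams(intf, eps, num_eps, 16, GFP_NOIO);
}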
2389
2390/* Transition the endpoint from using streams to being a "normal" endpoint
2391 * without streams.
2392 *
2393 * Modify the endpoint context state, submit a configure endpoint command,
2394 * and free all endpoint rings for streams if that completes successfully.
2395 */
2396int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
2397                struct usb_host_endpoint **eps, unsigned int num_eps,
2398                gfp_t mem_flags)
2399{
2400        int i, ret;
2401        struct xhci_hcd *xhci;
2402        struct xhci_virt_device *vdev;
2403        struct xhci_command *command;
2404        unsigned int ep_index;
2405        unsigned long flags;
2406        u32 changed_ep_bitmask;
2407
2408        xhci = hcd_to_xhci(hcd);
2409        vdev = xhci->devs[udev->slot_id];
2410
2411        /* Set up a configure endpoint command to remove the streams rings */
2412        spin_lock_irqsave(&xhci->lock, flags);
2413        changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
2414                        udev, eps, num_eps);
2415        if (changed_ep_bitmask == 0) {
2416                spin_unlock_irqrestore(&xhci->lock, flags);
2417                return -EINVAL;
2418        }
2419
2420        /* Use the xhci_command structure from the first endpoint.  We may have
2421         * allocated too many, but the driver may call xhci_free_streams() for
2422         * each endpoint it grouped into one call to xhci_alloc_streams().
2423         */
2424        ep_index = xhci_get_endpoint_index(&eps[0]->desc);
2425        command = vdev->eps[ep_index].stream_info->free_streams_command;
2426        for (i = 0; i < num_eps; i++) {
2427                struct xhci_ep_ctx *ep_ctx;
2428
2429                ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2430                ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
2431                xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
2432                        EP_GETTING_NO_STREAMS;
2433
2434                xhci_endpoint_copy(xhci, command->in_ctx,
2435                                vdev->out_ctx, ep_index);
2436                xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
2437                                &vdev->eps[ep_index]);
2438        }
2439        xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
2440                        vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
2441        spin_unlock_irqrestore(&xhci->lock, flags);
2442
2443        /* Issue and wait for the configure endpoint command,
2444         * which must succeed.
2445         */
2446        ret = xhci_configure_endpoint(xhci, udev, command,
2447                        false, true);
2448
2449        /* xHC rejected the configure endpoint command for some reason, so we
2450         * leave the streams rings intact.
2451         */
2452        if (ret < 0)
2453                return ret;
2454
2455        spin_lock_irqsave(&xhci->lock, flags);
2456        for (i = 0; i < num_eps; i++) {
2457                ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2458                xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
2459                vdev->eps[ep_index].stream_info = NULL;
2460                /* FIXME Unset maxPstreams in endpoint context and
2461                 * update deq ptr to point to the endpoint's normal ring.
2462                 */
2463                vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
2464                vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
2465        }
2466        spin_unlock_irqrestore(&xhci->lock, flags);
2467
2468        return 0;
2469}
2470
2471/*
2472 * Deletes endpoint resources for endpoints that were active before a Reset
2473 * Device command, or a Disable Slot command.  The Reset Device command leaves
2474 * the control endpoint intact, whereas the Disable Slot command deletes it.
2475 *
2476 * Must be called with xhci->lock held.
2477 */
2478void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
2479        struct xhci_virt_device *virt_dev, bool drop_control_ep)
2480{
2481        int i;
2482        unsigned int num_dropped_eps = 0;
2483        unsigned int drop_flags = 0;
2484
2485        for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
2486                if (virt_dev->eps[i].ring) {
2487                        drop_flags |= 1 << i;
2488                        num_dropped_eps++;
2489                }
2490        }
2491        xhci->num_active_eps -= num_dropped_eps;
2492        if (num_dropped_eps)
2493                xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
2494                                "%u now active.\n",
2495                                num_dropped_eps, drop_flags,
2496                                xhci->num_active_eps);
2497}
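
/*
 * Worked example (editor's addition): a device with rings on endpoint
 * contexts 2 and 5 (and drop_control_ep == false) yields drop_flags =
 * (1 << 2) | (1 << 5) = 0x24 and num_dropped_eps = 2, so num_active_eps
 * shrinks by two.  Note these flags are diagnostic only; unlike the input
 * control context flags, they are not shifted by one for the slot context.
 */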
2498
2499/*
2500 * This submits a Reset Device Command, which will set the device state to 0,
2501 * set the device address to 0, and disable all the endpoints except the default
2502 * control endpoint.  The USB core should come back and call
2503 * xhci_address_device(), and then re-set up the configuration.  If this is
2504 * called because of a usb_reset_and_verify_device(), then the old alternate
2505 * settings will be re-installed through the normal bandwidth allocation
2506 * functions.
2507 *
2508 * Wait for the Reset Device command to finish.  Remove all structures
2509 * associated with the endpoints that were disabled.  Clear the input device
2510 * structure?  Cache the rings?  Reset the control endpoint 0 max packet size?
2511 *
2512 * If the virt_dev to be reset does not exist or does not match the udev,
2513 * it means the device is lost, possibly due to the xHC restore error and
2514 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
2515 * re-allocate the device.
2516 */
2517int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
2518{
2519        int ret, i;
2520        unsigned long flags;
2521        struct xhci_hcd *xhci;
2522        unsigned int slot_id;
2523        struct xhci_virt_device *virt_dev;
2524        struct xhci_command *reset_device_cmd;
2525        int timeleft;
2526        int last_freed_endpoint;
2527        struct xhci_slot_ctx *slot_ctx;
2528
2529        ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
2530        if (ret <= 0)
2531                return ret;
2532        xhci = hcd_to_xhci(hcd);
2533        slot_id = udev->slot_id;
2534        virt_dev = xhci->devs[slot_id];
2535        if (!virt_dev) {
2536                xhci_dbg(xhci, "The device to be reset with slot ID %u does "
2537                                "not exist. Re-allocate the device\n", slot_id);
2538                ret = xhci_alloc_dev(hcd, udev);
2539                if (ret == 1)
2540                        return 0;
2541                else
2542                        return -EINVAL;
2543        }
2544
2545        if (virt_dev->udev != udev) {
2546                /* If the virt_dev and the udev do not match, this virt_dev
2547                 * may belong to another udev.
2548                 * Re-allocate the device.
2549                 */
2550                xhci_dbg(xhci, "The device to be reset with slot ID %u does "
2551                                "not match the udev. Re-allocate the device\n",
2552                                slot_id);
2553                ret = xhci_alloc_dev(hcd, udev);
2554                if (ret == 1)
2555                        return 0;
2556                else
2557                        return -EINVAL;
2558        }
2559
2560        /* If device is not setup, there is no point in resetting it */
2561        slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
2562        if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
2563                                                SLOT_STATE_DISABLED)
2564                return 0;
2565
2566        xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
2567        /* Allocate the command structure that holds the struct completion.
2568         * Assume we're in process context, since the normal device reset
2569         * process has to wait for the device anyway.  Storage devices are
2570         * reset as part of error handling, so use GFP_NOIO instead of
2571         * GFP_KERNEL.
2572         */
2573        reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
2574        if (!reset_device_cmd) {
2575                xhci_dbg(xhci, "Couldn't allocate command structure.\n");
2576                return -ENOMEM;
2577        }
2578
2579        /* Attempt to submit the Reset Device command to the command ring */
2580        spin_lock_irqsave(&xhci->lock, flags);
2581        reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
2582
2583        /* The enqueue pointer can be left pointing to the link TRB,
2584         * so we must handle that case.
2585         */
2586        if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
2587                reset_device_cmd->command_trb =
2588                        xhci->cmd_ring->enq_seg->next->trbs;
2589
2590        list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
2591        ret = xhci_queue_reset_device(xhci, slot_id);
2592        if (ret) {
2593                xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
2594                list_del(&reset_device_cmd->cmd_list);
2595                spin_unlock_irqrestore(&xhci->lock, flags);
2596                goto command_cleanup;
2597        }
2598        xhci_ring_cmd_db(xhci);
2599        spin_unlock_irqrestore(&xhci->lock, flags);
2600
2601        /* Wait for the Reset Device command to finish */
2602        timeleft = wait_for_completion_interruptible_timeout(
2603                        reset_device_cmd->completion,
2604                        USB_CTRL_SET_TIMEOUT);
2605        if (timeleft <= 0) {
2606                xhci_warn(xhci, "%s while waiting for reset device command\n",
2607                                timeleft == 0 ? "Timeout" : "Signal");
2608                spin_lock_irqsave(&xhci->lock, flags);
2609                /* The timeout might have raced with the event ring handler, so
2610                 * only delete from the list if the item isn't poisoned.
2611                 */
2612                if (reset_device_cmd->cmd_list.next != LIST_POISON1)
2613                        list_del(&reset_device_cmd->cmd_list);
2614                spin_unlock_irqrestore(&xhci->lock, flags);
2615                ret = -ETIME;
2616                goto command_cleanup;
2617        }
2618
2619        /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
2620         * unless we tried to reset a slot ID that wasn't enabled,
2621         * or the device wasn't in the addressed or configured state.
2622         */
2623        ret = reset_device_cmd->status;
2624        switch (ret) {
2625        case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
2626        case COMP_CTX_STATE: /* 0.96 completion code for same thing */
2627                xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
2628                                slot_id,
2629                                xhci_get_slot_state(xhci, virt_dev->out_ctx));
2630                xhci_info(xhci, "Not freeing device rings.\n");
2631                /* Don't treat this as an error.  May change my mind later. */
2632                ret = 0;
2633                goto command_cleanup;
2634        case COMP_SUCCESS:
2635                xhci_dbg(xhci, "Successful reset device command.\n");
2636                break;
2637        default:
2638                if (xhci_is_vendor_info_code(xhci, ret))
2639                        break;
2640                xhci_warn(xhci, "Unknown completion code %u for "
2641                                "reset device command.\n", ret);
2642                ret = -EINVAL;
2643                goto command_cleanup;
2644        }
2645
2646        /* Free up host controller endpoint resources */
2647        if (xhci->quirks & XHCI_EP_LIMIT_QUIRK) {
2648                spin_lock_irqsave(&xhci->lock, flags);
2649                /* Don't delete the default control endpoint resources */
2650                xhci_free_device_endpoint_resources(xhci, virt_dev, false);
2651                spin_unlock_irqrestore(&xhci->lock, flags);
2652        }
2653
2654        /* Everything but endpoint 0 is disabled, so free or cache the rings. */
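            /* Indices 1..30 cover every endpoint except ep0 (index 0); see
             * xhci_get_endpoint_index() for the mapping.
             */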
2655        last_freed_endpoint = 1;
2656        for (i = 1; i < 31; ++i) {
2657                struct xhci_virt_ep *ep = &virt_dev->eps[i];
2658
2659                if (ep->ep_state & EP_HAS_STREAMS) {
2660                        xhci_free_stream_info(xhci, ep->stream_info);
2661                        ep->stream_info = NULL;
2662                        ep->ep_state &= ~EP_HAS_STREAMS;
2663                }
2664
2665                if (ep->ring) {
2666                        xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2667                        last_freed_endpoint = i;
2668                }
2669        }
2670        xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
2671        xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
2672        ret = 0;
2673
2674command_cleanup:
2675        xhci_free_command(xhci, reset_device_cmd);
2676        return ret;
2677}
2678
2679/*
2680 * At this point, the struct usb_device is about to go away, the device has
2681 * disconnected, and all traffic has been stopped and the endpoints have been
2682 * disabled.  Free any HC data structures associated with that device.
2683 */
2684void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
2685{
2686        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2687        struct xhci_virt_device *virt_dev;
2688        unsigned long flags;
2689        u32 state;
2690        int i, ret;
2691
2692        ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2693        /* If the host is halted due to driver unload, we still need to free the
2694         * device.
2695         */
2696        if (ret <= 0 && ret != -ENODEV)
2697                return;
2698
2699        virt_dev = xhci->devs[udev->slot_id];
2700
2701        /* Stop any wayward timer functions (which may grab the lock) */
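            /* eps[] has 31 entries: ep0 plus up to 15 IN and 15 OUT endpoints. */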
2702        for (i = 0; i < 31; ++i) {
2703                virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
2704                del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
2705        }
2706
2707        spin_lock_irqsave(&xhci->lock, flags);
2708        /* Don't disable the slot if the host controller is dead. */
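            /* A status register read of all ones means the controller was removed. */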
2709        state = xhci_readl(xhci, &xhci->op_regs->status);
2710        if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
2711                        (xhci->xhc_state & XHCI_STATE_HALTED)) {
2712                xhci_free_virt_device(xhci, udev->slot_id);
2713                spin_unlock_irqrestore(&xhci->lock, flags);
2714                return;
2715        }
2716
2717        if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
2718                spin_unlock_irqrestore(&xhci->lock, flags);
2719                xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
2720                return;
2721        }
2722        xhci_ring_cmd_db(xhci);
2723        spin_unlock_irqrestore(&xhci->lock, flags);
2724        /*
2725         * Event command completion handler will free any data structures
2726         * associated with the slot.  XXX Can the free operation sleep?
2727         */
2728}
2729
2730/*
2731 * Checks if we have enough host controller resources for the default control
2732 * endpoint.
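     * Only relevant on hosts with XHCI_EP_LIMIT_QUIRK, which can keep only a
     * limited number of endpoint contexts active at once.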
2733 *
2734 * Must be called with xhci->lock held.
2735 */
2736static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
2737{
2738        if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
2739                xhci_dbg(xhci, "Not enough ep ctxs: "
2740                                "%u active, need to add 1, limit is %u.\n",
2741                                xhci->num_active_eps, xhci->limit_active_eps);
2742                return -ENOMEM;
2743        }
2744        xhci->num_active_eps += 1;
2745        xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
2746                        xhci->num_active_eps);
2747        return 0;
2748}
2749
2750
2751/*
2752 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
2753 * timed out, or allocating memory failed.  Returns 1 on success.
2754 */
2755int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
2756{
2757        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2758        unsigned long flags;
2759        int timeleft;
2760        int ret;
2761
2762        spin_lock_irqsave(&xhci->lock, flags);
2763        ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
2764        if (ret) {
2765                spin_unlock_irqrestore(&xhci->lock, flags);
2766                xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
2767                return 0;
2768        }
2769        xhci_ring_cmd_db(xhci);
2770        spin_unlock_irqrestore(&xhci->lock, flags);
2771
2772        /* XXX: how much time for xHC slot assignment? */
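            /* Note: USB_CTRL_SET_TIMEOUT is defined in milliseconds, but the
             * timeout argument here is interpreted in jiffies.
             */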
2773        timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
2774                        USB_CTRL_SET_TIMEOUT);
2775        if (timeleft <= 0) {
2776                xhci_warn(xhci, "%s while waiting for a slot\n",
2777                                timeleft == 0 ? "Timeout" : "Signal");
2778                /* FIXME cancel the enable slot request */
2779                return 0;
2780        }
2781
2782        if (!xhci->slot_id) {
2783                xhci_err(xhci, "Error while assigning device slot ID\n");
2784                return 0;
2785        }
2786
2787        if (xhci->quirks & XHCI_EP_LIMIT_QUIRK) {
2788                spin_lock_irqsave(&xhci->lock, flags);
2789                ret = xhci_reserve_host_control_ep_resources(xhci);
2790                if (ret) {
2791                        spin_unlock_irqrestore(&xhci->lock, flags);
2792                        xhci_warn(xhci, "Not enough host resources, "
2793                                        "active endpoint contexts = %u\n",
2794                                        xhci->num_active_eps);
2795                        goto disable_slot;
2796                }
2797                spin_unlock_irqrestore(&xhci->lock, flags);
2798        }
2799        /* Use GFP_NOIO, since this function can be called from
2800         * xhci_discover_or_reset_device(), which may be called as part of
2801         * mass storage driver error handling.
2802         */
2803        if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
2804                xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
2805                goto disable_slot;
2806        }
2807        udev->slot_id = xhci->slot_id;
2808        /* Is this a LS or FS device under a HS hub? */
2809        /* Hub or peripheral? */
2810        return 1;
2811
2812disable_slot:
2813        /* Disable slot, if we can do it without mem alloc */
2814        spin_lock_irqsave(&xhci->lock, flags);
2815        if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
2816                xhci_ring_cmd_db(xhci);
2817        spin_unlock_irqrestore(&xhci->lock, flags);
2818        return 0;
2819}
2820
2821/*
2822 * Issue an Address Device command (which will issue a SetAddress request to
2823 * the device).
2824 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
2825 * we should only issue and wait on one address command at a time.
2826 *
2827 * We add one to the device address issued by the hardware because the USB core
2828 * uses address 1 for the root hubs (even though they're not really devices).
2829 */
2830int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
2831{
2832        unsigned long flags;
2833        int timeleft;
2834        struct xhci_virt_device *virt_dev;
2835        int ret = 0;
2836        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2837        struct xhci_slot_ctx *slot_ctx;
2838        struct xhci_input_control_ctx *ctrl_ctx;
2839        u64 temp_64;
2840
2841        if (!udev->slot_id) {
2842                xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
2843                return -EINVAL;
2844        }
2845
2846        virt_dev = xhci->devs[udev->slot_id];
2847
2848        if (WARN_ON(!virt_dev)) {
2849                /*
2850                 * In plug/unplug torture test with an NEC controller,
2851                 * a NULL-pointer dereference was once observed because
2852                 * virt_dev was NULL.
2852                 * Print useful debug rather than crash if it is observed again!
2853                 */
2854                xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
2855                        udev->slot_id);
2856                return -EINVAL;
2857        }
2858
2859        slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2860        /*
2861         * If this is the first Set Address since device plug-in or
2862         * virt_device reallocation after a resume with an xHCI power loss,
2863         * then set up the slot context.
2864         */
2865        if (!slot_ctx->dev_info)
2866                xhci_setup_addressable_virt_dev(xhci, udev);
2867        /* Otherwise, update the control endpoint ring enqueue pointer. */
2868        else
2869                xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
2870        xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
2871        xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
2872
2873        spin_lock_irqsave(&xhci->lock, flags);
2874        ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
2875                                        udev->slot_id);
2876        if (ret) {
2877                spin_unlock_irqrestore(&xhci->lock, flags);
2878                xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
2879                return ret;
2880        }
2881        xhci_ring_cmd_db(xhci);
2882        spin_unlock_irqrestore(&xhci->lock, flags);
2883
2884        /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
2885        timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
2886                        USB_CTRL_SET_TIMEOUT);
2887        /* FIXME: From section 4.3.4: "Software shall be responsible for timing
2888         * the SetAddress() "recovery interval" required by USB and aborting the
2889         * command on a timeout."
2890         */
2891        if (timeleft <= 0) {
2892                xhci_warn(xhci, "%s while waiting for address device command\n",
2893                                timeleft == 0 ? "Timeout" : "Signal");
2894                /* FIXME cancel the address device command */
2895                return -ETIME;
2896        }
2897
2898        switch (virt_dev->cmd_status) {
2899        case COMP_CTX_STATE:
2900        case COMP_EBADSLT:
2901                xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
2902                                udev->slot_id);
2903                ret = -EINVAL;
2904                break;
2905        case COMP_TX_ERR:
2906                dev_warn(&udev->dev, "Device not responding to set address.\n");
2907                ret = -EPROTO;
2908                break;
2909        case COMP_DEV_ERR:
2910                dev_warn(&udev->dev, "ERROR: Incompatible device for address "
2911                                "device command.\n");
2912                ret = -ENODEV;
2913                break;
2914        case COMP_SUCCESS:
2915                xhci_dbg(xhci, "Successful Address Device command\n");
2916                break;
2917        default:
2918                xhci_err(xhci, "ERROR: unexpected command completion "
2919                                "code 0x%x.\n", virt_dev->cmd_status);
2920                xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
2921                xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
2922                ret = -EINVAL;
2923                break;
2924        }
2925        if (ret)
2926                return ret;
2928        temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
2929        xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
2930        xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
2931                 udev->slot_id,
2932                 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
2933                 (unsigned long long)
2934                 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
2935        xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
2936                        (unsigned long long)virt_dev->out_ctx->dma);
2937        xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
2938        xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
2939        xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
2940        xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
2941        /*
2942         * USB core uses address 1 for the roothubs, so we add one to the
2943         * address given back to us by the HC.
2944         */
2945        slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
2946        /* Use kernel assigned address for devices; store xHC assigned
2947         * address locally. */
2948        virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
2949                + 1;
2950        /* Zero the input context control for later use */
2951        ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
2952        ctrl_ctx->add_flags = 0;
2953        ctrl_ctx->drop_flags = 0;
2954
2955        xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);
2956
2957        return 0;
2958}
2959
2960/* Once a hub descriptor is fetched for a device, we need to update the xHC's
2961 * internal data structures for the device.
2962 */
2963int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
2964                        struct usb_tt *tt, gfp_t mem_flags)
2965{
2966        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2967        struct xhci_virt_device *vdev;
2968        struct xhci_command *config_cmd;
2969        struct xhci_input_control_ctx *ctrl_ctx;
2970        struct xhci_slot_ctx *slot_ctx;
2971        unsigned long flags;
2972        unsigned think_time;
2973        int ret;
2974
2975        /* Ignore root hubs */
2976        if (!hdev->parent)
2977                return 0;
2978
2979        vdev = xhci->devs[hdev->slot_id];
2980        if (!vdev) {
2981                xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
2982                return -EINVAL;
2983        }
2984        config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
2985        if (!config_cmd) {
2986                xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
2987                return -ENOMEM;
2988        }
2989
2990        spin_lock_irqsave(&xhci->lock, flags);
2991        xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
2992        ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
2993        ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2994        slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
2995        slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
2996        if (tt->multi)
2997                slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
2998        if (xhci->hci_version > 0x95) {
2999                xhci_dbg(xhci, "xHCI version %x needs hub "
3000                                "TT think time and number of ports\n",
3001                                (unsigned int) xhci->hci_version);
3002                slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
3003                /* Set TT think time - convert from ns to FS bit times.
3004                 * 0 = 8 FS bit times, 1 = 16 FS bit times,
3005                 * 2 = 24 FS bit times, 3 = 32 FS bit times.
3006                 *
3007                 * xHCI 1.0: this field shall be 0 if the device is not a
3008                 * High-speed hub.
3009                 */
3010                think_time = tt->think_time;
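                    /* tt->think_time is stored in ns by the USB core; one FS
                     * bit time is 1/12 MHz, so 8 bit times ~= 666 ns.
                     */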
3011                if (think_time != 0)
3012                        think_time = (think_time / 666) - 1;
3013                if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
3014                        slot_ctx->tt_info |=
3015                                cpu_to_le32(TT_THINK_TIME(think_time));
3016        } else {
3017                xhci_dbg(xhci, "xHCI version %x doesn't need hub "
3018                                "TT think time or number of ports\n",
3019                                (unsigned int) xhci->hci_version);
3020        }
3021        slot_ctx->dev_state = 0;
3022        spin_unlock_irqrestore(&xhci->lock, flags);
3023
3024        xhci_dbg(xhci, "Set up %s for hub device.\n",
3025                        (xhci->hci_version > 0x95) ?
3026                        "configure endpoint" : "evaluate context");
3027        xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
3028        xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);
3029
3030        /* Issue and wait for the configure endpoint or
3031         * evaluate context command.
3032         */
3033        if (xhci->hci_version > 0x95)
3034                ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
3035                                false, false);
3036        else
3037                ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
3038                                true, false);
3039
3040        xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
3041        xhci_dbg_ctx(xhci, vdev->out_ctx, 0);
3042
3043        xhci_free_command(xhci, config_cmd);
3044        return ret;
3045}
3046
3047int xhci_get_frame(struct usb_hcd *hcd)
3048{
3049        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3050        /* EHCI mods by the periodic size.  Why? */
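            /* MFINDEX counts 125 us microframes; shifting right by 3 yields
             * the 1 ms frame number.
             */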
3051        return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
3052}
3053
3054MODULE_DESCRIPTION(DRIVER_DESC);
3055MODULE_AUTHOR(DRIVER_AUTHOR);
3056MODULE_LICENSE("GPL");
3057
3058static int __init xhci_hcd_init(void)
3059{
3060#ifdef CONFIG_PCI
3061        int retval = 0;
3062
3063        retval = xhci_register_pci();
3064
3065        if (retval < 0) {
3066                printk(KERN_DEBUG "Problem registering PCI driver.\n");
3067                return retval;
3068        }
3069#endif
3070        /*
3071         * Check the compiler generated sizes of structures that must be laid
3072         * out in specific ways for hardware access.
3073         */
3074        BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
3075        BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
3076        BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
3077        /* xhci_device_control has eight fields, and also
3078         * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
3079         */
3080        BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
3081        BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
3082        BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
3083        BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
3084        BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
3085        /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
3086        BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
3088        return 0;
3089}
3090module_init(xhci_hcd_init);
3091
3092static void __exit xhci_hcd_cleanup(void)
3093{
3094#ifdef CONFIG_PCI
3095        xhci_unregister_pci();
3096#endif
3097}
3098module_exit(xhci_hcd_cleanup);
3099