/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

/* Some 0.95 hardware can't handle the chain bit on a link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned int quirks;
module_param(quirks, uint, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");

/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There is a one-microsecond delay between reads, so
 * "usec" is also (approximately) the number of polling iterations.
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = readl(ptr);
		if (result == ~(u32)0)	/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
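
/*
 * Typical use, as seen elsewhere in this file: wait for the controller to
 * report the halted state after the run bit has been cleared, e.g.
 *
 *	xhci_handshake(&xhci->op_regs->status,
 *			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 *
 * A return of -ENODEV (the register reads as all ones) usually means the
 * host controller has dropped off the bus.
 */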

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = readl(&xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = readl(&xhci->op_regs->command);
	cmd &= mask;
	writel(cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
	xhci_quiesce(xhci);

	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (!ret) {
		xhci->xhc_state |= XHCI_STATE_HALTED;
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	} else
		xhci_warn(xhci, "Host not halted after %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
			temp);
	writel(temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);

	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret, i;

	state = readl(&xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
	command = readl(&xhci->op_regs->command);
	command |= CMD_RESET;
	writel(command, &xhci->op_regs->command);

	ret = xhci_handshake(&xhci->op_regs->command,
			CMD_RESET, 0, 10 * 1000 * 1000);
	if (ret)
		return ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Wait for controller to be ready for doorbell rings");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_CNR, 0, 10 * 1000 * 1000);

	/* Clear the per-roothub suspend/resume bookkeeping */
	for (i = 0; i < 2; ++i) {
		xhci->bus_state[i].port_c_suspend = 0;
		xhci->bus_state[i].suspended_ports = 0;
		xhci->bus_state[i].resuming_ports = 0;
	}

	return ret;
}

#ifdef CONFIG_PCI
static int xhci_free_msi(struct xhci_hcd *xhci)
{
	int i;

	if (!xhci->msix_entries)
		return -EINVAL;

	for (i = 0; i < xhci->msix_count; i++)
		if (xhci->msix_entries[i].vector)
			free_irq(xhci->msix_entries[i].vector,
					xhci_to_hcd(xhci));
	return 0;
}

/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"failed to allocate MSI entry");
		return ret;
	}

	ret = request_irq(pdev->irq, xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"disable MSI interrupt");
		pci_disable_msi(pdev);
	}

	return ret;
}

/*
 * Free IRQs
 * free all the IRQs we have requested
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq > 0)
		return;

	ret = xhci_free_msi(xhci);
	if (!ret)
		return;
	if (pdev->irq > 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));

	return;
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors to request:
	 * - HCS_MAX_INTRS: the maximum number of interrupters the host
	 *   supports, from HCSPARAMS1.
	 * - num_online_cpus() + 1: one vector per CPU core, plus one spare
	 *   so an interrupt is always available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
				GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix_exact(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Failed to enable MSI-X");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}
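
/*
 * Illustrative sizing (example values, not from this file): on a 4-core
 * system whose HCSPARAMS1 advertises 8 interrupters, the code above asks
 * for min(4 + 1, 8) = 5 MSI-X vectors.
 */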

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	if (xhci->quirks & XHCI_PLAT)
		return;

	xhci_free_irq(xhci);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	hcd->msix_enabled = 0;
	return;
}

static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
	int i;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
	}
}

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev;
	int ret;

	/* The xhci platform device has set up IRQs through usb_add_hcd. */
	if (xhci->quirks & XHCI_PLAT)
		return 0;

	pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		goto legacy_irq;

	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = 0;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

	if (!ret)
		/* hcd->irq is 0, we have MSI */
		return 0;

	if (!pdev->irq) {
		xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
		return -EINVAL;
	}

 legacy_irq:
	if (!strlen(hcd->irq_descr))
		snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
			 hcd->driver->description, hcd->self.busnum);

	/* fall back to legacy interrupt */
	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);
	if (ret) {
		xhci_err(xhci, "request interrupt %d failed\n",
				pdev->irq);
		return ret;
	}
	hcd->irq = pdev->irq;
	return 0;
}

#else

static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	return 0;
}

static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif

static void compliance_mode_recovery(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	u32 temp;
	int i;

	xhci = (struct xhci_hcd *)arg;

	for (i = 0; i < xhci->num_usb3_ports; i++) {
		temp = readl(xhci->usb3_ports[i]);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Compliance mode detected->port %d",
					i + 1);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Attempting compliance mode recovery");
			hcd = xhci->shared_hcd;

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	/* Re-arm the timer until every USB3 port has been seen in U0 */
	if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around an issue seen with the SN65LVPE502CP USB3.0 re-driver,
 * which causes ports behind it to enter compliance mode.  A recovery timer
 * polls the USB3 port status registers and, when a port is found in
 * compliance mode, kicks the root hub so the USB core issues a warm reset.
 * The timer is started here and re-arms itself until all ports have been
 * observed in U0 (see xhci_all_ports_seen_u0() below).
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	setup_timer(&xhci->comp_mode_recovery_timer,
		    compliance_mode_recovery, (unsigned long)xhci);
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	set_timer_slack(&xhci->comp_mode_recovery_timer,
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Compliance mode recovery timer initialized");
}

/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and therefore need the compliance mode recovery quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620, Z820 and Z1
 */
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
			strstr(dmi_product_name, "Z620") ||
			strstr(dmi_product_name, "Z820") ||
			strstr(dmi_product_name, "Z1 Workstation"))
		return true;

	return false;
}

static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
}

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts, set up a command ring segment, and create the event ring.
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"QUIRK: Not clearing Link TRB chain bits.");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI doesn't need link TRB QUIRK");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

	/* Initializing Compliance Mode Recovery Data If Needed */
	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	return retval;
}

/*-------------------------------------------------------------------------*/

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;
	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB3 roothub");
	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/*
	 * Start the xHCI host controller running only after the USB 2.0
	 * roothub is setup.
	 */
	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

	ret = xhci_try_enable_msi(hcd);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"ERST deq = 64'h%0lx", (long unsigned int) temp_64);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set the interrupt modulation register");
	temp = readl(&xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	writel(temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enable interrupts, cmd = 0x%x.", temp);
	writel(temp, &xhci->op_regs->command);

	temp = readl(&xhci->ir_set->irq_pending);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	if (xhci->quirks & XHCI_NEC_HOST) {
		struct xhci_command *command;

		command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
		if (!command)
			return -ENOMEM;
		xhci_queue_vendor_command(xhci, command, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB2 roothub");
	return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Halt the controller, reset it, disable event ring interrupts, clean up
 * MSI/MSI-X, and free the data structures allocated in xhci_init().
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return;

	mutex_lock(&xhci->mutex);
	spin_lock_irq(&xhci->lock);
	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;

	/* Make sure the xHC is halted for a USB3 roothub
	 * (xhci_stop() could be called as part of failed init).
	 */
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	/* Deleting Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Disabling event ring interrupts");
	temp = readl(&xhci->op_regs->status);
	writel(temp & ~STS_EINT, &xhci->op_regs->status);
	temp = readl(&xhci->ir_set->irq_pending);
	writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
	xhci_mem_cleanup(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_stop completed - status = %x",
			readl(&xhci->op_regs->status));
	mutex_unlock(&xhci->mutex);
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	/* Workaround for spurious wakeups at shutdown with HSW */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_shutdown completed - status = %x",
			readl(&xhci->op_regs->status));

	/* Yet another workaround for spurious wakeups at shutdown with HSW */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = readl(&xhci->op_regs->command);
	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
	xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
	xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	writel(xhci->s3.command, &xhci->op_regs->command);
	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
	writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
	writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				       xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		 xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting command ring address to 0x%llx",
			(long unsigned long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}
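
/*
 * Illustrative composition (the values here are invented for the example):
 * if the dequeue TRB maps to DMA address 0x10000040 and the ring cycle
 * state is 1, the register above is written as
 *
 *	(old & CMD_RING_RSVD_BITS) | (0x10000040 & ~CMD_RING_RSVD_BITS) | 1
 *
 * i.e. the reserved bits are preserved, the 64-byte-aligned pointer
 * supplies the upper bits, and bit 0 carries the consumer cycle state.
 */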

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, so we can't just restore the old pointer to the old dequeue state:
 * instead, clear every TRB, reset the software enqueue/dequeue pointers to
 * the start of the first segment, and let the hardware start over.
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0,
			sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
{
	int port_index;
	__le32 __iomem **port_array;
	unsigned long flags;
	u32 t1, t2;

	spin_lock_irqsave(&xhci->lock, flags);

	/* disable USB3 ports' wake bits */
	port_index = xhci->num_usb3_ports;
	port_array = xhci->usb3_ports;
	while (port_index--) {
		t1 = readl(port_array[port_index]);
		t1 = xhci_port_state_to_neutral(t1);
		t2 = t1 & ~PORT_WAKE_BITS;
		if (t1 != t2)
			writel(t2, port_array[port_index]);
	}

	/* disable USB2 ports' wake bits */
	port_index = xhci->num_usb2_ports;
	port_array = xhci->usb2_ports;
	while (port_index--) {
		t1 = readl(port_array[port_index]);
		t1 = xhci_port_state_to_neutral(t1);
		t2 = t1 & ~PORT_WAKE_BITS;
		if (t1 != t2)
			writel(t2, port_array[port_index]);
	}

	spin_unlock_irqrestore(&xhci->lock, flags);
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
	int rc = 0;
	unsigned int delay = XHCI_MAX_HALT_USEC;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;

	if (!hcd->state)
		return 0;

	if (hcd->state != HC_STATE_SUSPENDED ||
			xhci->shared_hcd->state != HC_STATE_SUSPENDED)
		return -EINVAL;

	/* Clear root port wake on bits if wakeup not allowed. */
	if (!do_wakeup)
		xhci_disable_port_wake_on_bits(xhci);

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);
	clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	del_timer_sync(&xhci->shared_hcd->rh_timer);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped assuming that port suspend has done */

	/* step 2: clear Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command &= ~CMD_RUN;
	writel(command, &xhci->op_regs->command);

	/* Some chips from Fresco Logic need an extraordinary delay */
	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

	if (xhci_handshake(&xhci->op_regs->status,
			STS_HALT, STS_HALT, delay)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = readl(&xhci->op_regs->command);
	command |= CMD_CSS;
	writel(command, &xhci->op_regs->command);
	if (xhci_handshake(&xhci->op_regs->status,
				STS_SAVE, 0, 10 * 1000)) {
		xhci_warn(xhci, "WARN: xHC save state timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	xhci_msix_sync_irqs(xhci);

	return rc;
}
EXPORT_SYMBOL_GPL(xhci_suspend);

/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions from S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0, status;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval = 0;
	bool comp_timer_running = false;

	if (!hcd->state)
		return 0;

	/* Wait a bit if either of the roothubs need to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		hibernated = true;

	if (!hibernated) {
		/* step 1: restore registers */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: set CRS flag to restore the saved state */
		command = readl(&xhci->op_regs->command);
		command |= CMD_CRS;
		writel(command, &xhci->op_regs->command);
		if (xhci_handshake(&xhci->op_regs->status,
				STS_RESTORE, 0, 10 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = readl(&xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {

		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
				!(xhci_all_ports_seen_u0(xhci))) {
			del_timer_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Compliance Mode Recovery Timer deleted!");
		}

		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = readl(&xhci->op_regs->status);
		writel(temp & ~STS_EINT, &xhci->op_regs->status);
		temp = readl(&xhci->ir_set->irq_pending);
		writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			    readl(&xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		comp_timer_running = true;

		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (!retval) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(secondary_hcd);
		}
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command |= CMD_RUN;
	writel(command, &xhci->op_regs->command);
	xhci_handshake(&xhci->op_regs->status, STS_HALT,
			0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints.  Ring the doorbell
	 */
	/* this is done in xhci_setup_device() */

	spin_unlock_irq(&xhci->lock);

 done:
	if (retval == 0) {
		/* Resume root hubs only when we have pending events. */
		status = readl(&xhci->op_regs->status);
		if (status & STS_EINT) {
			usb_hcd_resume_root_hub(hcd);
			usb_hcd_resume_root_hub(xhci->shared_hcd);
		}
	}

	/*
	 * If the system is subject to the quirk, the compliance mode timer
	 * must always be re-initialized after a system resume: the ports may
	 * suffer the compliance mode issue again, regardless of whether they
	 * had reached U0 before the system was suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
		compliance_mode_recovery_timer_init(xhci);

	/* Re-enable port polling. */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);
	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	usb_hcd_poll_rh_status(xhci->shared_hcd);

	return retval;
}
EXPORT_SYMBOL_GPL(xhci_resume);
#endif	/* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/*
 * xhci_get_endpoint_index - Find the index for an endpoint given its
 * descriptor.  Use the return value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
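
/*
 * Worked example: endpoint 1 OUT -> (1 * 2) + 0 - 1 = index 1;
 * endpoint 1 IN -> (1 * 2) + 1 - 1 = index 2; endpoint 0 (control)
 * -> (0 * 2) = index 0.
 */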

/* The reverse operation to xhci_get_endpoint_index.  Calculate the USB
 * endpoint address from the xHCI endpoint index.
 */
unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
	unsigned int number = DIV_ROUND_UP(ep_index, 2);
	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;

	return direction | number;
}
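
/*
 * Worked example (the inverse of the one above): index 1 -> number
 * DIV_ROUND_UP(1, 2) = 1, odd index is OUT, giving address 0x01; index 2 ->
 * number 1, even index is IN, giving address 0x81.
 */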

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0
 * is bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0
 * is bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}
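
/*
 * Worked example: endpoint 0 has index 0, so its add/drop flag is
 * 1 << (0 + 1) = 0x2 (EP0_FLAG); endpoint 1 OUT (index 1) maps to bit 2,
 * i.e. 0x4.  Bit 0 is reserved for the slot context (SLOT_FLAG).
 */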

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}
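
/*
 * Another worked example: added_ctxs = SLOT_FLAG | EP0_FLAG = 0x3 ->
 * fls(0x3) - 1 = 1, so the last valid context is entry 1 (the default
 * control endpoint).
 */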

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd	*xhci;
	struct xhci_virt_device	*virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		pr_debug("xHCI %s called with invalid args\n", func);
		return -EINVAL;
	}
	if (!udev->parent) {
		pr_debug("xHCI %s called for root hub\n", func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
					func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with udev and virt_dev that do not match\n",
					func);
			return -EINVAL;
		}
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_command *command;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max Packet Size for ep 0 changed.");
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in usb_device = %d",
				max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in xHCI HW = %d",
				hw_max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Issuing evaluate context command.");

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		command->in_ctx = xhci->devs[slot_id]->in_ctx;
		ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
		if (!ctrl_ctx) {
			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
					__func__);
			ret = -ENOMEM;
			goto command_cleanup;
		}
		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);

		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, command->in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, command,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
command_cleanup:
		kfree(command->completion);
		kfree(command);
	}
	return ret;
}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_td *buffer;
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv	*urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

	/* One TD per isoc packet, two TDs for a zero-length-terminated bulk
	 * OUT transfer, and one TD for everything else.
	 */
	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
	    urb->transfer_buffer_length > 0 &&
	    urb->transfer_flags & URB_ZERO_PACKET &&
	    !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
		size = 2;
	else
		size = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
				size * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
	if (!buffer) {
		kfree(urb_priv);
		return -ENOMEM;
	}

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = buffer;
		buffer++;
	}

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep is transitioning to not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
exit:
	return ret;
dying:
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	ret = -ESHUTDOWN;
free_priv:
	xhci_urb_free_priv(urb_priv);
	urb->hcpriv = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	stream_id = urb->stream_id;
	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed"
 * from the ring.  Since the ring is a contiguous structure, they can't be
 * physically removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to
 * complete.  It also needs to account for multiple cancellations happening
 * at the same time for the same URB.
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv	*urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_command *command;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = readl(&xhci->op_regs->status);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"HW died, freeing TD.");
		urb_priv = urb->hcpriv;
		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
			td = urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
		xhci_urb_free_priv(urb_priv);
		return ret;
	}
	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Ep 0x%x: URB %p to be canceled on "
				"non-responsive xHCI host.",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists.  We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */
		goto done;
	}

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	urb_priv = urb->hcpriv;
	i = urb_priv->td_cnt;
	if (i < urb_priv->length)
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Cancel URB %p, dev %s, ep 0x%x, "
				"starting at offset 0x%llx",
				urb, urb->dev->devpath,
				urb->ep->desc.bEndpointAddress,
				(unsigned long long) xhci_trb_virt_to_dma(
					urb_priv->td[i]->start_seg,
					urb_priv->td[i]->first_trb));

	for (; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
		if (!command) {
			ret = -ENOMEM;
			goto done;
		}
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
					 ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
	     cpu_to_le32(EP_STATE_DISABLED)) ||
	    le32_to_cpu(ctrl_ctx->drop_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		/* Do not warn when called after a usb_device_reset */
		if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
			xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
					__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags);
	return 0;
}

/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx;
	unsigned int ep_index;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	u32 new_add_flags, new_drop_flags;
	struct xhci_virt_device *virt_dev;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	virt_dev = xhci->devs[udev->slot_id];
	in_ctx = virt_dev->in_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	/* If this endpoint is already in use, and the upper layers are trying
	 * to add it again without dropping it, reject the addition.
	 */
	if (virt_dev->eps[ep_index].ring &&
			!(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
		xhci_warn(xhci, "Trying to add endpoint 0x%x without dropping it.\n",
				(unsigned int) ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	/* Adding an endpoint never clears a drop flag set earlier in this
	 * bandwidth configuration; report the drop flags unchanged.
	 */
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags);
	return 0;
}

static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return;
	}

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
	for (i = 1; i < 31; ++i) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}

static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;

	switch (*cmd_status) {
	case COMP_CMD_ABORT:
	case COMP_CMD_STOP:
		xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
		ret = -ETIME;
		break;
	case COMP_ENOMEM:
		dev_warn(&udev->dev,
			 "Not enough host controller resources for new device state.\n");
		ret = -ENOMEM;
		/* FIXME: can we allocate more resources for the HC? */
		break;
	case COMP_BW_ERR:
	case COMP_2ND_BW_ERR:
		dev_warn(&udev->dev,
			 "Not enough bandwidth for new device state.\n");
		ret = -ENOSPC;
		/* FIXME: can we go back to the old state? */
		break;
	case COMP_TRB_ERR:
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, add flag = 1, and endpoint is not disabled.\n");
		ret = -EINVAL;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev,
			 "ERROR: Incompatible device for endpoint configure command.\n");
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Successful Endpoint Configure command");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
				*cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;
	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

	switch (*cmd_status) {
	case COMP_CMD_ABORT:
	case COMP_CMD_STOP:
		xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
		ret = -ETIME;
		break;
	case COMP_EINVAL:
		dev_warn(&udev->dev,
			 "WARN: xHCI driver setup invalid evaluate context command.\n");
		ret = -EINVAL;
		break;
	case COMP_EBADSLT:
		dev_warn(&udev->dev,
			"WARN: slot not enabled for evaluate context command.\n");
		ret = -EINVAL;
		break;
	case COMP_CTX_STATE:
		dev_warn(&udev->dev,
			"WARN: invalid context state for evaluate context command.\n");
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
		ret = -EINVAL;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev,
			"ERROR: Incompatible device for evaluate context command.\n");
		ret = -ENODEV;
		break;
	case COMP_MEL_ERR:
		/* Max Exit Latency too large error */
		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Successful evaluate context command");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
				*cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 valid_add_flags;
	u32 valid_drop_flags;

	/* Ignore the slot flag (bit 0), and the default control endpoint flag
	 * (bit 1).  The default control endpoint is added during the Address
	 * Device command and is never removed until the slot is disabled.
	 */
	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	/* Use hweight32 to count the number of ones in the add flags, or
	 * number of endpoints added.  Don't count endpoints that are changed
	 * (both added and dropped).
	 */
	return hweight32(valid_add_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}

static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 valid_add_flags;
	u32 valid_drop_flags;

	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	return hweight32(valid_drop_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}
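
/*
 * Worked example for the two helpers above (invented flag values): with
 * add_flags = 0x1c (context entries 2, 3 and 4) and drop_flags = 0x0c
 * (entries 2 and 3), the shifted masks are 0x7 and 0x3.  New endpoints =
 * hweight32(0x7) - hweight32(0x7 & 0x3) = 3 - 2 = 1; dropped endpoints =
 * hweight32(0x3) - hweight32(0x7 & 0x3) = 2 - 2 = 0.  Entries 2 and 3 are
 * "changed" (dropped and re-added), so only entry 4 consumes a new slot.
 */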

/*
 * We need to reserve the new number of endpoints before the configure endpoint
 * command completes.  We can't subtract the dropped endpoints from the number
 * of active endpoints until the command completes because we can oversubscribe
 * the host in this case:
 *
 *  - the first configure endpoint command drops more endpoints than it adds,
 *  - a second configure endpoint command that adds more endpoints is queued,
 *  - the first configure endpoint command fails, so the config is unchanged,
 *  - the second command may succeed, even though there isn't enough resources.
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 added_eps;

	added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Not enough ep ctxs: %u active, need to add %u, limit is %u.",
				xhci->num_active_eps, added_eps,
				xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += added_eps;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Adding %u ep ctxs, %u now active.", added_eps,
			xhci->num_active_eps);
	return 0;
}

/*
 * The configure endpoint was failed by the xHC for some other reason, so we
 * need to revert the resources that failed configuration would have used.
 *
 * Must be called with xhci->lock held.
 */
static void xhci_free_host_resources(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 num_failed_eps;

	num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
	xhci->num_active_eps -= num_failed_eps;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Removing %u failed ep ctxs, %u now active.",
			num_failed_eps,
			xhci->num_active_eps);
}

/*
 * Now that the command has completed, clean up the active endpoint count by
 * subtracting out the endpoints that were dropped (but not changed).
 *
 * Must be called with xhci->lock held.
 */
static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 num_dropped_eps;

	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Removing %u dropped ep ctxs, %u now active.",
				num_dropped_eps,
				xhci->num_active_eps);
}

static unsigned int xhci_get_block_size(struct usb_device *udev)
{
	switch (udev->speed) {
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
		return FS_BLOCK;
	case USB_SPEED_HIGH:
		return HS_BLOCK;
	case USB_SPEED_SUPER:
		return SS_BLOCK;
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
	default:
		/* Should never happen */
		return 1;
	}
}

static unsigned int
xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
{
	if (interval_bw->overhead[LS_OVERHEAD_TYPE])
		return LS_OVERHEAD;
	if (interval_bw->overhead[FS_OVERHEAD_TYPE])
		return FS_OVERHEAD;
	return HS_OVERHEAD;
}

/* If we are changing a LS/FS device under a HS hub,
 * make sure (if we are activating a new TT) that the HS bus has enough
 * bandwidth for this new TT.
 */
static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	struct xhci_interval_bw_table *bw_table;
	struct xhci_tt_bw_info *tt_info;

	/* Find the bandwidth table for the root port this TT is attached to. */
	bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
	tt_info = virt_dev->tt_info;
	/* If this TT already had active endpoints, the bandwidth for this TT
	 * has already been added.  Removing all periodic endpoints (and thus
	 * making the TT inactive) will only decrease the bandwidth used.
	 */
	if (old_active_eps)
		return 0;
	if (old_active_eps == 0 && tt_info->active_eps != 0) {
		if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
			return -ENOMEM;
		return 0;
	}
	/* Not sure why we would have no new active endpoints...
	 *
	 * Maybe because of an Evaluate Context change for a hub update or a
	 * control endpoint 0 max packet size change?
	 * FIXME: skip the bandwidth calculation in that case.
	 */
	return 0;
}

static int xhci_check_ss_bw(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev)
{
	unsigned int bw_reserved;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
	if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
		return -ENOMEM;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
	if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
		return -ENOMEM;

	return 0;
}

/*
 * This algorithm is a very conservative estimate of the worst-case scheduling
 * scenario for any one interval.  The hardware dynamically schedules the
 * packets, so we can't tell which microframe could be the limiting factor in
 * the schedule.
 *
 * We walk the interval table starting with the shortest interval.  Packets
 * that could not be evenly scheduled within an interval are carried over
 * into the budget for the next (larger) interval.  For each interval, the
 * worst case is assumed: every carried-over packet may be as large as the
 * largest max packet size seen so far, and may pay the largest per-packet
 * overhead seen so far (LS > FS > HS).
 *
 * After all periodic endpoints are accounted for, the bandwidth left over
 * must still cover the percentage (FS_BW_RESERVED or HS_BW_RESERVED) that
 * the USB 2.0 specification reserves for non-periodic (bulk and control)
 * transfers.
 *
 * Returns 0 if the new configuration fits, or -ENOMEM if it would
 * oversubscribe the bus.
 */
static int xhci_check_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	unsigned int bw_reserved;
	unsigned int max_bandwidth;
	unsigned int bw_used;
	unsigned int block_size;
	struct xhci_interval_bw_table *bw_table;
	unsigned int packet_size = 0;
	unsigned int overhead = 0;
	unsigned int packets_transmitted = 0;
	unsigned int packets_remaining = 0;
	unsigned int i;

	if (virt_dev->udev->speed == USB_SPEED_SUPER)
		return xhci_check_ss_bw(xhci, virt_dev);

	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
		max_bandwidth = HS_BW_LIMIT;
		/* Convert percent of bus BW reserved to blocks reserved */
		bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
	} else {
		max_bandwidth = FS_BW_LIMIT;
		bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
	}

	bw_table = virt_dev->bw_table;
	/* We need to translate the max packet size and max ESIT payloads into
	 * the units the hardware uses.
	 */
	block_size = xhci_get_block_size(virt_dev->udev);

	/* If we are manipulating a LS/FS device under a HS hub, double check
	 * that the HS bus has enough bandwidth if we are activating a new TT.
	 */
	if (virt_dev->tt_info) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Recalculating BW for rootport %u",
				virt_dev->real_port);
		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
			xhci_warn(xhci, "Not enough bandwidth on HS bus for newly activated TT.\n");
			return -ENOMEM;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Recalculating BW for TT slot %u port %u",
				virt_dev->tt_info->slot_id,
				virt_dev->tt_info->ttport);
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Recalculating BW for rootport %u",
				virt_dev->real_port);
	}

	/* Add in how much bandwidth will be used for interval zero, or the
	 * rounded max ESIT payload + number of packets * largest overhead.
	 */
	bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
		bw_table->interval_bw[0].num_packets *
		xhci_get_largest_overhead(&bw_table->interval_bw[0]);

	for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
		unsigned int bw_added;
		unsigned int largest_mps;
		unsigned int interval_overhead;

		/*
		 * How many packets could we transmit in this interval?
		 * If packets didn't fit in the previous interval, we will need
		 * to transmit that many packets twice within this interval.
		 */
		packets_remaining = 2 * packets_remaining +
			bw_table->interval_bw[i].num_packets;

		/* Find the largest max packet size of this or the previous
		 * interval.
		 */
		if (list_empty(&bw_table->interval_bw[i].endpoints))
			largest_mps = 0;
		else {
			struct xhci_virt_ep *virt_ep;
			struct list_head *ep_entry;

			ep_entry = bw_table->interval_bw[i].endpoints.next;
			virt_ep = list_entry(ep_entry,
					struct xhci_virt_ep, bw_endpoint_list);
			/* Convert to blocks, rounding up */
			largest_mps = DIV_ROUND_UP(
					virt_ep->bw_info.max_packet_size,
					block_size);
		}
		if (largest_mps > packet_size)
			packet_size = largest_mps;

		/* Use the larger overhead of this or the previous interval. */
		interval_overhead = xhci_get_largest_overhead(
				&bw_table->interval_bw[i]);
		if (interval_overhead > overhead)
			overhead = interval_overhead;

		/*
		 * How many packets can we evenly distribute across
		 * (1 << (i + 1)) possible scheduling opportunities?
		 */
		packets_transmitted = packets_remaining >> (i + 1);

		/* Add in the bandwidth used by those scheduled packets */
		bw_added = packets_transmitted * (overhead + packet_size);

		/* How many packets do we have remaining to transmit? */
		packets_remaining = packets_remaining % (1 << (i + 1));

		/* Reset the worst-case packet size and overhead when we can:
		 * either everything was scheduled, or the remainder must come
		 * from endpoints with this interval or smaller.
		 */
		if (packets_remaining == 0) {
			packet_size = 0;
			overhead = 0;
		} else if (packets_transmitted > 0) {
			packet_size = largest_mps;
			overhead = interval_overhead;
		}
		/* Otherwise carry over packet_size and overhead from the last
		 * interval that had a remainder.
		 */
		bw_used += bw_added;
		if (bw_used > max_bandwidth) {
			xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
					bw_used, max_bandwidth);
			return -ENOMEM;
		}
	}
	/*
	 * Ok, we know we have some packets left over after even-handedly
	 * scheduling interval 15.  We don't know which microframes they will
	 * fit into, so we over-schedule and say they will be scheduled every
	 * microframe.
	 */
	if (packets_remaining > 0)
		bw_used += overhead + packet_size;

	if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
		unsigned int port_index = virt_dev->real_port - 1;

		/* OK, we're manipulating a HS device attached to a
		 * root port bandwidth domain.  Periodic transfers through a
		 * TT also eat into the root port's budget, so account for
		 * the active TTs as well.
		 */
		bw_used += TT_HS_OVERHEAD *
			xhci->rh_bw[port_index].num_active_tts;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
		"Final bandwidth: %u, Limit: %u, Reserved: %u, Available: %u percent",
		bw_used, max_bandwidth, bw_reserved,
		(max_bandwidth - bw_used - bw_reserved) * 100 /
		max_bandwidth);

	bw_used += bw_reserved;
	if (bw_used > max_bandwidth) {
		xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
				bw_used, max_bandwidth);
		return -ENOMEM;
	}

	bw_table->bw_used = bw_used;
	return 0;
}

static bool xhci_is_async_ep(unsigned int ep_type)
{
	return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
					ep_type != ISOC_IN_EP &&
					ep_type != INT_IN_EP);
}

static bool xhci_is_sync_in_ep(unsigned int ep_type)
{
	return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
}

static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
{
	unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);

	if (ep_bw->ep_interval == 0)
		return SS_OVERHEAD_BURST +
			(ep_bw->mult * ep_bw->num_packets *
					(SS_OVERHEAD + mps));
	return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
				(SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
			1 << ep_bw->ep_interval);
}
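
/*
 * Worked example (invented values, kept symbolic in the overhead/block-size
 * constants): a periodic IN endpoint with mult = 1, num_packets = 2 and
 * ep_interval = 3 consumes
 *
 *	DIV_ROUND_UP(1 * 2 * (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), 1 << 3)
 *
 * bandwidth blocks, where mps = DIV_ROUND_UP(max_packet_size, SS_BLOCK);
 * a non-zero interval spreads the cost across its service opportunities.
 */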
2372
2373void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2374 struct xhci_bw_info *ep_bw,
2375 struct xhci_interval_bw_table *bw_table,
2376 struct usb_device *udev,
2377 struct xhci_virt_ep *virt_ep,
2378 struct xhci_tt_bw_info *tt_info)
2379{
2380 struct xhci_interval_bw *interval_bw;
2381 int normalized_interval;
2382
2383 if (xhci_is_async_ep(ep_bw->type))
2384 return;
2385
2386 if (udev->speed == USB_SPEED_SUPER) {
2387 if (xhci_is_sync_in_ep(ep_bw->type))
2388 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2389 xhci_get_ss_bw_consumed(ep_bw);
2390 else
2391 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2392 xhci_get_ss_bw_consumed(ep_bw);
2393 return;
2394 }
2395
	/* If this endpoint was never added to the interval table, there is
	 * nothing to drop.
	 */
2399 if (list_empty(&virt_ep->bw_endpoint_list))
2400 return;
2401

	/* For HS devices the xHCI interval is already a microframe exponent;
	 * for FS/LS convert it to a frame exponent by subtracting three
	 * (one frame = 2^3 microframes).
	 */
2404 if (udev->speed == USB_SPEED_HIGH)
2405 normalized_interval = ep_bw->ep_interval;
2406 else
2407 normalized_interval = ep_bw->ep_interval - 3;
2408
2409 if (normalized_interval == 0)
2410 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2411 interval_bw = &bw_table->interval_bw[normalized_interval];
2412 interval_bw->num_packets -= ep_bw->num_packets;
2413 switch (udev->speed) {
2414 case USB_SPEED_LOW:
2415 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2416 break;
2417 case USB_SPEED_FULL:
2418 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2419 break;
2420 case USB_SPEED_HIGH:
2421 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2422 break;
2423 case USB_SPEED_SUPER:
2424 case USB_SPEED_UNKNOWN:
2425 case USB_SPEED_WIRELESS:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the interval table.
		 */
2429 return;
2430 }
2431 if (tt_info)
2432 tt_info->active_eps -= 1;
2433 list_del_init(&virt_ep->bw_endpoint_list);
2434}
2435
2436static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2437 struct xhci_bw_info *ep_bw,
2438 struct xhci_interval_bw_table *bw_table,
2439 struct usb_device *udev,
2440 struct xhci_virt_ep *virt_ep,
2441 struct xhci_tt_bw_info *tt_info)
2442{
2443 struct xhci_interval_bw *interval_bw;
2444 struct xhci_virt_ep *smaller_ep;
2445 int normalized_interval;
2446
2447 if (xhci_is_async_ep(ep_bw->type))
2448 return;
2449
2450 if (udev->speed == USB_SPEED_SUPER) {
2451 if (xhci_is_sync_in_ep(ep_bw->type))
2452 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2453 xhci_get_ss_bw_consumed(ep_bw);
2454 else
2455 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2456 xhci_get_ss_bw_consumed(ep_bw);
2457 return;
2458 }
2459
	/* For HS devices the xHCI interval is already a microframe exponent;
	 * for FS/LS convert it to a frame exponent by subtracting three
	 * (one frame = 2^3 microframes).
	 */
2463 if (udev->speed == USB_SPEED_HIGH)
2464 normalized_interval = ep_bw->ep_interval;
2465 else
2466 normalized_interval = ep_bw->ep_interval - 3;
2467
2468 if (normalized_interval == 0)
2469 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2470 interval_bw = &bw_table->interval_bw[normalized_interval];
2471 interval_bw->num_packets += ep_bw->num_packets;
2472 switch (udev->speed) {
2473 case USB_SPEED_LOW:
2474 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2475 break;
2476 case USB_SPEED_FULL:
2477 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2478 break;
2479 case USB_SPEED_HIGH:
2480 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2481 break;
2482 case USB_SPEED_SUPER:
2483 case USB_SPEED_UNKNOWN:
2484 case USB_SPEED_WIRELESS:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the interval table.
		 */
2488 return;
2489 }
2490
2491 if (tt_info)
2492 tt_info->active_eps += 1;

	/* Insert the endpoint into the list, sorted by descending max packet
	 * size.
	 */
2494 list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2495 bw_endpoint_list) {
2496 if (ep_bw->max_packet_size >=
2497 smaller_ep->bw_info.max_packet_size) {
			/* Add the new ep before the smaller endpoint */
2499 list_add_tail(&virt_ep->bw_endpoint_list,
2500 &smaller_ep->bw_endpoint_list);
2501 return;
2502 }
2503 }

	/* No smaller max packet size was found; add the endpoint at the tail */
2505 list_add_tail(&virt_ep->bw_endpoint_list,
2506 &interval_bw->endpoints);
2507}
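
/*
 * Example of the ordering invariant kept by the insertion loop above
 * (illustrative): adding endpoints with max packet sizes 512, 64 and 1024,
 * in that order, yields the per-interval list 1024 -> 512 -> 64, so
 * xhci_check_bw_table() can read the interval's largest max packet size
 * from the head of the list alone.
 */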
2508
2509void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2510 struct xhci_virt_device *virt_dev,
2511 int old_active_eps)
2512{
	struct xhci_root_port_bw_info *rh_bw_info;

	if (!virt_dev->tt_info)
2515 return;
2516
2517 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2518 if (old_active_eps == 0 &&
2519 virt_dev->tt_info->active_eps != 0) {
2520 rh_bw_info->num_active_tts += 1;
2521 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2522 } else if (old_active_eps != 0 &&
2523 virt_dev->tt_info->active_eps == 0) {
2524 rh_bw_info->num_active_tts -= 1;
2525 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2526 }
2527}
2528
2529static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2530 struct xhci_virt_device *virt_dev,
2531 struct xhci_container_ctx *in_ctx)
2532{
2533 struct xhci_bw_info ep_bw_info[31];
2534 int i;
2535 struct xhci_input_control_ctx *ctrl_ctx;
2536 int old_active_eps = 0;
2537
2538 if (virt_dev->tt_info)
2539 old_active_eps = virt_dev->tt_info->active_eps;
2540
2541 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2542 if (!ctrl_ctx) {
2543 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2544 __func__);
2545 return -ENOMEM;
2546 }
2547
2548 for (i = 0; i < 31; i++) {
2549 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2550 continue;

		/* Make a copy of the BW info in case we have to revert it */
2553 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2554 sizeof(ep_bw_info[i]));

		/* Drop the endpoint from the interval table if the endpoint is
		 * being dropped or changed.
		 */
2558 if (EP_IS_DROPPED(ctrl_ctx, i))
2559 xhci_drop_ep_from_interval_table(xhci,
2560 &virt_dev->eps[i].bw_info,
2561 virt_dev->bw_table,
2562 virt_dev->udev,
2563 &virt_dev->eps[i],
2564 virt_dev->tt_info);
2565 }
2566
2567 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2568 for (i = 0; i < 31; i++) {
		/* Add any changed or added endpoints to the interval table */
2570 if (EP_IS_ADDED(ctrl_ctx, i))
2571 xhci_add_ep_to_interval_table(xhci,
2572 &virt_dev->eps[i].bw_info,
2573 virt_dev->bw_table,
2574 virt_dev->udev,
2575 &virt_dev->eps[i],
2576 virt_dev->tt_info);
2577 }
2578
2579 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
		/* Ok, this fits in the bandwidth we have.
		 * Update the number of active TTs.
		 */
2583 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2584 return 0;
2585 }
2586
	/* We don't have enough bandwidth for this; revert the stored info */
2588 for (i = 0; i < 31; i++) {
2589 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2590 continue;

		/* Drop the new copies of any added or changed endpoints from
		 * the interval table.
		 */
2595 if (EP_IS_ADDED(ctrl_ctx, i)) {
2596 xhci_drop_ep_from_interval_table(xhci,
2597 &virt_dev->eps[i].bw_info,
2598 virt_dev->bw_table,
2599 virt_dev->udev,
2600 &virt_dev->eps[i],
2601 virt_dev->tt_info);
2602 }

		/* Revert the endpoint back to its old information */
2604 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2605 sizeof(ep_bw_info[i]));
		/* Add any changed or dropped endpoints back into the table */
2607 if (EP_IS_DROPPED(ctrl_ctx, i))
2608 xhci_add_ep_to_interval_table(xhci,
2609 &virt_dev->eps[i].bw_info,
2610 virt_dev->bw_table,
2611 virt_dev->udev,
2612 &virt_dev->eps[i],
2613 virt_dev->tt_info);
2614 }
2615 return -ENOMEM;
2616}
2617
/* Issue a configure endpoint command or evaluate context command
 * and wait for it to finish.
 */
2622static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2623 struct usb_device *udev,
2624 struct xhci_command *command,
2625 bool ctx_change, bool must_succeed)
2626{
2627 int ret;
2628 unsigned long flags;
2629 struct xhci_input_control_ctx *ctrl_ctx;
2630 struct xhci_virt_device *virt_dev;
2631
2632 if (!command)
2633 return -EINVAL;
2634
2635 spin_lock_irqsave(&xhci->lock, flags);
2636 virt_dev = xhci->devs[udev->slot_id];
2637
2638 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2639 if (!ctrl_ctx) {
2640 spin_unlock_irqrestore(&xhci->lock, flags);
2641 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2642 __func__);
2643 return -ENOMEM;
2644 }
2645
2646 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2647 xhci_reserve_host_resources(xhci, ctrl_ctx)) {
2648 spin_unlock_irqrestore(&xhci->lock, flags);
2649 xhci_warn(xhci, "Not enough host resources, "
2650 "active endpoint contexts = %u\n",
2651 xhci->num_active_eps);
2652 return -ENOMEM;
2653 }
2654 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2655 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2656 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2657 xhci_free_host_resources(xhci, ctrl_ctx);
2658 spin_unlock_irqrestore(&xhci->lock, flags);
2659 xhci_warn(xhci, "Not enough bandwidth\n");
2660 return -ENOMEM;
2661 }
2662
2663 if (!ctx_change)
2664 ret = xhci_queue_configure_endpoint(xhci, command,
2665 command->in_ctx->dma,
2666 udev->slot_id, must_succeed);
2667 else
2668 ret = xhci_queue_evaluate_context(xhci, command,
2669 command->in_ctx->dma,
2670 udev->slot_id, must_succeed);
2671 if (ret < 0) {
2672 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2673 xhci_free_host_resources(xhci, ctrl_ctx);
2674 spin_unlock_irqrestore(&xhci->lock, flags);
2675 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2676 "FIXME allocate a new ring segment");
2677 return -ENOMEM;
2678 }
2679 xhci_ring_cmd_db(xhci);
2680 spin_unlock_irqrestore(&xhci->lock, flags);
2681
	/* Wait for the configure endpoint command to complete */
2683 wait_for_completion(command->completion);
2684
2685 if (!ctx_change)
2686 ret = xhci_configure_endpoint_result(xhci, udev,
2687 &command->status);
2688 else
2689 ret = xhci_evaluate_context_result(xhci, udev,
2690 &command->status);
2691
2692 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2693 spin_lock_irqsave(&xhci->lock, flags);
		/* If the command failed, remove the reserved resources.
		 * Otherwise, clean up the estimate to include dropped eps.
		 */
2697 if (ret)
2698 xhci_free_host_resources(xhci, ctrl_ctx);
2699 else
2700 xhci_finish_resource_reservation(xhci, ctrl_ctx);
2701 spin_unlock_irqrestore(&xhci->lock, flags);
2702 }
2703 return ret;
2704}
2705
2706static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
2707 struct xhci_virt_device *vdev, int i)
2708{
2709 struct xhci_virt_ep *ep = &vdev->eps[i];
2710
2711 if (ep->ep_state & EP_HAS_STREAMS) {
2712 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
2713 xhci_get_endpoint_address(i));
2714 xhci_free_stream_info(xhci, ep->stream_info);
2715 ep->stream_info = NULL;
2716 ep->ep_state &= ~EP_HAS_STREAMS;
2717 }
2718}
2719
2720
/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint().  If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 *
 * Since we are in the middle of changing either configuration or
 * installing a new alt setting, the USB core won't allow URBs to be
 * enqueued for any endpoint on the old config or interface.  Nothing
 * else should be touching the xhci->devs[slot_id] structure, so we
 * don't need to take the xhci->lock for manipulating that.
 */
2730int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2731{
2732 int i;
2733 int ret = 0;
2734 struct xhci_hcd *xhci;
2735 struct xhci_virt_device *virt_dev;
2736 struct xhci_input_control_ctx *ctrl_ctx;
2737 struct xhci_slot_ctx *slot_ctx;
2738 struct xhci_command *command;
2739
2740 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2741 if (ret <= 0)
2742 return ret;
2743 xhci = hcd_to_xhci(hcd);
2744 if (xhci->xhc_state & XHCI_STATE_DYING)
2745 return -ENODEV;
2746
2747 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2748 virt_dev = xhci->devs[udev->slot_id];
2749
2750 command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
2751 if (!command)
2752 return -ENOMEM;
2753
2754 command->in_ctx = virt_dev->in_ctx;
2755
2756
2757 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2758 if (!ctrl_ctx) {
2759 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2760 __func__);
2761 ret = -ENOMEM;
2762 goto command_cleanup;
2763 }
2764 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2765 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2766 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
2767
	/* Don't issue the command if there's no endpoints to update */
2769 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2770 ctrl_ctx->drop_flags == 0) {
2771 ret = 0;
2772 goto command_cleanup;
2773 }
2774
2775 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2776 for (i = 31; i >= 1; i--) {
2777 __le32 le32 = cpu_to_le32(BIT(i));
2778
2779 if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
2780 || (ctrl_ctx->add_flags & le32) || i == 1) {
2781 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2782 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
2783 break;
2784 }
2785 }
2786 xhci_dbg(xhci, "New Input Control Context:\n");
2787 xhci_dbg_ctx(xhci, virt_dev->in_ctx,
2788 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2789
2790 ret = xhci_configure_endpoint(xhci, udev, command,
2791 false, false);
2792 if (ret)
		/* Callee should call reset_bandwidth() */
2794 goto command_cleanup;
2795
2796 xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
2797 xhci_dbg_ctx(xhci, virt_dev->out_ctx,
2798 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2799
	/* Free any rings that were dropped, but not changed */
2801 for (i = 1; i < 31; ++i) {
2802 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2803 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
2804 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2805 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2806 }
2807 }
2808 xhci_zero_in_ctx(xhci, virt_dev);
2809
	/*
	 * Install any rings for completely new endpoints or changed endpoints,
	 * and free or cache any old rings from changed endpoints.
	 */
2813 for (i = 1; i < 31; ++i) {
2814 if (!virt_dev->eps[i].new_ring)
2815 continue;
2816
		/* Only free or cache the old ring if it exists.
		 * It may not if this is the first add of an endpoint.
		 */
2819 if (virt_dev->eps[i].ring) {
2820 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2821 }
2822 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2823 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2824 virt_dev->eps[i].new_ring = NULL;
2825 }
2826command_cleanup:
2827 kfree(command->completion);
2828 kfree(command);
2829
2830 return ret;
2831}
2832
2833void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2834{
2835 struct xhci_hcd *xhci;
2836 struct xhci_virt_device *virt_dev;
2837 int i, ret;
2838
2839 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2840 if (ret <= 0)
2841 return;
2842 xhci = hcd_to_xhci(hcd);
2843
2844 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2845 virt_dev = xhci->devs[udev->slot_id];
2846
2847 for (i = 0; i < 31; ++i) {
2848 if (virt_dev->eps[i].new_ring) {
2849 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
2850 virt_dev->eps[i].new_ring = NULL;
2851 }
2852 }
2853 xhci_zero_in_ctx(xhci, virt_dev);
2854}
2855
2856static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
2857 struct xhci_container_ctx *in_ctx,
2858 struct xhci_container_ctx *out_ctx,
2859 struct xhci_input_control_ctx *ctrl_ctx,
2860 u32 add_flags, u32 drop_flags)
2861{
2862 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
2863 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
2864 xhci_slot_copy(xhci, in_ctx, out_ctx);
2865 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2866
2867 xhci_dbg(xhci, "Input Context:\n");
2868 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
2869}
2870
2871static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
2872 unsigned int slot_id, unsigned int ep_index,
2873 struct xhci_dequeue_state *deq_state)
2874{
2875 struct xhci_input_control_ctx *ctrl_ctx;
2876 struct xhci_container_ctx *in_ctx;
2877 struct xhci_ep_ctx *ep_ctx;
2878 u32 added_ctxs;
2879 dma_addr_t addr;
2880
2881 in_ctx = xhci->devs[slot_id]->in_ctx;
2882 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2883 if (!ctrl_ctx) {
2884 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2885 __func__);
2886 return;
2887 }
2888
2889 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
2890 xhci->devs[slot_id]->out_ctx, ep_index);
2891 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
2892 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
2893 deq_state->new_deq_ptr);
2894 if (addr == 0) {
2895 xhci_warn(xhci, "WARN Cannot submit config ep after "
2896 "reset ep command\n");
2897 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
2898 deq_state->new_deq_seg,
2899 deq_state->new_deq_ptr);
2900 return;
2901 }
2902 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
2903
2904 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
2905 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
2906 xhci->devs[slot_id]->out_ctx, ctrl_ctx,
2907 added_ctxs, added_ctxs);
2908}
2909
2910void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2911 unsigned int ep_index, struct xhci_td *td)
2912{
2913 struct xhci_dequeue_state deq_state;
2914 struct xhci_virt_ep *ep;
2915 struct usb_device *udev = td->urb->dev;
2916
2917 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2918 "Cleaning up stalled endpoint ring");
2919 ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2920
	/* We need to move the HW's dequeue pointer past this TD,
	 * or it will attempt to resend it on the next doorbell ring.
	 */
2923 xhci_find_new_dequeue_state(xhci, udev->slot_id,
2924 ep_index, ep->stopped_stream, td, &deq_state);
2925
2926 if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
2927 return;
2928
	/* Hosts without the reset endpoint quirk take a Set TR Dequeue
	 * Pointer command; hosts with the quirk need a configure endpoint
	 * command issued later instead.
	 */
2932 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
2933 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2934 "Queueing new dequeue state");
2935 xhci_queue_new_dequeue_state(xhci, udev->slot_id,
2936 ep_index, ep->stopped_stream, &deq_state);
2937 } else {
		/* Better hope no one uses the input context between now and
		 * the reset endpoint completion!
		 */
2943 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2944 "Setting up input context for "
2945 "configure endpoint command");
2946 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
2947 ep_index, &deq_state);
2948 }
2949}
2950
/* Deal with stalled endpoints.  The core should have sent the control message
 * to clear the halt condition.  However, we need to make the xHCI hardware
 * reset its sequence number, since a device will expect a sequence number of
 * zero after the halt condition is cleared.
 * Context: in_interrupt
 */
2959void xhci_endpoint_reset(struct usb_hcd *hcd,
2960 struct usb_host_endpoint *ep)
2961{
2962 struct xhci_hcd *xhci;
2963
2964 xhci = hcd_to_xhci(hcd);
2965
	/* The core will have cleared the halt from the device's point of
	 * view; the xHC side (resetting the endpoint's sequence number and
	 * moving the dequeue pointer) is handled as part of the stalled
	 * transfer clean-up, so there is nothing more to do here.
	 */
2976 xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n",
2977 ep->desc.bEndpointAddress);
2978}
2979
2980static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
2981 struct usb_device *udev, struct usb_host_endpoint *ep,
2982 unsigned int slot_id)
2983{
2984 int ret;
2985 unsigned int ep_index;
2986 unsigned int ep_state;
2987
2988 if (!ep)
2989 return -EINVAL;
2990 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
2991 if (ret <= 0)
2992 return -EINVAL;
2993 if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
2994 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
2995 " descriptor for ep 0x%x does not support streams\n",
2996 ep->desc.bEndpointAddress);
2997 return -EINVAL;
2998 }
2999
3000 ep_index = xhci_get_endpoint_index(&ep->desc);
3001 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3002 if (ep_state & EP_HAS_STREAMS ||
3003 ep_state & EP_GETTING_STREAMS) {
3004 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
3005 "already has streams set up.\n",
3006 ep->desc.bEndpointAddress);
3007 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
3008 "dynamic stream context array reallocation.\n");
3009 return -EINVAL;
3010 }
3011 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
3012 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
3013 "endpoint 0x%x; URBs are pending.\n",
3014 ep->desc.bEndpointAddress);
3015 return -EINVAL;
3016 }
3017 return 0;
3018}
3019
3020static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
3021 unsigned int *num_streams, unsigned int *num_stream_ctxs)
3022{
3023 unsigned int max_streams;
3024
	/* The stream context array size must be a power of two */
3026 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
3027
	/*
	 * The stream context array cannot be bigger than the maximum primary
	 * stream array size the controller advertises (xHCI 5.3.3.2); clamp
	 * both the entry count and the usable stream IDs if it is.
	 */
3033 max_streams = HCC_MAX_PSA(xhci->hcc_params);
3034 if (*num_stream_ctxs > max_streams) {
3035 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
3036 max_streams);
3037 *num_stream_ctxs = max_streams;
3038 *num_streams = max_streams;
3039 }
3040}
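
/*
 * Example (illustrative): a driver asking for 5 stream IDs needs entries
 * for IDs 0 through 5, so *num_streams arrives here as 6 and
 * roundup_pow_of_two(6) = 8 context entries are requested; if
 * HCC_MAX_PSA() reported fewer than 8, both values would be clamped to
 * that hardware maximum instead.
 */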
3041
/* Returns an error code if one of the endpoints already has streams.
 * This does not change any data structures, it only checks and gathers
 * information.
 */
3046static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
3047 struct usb_device *udev,
3048 struct usb_host_endpoint **eps, unsigned int num_eps,
3049 unsigned int *num_streams, u32 *changed_ep_bitmask)
3050{
3051 unsigned int max_streams;
3052 unsigned int endpoint_flag;
3053 int i;
3054 int ret;
3055
3056 for (i = 0; i < num_eps; i++) {
3057 ret = xhci_check_streams_endpoint(xhci, udev,
3058 eps[i], udev->slot_id);
3059 if (ret < 0)
3060 return ret;
3061
3062 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
3063 if (max_streams < (*num_streams - 1)) {
3064 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3065 eps[i]->desc.bEndpointAddress,
3066 max_streams);
3067 *num_streams = max_streams+1;
3068 }
3069
3070 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
3071 if (*changed_ep_bitmask & endpoint_flag)
3072 return -EINVAL;
3073 *changed_ep_bitmask |= endpoint_flag;
3074 }
3075 return 0;
3076}
3077
3078static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3079 struct usb_device *udev,
3080 struct usb_host_endpoint **eps, unsigned int num_eps)
3081{
3082 u32 changed_ep_bitmask = 0;
3083 unsigned int slot_id;
3084 unsigned int ep_index;
3085 unsigned int ep_state;
3086 int i;
3087
3088 slot_id = udev->slot_id;
3089 if (!xhci->devs[slot_id])
3090 return 0;
3091
3092 for (i = 0; i < num_eps; i++) {
3093 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3094 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3095
3096 if (ep_state & EP_GETTING_NO_STREAMS) {
3097 xhci_warn(xhci, "WARN Can't disable streams for "
3098 "endpoint 0x%x, "
3099 "streams are being disabled already\n",
3100 eps[i]->desc.bEndpointAddress);
3101 return 0;
3102 }
3103
3104 if (!(ep_state & EP_HAS_STREAMS) &&
3105 !(ep_state & EP_GETTING_STREAMS)) {
3106 xhci_warn(xhci, "WARN Can't disable streams for "
3107 "endpoint 0x%x, "
3108 "streams are already disabled!\n",
3109 eps[i]->desc.bEndpointAddress);
3110 xhci_warn(xhci, "WARN xhci_free_streams() called "
3111 "with non-streams endpoint\n");
3112 return 0;
3113 }
3114 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3115 }
3116 return changed_ep_bitmask;
3117}
3118
/*
 * The USB device drivers use this function (through the HCD interface in USB
 * core) to prepare a set of bulk endpoints to use streams.  Streams are used
 * to coordinate mass storage command queueing across multiple endpoints
 * (basically a stream ID == a task ID).
 *
 * Setting up streams involves allocating the same size stream context array
 * for each endpoint and issuing a configure endpoint command for all
 * endpoints.
 *
 * Don't allow the call to succeed if one endpoint only supports one stream
 * (which means it doesn't support streams at all).
 *
 * Drivers may get less stream IDs than they asked for, if the host controller
 * hardware or endpoints claim they can't support the number of requested
 * stream IDs.
 */
3135int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3136 struct usb_host_endpoint **eps, unsigned int num_eps,
3137 unsigned int num_streams, gfp_t mem_flags)
3138{
3139 int i, ret;
3140 struct xhci_hcd *xhci;
3141 struct xhci_virt_device *vdev;
3142 struct xhci_command *config_cmd;
3143 struct xhci_input_control_ctx *ctrl_ctx;
3144 unsigned int ep_index;
3145 unsigned int num_stream_ctxs;
3146 unsigned long flags;
3147 u32 changed_ep_bitmask = 0;
3148
3149 if (!eps)
3150 return -EINVAL;
3151
	/* Add one to the number of streams requested to account for
	 * stream 0 that is reserved for xHCI usage.
	 */
3155 num_streams += 1;
3156 xhci = hcd_to_xhci(hcd);
3157 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3158 num_streams);
3159
	/* The streams quirk or a MaxPSASize of 0 (HCC_MAX_PSA() < 4) means
	 * streams are unsupported.
	 */
3161 if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
3162 HCC_MAX_PSA(xhci->hcc_params) < 4) {
3163 xhci_dbg(xhci, "xHCI controller does not support streams.\n");
3164 return -ENOSYS;
3165 }
3166
3167 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
3168 if (!config_cmd) {
3169 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
3170 return -ENOMEM;
3171 }
3172 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
3173 if (!ctrl_ctx) {
3174 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3175 __func__);
3176 xhci_free_command(xhci, config_cmd);
3177 return -ENOMEM;
3178 }
3179
	/* Check to make sure all endpoints are not already configured for
	 * streams.  While we're at it, find the maximum number of streams that
	 * all the endpoints will support and check for duplicate endpoints.
	 */
3184 spin_lock_irqsave(&xhci->lock, flags);
3185 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3186 num_eps, &num_streams, &changed_ep_bitmask);
3187 if (ret < 0) {
3188 xhci_free_command(xhci, config_cmd);
3189 spin_unlock_irqrestore(&xhci->lock, flags);
3190 return ret;
3191 }
3192 if (num_streams <= 1) {
3193 xhci_warn(xhci, "WARN: endpoints can't handle "
3194 "more than one stream.\n");
3195 xhci_free_command(xhci, config_cmd);
3196 spin_unlock_irqrestore(&xhci->lock, flags);
3197 return -EINVAL;
3198 }
3199 vdev = xhci->devs[udev->slot_id];

	/* Mark each endpoint as being in transition, so
	 * xhci_urb_enqueue() won't enqueue URBs until we're done.
	 */
3203 for (i = 0; i < num_eps; i++) {
3204 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3205 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3206 }
3207 spin_unlock_irqrestore(&xhci->lock, flags);
3208
	/* Set up internal data structures and allocate HW data structures for
	 * streams (but don't install the HW structures in the input context
	 * until we're sure all memory allocation succeeded).
	 */
3213 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3214 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3215 num_stream_ctxs, num_streams);
3216
3217 for (i = 0; i < num_eps; i++) {
3218 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3219 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3220 num_stream_ctxs,
3221 num_streams, mem_flags);
3222 if (!vdev->eps[ep_index].stream_info)
3223 goto cleanup;
		/* maxPstreams and the stream context array dequeue pointer
		 * are installed in the endpoint contexts below.
		 */
3227 }
3228
	/* Set up the endpoint context for each endpoint */
3230 for (i = 0; i < num_eps; i++) {
3231 struct xhci_ep_ctx *ep_ctx;
3232
3233 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3234 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3235
3236 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3237 vdev->out_ctx, ep_index);
3238 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3239 vdev->eps[ep_index].stream_info);
3240 }
3241
	/* Tell the HW to drop its old copy of the endpoint context info
	 * and add the updated copy from the input context.
	 */
3244 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3245 vdev->out_ctx, ctrl_ctx,
3246 changed_ep_bitmask, changed_ep_bitmask);
3247
	/* Issue and wait for the configure endpoint command */
3249 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3250 false, false);
3251
	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the old ring intact and free our internal streams data
	 * structure.
	 */
3256 if (ret < 0)
3257 goto cleanup;
3258
3259 spin_lock_irqsave(&xhci->lock, flags);
3260 for (i = 0; i < num_eps; i++) {
3261 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3262 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3263 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3264 udev->slot_id, ep_index);
3265 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3266 }
3267 xhci_free_command(xhci, config_cmd);
3268 spin_unlock_irqrestore(&xhci->lock, flags);
3269
	/* Subtract 1 for stream 0, which drivers can't use */
3271 return num_streams - 1;
3272
3273cleanup:
	/* If it didn't work, free the streams! */
3275 for (i = 0; i < num_eps; i++) {
3276 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3277 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3278 vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal endpoint ring.
		 */
3282 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3283 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3284 xhci_endpoint_zero(xhci, vdev, eps[i]);
3285 }
3286 xhci_free_command(xhci, config_cmd);
3287 return -ENOMEM;
3288}
3289
/* Transition the endpoint from using streams to being a "normal" endpoint
 * without streams.
 *
 * Modify the endpoint context state, submit a configure endpoint command,
 * and free all endpoint rings for streams if that completes successfully.
 */
3296int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3297 struct usb_host_endpoint **eps, unsigned int num_eps,
3298 gfp_t mem_flags)
3299{
3300 int i, ret;
3301 struct xhci_hcd *xhci;
3302 struct xhci_virt_device *vdev;
3303 struct xhci_command *command;
3304 struct xhci_input_control_ctx *ctrl_ctx;
3305 unsigned int ep_index;
3306 unsigned long flags;
3307 u32 changed_ep_bitmask;
3308
3309 xhci = hcd_to_xhci(hcd);
3310 vdev = xhci->devs[udev->slot_id];
3311
	/* Set up a configure endpoint command to remove the streams rings */
3313 spin_lock_irqsave(&xhci->lock, flags);
3314 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3315 udev, eps, num_eps);
3316 if (changed_ep_bitmask == 0) {
3317 spin_unlock_irqrestore(&xhci->lock, flags);
3318 return -EINVAL;
3319 }
3320
	/* Use the xhci_command structure from the first endpoint.  We may have
	 * allocated too many, but the driver may call xhci_free_streams() for
	 * each endpoint it grouped into one call to xhci_alloc_streams().
	 */
3325 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3326 command = vdev->eps[ep_index].stream_info->free_streams_command;
3327 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3328 if (!ctrl_ctx) {
3329 spin_unlock_irqrestore(&xhci->lock, flags);
3330 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3331 __func__);
3332 return -EINVAL;
3333 }
3334
3335 for (i = 0; i < num_eps; i++) {
3336 struct xhci_ep_ctx *ep_ctx;
3337
3338 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3339 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3340 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3341 EP_GETTING_NO_STREAMS;
3342
3343 xhci_endpoint_copy(xhci, command->in_ctx,
3344 vdev->out_ctx, ep_index);
3345 xhci_setup_no_streams_ep_input_ctx(ep_ctx,
3346 &vdev->eps[ep_index]);
3347 }
3348 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3349 vdev->out_ctx, ctrl_ctx,
3350 changed_ep_bitmask, changed_ep_bitmask);
3351 spin_unlock_irqrestore(&xhci->lock, flags);
3352
	/* Issue and wait for the configure endpoint command,
	 * which must succeed.
	 */
3356 ret = xhci_configure_endpoint(xhci, udev, command,
3357 false, true);
3358
	/* The xHC rejected the configure endpoint command; leave the streams
	 * rings intact and return the error.
	 */
3362 if (ret < 0)
3363 return ret;
3364
3365 spin_lock_irqsave(&xhci->lock, flags);
3366 for (i = 0; i < num_eps; i++) {
3367 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3368 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3369 vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal endpoint ring.
		 */
3373 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3374 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3375 }
3376 spin_unlock_irqrestore(&xhci->lock, flags);
3377
3378 return 0;
3379}
3380
/*
 * Deletes endpoint resources for endpoints that were active before a Reset
 * Device command, or a Disable Slot command.  The Reset Device command leaves
 * the control endpoint intact, whereas the Disable Slot command deletes it.
 *
 * Must be called with xhci->lock held.
 */
3388void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3389 struct xhci_virt_device *virt_dev, bool drop_control_ep)
3390{
3391 int i;
3392 unsigned int num_dropped_eps = 0;
3393 unsigned int drop_flags = 0;
3394
3395 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3396 if (virt_dev->eps[i].ring) {
3397 drop_flags |= 1 << i;
3398 num_dropped_eps++;
3399 }
3400 }
3401 xhci->num_active_eps -= num_dropped_eps;
3402 if (num_dropped_eps)
3403 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3404 "Dropped %u ep ctxs, flags = 0x%x, "
3405 "%u now active.",
3406 num_dropped_eps, drop_flags,
3407 xhci->num_active_eps);
3408}
3409
/*
 * This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the
 * default control endpoint.  The USB core should come back and call
 * xhci_address_device(), and then re-set up the configuration.  If this is
 * called because of a usb_reset_and_verify_device(), then the old alternate
 * settings will be re-installed through the normal bandwidth allocation
 * functions.
 *
 * Wait for the Reset Device command to finish.  Remove all structures
 * associated with the endpoints that were disabled.
 *
 * If the virt_dev to be reset does not exist or does not match the udev,
 * it means the device is lost, possibly because of an xHC restore error.
 * Re-allocate the device in that case instead of resetting it.
 */
3428int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
3429{
3430 int ret, i;
3431 unsigned long flags;
3432 struct xhci_hcd *xhci;
3433 unsigned int slot_id;
3434 struct xhci_virt_device *virt_dev;
3435 struct xhci_command *reset_device_cmd;
3436 int last_freed_endpoint;
3437 struct xhci_slot_ctx *slot_ctx;
3438 int old_active_eps = 0;
3439
3440 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3441 if (ret <= 0)
3442 return ret;
3443 xhci = hcd_to_xhci(hcd);
3444 slot_id = udev->slot_id;
3445 virt_dev = xhci->devs[slot_id];
3446 if (!virt_dev) {
3447 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3448 "not exist. Re-allocate the device\n", slot_id);
3449 ret = xhci_alloc_dev(hcd, udev);
3450 if (ret == 1)
3451 return 0;
3452 else
3453 return -EINVAL;
3454 }
3455
3456 if (virt_dev->tt_info)
3457 old_active_eps = virt_dev->tt_info->active_eps;
3458
3459 if (virt_dev->udev != udev) {
		/* If the virt_dev and the udev do not match, this virt_dev may
		 * belong to another udev.  Re-allocate the device.
		 */
3464 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3465 "not match the udev. Re-allocate the device\n",
3466 slot_id);
3467 ret = xhci_alloc_dev(hcd, udev);
3468 if (ret == 1)
3469 return 0;
3470 else
3471 return -EINVAL;
3472 }
3473
	/* If the device is not set up, there is nothing to reset */
3475 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3476 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3477 SLOT_STATE_DISABLED)
3478 return 0;
3479
3480 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3481
	/* Allocate the command structure that holds the struct completion.
	 * Assume we're in process context, since the normal device reset
	 * process has to wait for the device anyway.  Storage devices are
	 * reset as part of error handling, so we're in process context.
	 */
3487 reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
3488 if (!reset_device_cmd) {
3489 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3490 return -ENOMEM;
3491 }
3492
	/* Attempt to submit the Reset Device command to the command ring */
3494 spin_lock_irqsave(&xhci->lock, flags);
3495
3496 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
3497 if (ret) {
3498 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3499 spin_unlock_irqrestore(&xhci->lock, flags);
3500 goto command_cleanup;
3501 }
3502 xhci_ring_cmd_db(xhci);
3503 spin_unlock_irqrestore(&xhci->lock, flags);
3504
	/* Wait for the Reset Device command to finish */
3506 wait_for_completion(reset_device_cmd->completion);
3507
	/* The Reset Device command can fail if the slot ID isn't enabled or
	 * the device isn't in the addressed or configured state; handle those
	 * completion codes gracefully below.
	 */
3512 ret = reset_device_cmd->status;
3513 switch (ret) {
3514 case COMP_CMD_ABORT:
3515 case COMP_CMD_STOP:
3516 xhci_warn(xhci, "Timeout waiting for reset device command\n");
3517 ret = -ETIME;
3518 goto command_cleanup;
3519 case COMP_EBADSLT:
3520 case COMP_CTX_STATE:
3521 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
3522 slot_id,
3523 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3524 xhci_dbg(xhci, "Not freeing device rings.\n");
		/* Don't treat this as an error.  May change my mind later. */
3526 ret = 0;
3527 goto command_cleanup;
3528 case COMP_SUCCESS:
3529 xhci_dbg(xhci, "Successful reset device command.\n");
3530 break;
3531 default:
3532 if (xhci_is_vendor_info_code(xhci, ret))
3533 break;
3534 xhci_warn(xhci, "Unknown completion code %u for "
3535 "reset device command.\n", ret);
3536 ret = -EINVAL;
3537 goto command_cleanup;
3538 }
3539
	/* Free up host controller endpoint resources */
3541 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3542 spin_lock_irqsave(&xhci->lock, flags);
		/* Don't delete the default control endpoint resources */
3544 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3545 spin_unlock_irqrestore(&xhci->lock, flags);
3546 }
3547
	/* Everything but endpoint 0 is disabled, so free or cache the rings */
3549 last_freed_endpoint = 1;
3550 for (i = 1; i < 31; ++i) {
3551 struct xhci_virt_ep *ep = &virt_dev->eps[i];
3552
3553 if (ep->ep_state & EP_HAS_STREAMS) {
3554 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
3555 xhci_get_endpoint_address(i));
3556 xhci_free_stream_info(xhci, ep->stream_info);
3557 ep->stream_info = NULL;
3558 ep->ep_state &= ~EP_HAS_STREAMS;
3559 }
3560
3561 if (ep->ring) {
3562 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
3563 last_freed_endpoint = i;
3564 }
3565 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3566 xhci_drop_ep_from_interval_table(xhci,
3567 &virt_dev->eps[i].bw_info,
3568 virt_dev->bw_table,
3569 udev,
3570 &virt_dev->eps[i],
3571 virt_dev->tt_info);
3572 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3573 }
3574
3575 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3576
3577 xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
3578 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
3579 ret = 0;
3580
3581command_cleanup:
3582 xhci_free_command(xhci, reset_device_cmd);
3583 return ret;
3584}
3585
/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, all traffic has been stopped and the endpoints have been
 * disabled.  Free any HC data structures associated with that device.
 */
3591void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3592{
3593 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3594 struct xhci_virt_device *virt_dev;
3595 unsigned long flags;
3596 u32 state;
3597 int i, ret;
3598 struct xhci_command *command;
3599
3600 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
3601 if (!command)
3602 return;
3603
3604#ifndef CONFIG_USB_DEFAULT_PERSIST
	/*
	 * We called pm_runtime_get_noresume() when the device was attached.
	 * Decrement the counter here to allow the controller to runtime
	 * suspend if no devices remain.
	 */
3610 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3611 pm_runtime_put_noidle(hcd->self.controller);
3612#endif
3613
3614 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	/* If the host is halted due to driver unload, we still need to free
	 * the device.
	 */
3618 if (ret <= 0 && ret != -ENODEV) {
3619 kfree(command);
3620 return;
3621 }
3622
3623 virt_dev = xhci->devs[udev->slot_id];
3624
	/* Stop any wayward timer functions (which may grab the lock) */
3626 for (i = 0; i < 31; ++i) {
3627 virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
3628 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
3629 }
3630
3631 spin_lock_irqsave(&xhci->lock, flags);
	/* Don't disable the slot if the host controller is dead */
3633 state = readl(&xhci->op_regs->status);
3634 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3635 (xhci->xhc_state & XHCI_STATE_HALTED)) {
3636 xhci_free_virt_device(xhci, udev->slot_id);
3637 spin_unlock_irqrestore(&xhci->lock, flags);
3638 kfree(command);
3639 return;
3640 }
3641
3642 if (xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
3643 udev->slot_id)) {
3644 spin_unlock_irqrestore(&xhci->lock, flags);
3645 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3646 return;
3647 }
3648 xhci_ring_cmd_db(xhci);
3649 spin_unlock_irqrestore(&xhci->lock, flags);
3650
	/*
	 * The Disable Slot command completion handler will free the
	 * virt_device and any data structures associated with the slot.
	 */
3655}
3656
/*
 * Checks whether we have enough host controller resources for the default
 * control endpoint.
 *
 * Must be called with xhci->lock held.
 */
3663static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3664{
3665 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
3666 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3667 "Not enough ep ctxs: "
3668 "%u active, need to add 1, limit is %u.",
3669 xhci->num_active_eps, xhci->limit_active_eps);
3670 return -ENOMEM;
3671 }
3672 xhci->num_active_eps += 1;
3673 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3674 "Adding 1 ep ctx, %u now active.",
3675 xhci->num_active_eps);
3676 return 0;
3677}
3678
/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed.  Returns 1 on success.
 */
3684int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3685{
3686 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3687 unsigned long flags;
3688 int ret, slot_id;
3689 struct xhci_command *command;
3690
3691 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
3692 if (!command)
3693 return 0;
3694
	/* xhci->slot_id and xhci->addr_dev are not thread-safe */
3696 mutex_lock(&xhci->mutex);
3697 spin_lock_irqsave(&xhci->lock, flags);
3698 command->completion = &xhci->addr_dev;
3699 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
3700 if (ret) {
3701 spin_unlock_irqrestore(&xhci->lock, flags);
3702 mutex_unlock(&xhci->mutex);
3703 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3704 kfree(command);
3705 return 0;
3706 }
3707 xhci_ring_cmd_db(xhci);
3708 spin_unlock_irqrestore(&xhci->lock, flags);
3709
3710 wait_for_completion(command->completion);
3711 slot_id = xhci->slot_id;
3712 mutex_unlock(&xhci->mutex);
3713
3714 if (!slot_id || command->status != COMP_SUCCESS) {
3715 xhci_err(xhci, "Error while assigning device slot ID\n");
3716 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
3717 HCS_MAX_SLOTS(
3718 readl(&xhci->cap_regs->hcs_params1)));
3719 kfree(command);
3720 return 0;
3721 }
3722
3723 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3724 spin_lock_irqsave(&xhci->lock, flags);
3725 ret = xhci_reserve_host_control_ep_resources(xhci);
3726 if (ret) {
3727 spin_unlock_irqrestore(&xhci->lock, flags);
3728 xhci_warn(xhci, "Not enough host resources, "
3729 "active endpoint contexts = %u\n",
3730 xhci->num_active_eps);
3731 goto disable_slot;
3732 }
3733 spin_unlock_irqrestore(&xhci->lock, flags);
3734 }
3735
	/* Use GFP_NOIO, since this function can be called from
	 * xhci_discover_or_reset_device(), which may be called as part of
	 * mass storage driver error handling.
	 */
3739 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
3740 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
3741 goto disable_slot;
3742 }
3743 udev->slot_id = slot_id;
3744
3745#ifndef CONFIG_USB_DEFAULT_PERSIST
	/*
	 * If resetting upon resume, we can't put the controller into runtime
	 * suspend while there is a device attached.
	 */
3750 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3751 pm_runtime_get_noresume(hcd->self.controller);
3752#endif
3753
3754
3755 kfree(command);
3756
3757
3758 return 1;
3759
3760disable_slot:
	/* Disable the slot, if we can do it without a memory allocation */
3762 spin_lock_irqsave(&xhci->lock, flags);
3763 command->completion = NULL;
3764 command->status = 0;
3765 if (!xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
3766 udev->slot_id))
3767 xhci_ring_cmd_db(xhci);
3768 spin_unlock_irqrestore(&xhci->lock, flags);
3769 return 0;
3770}
3771
/*
 * Issue an Address Device command and optionally send a corresponding
 * SetAddress request to the device.
 */
3776static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
3777 enum xhci_setup_dev setup)
3778{
3779 const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
3780 unsigned long flags;
3781 struct xhci_virt_device *virt_dev;
3782 int ret = 0;
3783 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3784 struct xhci_slot_ctx *slot_ctx;
3785 struct xhci_input_control_ctx *ctrl_ctx;
3786 u64 temp_64;
3787 struct xhci_command *command = NULL;
3788
3789 mutex_lock(&xhci->mutex);
3790
	if (xhci->xhc_state)	/* dying or halted */
3792 goto out;
3793
3794 if (!udev->slot_id) {
3795 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3796 "Bad Slot ID %d", udev->slot_id);
3797 ret = -EINVAL;
3798 goto out;
3799 }
3800
3801 virt_dev = xhci->devs[udev->slot_id];
3802
3803 if (WARN_ON(!virt_dev)) {
		/*
		 * In a plug/unplug torture test with an NEC controller, a
		 * NULL virt_dev was once observed here; print useful debug
		 * info rather than crashing.
		 */
3809 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
3810 udev->slot_id);
3811 ret = -EINVAL;
3812 goto out;
3813 }
3814
3815 if (setup == SETUP_CONTEXT_ONLY) {
3816 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3817 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3818 SLOT_STATE_DEFAULT) {
3819 xhci_dbg(xhci, "Slot already in default state\n");
3820 goto out;
3821 }
3822 }
3823
3824 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
3825 if (!command) {
3826 ret = -ENOMEM;
3827 goto out;
3828 }
3829
3830 command->in_ctx = virt_dev->in_ctx;
3831 command->completion = &xhci->addr_dev;
3832
3833 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
3834 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
3835 if (!ctrl_ctx) {
3836 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3837 __func__);
3838 ret = -EINVAL;
3839 goto out;
3840 }
3841
	/*
	 * If this is the first Set Address since device plug-in or
	 * virt_device reallocation after a resume with an xHCI power loss,
	 * then set up the slot context.
	 */
3846 if (!slot_ctx->dev_info)
3847 xhci_setup_addressable_virt_dev(xhci, udev);
	/* Otherwise, update the control endpoint ring enqueue pointer */
3849 else
3850 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
3851 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
3852 ctrl_ctx->drop_flags = 0;
3853
3854 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3855 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3856 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
3857 le32_to_cpu(slot_ctx->dev_info) >> 27);
3858
3859 spin_lock_irqsave(&xhci->lock, flags);
3860 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
3861 udev->slot_id, setup);
3862 if (ret) {
3863 spin_unlock_irqrestore(&xhci->lock, flags);
3864 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3865 "FIXME: allocate a command ring segment");
3866 goto out;
3867 }
3868 xhci_ring_cmd_db(xhci);
3869 spin_unlock_irqrestore(&xhci->lock, flags);
3870
	/* Wait for the Address Device command to complete */
3872 wait_for_completion(command->completion);
3873

	/* Map the command completion code to an errno */
3878 switch (command->status) {
3879 case COMP_CMD_ABORT:
3880 case COMP_CMD_STOP:
3881 xhci_warn(xhci, "Timeout while waiting for setup device command\n");
3882 ret = -ETIME;
3883 break;
3884 case COMP_CTX_STATE:
3885 case COMP_EBADSLT:
3886 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
3887 act, udev->slot_id);
3888 ret = -EINVAL;
3889 break;
3890 case COMP_TX_ERR:
3891 dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
3892 ret = -EPROTO;
3893 break;
3894 case COMP_DEV_ERR:
3895 dev_warn(&udev->dev,
3896 "ERROR: Incompatible device for setup %s command\n", act);
3897 ret = -ENODEV;
3898 break;
3899 case COMP_SUCCESS:
3900 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3901 "Successful setup %s command", act);
3902 break;
3903 default:
3904 xhci_err(xhci,
3905 "ERROR: unexpected setup %s command completion code 0x%x.\n",
3906 act, command->status);
3907 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3908 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
3909 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
3910 ret = -EINVAL;
3911 break;
3912 }
3913 if (ret)
3914 goto out;
3915 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
3916 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3917 "Op regs DCBAA ptr = %#016llx", temp_64);
3918 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3919 "Slot ID %d dcbaa entry @%p = %#016llx",
3920 udev->slot_id,
3921 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
3922 (unsigned long long)
3923 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
3924 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3925 "Output Context DMA address = %#08llx",
3926 (unsigned long long)virt_dev->out_ctx->dma);
3927 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3928 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3929 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
3930 le32_to_cpu(slot_ctx->dev_info) >> 27);
3931 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3932 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
3933
	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
3937 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3938 trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
3939 le32_to_cpu(slot_ctx->dev_info) >> 27);

	/* Zero the input context control fields for later use */
3941 ctrl_ctx->add_flags = 0;
3942 ctrl_ctx->drop_flags = 0;
3943
3944 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3945 "Internal device address = %d",
3946 le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
3947out:
3948 mutex_unlock(&xhci->mutex);
3949 kfree(command);
3950 return ret;
3951}
3952
3953int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3954{
3955 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
3956}
3957
3958int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
3959{
3960 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
3961}
3962
/*
 * Translate the port1 index into the real index in the HW port status
 * registers: calculate the offset between the port's PORTSC register and the
 * port status base, then divide by the number of per-port registers.  The
 * returned raw port number is 1-based.
 */
3969int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
3970{
3971 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3972 __le32 __iomem *base_addr = &xhci->op_regs->port_status_base;
3973 __le32 __iomem *addr;
3974 int raw_port;
3975
3976 if (hcd->speed != HCD_USB3)
3977 addr = xhci->usb2_ports[port1 - 1];
3978 else
3979 addr = xhci->usb3_ports[port1 - 1];
3980
3981 raw_port = (addr - base_addr)/NUM_PORT_REGS + 1;
3982 return raw_port;
3983}
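
/*
 * Example (illustrative): each port owns NUM_PORT_REGS registers starting
 * at port_status_base, so if a USB2 port's PORTSC pointer sits two
 * register sets past the base, (addr - base_addr) / NUM_PORT_REGS = 2 and
 * the 1-based raw port number returned is 3.
 */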
3984
/*
 * Issue an Evaluate Context command to change the Maximum Exit Latency in the
 * slot context.  If that succeeds, store the new MEL in the xhci_virt_device.
 */
3989static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
3990 struct usb_device *udev, u16 max_exit_latency)
3991{
3992 struct xhci_virt_device *virt_dev;
3993 struct xhci_command *command;
3994 struct xhci_input_control_ctx *ctrl_ctx;
3995 struct xhci_slot_ctx *slot_ctx;
3996 unsigned long flags;
3997 int ret;
3998
3999 spin_lock_irqsave(&xhci->lock, flags);
4000
4001 virt_dev = xhci->devs[udev->slot_id];
4002
	/*
	 * virt_dev might not exist yet if the xHC resumed from hibernate (S4)
	 * and was re-initialized, or it might never have existed.  Also skip
	 * the command if the MEL is already set to the requested value.
	 */
4009 if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
4010 spin_unlock_irqrestore(&xhci->lock, flags);
4011 return 0;
4012 }
4013
	/* Attempt to issue an Evaluate Context command to change the MEL */
4015 command = xhci->lpm_command;
4016 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
4017 if (!ctrl_ctx) {
4018 spin_unlock_irqrestore(&xhci->lock, flags);
4019 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4020 __func__);
4021 return -ENOMEM;
4022 }
4023
4024 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4025 spin_unlock_irqrestore(&xhci->lock, flags);
4026
4027 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4028 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4029 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4030 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4031 slot_ctx->dev_state = 0;
4032
4033 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
4034 "Set up evaluate context for LPM MEL change.");
4035 xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
4036 xhci_dbg_ctx(xhci, command->in_ctx, 0);
4037
	/* Issue and wait for the evaluate context command */
4039 ret = xhci_configure_endpoint(xhci, udev, command,
4040 true, true);
4041 xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id);
4042 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0);
4043
4044 if (!ret) {
4045 spin_lock_irqsave(&xhci->lock, flags);
4046 virt_dev->current_mel = max_exit_latency;
4047 spin_unlock_irqrestore(&xhci->lock, flags);
4048 }
4049 return ret;
4050}
4051
4052#ifdef CONFIG_PM
4053
/* BESL to HIRD Encoding array for USB2 LPM */
4055static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
4056 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
4057
/* Calculate HIRD/BESL for USB2 PORTPMSC */
4059static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
4060 struct usb_device *udev)
4061{
4062 int u2del, besl, besl_host;
4063 int besl_device = 0;
4064 u32 field;
4065
4066 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
4067 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4068
4069 if (field & USB_BESL_SUPPORT) {
4070 for (besl_host = 0; besl_host < 16; besl_host++) {
4071 if (xhci_besl_encoding[besl_host] >= u2del)
4072 break;
4073 }

		/* Use the baseline BESL value as default */
4075 if (field & USB_BESL_BASELINE_VALID)
4076 besl_device = USB_GET_BESL_BASELINE(field);
4077 else if (field & USB_BESL_DEEP_VALID)
4078 besl_device = USB_GET_BESL_DEEP(field);
4079 } else {
4080 if (u2del <= 50)
4081 besl_host = 0;
4082 else
4083 besl_host = (u2del - 51) / 75 + 1;
4084 }
4085
4086 besl = besl_host + besl_device;
4087 if (besl > 15)
4088 besl = 15;
4089
4090 return besl;
4091}
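
/*
 * Worked example (illustrative): with u2del = 400 and no BESL support
 * advertised, the else branch gives besl_host = (400 - 51) / 75 + 1 = 5;
 * with BESL support, the table scan stops at xhci_besl_encoding[4] = 400,
 * so besl_host = 4, to which any baseline or deep BESL reported by the
 * device is added, capped at 15.
 */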
4092
/* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
4094static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
4095{
4096 u32 field;
4097 int l1;
4098 int besld = 0;
4099 int hirdm = 0;
4100
4101 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4102
	/* The xHCI L1 timeout field is in 256us units */
4104 l1 = udev->l1_params.timeout / 256;
4105
	/* The device has a preferred deep BESL value */
4107 if (field & USB_BESL_DEEP_VALID) {
4108 besld = USB_GET_BESL_DEEP(field);
4109 hirdm = 1;
4110 }
4111
4112 return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
4113}
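
/*
 * Worked example (illustrative): a device with a preferred deep BESL of 4
 * and an L1 timeout of 512us gives l1 = 512 / 256 = 2, besld = 4 and
 * hirdm = 1, so the value written to PORTHLPMC is
 * PORT_BESLD(4) | PORT_L1_TIMEOUT(2) | PORT_HIRDM(1).
 */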
4114
4115int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4116 struct usb_device *udev, int enable)
4117{
4118 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4119 __le32 __iomem **port_array;
4120 __le32 __iomem *pm_addr, *hlpm_addr;
4121 u32 pm_val, hlpm_val, field;
4122 unsigned int port_num;
4123 unsigned long flags;
4124 int hird, exit_latency;
4125 int ret;
4126
4127 if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support ||
4128 !udev->lpm_capable)
4129 return -EPERM;

	/* We only support LPM for non-hub devices connected to the root hub */
4131 if (!udev->parent || udev->parent->parent ||
4132 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4133 return -EPERM;
4134
4135 if (udev->usb2_hw_lpm_capable != 1)
4136 return -EPERM;
4137
4138 spin_lock_irqsave(&xhci->lock, flags);
4139
4140 port_array = xhci->usb2_ports;
4141 port_num = udev->portnum - 1;
4142 pm_addr = port_array[port_num] + PORTPMSC;
4143 pm_val = readl(pm_addr);
4144 hlpm_addr = port_array[port_num] + PORTHLPMC;
4145 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4146
4147 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
4148 enable ? "enable" : "disable", port_num + 1);
4149
4150 if (enable) {
		/* Host supports BESL timeout instead of HIRD */
4152 if (udev->usb2_hw_lpm_besl_capable) {
			/* If the device does not advertise a preferred BESL
			 * value, use a default that works with mixed HIRD and
			 * BESL support.
			 */
4157 if ((field & USB_BESL_SUPPORT) &&
4158 (field & USB_BESL_BASELINE_VALID))
4159 hird = USB_GET_BESL_BASELINE(field);
4160 else
4161 hird = udev->l1_params.besl;
4162
4163 exit_latency = xhci_besl_encoding[hird];
4164 spin_unlock_irqrestore(&xhci->lock, flags);
4165
			/* The USB 3.0 code dedicates the xhci->lpm_command
			 * input context to link power management evaluate
			 * context commands.  It is protected by
			 * hcd->bandwidth_mutex and shared by all devices.  We
			 * need to set the max exit latency for USB2 BESL LPM
			 * as well, so take the same mutex around
			 * xhci_change_max_exit_latency().
			 */
4173 mutex_lock(hcd->bandwidth_mutex);
4174 ret = xhci_change_max_exit_latency(xhci, udev,
4175 exit_latency);
4176 mutex_unlock(hcd->bandwidth_mutex);
4177
4178 if (ret < 0)
4179 return ret;
4180 spin_lock_irqsave(&xhci->lock, flags);
4181
4182 hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
4183 writel(hlpm_val, hlpm_addr);
			/* flush write */
4185 readl(hlpm_addr);
4186 } else {
4187 hird = xhci_calculate_hird_besl(xhci, udev);
4188 }
4189
4190 pm_val &= ~PORT_HIRD_MASK;
4191 pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
4192 writel(pm_val, pm_addr);
4193 pm_val = readl(pm_addr);
4194 pm_val |= PORT_HLE;
4195 writel(pm_val, pm_addr);
		/* flush write */
4197 readl(pm_addr);
4198 } else {
4199 pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
4200 writel(pm_val, pm_addr);
		/* flush write */
4202 readl(pm_addr);
4203 if (udev->usb2_hw_lpm_besl_capable) {
4204 spin_unlock_irqrestore(&xhci->lock, flags);
4205 mutex_lock(hcd->bandwidth_mutex);
4206 xhci_change_max_exit_latency(xhci, udev, 0);
4207 mutex_unlock(hcd->bandwidth_mutex);
4208 return 0;
4209 }
4210 }
4211
4212 spin_unlock_irqrestore(&xhci->lock, flags);
4213 return 0;
4214}
4215
/* Check whether a USB2 port supports a given extended capability protocol.
 * Only USB2 ports' extended protocol capability values are cached.
 * Returns 1 if the capability is supported, 0 otherwise.
 */
4220static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
4221 unsigned capability)
4222{
4223 u32 port_offset, port_count;
4224 int i;
4225
4226 for (i = 0; i < xhci->num_ext_caps; i++) {
4227 if (xhci->ext_caps[i] & capability) {
			/* port offsets are 1-based */
4229 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
4230 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
4231 if (port >= port_offset &&
4232 port < port_offset + port_count)
4233 return 1;
4234 }
4235 }
4236 return 0;
4237}
4238
4239int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4240{
4241 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4242 int portnum = udev->portnum - 1;
4243
4244 if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
4245 !udev->lpm_capable)
4246 return 0;
4247
	/* We only support LPM for non-hub devices connected to the root hub */
4249 if (!udev->parent || udev->parent->parent ||
4250 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4251 return 0;
4252
4253 if (xhci->hw_lpm_support == 1 &&
4254 xhci_check_usb2_port_capability(
4255 xhci, portnum, XHCI_HLC)) {
4256 udev->usb2_hw_lpm_capable = 1;
4257 udev->l1_params.timeout = XHCI_L1_TIMEOUT;
4258 udev->l1_params.besl = XHCI_DEFAULT_BESL;
4259 if (xhci_check_usb2_port_capability(xhci, portnum,
4260 XHCI_BLC))
4261 udev->usb2_hw_lpm_besl_capable = 1;
4262 }
4263
4264 return 0;
4265}
4266
/*-------------------- USB 3.0 Link PM functions --------------------------*/

/* Service interval in ns = 2^(bInterval - 1) * 125us * 1000ns/us */
4270static unsigned long long xhci_service_interval_to_ns(
4271 struct usb_endpoint_descriptor *desc)
4272{
4273 return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
4274}
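
/*
 * Example (illustrative): an endpoint with bInterval = 4 has a service
 * interval of 2^(4 - 1) = 8 microframes, i.e. 8 * 125 * 1000 =
 * 1,000,000 ns (one millisecond).
 */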
4275
4276static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
4277 enum usb3_link_state state)
4278{
4279 unsigned long long sel;
4280 unsigned long long pel;
4281 unsigned int max_sel_pel;
4282 char *state_name;
4283
4284 switch (state) {
4285 case USB3_LPM_U1:
		/* Convert SEL and PEL stored in nanoseconds to microseconds */
4287 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
4288 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
4289 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
4290 state_name = "U1";
4291 break;
4292 case USB3_LPM_U2:
4293 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
4294 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
4295 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
4296 state_name = "U2";
4297 break;
4298 default:
4299 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
4300 __func__);
4301 return USB3_LPM_DISABLED;
4302 }
4303
4304 if (sel <= max_sel_pel && pel <= max_sel_pel)
4305 return USB3_LPM_DEVICE_INITIATED;
4306
4307 if (sel > max_sel_pel)
4308 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4309 "due to long SEL %llu ms\n",
4310 state_name, sel);
4311 else
4312 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4313 "due to long PEL %llu ms\n",
4314 state_name, pel);
4315 return USB3_LPM_DISABLED;
4316}
4317
/* The U1 timeout should be the maximum of the following values:
 *  - For control endpoints, the U1 system exit latency (SEL) * 3
 *  - For bulk endpoints, the U1 SEL * 5
 *  - For interrupt endpoints:
 *    - Notification EPs, the U1 SEL * 3
 *    - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
 *  - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
 */
4326static unsigned long long xhci_calculate_intel_u1_timeout(
4327 struct usb_device *udev,
4328 struct usb_endpoint_descriptor *desc)
4329{
4330 unsigned long long timeout_ns;
4331 int ep_type;
4332 int intr_type;
4333
4334 ep_type = usb_endpoint_type(desc);
4335 switch (ep_type) {
4336 case USB_ENDPOINT_XFER_CONTROL:
4337 timeout_ns = udev->u1_params.sel * 3;
4338 break;
4339 case USB_ENDPOINT_XFER_BULK:
4340 timeout_ns = udev->u1_params.sel * 5;
4341 break;
4342 case USB_ENDPOINT_XFER_INT:
4343 intr_type = usb_endpoint_interrupt_type(desc);
4344 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
4345 timeout_ns = udev->u1_params.sel * 3;
4346 break;
4347 }
		/* Otherwise the calculation is the same as for isoc eps:
		 * fall through.
		 */
4349 case USB_ENDPOINT_XFER_ISOC:
4350 timeout_ns = xhci_service_interval_to_ns(desc);
4351 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
4352 if (timeout_ns < udev->u1_params.sel * 2)
4353 timeout_ns = udev->u1_params.sel * 2;
4354 break;
4355 default:
4356 return 0;
4357 }
4358
4359 return timeout_ns;
4360}
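
/*
 * Worked example (illustrative numbers): for an isochronous endpoint with
 * bInterval = 4 the service interval is 1,000,000 ns, so timeout_ns =
 * DIV_ROUND_UP_ULL(1000000 * 105, 100) = 1,050,000 ns; that value is only
 * raised to udev->u1_params.sel * 2 if twice the U1 exit latency is
 * larger.
 */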
4361
/* Returns the hub-encoded U1 timeout value. */
4363static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
4364 struct usb_device *udev,
4365 struct usb_endpoint_descriptor *desc)
4366{
4367 unsigned long long timeout_ns;
4368
4369 if (xhci->quirks & XHCI_INTEL_HOST)
4370 timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
4371 else
4372 timeout_ns = udev->u1_params.sel;
4373
	/* The U1 timeout is encoded in 1us intervals.
	 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
	 */
4377 if (timeout_ns == USB3_LPM_DISABLED)
4378 timeout_ns = 1;
4379 else
4380 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
4381
	/* If the necessary timeout value is bigger than what we can set in the
	 * USB 3.0 hub, we have to disable hub-initiated U1.
	 */
4385 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
4386 return timeout_ns;
4387 dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
4388 "due to long timeout %llu ms\n", timeout_ns);
4389 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
4390}
4391
/* The U2 timeout should be the maximum of:
 *  - 10 ms (to avoid the bandwidth impact on the scheduler)
 *  - largest bInterval of any periodic endpoint (to avoid going into lower
 *    power link states between intervals)
 *  - the U2 Exit Latency of the device
 */
4398static unsigned long long xhci_calculate_intel_u2_timeout(
4399 struct usb_device *udev,
4400 struct usb_endpoint_descriptor *desc)
4401{
4402 unsigned long long timeout_ns;
4403 unsigned long long u2_del_ns;
4404
4405 timeout_ns = 10 * 1000 * 1000;
4406
4407 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4408 (xhci_service_interval_to_ns(desc) > timeout_ns))
4409 timeout_ns = xhci_service_interval_to_ns(desc);
4410
4411 u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
4412 if (u2_del_ns > timeout_ns)
4413 timeout_ns = u2_del_ns;
4414
4415 return timeout_ns;
4416}
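
/*
 * Worked example (illustrative numbers): the timeout starts at the 10 ms
 * floor (10,000,000 ns); a periodic endpoint with a 16 ms service
 * interval raises it to 16,000,000 ns, and a bU2DevExitLat of 2000us
 * (2,000,000 ns) would then leave that larger value in place.
 */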
4417
/* Returns the hub-encoded U2 timeout value. */
4419static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
4420 struct usb_device *udev,
4421 struct usb_endpoint_descriptor *desc)
4422{
4423 unsigned long long timeout_ns;
4424
4425 if (xhci->quirks & XHCI_INTEL_HOST)
4426 timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
4427 else
4428 timeout_ns = udev->u2_params.sel;
4429
	/* The U2 timeout is encoded in 256us intervals */
4431 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
4432
	/* If the necessary timeout value is bigger than what we can set in the
	 * USB 3.0 hub, we have to disable hub-initiated U2.
	 */
4435 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
4436 return timeout_ns;
4437 dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
4438 "due to long timeout %llu ms\n", timeout_ns);
4439 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
4440}
4441
4442static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4443 struct usb_device *udev,
4444 struct usb_endpoint_descriptor *desc,
4445 enum usb3_link_state state,
4446 u16 *timeout)
4447{
4448 if (state == USB3_LPM_U1)
4449 return xhci_calculate_u1_timeout(xhci, udev, desc);
4450 else if (state == USB3_LPM_U2)
4451 return xhci_calculate_u2_timeout(xhci, udev, desc);
4452
4453 return USB3_LPM_DISABLED;
4454}
4455
4456static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4457 struct usb_device *udev,
4458 struct usb_endpoint_descriptor *desc,
4459 enum usb3_link_state state,
4460 u16 *timeout)
4461{
4462 u16 alt_timeout;
4463
4464 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4465 desc, state, timeout);
4466
	/* If we found we can't enable hub-initiated LPM, or
	 * the U1 or U2 exit latency was too high to allow
	 * device-initiated LPM as well, just stop searching.
	 */
4471 if (alt_timeout == USB3_LPM_DISABLED ||
4472 alt_timeout == USB3_LPM_DEVICE_INITIATED) {
4473 *timeout = alt_timeout;
4474 return -E2BIG;
4475 }
4476 if (alt_timeout > *timeout)
4477 *timeout = alt_timeout;
4478 return 0;
4479}
4480
4481static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
4482 struct usb_device *udev,
4483 struct usb_host_interface *alt,
4484 enum usb3_link_state state,
4485 u16 *timeout)
4486{
4487 int j;
4488
4489 for (j = 0; j < alt->desc.bNumEndpoints; j++) {
4490 if (xhci_update_timeout_for_endpoint(xhci, udev,
4491 &alt->endpoint[j].desc, state, timeout))
			return -E2BIG;
4494 }
4495 return 0;
4496}
4497
4498static int xhci_check_intel_tier_policy(struct usb_device *udev,
4499 enum usb3_link_state state)
4500{
4501 struct usb_device *parent;
4502 unsigned int num_hubs;
4503
4504 if (state == USB3_LPM_U2)
4505 return 0;
4506
	/* Don't enable U1 if the device is on a 2nd tier hub or lower */
4508 for (parent = udev->parent, num_hubs = 0; parent->parent;
4509 parent = parent->parent)
4510 num_hubs++;
4511
4512 if (num_hubs < 2)
4513 return 0;
4514
	dev_dbg(&udev->dev, "Disabling U1 link state for device below second-tier hub.\n");
	dev_dbg(&udev->dev, "Plug device into first-tier hub to decrease power consumption.\n");
4519 return -E2BIG;
4520}

static int xhci_check_tier_policy(struct xhci_hcd *xhci,
		struct usb_device *udev,
		enum usb3_link_state state)
{
	if (xhci->quirks & XHCI_INTEL_HOST)
		return xhci_check_intel_tier_policy(udev, state);
	else
		return 0;
}

/* Returns the U1 or U2 timeout that should be enabled.
 * If the tier check or timeout setting functions return with a non-zero exit
 * code, that means the timeout value has been finalized and we shouldn't look
 * at any more endpoints.
 */
static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct usb_host_config *config;
	char *state_name;
	int i;
	u16 timeout = USB3_LPM_DISABLED;

	if (state == USB3_LPM_U1)
		state_name = "U1";
	else if (state == USB3_LPM_U2)
		state_name = "U2";
	else {
		dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
				state);
		return timeout;
	}

	if (xhci_check_tier_policy(xhci, udev, state) < 0)
		return timeout;

	/* Gather some information about the currently installed configuration
	 * and alternate interface settings.
	 */
	if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
			state, &timeout))
		return timeout;

	config = udev->actconfig;
	if (!config)
		return timeout;

	for (i = 0; i < config->desc.bNumInterfaces; i++) {
		struct usb_driver *driver;
		struct usb_interface *intf = config->interface[i];

		if (!intf)
			continue;

		/* Check if any currently bound drivers want hub-initiated LPM
		 * disabled.
		 */
		if (intf->dev.driver) {
			driver = to_usb_driver(intf->dev.driver);
			if (driver && driver->disable_hub_initiated_lpm) {
				dev_dbg(&udev->dev, "Hub-initiated %s disabled "
						"at request of driver %s\n",
						state_name, driver->name);
				return xhci_get_timeout_no_hub_lpm(udev, state);
			}
		}

		/* Not sure how this could happen... */
		if (!intf->cur_altsetting)
			continue;

		if (xhci_update_timeout_for_interface(xhci, udev,
					intf->cur_altsetting,
					state, &timeout))
			return timeout;
	}
	return timeout;
}
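
/*
 * Net effect (editor's note): the returned value is the largest encoded
 * timeout needed by ep0 and by every endpoint of every bound interface's
 * current altsetting; any endpoint or driver that cannot tolerate
 * hub-initiated LPM short-circuits the search with USB3_LPM_DISABLED or
 * a device-initiated-only timeout.
 */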

static int calculate_max_exit_latency(struct usb_device *udev,
		enum usb3_link_state state_changed,
		u16 hub_encoded_timeout)
{
	unsigned long long u1_mel_us = 0;
	unsigned long long u2_mel_us = 0;
	unsigned long long mel_us = 0;
	bool disabling_u1;
	bool disabling_u2;
	bool enabling_u1;
	bool enabling_u2;

	disabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);
	disabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);

	enabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);
	enabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);

	/* If U1 was already enabled and we're not disabling it,
	 * or we're going to enable U1, account for the U1 max exit latency.
	 */
	if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
			enabling_u1)
		u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
	if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
			enabling_u2)
		u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);

	if (u1_mel_us > u2_mel_us)
		mel_us = u1_mel_us;
	else
		mel_us = u2_mel_us;

	/* xHCI host controller max exit latency field is only 16 bits wide. */
	if (mel_us > MAX_EXIT) {
		dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
				"is too big.\n", mel_us);
		return -E2BIG;
	}
	return mel_us;
}
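
/*
 * Worked example (editor's illustration; the latency values are
 * assumed): with U1 already enabled at u1_params.mel = 4,500ns and U2
 * being enabled at u2_params.mel = 120,000ns, u1_mel_us = 5 and
 * u2_mel_us = 120, so the Max Exit Latency programmed into the slot is
 * 120us, always the worst case across the enabled states.
 */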

/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci;
	u16 hub_encoded_timeout;
	int mel;
	int ret;

	xhci = hcd_to_xhci(hcd);
	/* The LPM timeout values are pretty host-controller specific, so don't
	 * enable hub-initiated timeouts unless the vendor has provided
	 * information about their timeout algorithm.
	 */
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return USB3_LPM_DISABLED;

	hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
	if (mel < 0) {
		/* Max Exit Latency is too big, disable LPM. */
		hub_encoded_timeout = USB3_LPM_DISABLED;
		mel = 0;
	}

	ret = xhci_change_max_exit_latency(xhci, udev, mel);
	if (ret)
		return ret;
	return hub_encoded_timeout;
}

int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci;
	u16 mel;

	xhci = hcd_to_xhci(hcd);
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return 0;

	mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
	return xhci_change_max_exit_latency(xhci, udev, mel);
}
#else

/* Stubs used when the LPM support above is configured out, so the
 * hc_driver table below still has valid callbacks.
 */
int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
		struct usb_device *udev, int enable)
{
	return 0;
}

int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return 0;
}

int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	return USB3_LPM_DISABLED;
}

int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	return 0;
}
#endif

/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device: mark the slot as a hub and, on
 * newer controllers, program the TT think time and port count.
 */
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}
	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}
	ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		xhci_free_command(xhci, config_cmd);
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (hdev->speed == USB_SPEED_HIGH &&
			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENOMEM;
	}

	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
	if (tt->multi)
		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub "
				"TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 *
		 * xHCI 1.0: this field shall be 0 if the device
		 * is not a high-speed hub.
		 */
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
			slot_ctx->tt_info |=
				cpu_to_le32(TT_THINK_TIME(think_time));
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
				"TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");
	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);

	xhci_free_command(xhci, config_cmd);
	return ret;
}
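
/*
 * Think-time conversion, illustrated (editor's note; the descriptor
 * value is assumed): the hub reports TT think time in units of 666ns,
 * i.e. 8 full-speed bit times. For tt->think_time = 2664ns (32 bit
 * times), 2664 / 666 - 1 = 3, which TT_THINK_TIME() shifts into the
 * slot context's tt_info field.
 */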

int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* MFINDEX counts 125us microframes; eight make up one 1ms frame. */
	return readl(&xhci->run_regs->microframe_index) >> 3;
}
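
/*
 * Example (editor's illustration): an MFINDEX value of 323 corresponds
 * to frame 323 >> 3 = 40, since each 1ms frame contains eight 125us
 * microframes.
 */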

int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
	struct xhci_hcd *xhci;
	struct device *dev = hcd->self.controller;
	int retval;

	/* Accept arbitrarily long scatter-gather lists */
	hcd->self.sg_tablesize = ~0;

	/* support to build packet from discontinuous buffers */
	hcd->self.no_sg_constraint = 1;

	/* xHCI controllers don't stop the ep queue on short packets */
	hcd->self.no_stop_on_short = 1;

	if (usb_hcd_is_primary_hcd(hcd)) {
		xhci = hcd_to_xhci(hcd);
		xhci->main_hcd = hcd;
		/* Mark the first roothub as being USB 2.0.
		 * The xHCI driver will register the USB 3.0 roothub.
		 */
		hcd->speed = HCD_USB2;
		hcd->self.root_hub->speed = USB_SPEED_HIGH;
		/*
		 * USB 2.0 roothub under xHCI has an integrated TT,
		 * (rate matching hub) as opposed to having an OHCI/UHCI
		 * companion controller.
		 */
		hcd->has_tt = 1;
	} else {
		/* The xHCI private pointer was already set up for the primary
		 * HCD; nothing more to do for the second registered roothub.
		 */
		return 0;
	}

	mutex_init(&xhci->mutex);
	xhci->cap_regs = hcd->regs;
	xhci->op_regs = hcd->regs +
		HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
	xhci->run_regs = hcd->regs +
		(readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
	/* Cache read-only capability registers */
	xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
	/* hc_capbase carries HCIVERSION in its upper half; read it first to
	 * extract the interface version, then overwrite hcc_params with the
	 * real HCCPARAMS register.
	 */
	xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase);
	xhci->hci_version = HC_VERSION(xhci->hcc_params);
	xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
	xhci_print_registers(xhci);

	xhci->quirks = quirks;

	get_quirks(dev, xhci);

	/* Controllers following the xHCI 1.0 spec can generate a spurious
	 * success event after a short transfer. This quirk makes the driver
	 * ignore such events.
	 */
	if (xhci->hci_version > 0x96)
		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;

	/* Make sure the HC is halted. */
	retval = xhci_halt(xhci);
	if (retval)
		return retval;

	xhci_dbg(xhci, "Resetting HCD\n");
	/* Reset the internal HC memory state and registers. */
	retval = xhci_reset(xhci);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Reset complete\n");

	/* Set dma_mask and coherent_dma_mask to 64-bits,
	 * if the xHC supports 64-bit addressing.
	 */
	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
			!dma_set_mask(dev, DMA_BIT_MASK(64))) {
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
	}

	xhci_dbg(xhci, "Calling HCD init\n");
	/* Initialize HCD and host controller data structures. */
	retval = xhci_init(hcd);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Called HCD init\n");

	xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%08x\n",
		  xhci->hcc_params, xhci->hci_version, xhci->quirks);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_gen_setup);
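
/*
 * Register layout recap (editor's note): the MMIO region starts with the
 * capability registers at hcd->regs; CAPLENGTH (the low byte of
 * hc_capbase, extracted by HC_LENGTH()) gives the offset of the
 * operational registers, and RTSOFF gives the offset of the runtime
 * registers, which is how op_regs and run_regs are derived above.
 */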

static const struct hc_driver xhci_hc_driver = {
	.description = "xhci-hcd",
	.product_desc = "xHCI Host Controller",
	.hcd_priv_size = sizeof(struct xhci_hcd *),

	/*
	 * generic hardware linkage
	 */
	.irq = xhci_irq,
	.flags = HCD_MEMORY | HCD_USB3 | HCD_SHARED,

	/*
	 * basic lifecycle operations
	 */
	.reset = NULL, /* set in xhci_init_driver() */
	.start = xhci_run,
	.stop = xhci_stop,
	.shutdown = xhci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue = xhci_urb_enqueue,
	.urb_dequeue = xhci_urb_dequeue,
	.alloc_dev = xhci_alloc_dev,
	.free_dev = xhci_free_dev,
	.alloc_streams = xhci_alloc_streams,
	.free_streams = xhci_free_streams,
	.add_endpoint = xhci_add_endpoint,
	.drop_endpoint = xhci_drop_endpoint,
	.endpoint_reset = xhci_endpoint_reset,
	.check_bandwidth = xhci_check_bandwidth,
	.reset_bandwidth = xhci_reset_bandwidth,
	.address_device = xhci_address_device,
	.enable_device = xhci_enable_device,
	.update_hub_device = xhci_update_hub_device,
	.reset_device = xhci_discover_or_reset_device,

	/*
	 * scheduling support
	 */
	.get_frame_number = xhci_get_frame,

	/*
	 * root hub support
	 */
	.hub_control = xhci_hub_control,
	.hub_status_data = xhci_hub_status_data,
	.bus_suspend = xhci_bus_suspend,
	.bus_resume = xhci_bus_resume,

	/*
	 * call back when device connected and addressed
	 */
	.update_device = xhci_update_device,
	.set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm,
	.enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout,
	.disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout,
	.find_raw_port_number = xhci_find_raw_port_number,
};

void xhci_init_driver(struct hc_driver *drv,
		const struct xhci_driver_overrides *over)
{
	BUG_ON(!over);

	/* Copy the generic table to drv then apply the overrides */
	*drv = xhci_hc_driver;

	if (over) {
		drv->hcd_priv_size += over->extra_priv_size;
		if (over->reset)
			drv->reset = over->reset;
		if (over->start)
			drv->start = over->start;
	}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);
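
/*
 * Usage sketch (editor's illustration; the names are hypothetical, not
 * part of this file): a bus-glue driver typically fills a hc_driver from
 * the generic table and layers its own setup hook on top, e.g.:
 *
 *	static struct hc_driver my_xhci_hc_driver;
 *	static const struct xhci_driver_overrides my_overrides = {
 *		.extra_priv_size = sizeof(struct my_xhci_priv),
 *		.reset = my_xhci_setup,
 *	};
 *	...
 *	xhci_init_driver(&my_xhci_hc_driver, &my_overrides);
 */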

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
	return 0;
}
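
/*
 * Size arithmetic, spelled out (editor's note): each expression counts
 * 32-bit registers and converts to bytes, e.g. the doorbell array is
 * 256 * 32 / 8 = 1024 bytes, and the run registers are
 * (8 + 8 * 128) * 32 / 8 = 4128 bytes (eight 32-bit fields plus 128
 * interrupter register sets of eight 32-bit registers each).
 */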

/*
 * If an init function is provided, an exit function must also be
 * provided to allow module unload.
 */
static void __exit xhci_hcd_fini(void) { }

module_init(xhci_hcd_init);
module_exit(xhci_hcd_fini);