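/*
 * xHCI host controller driver
 *
 * Core driver for 'eXtensible' Host Controller (xHC) hardware: controller
 * setup and teardown, suspend/resume, URB submission and cancellation, and
 * periodic-transfer bandwidth accounting.
 */
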
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned int quirks;
module_param(quirks, uint, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");

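/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success.
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */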
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = readl(ptr);
		if (result == ~(u32)0)
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}

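/*
 * Disable interrupts and begin the xHCI halting process.
 */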
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = readl(&xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = readl(&xhci->op_regs->command);
	cmd &= mask;
	writel(cmd, &xhci->op_regs->command);
}

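/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * xHC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read the HC Halted bit in the status register to see when the HC is done.
 */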
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
	xhci_quiesce(xhci);

	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (!ret) {
		xhci->xhc_state |= XHCI_STATE_HALTED;
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	} else
		xhci_warn(xhci, "Host not halted after %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	return ret;
}
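/*
 * Set the run bit and wait for the host to be running.
 */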
static int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
			temp);
	writel(temp, &xhci->op_regs->command);

	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state = 0;

	return ret;
}

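/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */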
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret, i;

	state = readl(&xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
	command = readl(&xhci->op_regs->command);
#ifdef CONFIG_USB_DWC3_OTG
	command |= CMD_LRESET;
#else
	command |= CMD_RESET;
#endif
	writel(command, &xhci->op_regs->command);

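	/* Existing Intel xHCI controllers require a delay of 1 ms after
	 * setting the CMD_RESET bit, and before accessing any HC registers.
	 * This allows the HC to complete the reset operation and be ready
	 * for HC register access.  Without this delay, the subsequent HC
	 * register access may result in a system hang, very rarely.
	 */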
	if (xhci->quirks & XHCI_INTEL_HOST)
		udelay(1000);

	ret = xhci_handshake(&xhci->op_regs->command,
#ifdef CONFIG_USB_DWC3_OTG
			CMD_LRESET,
#else
			CMD_RESET,
#endif
			0, 10 * 1000 * 1000);
	if (ret)
		return ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Wait for controller to be ready for doorbell rings");
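	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */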
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_CNR, 0, 10 * 1000 * 1000);

	for (i = 0; i < 2; ++i) {
		xhci->bus_state[i].port_c_suspend = 0;
		xhci->bus_state[i].suspended_ports = 0;
		xhci->bus_state[i].resuming_ports = 0;
	}

	return ret;
}

#ifdef CONFIG_PCI
static int xhci_free_msi(struct xhci_hcd *xhci)
{
	int i;

	if (!xhci->msix_entries)
		return -EINVAL;

	for (i = 0; i < xhci->msix_count; i++)
		if (xhci->msix_entries[i].vector)
			free_irq(xhci->msix_entries[i].vector,
					xhci_to_hcd(xhci));
	return 0;
}

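/*
 * Set up MSI
 */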
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"failed to allocate MSI entry");
		return ret;
	}

	ret = request_irq(pdev->irq, xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"disable MSI interrupt");
		pci_disable_msi(pdev);
	}

	return ret;
}

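/*
 * Free all IRQs that the driver requested.
 */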
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

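	/* return if using legacy interrupt */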
	if (xhci_to_hcd(xhci)->irq > 0)
		return;

	ret = xhci_free_msi(xhci);
	if (!ret)
		return;
	if (pdev->irq > 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));

	return;
}

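/*
 * Set up MSI-X
 */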
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

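	/*
	 * Calculate the number of MSI-X vectors to request:
	 * - HCS_MAX_INTRS: the max number of interrupters the host supports,
	 *   taken from the HCSPARAMS1 register.
	 * - num_online_cpus: one vector per CPU core, plus one extra vector
	 *   so an interrupt is always available.
	 */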
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
			GFP_KERNEL);
	if (!xhci->msix_entries)
		return -ENOMEM;

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix_exact(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Failed to enable MSI-X");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}

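/* Free any IRQs and disable MSI-X */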
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	if (xhci->quirks & XHCI_PLAT)
		return;

	xhci_free_irq(xhci);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	hcd->msix_enabled = 0;
	return;
}

static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
	int i;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
	}
}

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev;
	int ret;

	if (xhci->quirks & XHCI_PLAT)
		return 0;

	pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

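	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI on them.
	 */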
	if (xhci->quirks & XHCI_BROKEN_MSI)
		goto legacy_irq;

	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = 0;

	ret = xhci_setup_msix(xhci);
	if (ret)
		ret = xhci_setup_msi(xhci);

	if (!ret)
		return 0;

	if (!pdev->irq) {
		xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
		return -EINVAL;
	}

 legacy_irq:
	if (!strlen(hcd->irq_descr))
		snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
				hcd->driver->description, hcd->self.busnum);

	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);
	if (ret) {
		xhci_err(xhci, "request interrupt %d failed\n",
				pdev->irq);
		return ret;
	}
	hcd->irq = pdev->irq;
	return 0;
}

#else

static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	return 0;
}

static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif

static void compliance_mode_recovery(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	u32 temp;
	int i;

	xhci = (struct xhci_hcd *)arg;

	for (i = 0; i < xhci->num_usb3_ports; i++) {
		temp = readl(xhci->usb3_ports[i]);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
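			/*
			 * Compliance Mode Detected.  Let the USB core
			 * handle the Warm Reset.
			 */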
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Compliance mode detected->port %d",
					i + 1);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Attempting compliance mode recovery");
			hcd = xhci->shared_hcd;

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports) - 1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

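/*
 * Quirk to work around an issue seen with the SN65LVPE502CP USB3.0 re-driver,
 * which can leave ports behind it stuck in compliance mode.  The timer
 * periodically polls the USB3 ports and, when compliance mode is detected,
 * kicks the root hub so the USB core can warm-reset the port.  It rearms
 * itself until every port has been observed in U0.
 */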
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	setup_timer(&xhci->comp_mode_recovery_timer,
			compliance_mode_recovery, (unsigned long)xhci);
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Compliance mode recovery timer initialized");
}

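/*
 * Identify the systems that need the compliance mode recovery timer:
 * certain HP Z-series workstations (Z420, Z620, Z820, Z1) that ship with
 * the affected re-driver.
 */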
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
			strstr(dmi_product_name, "Z620") ||
			strstr(dmi_product_name, "Z820") ||
			strstr(dmi_product_name, "Z1 Workstation"))
		return true;

	return false;
}

static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports) - 1));
}

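/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device contexts array, and
 * set up the command ring and event ring; the heavy lifting happens in
 * xhci_mem_init().
 */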
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"QUIRK: Not clearing Link TRB chain bits.");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI doesn't need link TRB QUIRK");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	return retval;
}

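/*
 * Called for the non-primary (USB3) roothub: start the host controller
 * running and, on NEC hosts, ring the command doorbell for the vendor
 * command that xhci_run() queued.
 */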
static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;
	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB3 roothub");
	return 0;
}

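/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Set up MSI-X vectors, program the interrupter, and enable interrupts.
 */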
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

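	/* Start the xHCI host controller running only after the USB 2.0
	 * roothub is set up; the shared USB 3.0 hcd finishes the job in
	 * xhci_run_finished().
	 */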
	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

	ret = xhci_try_enable_msi(hcd);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"ERST deq = 64'h%0lx", (long unsigned int) temp_64);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set the interrupt modulation register");
	temp = readl(&xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
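	/*
	 * The interrupt moderation increment on MediaTek controllers is
	 * 8 times the value defined in the xHCI spec, so the smaller
	 * constant yields the same moderation interval.
	 */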
	temp |= (u32) ((xhci->quirks & XHCI_MTK_HOST) ? 20 : 160);
	writel(temp, &xhci->ir_set->irq_control);

	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enable interrupts, cmd = 0x%x.", temp);
	writel(temp, &xhci->op_regs->command);

	temp = readl(&xhci->ir_set->irq_pending);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	if (xhci->quirks & XHCI_NEC_HOST) {
		struct xhci_command *command;

		command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
		if (!command)
			return -ENOMEM;
		xhci_queue_vendor_command(xhci, command, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB2 roothub");
	return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);

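/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and clean up memory.
 */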
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	mutex_lock(&xhci->mutex);

	if (!(xhci->xhc_state & XHCI_STATE_HALTED)) {
		spin_lock_irq(&xhci->lock);

		xhci->xhc_state |= XHCI_STATE_HALTED;
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
		xhci_halt(xhci);
		xhci_reset(xhci);

		spin_unlock_irq(&xhci->lock);
	}

	if (!usb_hcd_is_primary_hcd(hcd)) {
		mutex_unlock(&xhci->mutex);
		return;
	}

	xhci_cleanup_msix(xhci);

	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Disabling event ring interrupts");
	temp = readl(&xhci->op_regs->status);
	writel(temp & ~STS_EINT, &xhci->op_regs->status);
	temp = readl(&xhci->ir_set->irq_pending);
	writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
	xhci_mem_cleanup(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_stop completed - status = %x",
			readl(&xhci->op_regs->status));
	mutex_unlock(&xhci->mutex);
}

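/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that
 * the machine will be powered off, and the HC's internal state will be
 * reset.  Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */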
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_shutdown completed - status = %x",
			readl(&xhci->op_regs->status));

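	/* Yet another workaround for spurious wakeups at shutdown with HSW */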
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = readl(&xhci->op_regs->command);
	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
	xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
	xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	writel(xhci->s3.command, &xhci->op_regs->command);
	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
	writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
	writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				xhci->cmd_ring->dequeue) &
		(u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting command ring address to 0x%llx",
			(long unsigned long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

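/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be
 * 64-byte aligned, so we can't just point it at the current dequeue
 * position; instead, zero out the whole ring and reset the software and
 * hardware enqueue/dequeue pointers to the start.
 */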
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0,
				sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;

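	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */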
	ring->cycle_state = 1;

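	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're
	 * paranoid and want to make sure the hardware doesn't access bogus
	 * memory because, say, the BIOS or an SMI started the host without
	 * changing the command ring pointers.
	 */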
	xhci_set_cmd_ring_deq(xhci);
}

static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
{
	int port_index;
	__le32 __iomem **port_array;
	unsigned long flags;
	u32 t1, t2;

	spin_lock_irqsave(&xhci->lock, flags);

	port_index = xhci->num_usb3_ports;
	port_array = xhci->usb3_ports;
	while (port_index--) {
		t1 = readl(port_array[port_index]);
		t1 = xhci_port_state_to_neutral(t1);
		t2 = t1 & ~PORT_WAKE_BITS;
		if (t1 != t2)
			writel(t2, port_array[port_index]);
	}

	port_index = xhci->num_usb2_ports;
	port_array = xhci->usb2_ports;
	while (port_index--) {
		t1 = readl(port_array[port_index]);
		t1 = xhci_port_state_to_neutral(t1);
		t2 = t1 & ~PORT_WAKE_BITS;
		if (t1 != t2)
			writel(t2, port_array[port_index]);
	}

	spin_unlock_irqrestore(&xhci->lock, flags);
}

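/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */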
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
	int rc = 0;
	unsigned int delay = XHCI_MAX_HALT_USEC;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;

	if (!hcd->state)
		return 0;

	if (hcd->state != HC_STATE_SUSPENDED ||
			xhci->shared_hcd->state != HC_STATE_SUSPENDED)
		return -EINVAL;

	if (!do_wakeup)
		xhci_disable_port_wake_on_bits(xhci);

	xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);
	clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	del_timer_sync(&xhci->shared_hcd->rh_timer);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

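	/* Stop the controller by clearing the Run/Stop bit.  Stopping the
	 * endpoints first is not needed here since the ports have already
	 * been suspended.
	 */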
	command = readl(&xhci->op_regs->command);
	command &= ~CMD_RUN;
	writel(command, &xhci->op_regs->command);

	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

	if (xhci_handshake(&xhci->op_regs->status,
			STS_HALT, STS_HALT, delay)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	xhci_save_registers(xhci);

	command = readl(&xhci->op_regs->command);
	command |= CMD_CSS;
	writel(command, &xhci->op_regs->command);
	if (xhci_handshake(&xhci->op_regs->status,
			STS_SAVE, 0, 10 * 1000)) {
		xhci_warn(xhci, "WARN: xHC save state timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	xhci_msix_sync_irqs(xhci);

	return rc;
}
EXPORT_SYMBOL_GPL(xhci_suspend);

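/*
 * Start the xHC after suspend (not bus-specific).
 *
 * This is called when the machine transitions from S3/S4 mode.
 */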
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0, status;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval = 0;
	bool comp_timer_running = false;

	if (!hcd->state)
		return 0;

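	/* Wait a bit if either of the roothubs needs to settle from the
	 * transition into bus suspend.
	 */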
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		hibernated = true;

	if (!hibernated) {
		xhci_restore_registers(xhci);

		xhci_set_cmd_ring_deq(xhci);

		command = readl(&xhci->op_regs->command);
		command |= CMD_CRS;
		writel(command, &xhci->op_regs->command);
		if (xhci_handshake(&xhci->op_regs->status,
				STS_RESTORE, 0, 10 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = readl(&xhci->op_regs->status);
	}

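	/* If the restore operation fails, re-initialize the HC during resume */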
	if ((temp & STS_SRE) || hibernated) {

		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
				!(xhci_all_ports_seen_u0(xhci))) {
			del_timer_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Compliance Mode Recovery Timer deleted!");
		}

		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = readl(&xhci->op_regs->status);
		writel(temp & ~STS_EINT, &xhci->op_regs->status);
		temp = readl(&xhci->ir_set->irq_pending);
		writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
				readl(&xhci->op_regs->status));

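		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */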
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		comp_timer_running = true;

		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (!retval) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(secondary_hcd);
		}
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		goto done;
	}

	command = readl(&xhci->op_regs->command);
	command |= CMD_RUN;
	writel(command, &xhci->op_regs->command);
	xhci_handshake(&xhci->op_regs->status, STS_HALT,
			0, 250 * 1000);

	spin_unlock_irq(&xhci->lock);

 done:
	if (retval == 0) {
		status = readl(&xhci->op_regs->status);
		if (status & STS_EINT) {
			usb_hcd_resume_root_hub(xhci->shared_hcd);
			usb_hcd_resume_root_hub(hcd);
		}
	}

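	/*
	 * If the system is subject to the quirk, the compliance mode timer
	 * must be re-initialized on every resume: the ports can enter
	 * compliance mode again regardless of whether they reached U0 before
	 * the suspend.
	 */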
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
		compliance_mode_recovery_timer_init(xhci);

	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	usb_hcd_poll_rh_status(xhci->shared_hcd);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);

	return retval;
}
EXPORT_SYMBOL_GPL(xhci_resume);
#endif

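/*
 * xhci_get_endpoint_index - find the xHCI endpoint context index for an
 * endpoint descriptor.  Used for passing endpoint bitmasks between the core
 * and HCDs.
 *
 * Index = (epnum * 2) + direction - 1, where direction = 0 for OUT, 1 for IN.
 * Control endpoints use the IN index, so
 * index = (epnum * 2) + 1 - 1 = (epnum * 2).
 */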
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}

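/* The reverse operation to xhci_get_endpoint_index.  Calculate the USB
 * endpoint address from the xHCI endpoint index.
 */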
unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
	unsigned int number = DIV_ROUND_UP(ep_index, 2);
	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;

	return direction | number;
}

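/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0
 * is bit 1, etc.
 */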
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

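/* Same as above, but starting from the xHCI endpoint index rather than the
 * endpoint descriptor.
 */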
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}

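/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one: we find the most significant bit set in the added
 * contexts flags, e.g. ep 1 IN => added_ctxs = 0b1000, fls(0b1000) = 4, so
 * the last valid endpoint context index is 3.
 */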
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

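/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */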
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		pr_debug("xHCI %s called with invalid args\n", func);
		return -EINVAL;
	}
	if (!udev->parent) {
		pr_debug("xHCI %s called for root hub\n", func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
					func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with udev that does not match virt_dev\n",
					func);
			return -EINVAL;
		}
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

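/*
 * Full speed devices may have a max packet size greater than 8 bytes, but
 * the USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */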
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_command *command;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max Packet Size for ep 0 changed.");
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in usb_device = %d",
				max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in xHCI HW = %d",
				hw_max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Issuing evaluate context command.");

		command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		command->in_ctx = xhci->devs[slot_id]->in_ctx;
		ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
		if (!ctrl_ctx) {
			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
					__func__);
			ret = -ENOMEM;
			goto command_cleanup;
		}

		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);

		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, command->in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, command,
				true, false);

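		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */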
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
command_cleanup:
		kfree(command->completion);
		kfree(command);
	}
	return ret;
}

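/*
 * Non-error returns are a promise to giveback() the urb later;
 * we drop ownership so the next owner (or urb unlink) can get it.
 */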
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_td *buffer;
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv *urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
			urb->transfer_buffer_length > 0 &&
			urb->transfer_flags & URB_ZERO_PACKET &&
			!(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
		size = 2;
	else
		size = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
				size * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
	if (!buffer) {
		kfree(urb_priv);
		return -ENOMEM;
	}

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = buffer;
		buffer++;
	}

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
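		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */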
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}

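		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */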
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep is transitioning to not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
exit:
	return ret;
dying:
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	ret = -ESHUTDOWN;
free_priv:
	xhci_urb_free_priv(urb_priv);
	urb->hcpriv = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

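/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed"
 * from the ring.  Since the ring is a contiguous structure, they can't be
 * physically removed; instead, a stop endpoint command is queued, and once
 * the hardware has stopped the endpoint, the cancelled TDs are skipped over
 * (or turned into no-ops) by the stop endpoint command's completion handler.
 */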
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_command *command;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);

	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = readl(&xhci->op_regs->status);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"HW died, freeing TD.");
		urb_priv = urb->hcpriv;
		for (i = urb_priv->td_cnt;
				i < urb_priv->length && xhci->devs[urb->dev->slot_id];
				i++) {
			td = urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
		xhci_urb_free_priv(urb_priv);
		return ret;
	}
	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Ep 0x%x: URB %p to be canceled on non-responsive xHCI host.",
				urb->ep->desc.bEndpointAddress, urb);
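		/* Let the stop endpoint command watchdog timer (which set
		 * this state) finish cleaning up the endpoint TD lists.
		 */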
		goto done;
	}

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	if ((xhci->quirks & XHCI_STREAM_QUIRK) && (urb->stream_id > 0))
		del_timer(&ep_ring->stream_timer);

	urb_priv = urb->hcpriv;
	i = urb_priv->td_cnt;
	if (i < urb_priv->length)
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Cancel URB %p, dev %s, ep 0x%x, starting at offset 0x%llx",
				urb, urb->dev->devpath,
				urb->ep->desc.bEndpointAddress,
				(unsigned long long) xhci_trb_virt_to_dma(
					urb_priv->td[i]->start_seg,
					urb_priv->td[i]->first_trb));

	for (; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

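	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */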
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
		if (!command) {
			ret = -ENOMEM;
			goto done;
		}
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
				ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

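/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint()
 * will add the endpoint to the schedule with possibly new parameters denoted
 * by a different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 */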
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);

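	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */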
	if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
			cpu_to_le32(EP_STATE_DISABLED)) ||
			le32_to_cpu(ctrl_ctx->drop_flags) &
			xhci_get_endpoint_flag(&ep->desc)) {
		if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
			xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
					__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	if (xhci->quirks & XHCI_MTK_HOST)
		xhci_mtk_drop_ep_quirk(hcd, udev, ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags);
	return 0;
}

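/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint()
 * will add the endpoint to the schedule with possibly new parameters denoted
 * by a different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 */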
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx;
	unsigned int ep_index;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	u32 new_add_flags, new_drop_flags;
	struct xhci_virt_device *virt_dev;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	virt_dev = xhci->devs[udev->slot_id];
	in_ctx = virt_dev->in_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);

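	/* If this endpoint is already in use, and the upper layers are trying
	 * to add it again without dropping it, reject the addition.
	 */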
	if (virt_dev->eps[ep_index].ring &&
			!(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
		xhci_warn(xhci, "Trying to add endpoint 0x%x without dropping it.\n",
				(unsigned int) ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

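	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */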
	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	if (xhci->quirks & XHCI_MTK_HOST) {
		ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
		if (ret < 0) {
			xhci_free_or_cache_endpoint_ring(xhci,
					virt_dev, ep_index);
			return ret;
		}
	}

	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

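	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors.  We must drop and re-add this endpoint, so we leave
	 * the drop flags alone.
	 */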
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags);
	return 0;
}

static void xhci_zero_in_ctx(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return;
	}

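	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */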
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);

	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
	for (i = 1; i < 31; ++i) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}

static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;

	switch (*cmd_status) {
	case COMP_CMD_ABORT:
	case COMP_CMD_STOP:
		xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
		ret = -ETIME;
		break;
	case COMP_ENOMEM:
		dev_warn(&udev->dev,
				"Not enough host controller resources for new device state.\n");
		ret = -ENOMEM;
		break;
	case COMP_BW_ERR:
	case COMP_2ND_BW_ERR:
		dev_warn(&udev->dev,
				"Not enough bandwidth for new device state.\n");
		ret = -ENOSPC;
		break;
	case COMP_TRB_ERR:
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, add flag = 1, and endpoint is not disabled.\n");
		ret = -EINVAL;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev,
				"ERROR: Incompatible device for endpoint configure command.\n");
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Successful Endpoint Configure command");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
				*cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;
	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

	switch (*cmd_status) {
	case COMP_CMD_ABORT:
	case COMP_CMD_STOP:
		xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
		ret = -ETIME;
		break;
	case COMP_EINVAL:
		dev_warn(&udev->dev,
				"WARN: xHCI driver setup invalid evaluate context command.\n");
		ret = -EINVAL;
		break;
	case COMP_EBADSLT:
		dev_warn(&udev->dev,
				"WARN: slot not enabled for evaluate context command.\n");
		ret = -EINVAL;
		break;
	case COMP_CTX_STATE:
		dev_warn(&udev->dev,
				"WARN: invalid context state for evaluate context command.\n");
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
		ret = -EINVAL;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev,
				"ERROR: Incompatible device for evaluate context command.\n");
		ret = -ENODEV;
		break;
	case COMP_MEL_ERR:
		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Successful evaluate context command");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
				*cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 valid_add_flags;
	u32 valid_drop_flags;

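	/* Ignore the slot flag (bit 0), and the default control endpoint flag
	 * (bit 1).  The default control endpoint is added during the Address
	 * Device command and is never removed until the slot is disabled.
	 */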
	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	return hweight32(valid_add_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}

static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 valid_add_flags;
	u32 valid_drop_flags;

	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	return hweight32(valid_drop_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}

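/*
 * We need to reserve the new number of endpoints before the configure
 * endpoint command completes.  We can't subtract the dropped endpoints from
 * the number of active endpoints until the command completes because we can
 * oversubscribe the host in this case:
 *
 *  - the first configure endpoint command drops more endpoints than it adds
 *  - a second configure endpoint command that adds more endpoints is queued
 *  - the first configure endpoint command fails, so the config is unchanged
 *  - the second command may succeed, even though there isn't enough resources
 *
 * Must be called with xhci->lock held.
 */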
static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 added_eps;

	added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Not enough ep ctxs: %u active, need to add %u, limit is %u.",
				xhci->num_active_eps, added_eps,
				xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += added_eps;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Adding %u ep ctxs, %u now active.", added_eps,
			xhci->num_active_eps);
	return 0;
}

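/*
 * The configure endpoint command was failed by the xHC for some reason, so
 * we need to revert the resources that the failed configuration would have
 * used.
 *
 * Must be called with xhci->lock held.
 */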
static void xhci_free_host_resources(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 num_failed_eps;

	num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
	xhci->num_active_eps -= num_failed_eps;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Removing %u failed ep ctxs, %u now active.",
			num_failed_eps,
			xhci->num_active_eps);
}

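/*
 * Now that the command has completed, clean up the active endpoint count by
 * subtracting out the endpoints that were dropped (but not changed).
 *
 * Must be called with xhci->lock held.
 */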
static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 num_dropped_eps;

	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Removing %u dropped ep ctxs, %u now active.",
				num_dropped_eps,
				xhci->num_active_eps);
}

static unsigned int xhci_get_block_size(struct usb_device *udev)
{
	switch (udev->speed) {
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
		return FS_BLOCK;
	case USB_SPEED_HIGH:
		return HS_BLOCK;
	case USB_SPEED_SUPER:
	case USB_SPEED_SUPER_PLUS:
		return SS_BLOCK;
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
	default:
		return 1;
	}
}

static unsigned int
xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
{
	if (interval_bw->overhead[LS_OVERHEAD_TYPE])
		return LS_OVERHEAD;
	if (interval_bw->overhead[FS_OVERHEAD_TYPE])
		return FS_OVERHEAD;
	return HS_OVERHEAD;
}

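/* If we are changing a LS/FS device under a HS hub,
 * make sure (if we are activating a new TT) that the HS bus has enough
 * bandwidth for this new TT.
 */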
static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	struct xhci_interval_bw_table *bw_table;
	struct xhci_tt_bw_info *tt_info;

	bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
	tt_info = virt_dev->tt_info;

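	/* If this TT already had active endpoints, the bandwidth for this TT
	 * has already been added.  Removing all periodic endpoints (and thus
	 * making the TT inactive) will only decrease the bandwidth used.
	 */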
	if (old_active_eps)
		return 0;
	if (old_active_eps == 0 && tt_info->active_eps != 0) {
		if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
			return -ENOMEM;
		return 0;
	}

	return 0;
}

static int xhci_check_ss_bw(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev)
{
	unsigned int bw_reserved;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
	if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
		return -ENOMEM;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
	if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
		return -ENOMEM;

	return 0;
}

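/*
 * This is a very conservative estimate of the worst-case scheduling scenario
 * for any one interval.  The hardware dynamically schedules the packets, so
 * we can't tell which microframe could be the limiting factor in the
 * schedule.  Instead, packets that don't fit evenly into an interval are
 * carried over into the next one, along with the largest packet size and
 * overhead seen so far, and the accumulated bandwidth is checked against the
 * bus limit (less the share reserved for non-periodic transfers) at every
 * interval.  A device that doesn't fit here may still fit in the hardware's
 * real schedule, but this keeps us safely within the spec limits.
 */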
static int xhci_check_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	unsigned int bw_reserved;
	unsigned int max_bandwidth;
	unsigned int bw_used;
	unsigned int block_size;
	struct xhci_interval_bw_table *bw_table;
	unsigned int packet_size = 0;
	unsigned int overhead = 0;
	unsigned int packets_transmitted = 0;
	unsigned int packets_remaining = 0;
	unsigned int i;

	if (virt_dev->udev->speed >= USB_SPEED_SUPER)
		return xhci_check_ss_bw(xhci, virt_dev);

	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
		max_bandwidth = HS_BW_LIMIT;
		bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
	} else {
		max_bandwidth = FS_BW_LIMIT;
		bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
	}

	bw_table = virt_dev->bw_table;

	block_size = xhci_get_block_size(virt_dev->udev);

	if (virt_dev->tt_info) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Recalculating BW for rootport %u",
				virt_dev->real_port);
		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
			xhci_warn(xhci, "Not enough bandwidth on HS bus for newly activated TT.\n");
			return -ENOMEM;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Recalculating BW for TT slot %u port %u",
				virt_dev->tt_info->slot_id,
				virt_dev->tt_info->ttport);
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Recalculating BW for rootport %u",
				virt_dev->real_port);
	}

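	/* Add in how much bandwidth will be used for interval zero, or the
	 * rounded max ESIT payload + number of packets * largest overhead.
	 */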
	bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
		bw_table->interval_bw[0].num_packets *
		xhci_get_largest_overhead(&bw_table->interval_bw[0]);

	for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
		unsigned int bw_added;
		unsigned int largest_mps;
		unsigned int interval_overhead;

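		/*
		 * How many packets could we transmit in this interval?
		 * If packets didn't fit in the previous interval, we will
		 * need to transmit that many packets twice within this
		 * interval.
		 */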
		packets_remaining = 2 * packets_remaining +
			bw_table->interval_bw[i].num_packets;

		if (list_empty(&bw_table->interval_bw[i].endpoints))
			largest_mps = 0;
		else {
			struct xhci_virt_ep *virt_ep;
			struct list_head *ep_entry;

			ep_entry = bw_table->interval_bw[i].endpoints.next;
			virt_ep = list_entry(ep_entry,
					struct xhci_virt_ep, bw_endpoint_list);

			largest_mps = DIV_ROUND_UP(
					virt_ep->bw_info.max_packet_size,
					block_size);
		}
		if (largest_mps > packet_size)
			packet_size = largest_mps;

		interval_overhead = xhci_get_largest_overhead(
				&bw_table->interval_bw[i]);
		if (interval_overhead > overhead)
			overhead = interval_overhead;

		packets_transmitted = packets_remaining >> (i + 1);

		bw_added = packets_transmitted * (overhead + packet_size);

		packets_remaining = packets_remaining % (1 << (i + 1));

		if (packets_remaining == 0) {
			packet_size = 0;
			overhead = 0;
		} else if (packets_transmitted > 0) {
			packet_size = largest_mps;
			overhead = interval_overhead;
		}

		bw_used += bw_added;
		if (bw_used > max_bandwidth) {
			xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
					bw_used, max_bandwidth);
			return -ENOMEM;
		}
	}

	if (packets_remaining > 0)
		bw_used += overhead + packet_size;

	if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
		unsigned int port_index = virt_dev->real_port - 1;

		bw_used += TT_HS_OVERHEAD *
			xhci->rh_bw[port_index].num_active_tts;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Final bandwidth: %u, Limit: %u, Reserved: %u, Available: %u percent",
			bw_used, max_bandwidth, bw_reserved,
			(max_bandwidth - bw_used - bw_reserved) * 100 /
			max_bandwidth);

	bw_used += bw_reserved;
	if (bw_used > max_bandwidth) {
		xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
				bw_used, max_bandwidth);
		return -ENOMEM;
	}

	bw_table->bw_used = bw_used;
	return 0;
}

static bool xhci_is_async_ep(unsigned int ep_type)
{
	return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
		ep_type != ISOC_IN_EP &&
		ep_type != INT_IN_EP);
}

static bool xhci_is_sync_in_ep(unsigned int ep_type)
{
	return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
}

static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
{
	unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);

	if (ep_bw->ep_interval == 0)
		return SS_OVERHEAD_BURST +
			(ep_bw->mult * ep_bw->num_packets *
					(SS_OVERHEAD + mps));
	return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
				(SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
			1 << ep_bw->ep_interval);
}

void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
		struct xhci_bw_info *ep_bw,
		struct xhci_interval_bw_table *bw_table,
		struct usb_device *udev,
		struct xhci_virt_ep *virt_ep,
		struct xhci_tt_bw_info *tt_info)
{
	struct xhci_interval_bw *interval_bw;
	int normalized_interval;

	if (xhci_is_async_ep(ep_bw->type))
		return;

	if (udev->speed >= USB_SPEED_SUPER) {
		if (xhci_is_sync_in_ep(ep_bw->type))
			xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
				xhci_get_ss_bw_consumed(ep_bw);
		else
			xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
				xhci_get_ss_bw_consumed(ep_bw);
		return;
	}

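	/* SuperSpeed endpoints never get added to intervals in the table, so
	 * this check is only valid for HS/FS/LS devices.
	 */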
	if (list_empty(&virt_ep->bw_endpoint_list))
		return;

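	/* For LS/FS devices, we need to translate the interval expressed in
	 * microframes to frames.
	 */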
2407 if (udev->speed == USB_SPEED_HIGH)
2408 normalized_interval = ep_bw->ep_interval;
2409 else
2410 normalized_interval = ep_bw->ep_interval - 3;
2411
2412 if (normalized_interval == 0)
2413 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2414 interval_bw = &bw_table->interval_bw[normalized_interval];
2415 interval_bw->num_packets -= ep_bw->num_packets;
2416 switch (udev->speed) {
2417 case USB_SPEED_LOW:
2418 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2419 break;
2420 case USB_SPEED_FULL:
2421 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2422 break;
2423 case USB_SPEED_HIGH:
2424 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2425 break;
2426 case USB_SPEED_SUPER:
2427 case USB_SPEED_SUPER_PLUS:
2428 case USB_SPEED_UNKNOWN:
2429 case USB_SPEED_WIRELESS:
2430 /* Should never happen because only LS/FS/HS endpoints will get
2431  * added to the endpoint list.
2432  */
2433 return;
2434 }
2435 if (tt_info)
2436 tt_info->active_eps -= 1;
2437 list_del_init(&virt_ep->bw_endpoint_list);
2438}
2439
2440static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2441 struct xhci_bw_info *ep_bw,
2442 struct xhci_interval_bw_table *bw_table,
2443 struct usb_device *udev,
2444 struct xhci_virt_ep *virt_ep,
2445 struct xhci_tt_bw_info *tt_info)
2446{
2447 struct xhci_interval_bw *interval_bw;
2448 struct xhci_virt_ep *smaller_ep;
2449 int normalized_interval;
2450
2451 if (xhci_is_async_ep(ep_bw->type))
2452 return;
2453
2454 if (udev->speed >= USB_SPEED_SUPER) {
2455 if (xhci_is_sync_in_ep(ep_bw->type))
2456 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2457 xhci_get_ss_bw_consumed(ep_bw);
2458 else
2459 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2460 xhci_get_ss_bw_consumed(ep_bw);
2461 return;
2462 }
2463
2464 /* For LS/FS devices, we need to translate the interval expressed in
2465  * microframes to frames.
2466  */
2467 if (udev->speed == USB_SPEED_HIGH)
2468 normalized_interval = ep_bw->ep_interval;
2469 else
2470 normalized_interval = ep_bw->ep_interval - 3;
2471
2472 if (normalized_interval == 0)
2473 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2474 interval_bw = &bw_table->interval_bw[normalized_interval];
2475 interval_bw->num_packets += ep_bw->num_packets;
2476 switch (udev->speed) {
2477 case USB_SPEED_LOW:
2478 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2479 break;
2480 case USB_SPEED_FULL:
2481 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2482 break;
2483 case USB_SPEED_HIGH:
2484 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2485 break;
2486 case USB_SPEED_SUPER:
2487 case USB_SPEED_SUPER_PLUS:
2488 case USB_SPEED_UNKNOWN:
2489 case USB_SPEED_WIRELESS:
2490 /* Should never happen because only LS/FS/HS endpoints will get
2491  * added to the endpoint list.
2492  */
2493 return;
2494 }
2495
2496 if (tt_info)
2497 tt_info->active_eps += 1;
2498 /* Insert the endpoint into the list, largest max packet size first. */
2499 list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2500 bw_endpoint_list) {
2501 if (ep_bw->max_packet_size >=
2502 smaller_ep->bw_info.max_packet_size) {
2503 /* Add the new ep before the smaller endpoint */
2504 list_add_tail(&virt_ep->bw_endpoint_list,
2505 &smaller_ep->bw_endpoint_list);
2506 return;
2507 }
2508 }
2509 /* Add the new endpoint at the tail, if no smaller endpoints were found */
2510 list_add_tail(&virt_ep->bw_endpoint_list,
2511 &interval_bw->endpoints);
2512}
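/* Note that xhci_check_bw_table() depends on the descending
 * max-packet-size order maintained by the sorted insert above: it takes
 * the first list entry of each interval as the largest packet size for
 * that interval.
 */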
2513
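/* Bump or drop the root port's count of active TTs when a TT transitions
 * between having zero and nonzero active periodic endpoints, and charge
 * or refund the TT think-time overhead against the port's bandwidth table.
 */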
2514void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2515 struct xhci_virt_device *virt_dev,
2516 int old_active_eps)
2517{
2518 struct xhci_root_port_bw_info *rh_bw_info;
2519 if (!virt_dev->tt_info)
2520 return;
2521
2522 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2523 if (old_active_eps == 0 &&
2524 virt_dev->tt_info->active_eps != 0) {
2525 rh_bw_info->num_active_tts += 1;
2526 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2527 } else if (old_active_eps != 0 &&
2528 virt_dev->tt_info->active_eps == 0) {
2529 rh_bw_info->num_active_tts -= 1;
2530 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2531 }
2532}
2533
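/* Speculatively apply a new configuration to the bandwidth tables and run
 * the bandwidth check.  The old per-endpoint bandwidth info is
 * snapshotted into ep_bw_info[] first; if the check fails, every add and
 * drop below is undone from that snapshot and -ENOMEM is returned,
 * leaving the tables as they were.
 */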
2534static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2535 struct xhci_virt_device *virt_dev,
2536 struct xhci_container_ctx *in_ctx)
2537{
2538 struct xhci_bw_info ep_bw_info[31];
2539 int i;
2540 struct xhci_input_control_ctx *ctrl_ctx;
2541 int old_active_eps = 0;
2542
2543 if (virt_dev->tt_info)
2544 old_active_eps = virt_dev->tt_info->active_eps;
2545
2546 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2547 if (!ctrl_ctx) {
2548 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2549 __func__);
2550 return -ENOMEM;
2551 }
2552
2553 for (i = 0; i < 31; i++) {
2554 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2555 continue;
2556
2557 /* Make a copy of the BW info in case we need to revert this */
2558 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2559 sizeof(ep_bw_info[i]));
2560 /* Drop the endpoint from the interval table if the endpoint is
2561  * being dropped or changed.
2562  */
2563 if (EP_IS_DROPPED(ctrl_ctx, i))
2564 xhci_drop_ep_from_interval_table(xhci,
2565 &virt_dev->eps[i].bw_info,
2566 virt_dev->bw_table,
2567 virt_dev->udev,
2568 &virt_dev->eps[i],
2569 virt_dev->tt_info);
2570 }
2571 /* Overwrite the information stored in the endpoints' bw_info */
2572 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2573 for (i = 0; i < 31; i++) {
2574 /* Add any changed or added endpoints to the interval table */
2575 if (EP_IS_ADDED(ctrl_ctx, i))
2576 xhci_add_ep_to_interval_table(xhci,
2577 &virt_dev->eps[i].bw_info,
2578 virt_dev->bw_table,
2579 virt_dev->udev,
2580 &virt_dev->eps[i],
2581 virt_dev->tt_info);
2582 }
2583
2584 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2585 /* Ok, this fits in the bandwidth budget, so just kick the
2586  * tt_info's active endpoints count, and we're done.
2587  */
2588 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2589 return 0;
2590 }
2591
2592 /* We don't have enough bandwidth for this, revert the stored info. */
2593 for (i = 0; i < 31; i++) {
2594 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2595 continue;
2596
2597 /* Drop the new copies of any added or changed endpoints from
2598  * the interval table.
2599  */
2600 if (EP_IS_ADDED(ctrl_ctx, i)) {
2601 xhci_drop_ep_from_interval_table(xhci,
2602 &virt_dev->eps[i].bw_info,
2603 virt_dev->bw_table,
2604 virt_dev->udev,
2605 &virt_dev->eps[i],
2606 virt_dev->tt_info);
2607 }
2608 /* Revert the endpoint back to its old information */
2609 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2610 sizeof(ep_bw_info[i]));
2611 /* Add any changed or dropped endpoints back into the table */
2612 if (EP_IS_DROPPED(ctrl_ctx, i))
2613 xhci_add_ep_to_interval_table(xhci,
2614 &virt_dev->eps[i].bw_info,
2615 virt_dev->bw_table,
2616 virt_dev->udev,
2617 &virt_dev->eps[i],
2618 virt_dev->tt_info);
2619 }
2620 return -ENOMEM;
2621}
2622
2623
2624 /* Issue a configure endpoint command or evaluate context command
2625  * and wait for it to finish.
2626  */
2627static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2628 struct usb_device *udev,
2629 struct xhci_command *command,
2630 bool ctx_change, bool must_succeed)
2631{
2632 int ret;
2633 unsigned long flags;
2634 struct xhci_input_control_ctx *ctrl_ctx;
2635 struct xhci_virt_device *virt_dev;
2636
2637 if (!command)
2638 return -EINVAL;
2639
2640 spin_lock_irqsave(&xhci->lock, flags);
2641 virt_dev = xhci->devs[udev->slot_id];
2642
2643 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2644 if (!ctrl_ctx) {
2645 spin_unlock_irqrestore(&xhci->lock, flags);
2646 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2647 __func__);
2648 return -ENOMEM;
2649 }
2650
2651 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2652 xhci_reserve_host_resources(xhci, ctrl_ctx)) {
2653 spin_unlock_irqrestore(&xhci->lock, flags);
2654 xhci_warn(xhci, "Not enough host resources, "
2655 "active endpoint contexts = %u\n",
2656 xhci->num_active_eps);
2657 return -ENOMEM;
2658 }
2659 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2660 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2661 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2662 xhci_free_host_resources(xhci, ctrl_ctx);
2663 spin_unlock_irqrestore(&xhci->lock, flags);
2664 xhci_warn(xhci, "Not enough bandwidth\n");
2665 return -ENOMEM;
2666 }
2667
2668 if (!ctx_change)
2669 ret = xhci_queue_configure_endpoint(xhci, command,
2670 command->in_ctx->dma,
2671 udev->slot_id, must_succeed);
2672 else
2673 ret = xhci_queue_evaluate_context(xhci, command,
2674 command->in_ctx->dma,
2675 udev->slot_id, must_succeed);
2676 if (ret < 0) {
2677 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2678 xhci_free_host_resources(xhci, ctrl_ctx);
2679 spin_unlock_irqrestore(&xhci->lock, flags);
2680 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2681 "FIXME allocate a new ring segment");
2682 return -ENOMEM;
2683 }
2684 xhci_ring_cmd_db(xhci);
2685 spin_unlock_irqrestore(&xhci->lock, flags);
2686
2687 /* Wait for the configure endpoint command to complete */
2688 wait_for_completion(command->completion);
2689
2690 if (!ctx_change)
2691 ret = xhci_configure_endpoint_result(xhci, udev,
2692 &command->status);
2693 else
2694 ret = xhci_evaluate_context_result(xhci, udev,
2695 &command->status);
2696
2697 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2698 spin_lock_irqsave(&xhci->lock, flags);
2699 /* If the command failed, remove the reserved resources.
2700  * Otherwise, clean up the estimate to include dropped eps.
2701  */
2702 if (ret)
2703 xhci_free_host_resources(xhci, ctrl_ctx);
2704 else
2705 xhci_finish_resource_reservation(xhci, ctrl_ctx);
2706 spin_unlock_irqrestore(&xhci->lock, flags);
2707 }
2708 return ret;
2709}
2710
2711static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
2712 struct xhci_virt_device *vdev, int i)
2713{
2714 struct xhci_virt_ep *ep = &vdev->eps[i];
2715
2716 if (ep->ep_state & EP_HAS_STREAMS) {
2717 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
2718 xhci_get_endpoint_address(i));
2719 xhci_free_stream_info(xhci, ep->stream_info);
2720 ep->stream_info = NULL;
2721 ep->ep_state &= ~EP_HAS_STREAMS;
2722 }
2723}
2724
2725 /* Called after one or more calls to xhci_add_endpoint() or
2726  * xhci_drop_endpoint().  If this call fails, the USB core is expected
2727  * to call xhci_reset_bandwidth().
2728  *
2729  * Since we are in the middle of changing either configuration or
2730  * installing a new alt setting, the USB core won't allow URBs to be
2731  * enqueued for any endpoint on the old config or interface.  Nothing
2732  * else should be touching the xhci->devs[slot_id] structure, so we
2733  * don't need to take the xhci->lock for manipulating that.
2734  */
2735int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2736{
2737 int i;
2738 int ret = 0;
2739 struct xhci_hcd *xhci;
2740 struct xhci_virt_device *virt_dev;
2741 struct xhci_input_control_ctx *ctrl_ctx;
2742 struct xhci_slot_ctx *slot_ctx;
2743 struct xhci_command *command;
2744
2745 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2746 if (ret <= 0)
2747 return ret;
2748 xhci = hcd_to_xhci(hcd);
2749 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
2750 (xhci->xhc_state & XHCI_STATE_REMOVING))
2751 return -ENODEV;
2752
2753 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2754 virt_dev = xhci->devs[udev->slot_id];
2755
2756 command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
2757 if (!command)
2758 return -ENOMEM;
2759
2760 command->in_ctx = virt_dev->in_ctx;
2761
2762 /* See section 4.6.6 - A0 = 6; A1 = 4; D0 = 5; D1 = 0 */
2763 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2764 if (!ctrl_ctx) {
2765 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2766 __func__);
2767 ret = -ENOMEM;
2768 goto command_cleanup;
2769 }
2770 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2771 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2772 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
2773
2774 /* Don't issue the command if there's no endpoints to update. */
2775 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2776 ctrl_ctx->drop_flags == 0) {
2777 ret = 0;
2778 goto command_cleanup;
2779 }
2780 /* Fix up Context Entries field.  Minimum value is EP0 == BIT(1). */
2781 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2782 for (i = 31; i >= 1; i--) {
2783 __le32 le32 = cpu_to_le32(BIT(i));
2784
2785 if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
2786 || (ctrl_ctx->add_flags & le32) || i == 1) {
2787 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2788 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
2789 break;
2790 }
2791 }
2792 xhci_dbg(xhci, "New Input Control Context:\n");
2793 xhci_dbg_ctx(xhci, virt_dev->in_ctx,
2794 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2795
2796 ret = xhci_configure_endpoint(xhci, udev, command,
2797 false, false);
2798 if (ret)
2799 /* Callee should call reset_bandwidth() */
2800 goto command_cleanup;
2801
2802 xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
2803 xhci_dbg_ctx(xhci, virt_dev->out_ctx,
2804 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2805
2806 /* Free any rings that were dropped, but not changed. */
2807 for (i = 1; i < 31; ++i) {
2808 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2809 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
2810 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2811 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2812 }
2813 }
2814 xhci_zero_in_ctx(xhci, virt_dev);
2815
2816 /* Install any rings for completely new endpoints or changed
2817  * endpoints, and free or cache any old rings from changed endpoints.
2818  */
2819 for (i = 1; i < 31; ++i) {
2820 if (!virt_dev->eps[i].new_ring)
2821 continue;
2822 /* Only cache or free the old ring if it exists.
2823  * It may not if this is the first add of an endpoint.
2824  */
2825 if (virt_dev->eps[i].ring) {
2826 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2827 }
2828 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2829 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2830 virt_dev->eps[i].new_ring = NULL;
2831 }
2832command_cleanup:
2833 kfree(command->completion);
2834 kfree(command);
2835
2836 return ret;
2837}
2838
2839void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2840{
2841 struct xhci_hcd *xhci;
2842 struct xhci_virt_device *virt_dev;
2843 int i, ret;
2844
2845 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2846 if (ret <= 0)
2847 return;
2848 xhci = hcd_to_xhci(hcd);
2849
2850 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2851 virt_dev = xhci->devs[udev->slot_id];
2852
2853 for (i = 0; i < 31; ++i) {
2854 if (virt_dev->eps[i].new_ring) {
2855 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
2856 virt_dev->eps[i].new_ring = NULL;
2857 }
2858 }
2859 xhci_zero_in_ctx(xhci, virt_dev);
2860}
2861
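/* Helper to fill in an input control context for a configure endpoint
 * command: set the caller's add/drop flags, copy the current slot context
 * from the output context, and always mark the slot context as valid.
 */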
2862static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
2863 struct xhci_container_ctx *in_ctx,
2864 struct xhci_container_ctx *out_ctx,
2865 struct xhci_input_control_ctx *ctrl_ctx,
2866 u32 add_flags, u32 drop_flags)
2867{
2868 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
2869 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
2870 xhci_slot_copy(xhci, in_ctx, out_ctx);
2871 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2872
2873 xhci_dbg(xhci, "Input Context:\n");
2874 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
2875}
2876
2877static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
2878 unsigned int slot_id, unsigned int ep_index,
2879 struct xhci_dequeue_state *deq_state)
2880{
2881 struct xhci_input_control_ctx *ctrl_ctx;
2882 struct xhci_container_ctx *in_ctx;
2883 struct xhci_ep_ctx *ep_ctx;
2884 u32 added_ctxs;
2885 dma_addr_t addr;
2886
2887 in_ctx = xhci->devs[slot_id]->in_ctx;
2888 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2889 if (!ctrl_ctx) {
2890 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2891 __func__);
2892 return;
2893 }
2894
2895 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
2896 xhci->devs[slot_id]->out_ctx, ep_index);
2897 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
2898 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
2899 deq_state->new_deq_ptr);
2900 if (addr == 0) {
2901 xhci_warn(xhci, "WARN Cannot submit config ep after "
2902 "reset ep command\n");
2903 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
2904 deq_state->new_deq_seg,
2905 deq_state->new_deq_ptr);
2906 return;
2907 }
2908 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
2909
2910 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
2911 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
2912 xhci->devs[slot_id]->out_ctx, ctrl_ctx,
2913 added_ctxs, added_ctxs);
2914}
2915
2916void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2917 unsigned int ep_index, struct xhci_td *td)
2918{
2919 struct xhci_dequeue_state deq_state;
2920 struct xhci_virt_ep *ep;
2921 struct usb_device *udev = td->urb->dev;
2922
2923 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2924 "Cleaning up stalled endpoint ring");
2925 ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2926 /* We need to move the HW's dequeue pointer past this TD,
2927  * or it will attempt to resend it on the next doorbell ring.
2928  */
2929 xhci_find_new_dequeue_state(xhci, udev->slot_id,
2930 ep_index, ep->stopped_stream, td, &deq_state);
2931
2932 if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
2933 return;
2934
2935 /* HW with the reset endpoint quirk will use the saved dequeue state
2936  * to issue a configure endpoint command later.
2937  */
2938 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
2939 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2940 "Queueing new dequeue state");
2941 xhci_queue_new_dequeue_state(xhci, udev->slot_id,
2942 ep_index, ep->stopped_stream, &deq_state);
2943 } else {
2944 /* Better hope no one uses the input context between now and the
2945  * reset endpoint completion!
2946  * XXX: No idea how this hardware will react when stream rings
2947  * are enabled.
2948  */
2949 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2950 "Setting up input context for "
2951 "configure endpoint command");
2952 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
2953 ep_index, &deq_state);
2954 }
2955}
2956
2957
2958
2959
2960 /* Called when clearing a halted device.  The USB core should have sent
2961  * the control message to clear the device halt condition.  This is
2962  * where the driver could reset the endpoint's data toggle or sequence
2963  * number; see the note in the function body for why nothing is done yet.
2964  */
2965void xhci_endpoint_reset(struct usb_hcd *hcd,
2966 struct usb_host_endpoint *ep)
2967{
2968 struct xhci_hcd *xhci;
2969
2970 xhci = hcd_to_xhci(hcd);
2971
2972 /*
2973  * We might need to implement the config ep cmd in xhci 4.8.1 note:
2974  * The Reset Endpoint Command may only be issued to endpoints in the
2975  * Halted state.  If software wishes to reset the Data Toggle or
2976  * Sequence Number of an endpoint that isn't in the Halted state, then
2977  * software may issue a Configure Endpoint Command with the Drop and
2978  * Add bits set for the target endpoint that is in the Stopped state.
2979  */
2980
2981 /* For now just print debug to follow the situation */
2982 xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n",
2983 ep->desc.bEndpointAddress);
2984}
2985
2986static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
2987 struct usb_device *udev, struct usb_host_endpoint *ep,
2988 unsigned int slot_id)
2989{
2990 int ret;
2991 unsigned int ep_index;
2992 unsigned int ep_state;
2993
2994 if (!ep)
2995 return -EINVAL;
2996 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
2997 if (ret <= 0)
2998 return -EINVAL;
2999 if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
3000 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
3001 " descriptor for ep 0x%x does not support streams\n",
3002 ep->desc.bEndpointAddress);
3003 return -EINVAL;
3004 }
3005
3006 ep_index = xhci_get_endpoint_index(&ep->desc);
3007 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3008 if (ep_state & EP_HAS_STREAMS ||
3009 ep_state & EP_GETTING_STREAMS) {
3010 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
3011 "already has streams set up.\n",
3012 ep->desc.bEndpointAddress);
3013 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
3014 "dynamic stream context array reallocation.\n");
3015 return -EINVAL;
3016 }
3017 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
3018 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
3019 "endpoint 0x%x; URBs are pending.\n",
3020 ep->desc.bEndpointAddress);
3021 return -EINVAL;
3022 }
3023 return 0;
3024}
3025
3026static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
3027 unsigned int *num_streams, unsigned int *num_stream_ctxs)
3028{
3029 unsigned int max_streams;
3030
3031 /* The stream context array size must be a power of two */
3032 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
3033 /*
3034  * Find out how many primary stream array entries the host controller
3035  * supports.  Later we may use secondary stream arrays (similar to 2nd
3036  * level page tables), but that's an optional feature for xHCI host
3037  * controllers.  xHCs must support at least 4 stream IDs.
3038  */
3039 max_streams = HCC_MAX_PSA(xhci->hcc_params);
3040 if (*num_stream_ctxs > max_streams) {
3041 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
3042 max_streams);
3043 *num_stream_ctxs = max_streams;
3044 *num_streams = max_streams;
3045 }
3046}
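/* Illustrative example (values not from the original source): a driver
 * request for 16 streams becomes 17 stream IDs once stream 0 is counted,
 * which rounds up to a 32-entry stream context array; on a host whose
 * HCC_MAX_PSA reports only 16 entries, both the array size and the usable
 * stream count are then clamped to 16.
 */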
3047
3048 /* Returns an error code if one of the endpoints already has streams.
3049  * This does not change any data structures, it only checks and gathers
3050  * information.
3051  */
3052static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
3053 struct usb_device *udev,
3054 struct usb_host_endpoint **eps, unsigned int num_eps,
3055 unsigned int *num_streams, u32 *changed_ep_bitmask)
3056{
3057 unsigned int max_streams;
3058 unsigned int endpoint_flag;
3059 int i;
3060 int ret;
3061
3062 for (i = 0; i < num_eps; i++) {
3063 ret = xhci_check_streams_endpoint(xhci, udev,
3064 eps[i], udev->slot_id);
3065 if (ret < 0)
3066 return ret;
3067
3068 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
3069 if (max_streams < (*num_streams - 1)) {
3070 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3071 eps[i]->desc.bEndpointAddress,
3072 max_streams);
3073 *num_streams = max_streams+1;
3074 }
3075
3076 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
3077 if (*changed_ep_bitmask & endpoint_flag)
3078 return -EINVAL;
3079 *changed_ep_bitmask |= endpoint_flag;
3080 }
3081 return 0;
3082}
3083
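/* Build the bitmask of endpoint flags for the endpoints whose streams are
 * being torn down.  Returns 0 (and warns) if any endpoint is already in
 * the middle of a streams transition, or never had streams configured,
 * since either case indicates a driver bug.
 */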
3084static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3085 struct usb_device *udev,
3086 struct usb_host_endpoint **eps, unsigned int num_eps)
3087{
3088 u32 changed_ep_bitmask = 0;
3089 unsigned int slot_id;
3090 unsigned int ep_index;
3091 unsigned int ep_state;
3092 int i;
3093
3094 slot_id = udev->slot_id;
3095 if (!xhci->devs[slot_id])
3096 return 0;
3097
3098 for (i = 0; i < num_eps; i++) {
3099 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3100 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3101
3102 if (ep_state & EP_GETTING_NO_STREAMS) {
3103 xhci_warn(xhci, "WARN Can't disable streams for "
3104 "endpoint 0x%x, "
3105 "streams are being disabled already\n",
3106 eps[i]->desc.bEndpointAddress);
3107 return 0;
3108 }
3109
3110 if (!(ep_state & EP_HAS_STREAMS) &&
3111 !(ep_state & EP_GETTING_STREAMS)) {
3112 xhci_warn(xhci, "WARN Can't disable streams for "
3113 "endpoint 0x%x, "
3114 "streams are already disabled!\n",
3115 eps[i]->desc.bEndpointAddress);
3116 xhci_warn(xhci, "WARN xhci_free_streams() called "
3117 "with non-streams endpoint\n");
3118 return 0;
3119 }
3120 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3121 }
3122 return changed_ep_bitmask;
3123}
3124
3125 /*
3126  * The USB device drivers use this function (through the HCD interface
3127  * in USB core) to prepare a set of bulk endpoints to use streams.
3128  * Streams are used to coordinate mass storage command queueing across
3129  * multiple endpoints (basically a stream ID == a task ID).
3130  *
3131  * Setting up streams involves allocating the same size stream context
3132  * array for each endpoint and issuing a configure endpoint command for
3133  * all endpoints.
3134  *
3135  * Don't allow the call to succeed if one endpoint only supports one
3136  * stream (which means it doesn't support streams at all).
3137  *
3138  * Drivers may get less stream IDs than they asked for, if the host
3139  * controller hardware or endpoints claim they can't support the number
3140  * of requested stream IDs.  */
3141int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3142 struct usb_host_endpoint **eps, unsigned int num_eps,
3143 unsigned int num_streams, gfp_t mem_flags)
3144{
3145 int i, ret;
3146 struct xhci_hcd *xhci;
3147 struct xhci_virt_device *vdev;
3148 struct xhci_command *config_cmd;
3149 struct xhci_input_control_ctx *ctrl_ctx;
3150 unsigned int ep_index;
3151 unsigned int num_stream_ctxs;
3152 unsigned int max_packet;
3153 unsigned long flags;
3154 u32 changed_ep_bitmask = 0;
3155
3156 if (!eps)
3157 return -EINVAL;
3158
3159 /* Add one to the number of streams requested to account for
3160  * stream 0 that is reserved for xHCI host controller hardware.
3161  */
3162 num_streams += 1;
3163 xhci = hcd_to_xhci(hcd);
3164 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3165 num_streams);
3166
3167 /* MaxPSASize value 0 (2 streams) means streams are not supported */
3168 if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
3169 HCC_MAX_PSA(xhci->hcc_params) < 4) {
3170 xhci_dbg(xhci, "xHCI controller does not support streams.\n");
3171 return -ENOSYS;
3172 }
3173
3174 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
3175 if (!config_cmd) {
3176 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
3177 return -ENOMEM;
3178 }
3179 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
3180 if (!ctrl_ctx) {
3181 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3182 __func__);
3183 xhci_free_command(xhci, config_cmd);
3184 return -ENOMEM;
3185 }
3186
3187 /* Check to make sure all endpoints are not already configured for
3188  * streams.  While we're at it, find the maximum number of streams that
3189  * all the endpoints will support and check for duplicate endpoints.
3190  */
3191 spin_lock_irqsave(&xhci->lock, flags);
3192 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3193 num_eps, &num_streams, &changed_ep_bitmask);
3194 if (ret < 0) {
3195 xhci_free_command(xhci, config_cmd);
3196 spin_unlock_irqrestore(&xhci->lock, flags);
3197 return ret;
3198 }
3199 if (num_streams <= 1) {
3200 xhci_warn(xhci, "WARN: endpoints can't handle "
3201 "more than one stream.\n");
3202 xhci_free_command(xhci, config_cmd);
3203 spin_unlock_irqrestore(&xhci->lock, flags);
3204 return -EINVAL;
3205 }
3206 vdev = xhci->devs[udev->slot_id];
3207 /* Mark each endpoint as being in transition, so
3208  * xhci_urb_enqueue() will reject all URBs.
3209  */
3210 for (i = 0; i < num_eps; i++) {
3211 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3212 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3213 }
3214 spin_unlock_irqrestore(&xhci->lock, flags);
3215
3216 /* Setup internal data structures and allocate HW data structures for
3217  * streams (but don't install the xHCI structures in the input context
3218  * until we're sure all memory allocation succeeded).
3219  */
3220 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3221 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3222 num_stream_ctxs, num_streams);
3223
3224 for (i = 0; i < num_eps; i++) {
3225 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3226 max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&eps[i]->desc));
3227 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3228 num_stream_ctxs,
3229 num_streams,
3230 max_packet, mem_flags);
3231 if (!vdev->eps[ep_index].stream_info)
3232 goto cleanup;
3233 /* Set maxPstreams in endpoint context and update deq ptr to
3234  * point to stream context array.  FIXME
3235  */
3236 }
3237
3238 /* Set up the input context for a configure endpoint command. */
3239 for (i = 0; i < num_eps; i++) {
3240 struct xhci_ep_ctx *ep_ctx;
3241
3242 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3243 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3244
3245 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3246 vdev->out_ctx, ep_index);
3247 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3248 vdev->eps[ep_index].stream_info);
3249 }
3250 /* Tell the HW to drop its old copy of the endpoint context info
3251  * and add the updated copy from the input context.
3252  */
3253 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3254 vdev->out_ctx, ctrl_ctx,
3255 changed_ep_bitmask, changed_ep_bitmask);
3256
3257 /* Issue and wait for the configure endpoint command */
3258 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3259 false, false);
3260
3261 /* xHC rejected the configure endpoint command for some reason, so we
3262  * leave the old ring intact and free our internal streams data
3263  * structure.
3264  */
3265 if (ret < 0)
3266 goto cleanup;
3267
3268 spin_lock_irqsave(&xhci->lock, flags);
3269 for (i = 0; i < num_eps; i++) {
3270 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3271 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3272 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3273 udev->slot_id, ep_index);
3274 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3275 }
3276 xhci_free_command(xhci, config_cmd);
3277 spin_unlock_irqrestore(&xhci->lock, flags);
3278
3279 /* Subtract 1 for stream 0, which drivers can't use */
3280 return num_streams - 1;
3281
3282cleanup:
3283 /* Clean up the input context for later re-use. */
3284 for (i = 0; i < num_eps; i++) {
3285 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3286 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3287 vdev->eps[ep_index].stream_info = NULL;
3288 /* FIXME Unset maxPstreams in endpoint context and
3289  * update deq ptr to point to the normal endpoint ring.
3290  */
3291 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3292 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3293 xhci_endpoint_zero(xhci, vdev, eps[i]);
3294 }
3295 xhci_free_command(xhci, config_cmd);
3296 return -ENOMEM;
3297}
3298
3299 /* Transition the endpoint from using streams to being a "normal"
3300  * endpoint without streams.
3301  *
3302  * Modify the endpoint context state, submit a configure endpoint
3303  * command, and free all endpoint rings for streams if that completes
3304  * successfully. */
3305int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3306 struct usb_host_endpoint **eps, unsigned int num_eps,
3307 gfp_t mem_flags)
3308{
3309 int i, ret;
3310 struct xhci_hcd *xhci;
3311 struct xhci_virt_device *vdev;
3312 struct xhci_command *command;
3313 struct xhci_input_control_ctx *ctrl_ctx;
3314 unsigned int ep_index;
3315 unsigned long flags;
3316 u32 changed_ep_bitmask;
3317
3318 xhci = hcd_to_xhci(hcd);
3319 vdev = xhci->devs[udev->slot_id];
3320
3321 /* Set up a configure endpoint command to remove the streams rings */
3322 spin_lock_irqsave(&xhci->lock, flags);
3323 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3324 udev, eps, num_eps);
3325 if (changed_ep_bitmask == 0) {
3326 spin_unlock_irqrestore(&xhci->lock, flags);
3327 return -EINVAL;
3328 }
3329
3330 /* Use the xhci_command structure from the first endpoint.  We may have
3331  * allocated too many, but the driver may call xhci_free_streams() for
3332  * each endpoint it grouped into one call to xhci_alloc_streams().
3333  */
3334 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3335 command = vdev->eps[ep_index].stream_info->free_streams_command;
3336 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3337 if (!ctrl_ctx) {
3338 spin_unlock_irqrestore(&xhci->lock, flags);
3339 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3340 __func__);
3341 return -EINVAL;
3342 }
3343
3344 for (i = 0; i < num_eps; i++) {
3345 struct xhci_ep_ctx *ep_ctx;
3346
3347 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3348 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3349 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3350 EP_GETTING_NO_STREAMS;
3351
3352 xhci_endpoint_copy(xhci, command->in_ctx,
3353 vdev->out_ctx, ep_index);
3354 xhci_setup_no_streams_ep_input_ctx(ep_ctx,
3355 &vdev->eps[ep_index]);
3356 }
3357 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3358 vdev->out_ctx, ctrl_ctx,
3359 changed_ep_bitmask, changed_ep_bitmask);
3360 spin_unlock_irqrestore(&xhci->lock, flags);
3361
3362 /* Issue and wait for the configure endpoint command,
3363  * which must succeed.
3364  */
3365 ret = xhci_configure_endpoint(xhci, udev, command,
3366 false, true);
3367
3368 /* xHC rejected the configure endpoint command for some reason, so we
3369  * leave the streams rings intact.
3370  */
3371 if (ret < 0)
3372 return ret;
3373
3374 spin_lock_irqsave(&xhci->lock, flags);
3375 for (i = 0; i < num_eps; i++) {
3376 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3377 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3378 vdev->eps[ep_index].stream_info = NULL;
3379 /* FIXME Unset maxPstreams in endpoint context and
3380  * update deq ptr to point to the normal endpoint ring.
3381  */
3382 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3383 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3384 }
3385 spin_unlock_irqrestore(&xhci->lock, flags);
3386
3387 return 0;
3388}
3389
3390
3391 /* Deletes endpoint resources for endpoints that were active before a
3392  * Reset Device command, or a Disable Slot command.  The Reset Device
3393  * command leaves the control endpoint intact, whereas Disable Slot
3394  * deletes it.
3395  *
3396  * Must be called with xhci->lock held. */
3397void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3398 struct xhci_virt_device *virt_dev, bool drop_control_ep)
3399{
3400 int i;
3401 unsigned int num_dropped_eps = 0;
3402 unsigned int drop_flags = 0;
3403
3404 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3405 if (virt_dev->eps[i].ring) {
3406 drop_flags |= 1 << i;
3407 num_dropped_eps++;
3408 }
3409 }
3410 xhci->num_active_eps -= num_dropped_eps;
3411 if (num_dropped_eps)
3412 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3413 "Dropped %u ep ctxs, flags = 0x%x, "
3414 "%u now active.",
3415 num_dropped_eps, drop_flags,
3416 xhci->num_active_eps);
3417}
3418
3419 /*
3420  * This submits a Reset Device Command, which will set the device state
3421  * to 0, set the device address to 0, and disable all the endpoints
3422  * except the default control endpoint.  The USB core should come back
3423  * and call xhci_address_device(), and then re-set up the configuration.
3424  * If this is called because of a usb_reset_and_verify_device(), then
3425  * the old alternate settings will be re-installed through the normal
3426  * bandwidth allocation functions.
3427  *
3428  * Wait for the Reset Device command to finish.  Remove all structures
3429  * associated with the endpoints that were disabled.  Clear the input
3430  * device structure?  Reset the control endpoint 0 max packet size?
3431  *
3432  * If the virt_dev to be reset does not exist or does not match the
3433  * udev, it means the device is lost, possibly due to an xHC restore
3434  * error and re-initialization during S3/S4.  In this case, call
3435  * xhci_alloc_dev() to re-allocate the device.
3436  */
3437int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
3438{
3439 int ret, i;
3440 unsigned long flags;
3441 struct xhci_hcd *xhci;
3442 unsigned int slot_id;
3443 struct xhci_virt_device *virt_dev;
3444 struct xhci_command *reset_device_cmd;
3445 int last_freed_endpoint;
3446 struct xhci_slot_ctx *slot_ctx;
3447 int old_active_eps = 0;
3448
3449 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3450 if (ret <= 0)
3451 return ret;
3452 xhci = hcd_to_xhci(hcd);
3453 slot_id = udev->slot_id;
3454 virt_dev = xhci->devs[slot_id];
3455 if (!virt_dev) {
3456 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3457 "not exist. Re-allocate the device\n", slot_id);
3458 ret = xhci_alloc_dev(hcd, udev);
3459 if (ret == 1)
3460 return 0;
3461 else
3462 return -EINVAL;
3463 }
3464
3465 if (virt_dev->tt_info)
3466 old_active_eps = virt_dev->tt_info->active_eps;
3467
3468 if (virt_dev->udev != udev) {
3469 /* If the virt_dev and the udev do not match, this virt_dev
3470  * may belong to another udev.
3471  * Re-allocate the device.
3472  */
3473 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3474 "not match the udev. Re-allocate the device\n",
3475 slot_id);
3476 ret = xhci_alloc_dev(hcd, udev);
3477 if (ret == 1)
3478 return 0;
3479 else
3480 return -EINVAL;
3481 }
3482
3483 /* If device is not setup, there is no point in resetting it */
3484 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3485 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3486 SLOT_STATE_DISABLED)
3487 return 0;
3488
3489 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3490
3491 /* Allocate the command structure that holds the struct completion.
3492  * Assume we're in process context, since the normal device reset
3493  * process has to wait for the device anyway.  Storage devices are
3494  * reset as part of error handling, so use GFP_NOIO instead of GFP_KERNEL.
3495  */
3496 reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
3497 if (!reset_device_cmd) {
3498 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3499 return -ENOMEM;
3500 }
3501
3502 /* Attempt to submit the Reset Device command to the command ring */
3503 spin_lock_irqsave(&xhci->lock, flags);
3504
3505 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
3506 if (ret) {
3507 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3508 spin_unlock_irqrestore(&xhci->lock, flags);
3509 goto command_cleanup;
3510 }
3511 xhci_ring_cmd_db(xhci);
3512 spin_unlock_irqrestore(&xhci->lock, flags);
3513
3514 /* Wait for the Reset Device command to finish */
3515 wait_for_completion(reset_device_cmd->completion);
3516
3517 /* The Reset Device command can't fail, according to the 0.95/0.96
3518  * spec, unless we tried to reset a slot ID that wasn't enabled,
3519  * or the device wasn't in the addressed or configured state.
3520  */
3521 ret = reset_device_cmd->status;
3522 switch (ret) {
3523 case COMP_CMD_ABORT:
3524 case COMP_CMD_STOP:
3525 xhci_warn(xhci, "Timeout waiting for reset device command\n");
3526 ret = -ETIME;
3527 goto command_cleanup;
3528 case COMP_EBADSLT:
3529 case COMP_CTX_STATE:
3530 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
3531 slot_id,
3532 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3533 xhci_dbg(xhci, "Not freeing device rings.\n");
3534 /* Don't treat this as an error.  May change my mind later. */
3535 ret = 0;
3536 goto command_cleanup;
3537 case COMP_SUCCESS:
3538 xhci_dbg(xhci, "Successful reset device command.\n");
3539 break;
3540 default:
3541 if (xhci_is_vendor_info_code(xhci, ret))
3542 break;
3543 xhci_warn(xhci, "Unknown completion code %u for "
3544 "reset device command.\n", ret);
3545 ret = -EINVAL;
3546 goto command_cleanup;
3547 }
3548
3549 /* Free up host controller endpoint resources */
3550 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3551 spin_lock_irqsave(&xhci->lock, flags);
3552 /* Don't delete the default control endpoint resources */
3553 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3554 spin_unlock_irqrestore(&xhci->lock, flags);
3555 }
3556
3557 /* Everything but endpoint 0 is disabled, so free or cache the rings. */
3558 last_freed_endpoint = 1;
3559 for (i = 1; i < 31; ++i) {
3560 struct xhci_virt_ep *ep = &virt_dev->eps[i];
3561
3562 if (ep->ep_state & EP_HAS_STREAMS) {
3563 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
3564 xhci_get_endpoint_address(i));
3565 xhci_free_stream_info(xhci, ep->stream_info);
3566 ep->stream_info = NULL;
3567 ep->ep_state &= ~EP_HAS_STREAMS;
3568 }
3569
3570 if (ep->ring) {
3571 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
3572 last_freed_endpoint = i;
3573 }
3574 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3575 xhci_drop_ep_from_interval_table(xhci,
3576 &virt_dev->eps[i].bw_info,
3577 virt_dev->bw_table,
3578 udev,
3579 &virt_dev->eps[i],
3580 virt_dev->tt_info);
3581 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3582 }
3583
3584 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3585
3586 xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
3587 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
3588 ret = 0;
3589
3590command_cleanup:
3591 xhci_free_command(xhci, reset_device_cmd);
3592 return ret;
3593}
3594
3595 /*
3596  * At this point, the struct usb_device is about to go away, the device
3597  * has disconnected, and all traffic has been stopped and the endpoints
3598  * have been disabled.  Free any HC data structures for that device.
3599  */
3600void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3601{
3602 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3603 struct xhci_virt_device *virt_dev;
3604 unsigned long flags;
3605 u32 state;
3606 int i, ret;
3607 struct xhci_command *command;
3608
3609 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
3610 if (!command)
3611 return;
3612
3613#ifndef CONFIG_USB_DEFAULT_PERSIST
3614 /*
3615  * We called pm_runtime_get_noresume when the device was attached.
3616  * Decrement the counter here to allow the controller to runtime
3617  * suspend if no devices remain.
3618  */
3619 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3620 pm_runtime_put_noidle(hcd->self.controller);
3621#endif
3622
3623 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3624 /* If the host is halted due to driver unload, we still need to free
3625  * the device.
3626  */
3627 if (ret <= 0 && ret != -ENODEV) {
3628 kfree(command);
3629 return;
3630 }
3631
3632 virt_dev = xhci->devs[udev->slot_id];
3633
3634 /* Stop any wayward timer functions (which may grab the lock) */
3635 for (i = 0; i < 31; ++i) {
3636 virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
3637 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
3638 }
3639
3640 spin_lock_irqsave(&xhci->lock, flags);
3641 /* Don't disable the slot if the host controller is dead. */
3642 state = readl(&xhci->op_regs->status);
3643 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3644 (xhci->xhc_state & XHCI_STATE_HALTED)) {
3645 xhci_free_virt_device(xhci, udev->slot_id);
3646 spin_unlock_irqrestore(&xhci->lock, flags);
3647 kfree(command);
3648 return;
3649 }
3650
3651 if (xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
3652 udev->slot_id)) {
3653 spin_unlock_irqrestore(&xhci->lock, flags);
3654 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3655 return;
3656 }
3657 xhci_ring_cmd_db(xhci);
3658 spin_unlock_irqrestore(&xhci->lock, flags);
3659
3660 /*
3661  * Event command completion handler will free any data structures
3662  * associated with the slot.  XXX Can free sleep?
3663  */
3664}
3665
3666
3667 /* Checks if we have enough host controller resources for the default
3668  * control endpoint.
3669  *
3670  * Must be called with xhci->lock held.
3671  */
3672static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3673{
3674 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
3675 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3676 "Not enough ep ctxs: "
3677 "%u active, need to add 1, limit is %u.",
3678 xhci->num_active_eps, xhci->limit_active_eps);
3679 return -ENOMEM;
3680 }
3681 xhci->num_active_eps += 1;
3682 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3683 "Adding 1 ep ctx, %u now active.",
3684 xhci->num_active_eps);
3685 return 0;
3686}
3687
3688
3689 /*
3690  * Returns 0 if the xHC ran out of device slots, the Enable Slot command
3691  * timed out, or allocating memory failed.  Returns 1 on success.
3692  */
3693int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3694{
3695 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3696 unsigned long flags;
3697 int ret, slot_id;
3698 struct xhci_command *command;
3699
3700 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
3701 if (!command)
3702 return 0;
3703
3704 /* xhci->slot_id and xhci->addr_dev are not thread-safe */
3705 mutex_lock(&xhci->mutex);
3706 spin_lock_irqsave(&xhci->lock, flags);
3707 command->completion = &xhci->addr_dev;
3708 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
3709 if (ret) {
3710 spin_unlock_irqrestore(&xhci->lock, flags);
3711 mutex_unlock(&xhci->mutex);
3712 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3713 kfree(command);
3714 return 0;
3715 }
3716 xhci_ring_cmd_db(xhci);
3717 spin_unlock_irqrestore(&xhci->lock, flags);
3718
3719 wait_for_completion(command->completion);
3720 slot_id = xhci->slot_id;
3721 mutex_unlock(&xhci->mutex);
3722
3723 if (!slot_id || command->status != COMP_SUCCESS) {
3724 xhci_err(xhci, "Error while assigning device slot ID\n");
3725 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
3726 HCS_MAX_SLOTS(
3727 readl(&xhci->cap_regs->hcs_params1)));
3728 kfree(command);
3729 return 0;
3730 }
3731
3732 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3733 spin_lock_irqsave(&xhci->lock, flags);
3734 ret = xhci_reserve_host_control_ep_resources(xhci);
3735 if (ret) {
3736 spin_unlock_irqrestore(&xhci->lock, flags);
3737 xhci_warn(xhci, "Not enough host resources, "
3738 "active endpoint contexts = %u\n",
3739 xhci->num_active_eps);
3740 goto disable_slot;
3741 }
3742 spin_unlock_irqrestore(&xhci->lock, flags);
3743 }
3744 /* Use GFP_NOIO, since this function can be called from
3745  * xhci_discover_or_reset_device(), which may be called as part of
3746  * mass storage driver error handling.
3747  */
3748 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
3749 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
3750 goto disable_slot;
3751 }
3752 udev->slot_id = slot_id;
3753
3754#ifndef CONFIG_USB_DEFAULT_PERSIST
3755 /*
3756  * If resetting upon resume, we can't put the controller into runtime
3757  * suspend if there is a device attached.
3758  */
3759 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3760 pm_runtime_get_noresume(hcd->self.controller);
3761#endif
3762
3763
3764 kfree(command);
3765 /* Is this a LS or FS device under a HS hub? */
3766 /* Hub or peripheral? */
3767 return 1;
3768
3769disable_slot:
3770 /* Disable slot, if we can do it without mem alloc */
3771 spin_lock_irqsave(&xhci->lock, flags);
3772 command->completion = NULL;
3773 command->status = 0;
3774 if (!xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
3775 udev->slot_id))
3776 xhci_ring_cmd_db(xhci);
3777 spin_unlock_irqrestore(&xhci->lock, flags);
3778 return 0;
3779}
3780
3781
3782 /* Issue an Address Device command and optionally send a corresponding
3783  * SetAddress request to the device.
3784  */
3785static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
3786 enum xhci_setup_dev setup)
3787{
3788 const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
3789 unsigned long flags;
3790 struct xhci_virt_device *virt_dev;
3791 int ret = 0;
3792 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3793 struct xhci_slot_ctx *slot_ctx;
3794 struct xhci_input_control_ctx *ctrl_ctx;
3795 u64 temp_64;
3796 struct xhci_command *command = NULL;
3797
3798 mutex_lock(&xhci->mutex);
3799
3800 if (xhci->xhc_state)
3801 goto out;
3802
3803 if (!udev->slot_id) {
3804 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3805 "Bad Slot ID %d", udev->slot_id);
3806 ret = -EINVAL;
3807 goto out;
3808 }
3809
3810 virt_dev = xhci->devs[udev->slot_id];
3811
3812 if (WARN_ON(!virt_dev)) {
3813 /*
3814  * In plug/unplug torture test with an NEC controller,
3815  * a zero-dereference was observed once due to virt_dev = 0.
3816  * Print useful debug rather than crash just in case.
3817  */
3818 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
3819 udev->slot_id);
3820 ret = -EINVAL;
3821 goto out;
3822 }
3823
3824 if (setup == SETUP_CONTEXT_ONLY) {
3825 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3826 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3827 SLOT_STATE_DEFAULT) {
3828 xhci_dbg(xhci, "Slot already in default state\n");
3829 goto out;
3830 }
3831 }
3832
3833 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
3834 if (!command) {
3835 ret = -ENOMEM;
3836 goto out;
3837 }
3838
3839 command->in_ctx = virt_dev->in_ctx;
3840 command->completion = &xhci->addr_dev;
3841
3842 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
3843 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
3844 if (!ctrl_ctx) {
3845 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3846 __func__);
3847 ret = -EINVAL;
3848 goto out;
3849 }
3850
3851 /* If this is the first Set Address since device plug-in or
3852  * virt_device reallocation after a resume with an xHCI power loss,
3853  * then set up the slot context.
3854  */
3855 if (!slot_ctx->dev_info)
3856 xhci_setup_addressable_virt_dev(xhci, udev);
3857 /* Otherwise, update the control endpoint ring enqueue pointer. */
3858 else
3859 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
3860 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
3861 ctrl_ctx->drop_flags = 0;
3862
3863 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3864 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3865 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
3866 le32_to_cpu(slot_ctx->dev_info) >> 27);
3867
3868 spin_lock_irqsave(&xhci->lock, flags);
3869 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
3870 udev->slot_id, setup);
3871 if (ret) {
3872 spin_unlock_irqrestore(&xhci->lock, flags);
3873 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3874 "FIXME: allocate a command ring segment");
3875 goto out;
3876 }
3877 xhci_ring_cmd_db(xhci);
3878 spin_unlock_irqrestore(&xhci->lock, flags);
3879
3880 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
3881 wait_for_completion(command->completion);
3882
3883 /* FIXME: From section 4.3.4: "Software shall be responsible for
3884  * timing the SetAddress() "recovery interval" required by USB and
3885  * aborting the command on a timeout."
3886  */
3887 switch (command->status) {
3888 case COMP_CMD_ABORT:
3889 case COMP_CMD_STOP:
3890 xhci_warn(xhci, "Timeout while waiting for setup device command\n");
3891 ret = -ETIME;
3892 break;
3893 case COMP_CTX_STATE:
3894 case COMP_EBADSLT:
3895 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
3896 act, udev->slot_id);
3897 ret = -EINVAL;
3898 break;
3899 case COMP_TX_ERR:
3900 dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
3901 ret = -EPROTO;
3902 break;
3903 case COMP_DEV_ERR:
3904 dev_warn(&udev->dev,
3905 "ERROR: Incompatible device for setup %s command\n", act);
3906 ret = -ENODEV;
3907 break;
3908 case COMP_SUCCESS:
3909 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3910 "Successful setup %s command", act);
3911 break;
3912 default:
3913 xhci_err(xhci,
3914 "ERROR: unexpected setup %s command completion code 0x%x.\n",
3915 act, command->status);
3916 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3917 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
3918 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
3919 ret = -EINVAL;
3920 break;
3921 }
3922 if (ret)
3923 goto out;
3924 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
3925 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3926 "Op regs DCBAA ptr = %#016llx", temp_64);
3927 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3928 "Slot ID %d dcbaa entry @%p = %#016llx",
3929 udev->slot_id,
3930 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
3931 (unsigned long long)
3932 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
3933 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3934 "Output Context DMA address = %#08llx",
3935 (unsigned long long)virt_dev->out_ctx->dma);
3936 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3937 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3938 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
3939 le32_to_cpu(slot_ctx->dev_info) >> 27);
3940 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3941 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
3942
3943 /* USB core uses address 1 for the roothubs, so we add one to the
3944  * address given back to us by the HC.
3945  */
3946 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3947 trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
3948 le32_to_cpu(slot_ctx->dev_info) >> 27);
3949 /* Zero the input context control for later use */
3950 ctrl_ctx->add_flags = 0;
3951 ctrl_ctx->drop_flags = 0;
3952
3953 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3954 "Internal device address = %d",
3955 le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
3956out:
3957 mutex_unlock(&xhci->mutex);
3958 kfree(command);
3959 return ret;
3960}
3961
3962int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3963{
3964 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
3965}
3966
3967int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
3968{
3969 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
3970}
3971
3972
3973 /* Transfer the port index into real index in the HW port status
3974  * registers.  Calculate the offset between the port's PORTSC register
3975  * and the port status base, then divide by the number of per-port
3976  * registers to get the real index.  The raw port number is 1-based.
3977  */
3978int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
3979{
3980 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3981 __le32 __iomem *base_addr = &xhci->op_regs->port_status_base;
3982 __le32 __iomem *addr;
3983 int raw_port;
3984
3985 if (hcd->speed < HCD_USB3)
3986 addr = xhci->usb2_ports[port1 - 1];
3987 else
3988 addr = xhci->usb3_ports[port1 - 1];
3989
3990 raw_port = (addr - base_addr)/NUM_PORT_REGS + 1;
3991 return raw_port;
3992}
3993
3994 /* Issue an Evaluate Context command to change the Maximum Exit Latency
3995  * in the slot context.  If that succeeds, store the new MEL in the
3996  * xhci virtual device.
3997  */
3998static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
3999 struct usb_device *udev, u16 max_exit_latency)
4000{
4001 struct xhci_virt_device *virt_dev;
4002 struct xhci_command *command;
4003 struct xhci_input_control_ctx *ctrl_ctx;
4004 struct xhci_slot_ctx *slot_ctx;
4005 unsigned long flags;
4006 int ret;
4007
4008 spin_lock_irqsave(&xhci->lock, flags);
4009
4010 virt_dev = xhci->devs[udev->slot_id];
4011
4012
4013
4014 /*
4015  * virt_dev might not exist yet if xHC resumed from hibernate (S4) and
4016  * xHC was re-initialized.  Or it might never exist.
4017  */
4018 if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
4019 spin_unlock_irqrestore(&xhci->lock, flags);
4020 return 0;
4021 }
4022
4023 /* Attempt to issue an Evaluate Context command to change the MEL. */
4024 command = xhci->lpm_command;
4025 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
4026 if (!ctrl_ctx) {
4027 spin_unlock_irqrestore(&xhci->lock, flags);
4028 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4029 __func__);
4030 return -ENOMEM;
4031 }
4032
4033 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4034 spin_unlock_irqrestore(&xhci->lock, flags);
4035
4036 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4037 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4038 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4039 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4040 slot_ctx->dev_state = 0;
4041
4042 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
4043 "Set up evaluate context for LPM MEL change.");
4044 xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
4045 xhci_dbg_ctx(xhci, command->in_ctx, 0);
4046
4047 /* Issue and wait for the evaluate context command. */
4048 ret = xhci_configure_endpoint(xhci, udev, command,
4049 true, true);
4050 xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id);
4051 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0);
4052
4053 if (!ret) {
4054 spin_lock_irqsave(&xhci->lock, flags);
4055 virt_dev->current_mel = max_exit_latency;
4056 spin_unlock_irqrestore(&xhci->lock, flags);
4057 }
4058 return ret;
4059}
4060
4061#ifdef CONFIG_PM
4062
4063 /* BESL to HIRD Encoding array for USB2 LPM */
4064static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
4065 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
4066
4067 /* Calculate HIRD/BESL for USB2 PORTPMSC */
4068static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
4069 struct usb_device *udev)
4070{
4071 int u2del, besl, besl_host;
4072 int besl_device = 0;
4073 u32 field;
4074
4075 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
4076 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4077
4078 if (field & USB_BESL_SUPPORT) {
4079 for (besl_host = 0; besl_host < 16; besl_host++) {
4080 if (xhci_besl_encoding[besl_host] >= u2del)
4081 break;
4082 }
4083
4084 if (field & USB_BESL_BASELINE_VALID)
4085 besl_device = USB_GET_BESL_BASELINE(field);
4086 else if (field & USB_BESL_DEEP_VALID)
4087 besl_device = USB_GET_BESL_DEEP(field);
4088 } else {
4089 if (u2del <= 50)
4090 besl_host = 0;
4091 else
4092 besl_host = (u2del - 51) / 75 + 1;
4093 }
4094
4095 besl = besl_host + besl_device;
4096 if (besl > 15)
4097 besl = 15;
4098
4099 return besl;
4100}
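/* Worked example (illustrative values, not from the original source): with
 * u2del = 512us on a BESL-capable device, the first table entry >= 512 is
 * xhci_besl_encoding[6] = 1000, so besl_host = 6; without BESL support the
 * fallback formula gives (512 - 51) / 75 + 1 = 7.
 */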
4101
4102 /* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
4103static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
4104{
4105 u32 field;
4106 int l1;
4107 int besld = 0;
4108 int hirdm = 0;
4109
4110 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4111
4112 /* xHCI L1 timeout in 256us intervals */
4113 l1 = udev->l1_params.timeout / 256;
4114
4115 /* device has a preferred BESLD */
4116 if (field & USB_BESL_DEEP_VALID) {
4117 besld = USB_GET_BESL_DEEP(field);
4118 hirdm = 1;
4119 }
4120
4121 return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
4122}
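/* Worked example (illustrative): an L1 timeout of 2048us yields
 * l1 = 2048 / 256 = 8, and a device advertising a deep BESL value gets
 * hirdm = 1 with that BESL packed via PORT_BESLD(), producing the value
 * written to PORTHLPMC.
 */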
4123
4124int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4125 struct usb_device *udev, int enable)
4126{
4127 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4128 __le32 __iomem **port_array;
4129 __le32 __iomem *pm_addr, *hlpm_addr;
4130 u32 pm_val, hlpm_val, field;
4131 unsigned int port_num;
4132 unsigned long flags;
4133 int hird, exit_latency;
4134 int ret;
4135
4136 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
4137 !udev->lpm_capable)
4138 return -EPERM;
4139
4140 if (!udev->parent || udev->parent->parent ||
4141 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4142 return -EPERM;
4143
4144 if (udev->usb2_hw_lpm_capable != 1)
4145 return -EPERM;
4146
4147 spin_lock_irqsave(&xhci->lock, flags);
4148
4149 port_array = xhci->usb2_ports;
4150 port_num = udev->portnum - 1;
4151 pm_addr = port_array[port_num] + PORTPMSC;
4152 pm_val = readl(pm_addr);
4153 hlpm_addr = port_array[port_num] + PORTHLPMC;
4154 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4155
4156 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
4157 enable ? "enable" : "disable", port_num + 1);
4158
4159 if (enable) {
4160 /* Host supports BESL timeout instead of HIRD */
4161 if (udev->usb2_hw_lpm_besl_capable) {
4162 /* If device doesn't have a preferred BESL value, use a
4163  * default one which works with mixed HIRD and BESL
4164  * systems.  See XHCI_DEFAULT_BESL definition in xhci.h
4165  */
4166 if ((field & USB_BESL_SUPPORT) &&
4167 (field & USB_BESL_BASELINE_VALID))
4168 hird = USB_GET_BESL_BASELINE(field);
4169 else
4170 hird = udev->l1_params.besl;
4171
4172 exit_latency = xhci_besl_encoding[hird];
4173 spin_unlock_irqrestore(&xhci->lock, flags);
4174
4175 /* USB 3.0 code dedicates one xhci->lpm_command->in_ctx input
4176  * context for link power management evaluate context commands.
4177  * It is protected by hcd->bandwidth_mutex and is shared by all
4178  * devices.  We need to set the max exit latency for USB 2 BESL
4179  * LPM as well, so use the same mutex and
4180  * xhci_change_max_exit_latency().
4181  */
4182 mutex_lock(hcd->bandwidth_mutex);
4183 ret = xhci_change_max_exit_latency(xhci, udev,
4184 exit_latency);
4185 mutex_unlock(hcd->bandwidth_mutex);
4186
4187 if (ret < 0)
4188 return ret;
4189 spin_lock_irqsave(&xhci->lock, flags);
4190
4191 hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
4192 writel(hlpm_val, hlpm_addr);
4193 /* flush write */
4194 readl(hlpm_addr);
4195 } else {
4196 hird = xhci_calculate_hird_besl(xhci, udev);
4197 }
4198
4199 pm_val &= ~PORT_HIRD_MASK;
4200 pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
4201 writel(pm_val, pm_addr);
4202 pm_val = readl(pm_addr);
4203 pm_val |= PORT_HLE;
4204 writel(pm_val, pm_addr);
4205 /* flush write */
4206 readl(pm_addr);
4207 } else {
4208 pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
4209 writel(pm_val, pm_addr);
4210 /* flush write */
4211 readl(pm_addr);
4212 if (udev->usb2_hw_lpm_besl_capable) {
4213 spin_unlock_irqrestore(&xhci->lock, flags);
4214 mutex_lock(hcd->bandwidth_mutex);
4215 xhci_change_max_exit_latency(xhci, udev, 0);
4216 mutex_unlock(hcd->bandwidth_mutex);
4217 return 0;
4218 }
4219 }
4220
4221 spin_unlock_irqrestore(&xhci->lock, flags);
4222 return 0;
4223}
4224
4225
4226 /* Check if a USB2 port supports a given extended capability protocol;
4227  * only USB2 ports' extended protocol capability values are cached.
4228  * Return 1 if the capability is supported. */
4229static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
4230 unsigned capability)
4231{
4232 u32 port_offset, port_count;
4233 int i;
4234
4235 for (i = 0; i < xhci->num_ext_caps; i++) {
4236 if (xhci->ext_caps[i] & capability) {
4237 /* port offsets start at 1 */
4238 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
4239 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
4240 if (port >= port_offset &&
4241 port < port_offset + port_count)
4242 return 1;
4243 }
4244 }
4245 return 0;
4246}
4247
4248int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4249{
4250 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4251 int portnum = udev->portnum - 1;
4252
4253 if (hcd->speed >= HCD_USB3 || !xhci->sw_lpm_support ||
4254 !udev->lpm_capable)
4255 return 0;
4256
4257 /* we only support LPM for non-hub devices connected to the root hub yet */
4258 if (!udev->parent || udev->parent->parent ||
4259 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4260 return 0;
4261
4262 if (xhci->hw_lpm_support == 1 &&
4263 xhci_check_usb2_port_capability(
4264 xhci, portnum, XHCI_HLC)) {
4265 udev->usb2_hw_lpm_capable = 1;
4266 udev->l1_params.timeout = XHCI_L1_TIMEOUT;
4267 udev->l1_params.besl = XHCI_DEFAULT_BESL;
4268 if (xhci_check_usb2_port_capability(xhci, portnum,
4269 XHCI_BLC))
4270 udev->usb2_hw_lpm_besl_capable = 1;
4271 }
4272
4273 return 0;
4274}
4275
4276 /*---------------------- USB 3.0 Link PM functions ------------------------*/
4277
4278 /* Service interval in ns = 2^(bInterval - 1) * 125us * 1000ns/us */
4279static unsigned long long xhci_service_interval_to_ns(
4280 struct usb_endpoint_descriptor *desc)
4281{
4282 return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
4283}
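/* For example, bInterval = 4 gives 2^3 * 125us = 1ms, i.e. 1000000ns. */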
4284
4285static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
4286 enum usb3_link_state state)
4287{
4288 unsigned long long sel;
4289 unsigned long long pel;
4290 unsigned int max_sel_pel;
4291 char *state_name;
4292
4293 switch (state) {
4294 case USB3_LPM_U1:
4295 /* Convert SEL and PEL stored in nanoseconds to microseconds */
4296 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
4297 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
4298 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
4299 state_name = "U1";
4300 break;
4301 case USB3_LPM_U2:
4302 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
4303 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
4304 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
4305 state_name = "U2";
4306 break;
4307 default:
4308 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
4309 __func__);
4310 return USB3_LPM_DISABLED;
4311 }
4312
4313 if (sel <= max_sel_pel && pel <= max_sel_pel)
4314 return USB3_LPM_DEVICE_INITIATED;
4315
4316 if (sel > max_sel_pel)
4317 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4318 "due to long SEL %llu ms\n",
4319 state_name, sel);
4320 else
4321 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4322 "due to long PEL %llu ms\n",
4323 state_name, pel);
4324 return USB3_LPM_DISABLED;
4325}
4326
4327
4328
4329 /* The U1 timeout should be the maximum of the following values:
4330  *  - 10 microseconds (to avoid the bandwidth impact on the scheduler)
4331  *  - largest bInterval of any active periodic endpoint (to avoid going
4332  *    into lower power link states between intervals)
4333  *  - the U1 Exit Latency of the device
4334  */
4335static unsigned long long xhci_calculate_intel_u1_timeout(
4336 struct usb_device *udev,
4337 struct usb_endpoint_descriptor *desc)
4338{
4339 unsigned long long timeout_ns;
4340 int ep_type;
4341 int intr_type;
4342
4343 ep_type = usb_endpoint_type(desc);
4344 switch (ep_type) {
4345 case USB_ENDPOINT_XFER_CONTROL:
4346 timeout_ns = udev->u1_params.sel * 3;
4347 break;
4348 case USB_ENDPOINT_XFER_BULK:
4349 timeout_ns = udev->u1_params.sel * 5;
4350 break;
4351 case USB_ENDPOINT_XFER_INT:
4352 intr_type = usb_endpoint_interrupt_type(desc);
4353 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
4354 timeout_ns = udev->u1_params.sel * 3;
4355 break;
4356 }
4357 /* Otherwise the calculation is the same as isoc eps, so fall through */
4358 case USB_ENDPOINT_XFER_ISOC:
4359 timeout_ns = xhci_service_interval_to_ns(desc);
4360 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
4361 if (timeout_ns < udev->u1_params.sel * 2)
4362 timeout_ns = udev->u1_params.sel * 2;
4363 break;
4364 default:
4365 return 0;
4366 }
4367
4368 return timeout_ns;
4369}
4370
4371 /* Returns the hub-encoded U1 timeout value. */
4372static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
4373 struct usb_device *udev,
4374 struct usb_endpoint_descriptor *desc)
4375{
4376 unsigned long long timeout_ns;
4377
4378 if (xhci->quirks & XHCI_INTEL_HOST)
4379 timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
4380 else
4381 timeout_ns = udev->u1_params.sel;
4382
4383 /* The U1 timeout is encoded in 1us intervals.
4384  * Don't return a timeout of zero, because it's USB3_LPM_DISABLED.
4385  */
4386 if (timeout_ns == USB3_LPM_DISABLED)
4387 timeout_ns = 1;
4388 else
4389 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
4390
4391 /* If the necessary timeout value is bigger than what we can set in
4392  * the USB 3.0 hub, we have to disable hub-initiated U1.
4393  */
4394 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
4395 return timeout_ns;
4396 dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
4397 "due to long timeout %llu ms\n", timeout_ns);
4398 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
4399}
4400
4401
4402 /* The U2 timeout should be the maximum of:
4403  *  - 10 ms (to avoid the bandwidth impact on the scheduler)
4404  *  - the largest bInterval of any periodic endpoint
4405  *  - the U2 Exit Latency of the device
4406  */
4407static unsigned long long xhci_calculate_intel_u2_timeout(
4408 struct usb_device *udev,
4409 struct usb_endpoint_descriptor *desc)
4410{
4411 unsigned long long timeout_ns;
4412 unsigned long long u2_del_ns;
4413
4414 timeout_ns = 10 * 1000 * 1000;
4415
4416 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4417 (xhci_service_interval_to_ns(desc) > timeout_ns))
4418 timeout_ns = xhci_service_interval_to_ns(desc);
4419
4420 u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
4421 if (u2_del_ns > timeout_ns)
4422 timeout_ns = u2_del_ns;
4423
4424 return timeout_ns;
4425}
4426
4427 /* Returns the hub-encoded U2 timeout value. */
4428static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
4429 struct usb_device *udev,
4430 struct usb_endpoint_descriptor *desc)
4431{
4432 unsigned long long timeout_ns;
4433
4434 if (xhci->quirks & XHCI_INTEL_HOST)
4435 timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
4436 else
4437 timeout_ns = udev->u2_params.sel;
4438
4439 /* The U2 timeout is encoded in 256us intervals */
4440 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
4441 /* If the necessary timeout value is bigger than what we can set in
4442  * the USB 3.0 hub, we have to disable hub-initiated U2.
4443  */
4444 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
4445 return timeout_ns;
4446 dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
4447 "due to long timeout %llu ms\n", timeout_ns);
4448 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
4449}
4450
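/* Dispatch to the U1 or U2 timeout calculation for this endpoint; any
 * other link state disables hub-initiated LPM.
 */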
4451static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4452 struct usb_device *udev,
4453 struct usb_endpoint_descriptor *desc,
4454 enum usb3_link_state state,
4455 u16 *timeout)
4456{
4457 if (state == USB3_LPM_U1)
4458 return xhci_calculate_u1_timeout(xhci, udev, desc);
4459 else if (state == USB3_LPM_U2)
4460 return xhci_calculate_u2_timeout(xhci, udev, desc);
4461
4462 return USB3_LPM_DISABLED;
4463}
4464
4465static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4466 struct usb_device *udev,
4467 struct usb_endpoint_descriptor *desc,
4468 enum usb3_link_state state,
4469 u16 *timeout)
4470{
4471 u16 alt_timeout;
4472
4473 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4474 desc, state, timeout);
4475
4476 /* If we found we can't enable hub-initiated LPM, or
4477  * the U1 or U2 exit latency was too high to allow
4478  * device-initiated LPM as well, just stop searching.
4479  */
4480 if (alt_timeout == USB3_LPM_DISABLED ||
4481 alt_timeout == USB3_LPM_DEVICE_INITIATED) {
4482 *timeout = alt_timeout;
4483 return -E2BIG;
4484 }
4485 if (alt_timeout > *timeout)
4486 *timeout = alt_timeout;
4487 return 0;
4488}
4489
4490static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
4491 struct usb_device *udev,
4492 struct usb_host_interface *alt,
4493 enum usb3_link_state state,
4494 u16 *timeout)
4495{
4496 int j;
4497
4498 for (j = 0; j < alt->desc.bNumEndpoints; j++) {
4499 if (xhci_update_timeout_for_endpoint(xhci, udev,
4500 &alt->endpoint[j].desc, state, timeout))
4501 return -E2BIG;
4502 continue;
4503 }
4504 return 0;
4505}
4506
4507static int xhci_check_intel_tier_policy(struct usb_device *udev,
4508 enum usb3_link_state state)
4509{
4510 struct usb_device *parent;
4511 unsigned int num_hubs;
4512
4513 if (state == USB3_LPM_U2)
4514 return 0;
4515
4516 /* Don't enable U1 if the device is on a 2nd tier hub or lower. */
4517 for (parent = udev->parent, num_hubs = 0; parent->parent;
4518 parent = parent->parent)
4519 num_hubs++;
4520
4521 if (num_hubs < 2)
4522 return 0;
4523
4524 dev_dbg(&udev->dev, "Disabling U1 link state for device"
4525 " below second-tier hub.\n");
4526 dev_dbg(&udev->dev, "Plug device into first-tier hub "
4527 "to decrease power consumption.\n");
4528 return -E2BIG;
4529}

static int xhci_check_tier_policy(struct xhci_hcd *xhci,
		struct usb_device *udev,
		enum usb3_link_state state)
{
	if (xhci->quirks & XHCI_INTEL_HOST)
		return xhci_check_intel_tier_policy(udev, state);
	else
		return 0;
}

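/* Returns the U1 or U2 timeout that should be enabled.
 * If the tier check or timeout setting functions return with a non-zero exit
 * code, that means the timeout value has been finalized and we shouldn't look
 * at any more endpoints.
 */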
static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct usb_host_config *config;
	char *state_name;
	int i;
	u16 timeout = USB3_LPM_DISABLED;

	if (state == USB3_LPM_U1)
		state_name = "U1";
	else if (state == USB3_LPM_U2)
		state_name = "U2";
	else {
		dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
				state);
		return timeout;
	}

	if (xhci_check_tier_policy(xhci, udev, state) < 0)
		return timeout;

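	/* Gather some information about the currently installed configuration
	 * and alternate interface settings.
	 */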
	if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
			state, &timeout))
		return timeout;

	config = udev->actconfig;
	if (!config)
		return timeout;

	for (i = 0; i < config->desc.bNumInterfaces; i++) {
		struct usb_driver *driver;
		struct usb_interface *intf = config->interface[i];

		if (!intf)
			continue;

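		/* Check if any currently bound drivers want hub-initiated LPM
		 * disabled.
		 */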
		if (intf->dev.driver) {
			driver = to_usb_driver(intf->dev.driver);
			if (driver && driver->disable_hub_initiated_lpm) {
				dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
						state_name, driver->name);
				return xhci_get_timeout_no_hub_lpm(udev, state);
			}
		}

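		/* An interface should always have a current altsetting, but
		 * be safe and skip it if it doesn't.
		 */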
		if (!intf->cur_altsetting)
			continue;

		if (xhci_update_timeout_for_interface(xhci, udev,
				intf->cur_altsetting,
				state, &timeout))
			return timeout;
	}
	return timeout;
}

static int calculate_max_exit_latency(struct usb_device *udev,
		enum usb3_link_state state_changed,
		u16 hub_encoded_timeout)
{
	unsigned long long u1_mel_us = 0;
	unsigned long long u2_mel_us = 0;
	unsigned long long mel_us = 0;
	bool disabling_u1;
	bool disabling_u2;
	bool enabling_u1;
	bool enabling_u2;

	disabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);
	disabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);

	enabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);
	enabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);

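	/* If U1 was already enabled and we're not disabling it,
	 * or we're going to enable U1, account for the U1 max exit latency.
	 * Likewise for U2.
	 */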
	if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
			enabling_u1)
		u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
	if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
			enabling_u2)
		u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);

	if (u1_mel_us > u2_mel_us)
		mel_us = u1_mel_us;
	else
		mel_us = u2_mel_us;

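	/* xHCI host controller max exit latency field is only 16 bits wide. */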
	if (mel_us > MAX_EXIT) {
		dev_warn(&udev->dev, "Link PM max exit latency of %lluus is too big.\n",
				mel_us);
		return -E2BIG;
	}
	return mel_us;
}

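/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */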
int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci;
	u16 hub_encoded_timeout;
	int mel;
	int ret;

	xhci = hcd_to_xhci(hcd);

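	/* The LPM timeout values are pretty host-controller specific, so don't
	 * enable hub-initiated timeouts unless the vendor has provided
	 * information about their timeout algorithm.
	 */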
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return USB3_LPM_DISABLED;

	hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
	if (mel < 0) {
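		/* Max Exit Latency is too big, disable LPM. */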
		hub_encoded_timeout = USB3_LPM_DISABLED;
		mel = 0;
	}

	ret = xhci_change_max_exit_latency(xhci, udev, mel);
	if (ret)
		return ret;
	return hub_encoded_timeout;
}

int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci;
	u16 mel;

	xhci = hcd_to_xhci(hcd);
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return 0;

	mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
	return xhci_change_max_exit_latency(xhci, udev, mel);
}
#else

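/* Link power management support is compiled out; these stubs keep the
 * hc_driver method table valid.
 */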
int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
		struct usb_device *udev, int enable)
{
	return 0;
}

int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return 0;
}

int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	return USB3_LPM_DISABLED;
}

int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	return 0;
}
#endif

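/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */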
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

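	/* Ignore root hubs */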
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}
	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}
	ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		xhci_free_command(xhci, config_cmd);
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (hdev->speed == USB_SPEED_HIGH &&
			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENOMEM;
	}

	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);

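	/*
	 * Refer to xHCI section 6.2.2: MTT should be 0 for full speed hubs,
	 * but it may already be set to 1 when the virtual device was set up,
	 * so clear it for full speed hubs anyway.
	 */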
	if (tt->multi)
		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	else if (hdev->speed == USB_SPEED_FULL)
		slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);

	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));

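		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 *
		 * xHCI 1.0: this field shall be 0 if the device is not a
		 * high-speed hub.
		 */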
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
			slot_ctx->tt_info |=
					cpu_to_le32(TT_THINK_TIME(think_time));
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");
	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);

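	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */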
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);

	xhci_free_command(xhci, config_cmd);
	return ret;
}

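/* The microframe index register counts 125us microframes; shift right by
 * three to convert it to 1ms frame numbers.
 */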
int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	return readl(&xhci->run_regs->microframe_index) >> 3;
}

int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
	struct xhci_hcd *xhci;
	struct device *dev = hcd->self.controller;
	int retval;

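	/* Accept arbitrarily long scatter-gather lists */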
	hcd->self.sg_tablesize = ~0;

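	/* support to build packet from discontinuous buffers */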
	hcd->self.no_sg_constraint = 1;

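	/* xHCI controllers don't stop the ep queue on short packets. */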
	hcd->self.no_stop_on_short = 1;

	xhci = hcd_to_xhci(hcd);

	if (usb_hcd_is_primary_hcd(hcd)) {
		xhci->main_hcd = hcd;

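		/* Mark the first roothub as being USB 2.0.
		 * The xHCI driver will register the USB 3.0 roothub.
		 */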
		hcd->speed = HCD_USB2;
		hcd->self.root_hub->speed = USB_SPEED_HIGH;

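		/*
		 * USB 2.0 roothub under xHCI has an integrated TT,
		 * (rate matching hub) as opposed to having an OHCI/UHCI
		 * companion controller.
		 */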
		hcd->has_tt = 1;
	} else {
		if (xhci->sbrn == 0x31) {
			xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n");
			hcd->speed = HCD_USB31;
			hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
		}

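		/* xHCI private pointer was set in xhci_pci_probe for the
		 * second registered roothub.
		 */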
		return 0;
	}

	mutex_init(&xhci->mutex);
	xhci->cap_regs = hcd->regs;
	xhci->op_regs = hcd->regs +
			HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
	xhci->run_regs = hcd->regs +
			(readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);

	xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
	xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase);
	xhci->hci_version = HC_VERSION(xhci->hcc_params);
	xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
	if (xhci->hci_version > 0x100)
		xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
	xhci_print_registers(xhci);

	xhci->quirks |= quirks;

	get_quirks(dev, xhci);

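	/* Controllers that follow the xHCI 1.0 spec can generate a spurious
	 * success event after a short transfer; ignore it.
	 */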
	if (xhci->hci_version > 0x96)
		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;

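	/* Make sure the HC is halted. */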
	retval = xhci_halt(xhci);
	if (retval)
		return retval;

	xhci_dbg(xhci, "Resetting HCD\n");
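	/* Reset the internal HC memory state and registers. */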
	retval = xhci_reset(xhci);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Reset complete\n");

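	/*
	 * Some hosts set the AC64 bit (bit 0) of HCCPARAMS even though they
	 * don't actually support 64-bit addressing. Clear the cached bit so
	 * the DMA mask setup below falls back to 32 bits.
	 */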
	if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
		xhci->hcc_params &= ~BIT(0);

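	/* Set dma_mask and coherent_dma_mask to 64-bits,
	 * if xHC supports 64-bit addressing */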
	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
			!dma_set_mask(dev, DMA_BIT_MASK(64))) {
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
	} else {
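		/*
		 * This is to avoid error in cases where a 32-bit USB
		 * controller is used on a 64-bit capable system.
		 */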
		retval = dma_set_mask(dev, DMA_BIT_MASK(32));
		if (retval)
			return retval;
		xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	}

	xhci_dbg(xhci, "Calling HCD init\n");
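	/* Initialize HCD and host controller data structures. */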
	retval = xhci_init(hcd);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Called HCD init\n");

	xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%08x\n",
			xhci->hcc_params, xhci->hci_version, xhci->quirks);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_gen_setup);

static const struct hc_driver xhci_hc_driver = {
	.description =		"xhci-hcd",
	.product_desc =		"xHCI Host Controller",
	.hcd_priv_size =	sizeof(struct xhci_hcd),

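	/*
	 * generic hardware linkage
	 */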
	.irq =			xhci_irq,
	.flags =		HCD_MEMORY | HCD_USB3 | HCD_SHARED,

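	/*
	 * basic lifecycle operations
	 */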
	.reset =		NULL, /* set in xhci_init_driver() */
	.start =		xhci_run,
	.stop =			xhci_stop,
	.shutdown =		xhci_shutdown,

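	/*
	 * managing i/o requests and associated device resources
	 */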
	.urb_enqueue =		xhci_urb_enqueue,
	.urb_dequeue =		xhci_urb_dequeue,
	.alloc_dev =		xhci_alloc_dev,
	.free_dev =		xhci_free_dev,
	.alloc_streams =	xhci_alloc_streams,
	.free_streams =		xhci_free_streams,
	.add_endpoint =		xhci_add_endpoint,
	.drop_endpoint =	xhci_drop_endpoint,
	.endpoint_reset =	xhci_endpoint_reset,
	.check_bandwidth =	xhci_check_bandwidth,
	.reset_bandwidth =	xhci_reset_bandwidth,
	.address_device =	xhci_address_device,
	.enable_device =	xhci_enable_device,
	.update_hub_device =	xhci_update_hub_device,
	.reset_device =		xhci_discover_or_reset_device,

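	/*
	 * scheduling support
	 */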
	.get_frame_number =	xhci_get_frame,

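	/*
	 * root hub support
	 */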
	.hub_control =		xhci_hub_control,
	.hub_status_data =	xhci_hub_status_data,
	.bus_suspend =		xhci_bus_suspend,
	.bus_resume =		xhci_bus_resume,

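	/*
	 * call back when device connected and addressed
	 */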
	.update_device =	xhci_update_device,
	.set_usb2_hw_lpm =	xhci_set_usb2_hardware_lpm,
	.enable_usb3_lpm_timeout =	xhci_enable_usb3_lpm_timeout,
	.disable_usb3_lpm_timeout =	xhci_disable_usb3_lpm_timeout,
	.find_raw_port_number =	xhci_find_raw_port_number,
};

void xhci_init_driver(struct hc_driver *drv,
		const struct xhci_driver_overrides *over)
{
	BUG_ON(!over);

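	/* Copy the generic table to drv then apply the overrides */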
	*drv = xhci_hc_driver;

	if (over) {
		drv->hcd_priv_size += over->extra_priv_size;
		if (over->reset)
			drv->reset = over->reset;
		if (over->start)
			drv->start = over->start;
	}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
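	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */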
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
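	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */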
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
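	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */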
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);

	if (usb_disabled())
		return -ENODEV;

	return 0;
}

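/*
 * If an init function is provided, an exit function must also be provided
 * to allow module unload.
 */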
static void __exit xhci_hcd_fini(void) { }

module_init(xhci_hcd_init);
module_exit(xhci_hcd_fini);