1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <linux/pci.h>
24#include <linux/irq.h>
25#include <linux/log2.h>
26#include <linux/module.h>
27#include <linux/moduleparam.h>
28#include <linux/slab.h>
29#include <linux/dmi.h>
30#include <linux/dma-mapping.h>
31
32#include "xhci.h"
33#include "xhci-trace.h"
34#include "xhci-mtk.h"
35
36#define DRIVER_AUTHOR "Sarah Sharp"
37#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
38
39#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
40
41
42static int link_quirk;
43module_param(link_quirk, int, S_IRUGO | S_IWUSR);
44MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
45
46static unsigned int quirks;
47module_param(quirks, uint, S_IRUGO);
48MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
65{
66 u32 result;
67
68 do {
69 result = readl(ptr);
70 if (result == ~(u32)0)
71 return -ENODEV;
72 result &= mask;
73 if (result == done)
74 return 0;
75 udelay(1);
76 usec--;
77 } while (usec > 0);
78 return -ETIMEDOUT;
79}
80
81
82
83
84void xhci_quiesce(struct xhci_hcd *xhci)
85{
86 u32 halted;
87 u32 cmd;
88 u32 mask;
89
90 mask = ~(XHCI_IRQS);
91 halted = readl(&xhci->op_regs->status) & STS_HALT;
92 if (!halted)
93 mask &= ~CMD_RUN;
94
95 cmd = readl(&xhci->op_regs->command);
96 cmd &= mask;
97 writel(cmd, &xhci->op_regs->command);
98}
99
100
101
102
103
104
105
106
107
108int xhci_halt(struct xhci_hcd *xhci)
109{
110 int ret;
111 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
112 xhci_quiesce(xhci);
113
114 ret = xhci_handshake(&xhci->op_regs->status,
115 STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
116 if (ret) {
117 xhci_warn(xhci, "Host halt failed, %d\n", ret);
118 return ret;
119 }
120 xhci->xhc_state |= XHCI_STATE_HALTED;
121 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
122 return ret;
123}
124
125
126
127
128int xhci_start(struct xhci_hcd *xhci)
129{
130 u32 temp;
131 int ret;
132
133 temp = readl(&xhci->op_regs->command);
134 temp |= (CMD_RUN);
135 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
136 temp);
137 writel(temp, &xhci->op_regs->command);
138
139
140
141
142
143 ret = xhci_handshake(&xhci->op_regs->status,
144 STS_HALT, 0, XHCI_MAX_HALT_USEC);
145 if (ret == -ETIMEDOUT)
146 xhci_err(xhci, "Host took too long to start, "
147 "waited %u microseconds.\n",
148 XHCI_MAX_HALT_USEC);
149 if (!ret)
150
151 xhci->xhc_state = 0;
152
153 return ret;
154}
155
156
157
158
159
160
161
162
163int xhci_reset(struct xhci_hcd *xhci)
164{
165 u32 command;
166 u32 state;
167 int ret, i;
168
169 state = readl(&xhci->op_regs->status);
170
171 if (state == ~(u32)0) {
172 xhci_warn(xhci, "Host not accessible, reset failed.\n");
173 return -ENODEV;
174 }
175
176 if ((state & STS_HALT) == 0) {
177 xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
178 return 0;
179 }
180
181 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
182 command = readl(&xhci->op_regs->command);
183#ifdef CONFIG_USB_DWC3_OTG
184 command |= CMD_LRESET;
185#else
186 command |= CMD_RESET;
187#endif
188 writel(command, &xhci->op_regs->command);
189
190
191
192
193
194
195
196
197 if (xhci->quirks & XHCI_INTEL_HOST)
198 udelay(1000);
199
200 ret = xhci_handshake(&xhci->op_regs->command,
201#ifdef CONFIG_USB_DWC3_OTG
202 CMD_LRESET,
203#else
204 CMD_RESET,
205#endif
206 0, 10 * 1000 * 1000);
207 if (ret)
208 return ret;
209
210 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
211 usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));
212
213 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
214 "Wait for controller to be ready for doorbell rings");
215
216
217
218
219 ret = xhci_handshake(&xhci->op_regs->status,
220 STS_CNR, 0, 10 * 1000 * 1000);
221
222 for (i = 0; i < 2; i++) {
223 xhci->bus_state[i].port_c_suspend = 0;
224 xhci->bus_state[i].suspended_ports = 0;
225 xhci->bus_state[i].resuming_ports = 0;
226 }
227
228 return ret;
229}
230
231
232#ifdef CONFIG_USB_PCI
233
234
235
236static int xhci_setup_msi(struct xhci_hcd *xhci)
237{
238 int ret;
239
240
241
242 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
243
244 ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
245 if (ret < 0) {
246 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
247 "failed to allocate MSI entry");
248 return ret;
249 }
250
251 ret = request_irq(pdev->irq, xhci_msi_irq,
252 0, "xhci_hcd", xhci_to_hcd(xhci));
253 if (ret) {
254 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
255 "disable MSI interrupt");
256 pci_free_irq_vectors(pdev);
257 }
258
259 return ret;
260}
261
262
263
264
265static int xhci_setup_msix(struct xhci_hcd *xhci)
266{
267 int i, ret = 0;
268 struct usb_hcd *hcd = xhci_to_hcd(xhci);
269 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
270
271
272
273
274
275
276
277
278 xhci->msix_count = min(num_online_cpus() + 1,
279 HCS_MAX_INTRS(xhci->hcs_params1));
280
281 ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count,
282 PCI_IRQ_MSIX);
283 if (ret < 0) {
284 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
285 "Failed to enable MSI-X");
286 return ret;
287 }
288
289 for (i = 0; i < xhci->msix_count; i++) {
290 ret = request_irq(pci_irq_vector(pdev, i), xhci_msi_irq, 0,
291 "xhci_hcd", xhci_to_hcd(xhci));
292 if (ret)
293 goto disable_msix;
294 }
295
296 hcd->msix_enabled = 1;
297 return ret;
298
299disable_msix:
300 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
301 while (--i >= 0)
302 free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
303 pci_free_irq_vectors(pdev);
304 return ret;
305}
306
307
308static void xhci_cleanup_msix(struct xhci_hcd *xhci)
309{
310 struct usb_hcd *hcd = xhci_to_hcd(xhci);
311 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
312
313 if (xhci->quirks & XHCI_PLAT)
314 return;
315
316
317 if (hcd->irq > 0)
318 return;
319
320 if (hcd->msix_enabled) {
321 int i;
322
323 for (i = 0; i < xhci->msix_count; i++)
324 free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
325 } else {
326 free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci));
327 }
328
329 pci_free_irq_vectors(pdev);
330 hcd->msix_enabled = 0;
331}
332
333static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
334{
335 struct usb_hcd *hcd = xhci_to_hcd(xhci);
336
337 if (hcd->msix_enabled) {
338 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
339 int i;
340
341 for (i = 0; i < xhci->msix_count; i++)
342 synchronize_irq(pci_irq_vector(pdev, i));
343 }
344}
345
346static int xhci_try_enable_msi(struct usb_hcd *hcd)
347{
348 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
349 struct pci_dev *pdev;
350 int ret;
351
352
353 if (xhci->quirks & XHCI_PLAT)
354 return 0;
355
356 pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
357
358
359
360
361 if (xhci->quirks & XHCI_BROKEN_MSI)
362 goto legacy_irq;
363
364
365 if (hcd->irq)
366 free_irq(hcd->irq, hcd);
367 hcd->irq = 0;
368
369 ret = xhci_setup_msix(xhci);
370 if (ret)
371
372 ret = xhci_setup_msi(xhci);
373
374 if (!ret) {
375 hcd->msi_enabled = 1;
376 return 0;
377 }
378
379 if (!pdev->irq) {
380 xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
381 return -EINVAL;
382 }
383
384 legacy_irq:
385 if (!strlen(hcd->irq_descr))
386 snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
387 hcd->driver->description, hcd->self.busnum);
388
389
390 ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
391 hcd->irq_descr, hcd);
392 if (ret) {
393 xhci_err(xhci, "request interrupt %d failed\n",
394 pdev->irq);
395 return ret;
396 }
397 hcd->irq = pdev->irq;
398 return 0;
399}
400
401#else
402
403static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
404{
405 return 0;
406}
407
408static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
409{
410}
411
412static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
413{
414}
415
416#endif
417
418static void compliance_mode_recovery(unsigned long arg)
419{
420 struct xhci_hcd *xhci;
421 struct usb_hcd *hcd;
422 u32 temp;
423 int i;
424
425 xhci = (struct xhci_hcd *)arg;
426
427 for (i = 0; i < xhci->num_usb3_ports; i++) {
428 temp = readl(xhci->usb3_ports[i]);
429 if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
430
431
432
433
434 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
435 "Compliance mode detected->port %d",
436 i + 1);
437 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
438 "Attempting compliance mode recovery");
439 hcd = xhci->shared_hcd;
440
441 if (hcd->state == HC_STATE_SUSPENDED)
442 usb_hcd_resume_root_hub(hcd);
443
444 usb_hcd_poll_rh_status(hcd);
445 }
446 }
447
448 if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
449 mod_timer(&xhci->comp_mode_recovery_timer,
450 jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
451}
452
453
454
455
456
457
458
459
460
461
462
463static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
464{
465 xhci->port_status_u0 = 0;
466 setup_timer(&xhci->comp_mode_recovery_timer,
467 compliance_mode_recovery, (unsigned long)xhci);
468 xhci->comp_mode_recovery_timer.expires = jiffies +
469 msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
470
471 add_timer(&xhci->comp_mode_recovery_timer);
472 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
473 "Compliance mode recovery timer initialized");
474}
475
476
477
478
479
480
481
482static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
483{
484 const char *dmi_product_name, *dmi_sys_vendor;
485
486 dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
487 dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
488 if (!dmi_product_name || !dmi_sys_vendor)
489 return false;
490
491 if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
492 return false;
493
494 if (strstr(dmi_product_name, "Z420") ||
495 strstr(dmi_product_name, "Z620") ||
496 strstr(dmi_product_name, "Z820") ||
497 strstr(dmi_product_name, "Z1 Workstation"))
498 return true;
499
500 return false;
501}
502
503static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
504{
505 return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
506}
507
508
509
510
511
512
513
514
515
516static int xhci_init(struct usb_hcd *hcd)
517{
518 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
519 int retval = 0;
520
521 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
522 spin_lock_init(&xhci->lock);
523 if (xhci->hci_version == 0x95 && link_quirk) {
524 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
525 "QUIRK: Not clearing Link TRB chain bits.");
526 xhci->quirks |= XHCI_LINK_TRB_QUIRK;
527 } else {
528 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
529 "xHCI doesn't need link TRB QUIRK");
530 }
531 retval = xhci_mem_init(xhci, GFP_KERNEL);
532 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
533
534
535 if (xhci_compliance_mode_recovery_timer_quirk_check()) {
536 xhci->quirks |= XHCI_COMP_MODE_QUIRK;
537 compliance_mode_recovery_timer_init(xhci);
538 }
539
540 return retval;
541}
542
543
544
545
546static int xhci_run_finished(struct xhci_hcd *xhci)
547{
548 if (xhci_start(xhci)) {
549 xhci_halt(xhci);
550 return -ENODEV;
551 }
552 xhci->shared_hcd->state = HC_STATE_RUNNING;
553 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
554
555 if (xhci->quirks & XHCI_NEC_HOST)
556 xhci_ring_cmd_db(xhci);
557
558 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
559 "Finished xhci_run for USB3 roothub");
560 return 0;
561}
562
563
564
565
566
567
568
569
570
571
572
573
574
575int xhci_run(struct usb_hcd *hcd)
576{
577 u32 temp;
578 u64 temp_64;
579 int ret;
580 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
581
582
583
584
585
586 hcd->uses_new_polling = 1;
587 if (!usb_hcd_is_primary_hcd(hcd))
588 return xhci_run_finished(xhci);
589
590 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");
591
592 ret = xhci_try_enable_msi(hcd);
593 if (ret)
594 return ret;
595
596 xhci_dbg_cmd_ptrs(xhci);
597
598 xhci_dbg(xhci, "ERST memory map follows:\n");
599 xhci_dbg_erst(xhci, &xhci->erst);
600 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
601 temp_64 &= ~ERST_PTR_MASK;
602 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
603 "ERST deq = 64'h%0lx", (long unsigned int) temp_64);
604
605 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
606 "// Set the interrupt modulation register");
607 temp = readl(&xhci->ir_set->irq_control);
608 temp &= ~ER_IRQ_INTERVAL_MASK;
609
610
611
612
613 temp |= (u32) ((xhci->quirks & XHCI_MTK_HOST) ? 20 : 160);
614 writel(temp, &xhci->ir_set->irq_control);
615
616
617 temp = readl(&xhci->op_regs->command);
618 temp |= (CMD_EIE);
619 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
620 "// Enable interrupts, cmd = 0x%x.", temp);
621 writel(temp, &xhci->op_regs->command);
622
623 temp = readl(&xhci->ir_set->irq_pending);
624 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
625 "// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
626 xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
627 writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
628 xhci_print_ir_set(xhci, 0);
629
630 if (xhci->quirks & XHCI_NEC_HOST) {
631 struct xhci_command *command;
632
633 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
634 if (!command)
635 return -ENOMEM;
636
637 ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
638 TRB_TYPE(TRB_NEC_GET_FW));
639 if (ret)
640 xhci_free_command(xhci, command);
641 }
642 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
643 "Finished xhci_run for USB2 roothub");
644 return 0;
645}
646EXPORT_SYMBOL_GPL(xhci_run);
647
648
649
650
651
652
653
654
655
656
657static void xhci_stop(struct usb_hcd *hcd)
658{
659 u32 temp;
660 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
661
662 mutex_lock(&xhci->mutex);
663
664
665 if (!usb_hcd_is_primary_hcd(hcd)) {
666
667 if (!hcd->self.otg_port) {
668
669 xhci->shared_hcd = NULL;
670 }
671
672 mutex_unlock(&xhci->mutex);
673 return;
674 }
675
676 spin_lock_irq(&xhci->lock);
677 xhci->xhc_state |= XHCI_STATE_HALTED;
678 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
679 xhci_halt(xhci);
680 xhci_reset(xhci);
681 spin_unlock_irq(&xhci->lock);
682
683 xhci_cleanup_msix(xhci);
684
685
686 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
687 (!(xhci_all_ports_seen_u0(xhci)))) {
688 del_timer_sync(&xhci->comp_mode_recovery_timer);
689 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
690 "%s: compliance mode recovery timer deleted",
691 __func__);
692 }
693
694 if (xhci->quirks & XHCI_AMD_PLL_FIX)
695 usb_amd_dev_put();
696
697 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
698 "// Disabling event ring interrupts");
699 temp = readl(&xhci->op_regs->status);
700 writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
701 temp = readl(&xhci->ir_set->irq_pending);
702 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
703 xhci_print_ir_set(xhci, 0);
704
705 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
706 xhci_mem_cleanup(xhci);
707 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
708 "xhci_stop completed - status = %x",
709 readl(&xhci->op_regs->status));
710 mutex_unlock(&xhci->mutex);
711}
712
713
714
715
716
717
718
719
720
721
722static void xhci_shutdown(struct usb_hcd *hcd)
723{
724 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
725
726 if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
727 usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
728
729 spin_lock_irq(&xhci->lock);
730 xhci_halt(xhci);
731
732 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
733 xhci_reset(xhci);
734 spin_unlock_irq(&xhci->lock);
735
736 xhci_cleanup_msix(xhci);
737
738 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
739 "xhci_shutdown completed - status = %x",
740 readl(&xhci->op_regs->status));
741
742
743 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
744 pci_set_power_state(to_pci_dev(hcd->self.sysdev), PCI_D3hot);
745}
746
747#ifdef CONFIG_PM
748static void xhci_save_registers(struct xhci_hcd *xhci)
749{
750 xhci->s3.command = readl(&xhci->op_regs->command);
751 xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
752 xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
753 xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
754 xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
755 xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
756 xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
757 xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
758 xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
759}
760
761static void xhci_restore_registers(struct xhci_hcd *xhci)
762{
763 writel(xhci->s3.command, &xhci->op_regs->command);
764 writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
765 xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
766 writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
767 writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
768 xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
769 xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
770 writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
771 writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
772}
773
774static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
775{
776 u64 val_64;
777
778
779 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
780 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
781 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
782 xhci->cmd_ring->dequeue) &
783 (u64) ~CMD_RING_RSVD_BITS) |
784 xhci->cmd_ring->cycle_state;
785 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
786 "// Setting command ring address to 0x%llx",
787 (long unsigned long) val_64);
788 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
789}
790
791
792
793
794
795
796
797
798
799
800static void xhci_clear_command_ring(struct xhci_hcd *xhci)
801{
802 struct xhci_ring *ring;
803 struct xhci_segment *seg;
804
805 ring = xhci->cmd_ring;
806 seg = ring->deq_seg;
807 do {
808 memset(seg->trbs, 0,
809 sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
810 seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
811 cpu_to_le32(~TRB_CYCLE);
812 seg = seg->next;
813 } while (seg != ring->deq_seg);
814
815
816 ring->deq_seg = ring->first_seg;
817 ring->dequeue = ring->first_seg->trbs;
818 ring->enq_seg = ring->deq_seg;
819 ring->enqueue = ring->dequeue;
820
821 ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
822
823
824
825
826 ring->cycle_state = 1;
827
828
829
830
831
832
833
834
835 xhci_set_cmd_ring_deq(xhci);
836}
837
838static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
839{
840 int port_index;
841 __le32 __iomem **port_array;
842 unsigned long flags;
843 u32 t1, t2;
844
845 spin_lock_irqsave(&xhci->lock, flags);
846
847
848 port_index = xhci->num_usb3_ports;
849 port_array = xhci->usb3_ports;
850 while (port_index--) {
851 t1 = readl(port_array[port_index]);
852 t1 = xhci_port_state_to_neutral(t1);
853 t2 = t1 & ~PORT_WAKE_BITS;
854 if (t1 != t2)
855 writel(t2, port_array[port_index]);
856 }
857
858
859 port_index = xhci->num_usb2_ports;
860 port_array = xhci->usb2_ports;
861 while (port_index--) {
862 t1 = readl(port_array[port_index]);
863 t1 = xhci_port_state_to_neutral(t1);
864 t2 = t1 & ~PORT_WAKE_BITS;
865 if (t1 != t2)
866 writel(t2, port_array[port_index]);
867 }
868
869 spin_unlock_irqrestore(&xhci->lock, flags);
870}
871
872
873
874
875
876
877
878int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
879{
880 int rc = 0;
881 unsigned int delay = XHCI_MAX_HALT_USEC;
882 struct usb_hcd *hcd = xhci_to_hcd(xhci);
883 u32 command;
884
885 if (!hcd->state)
886 return 0;
887
888 if (hcd->state != HC_STATE_SUSPENDED ||
889 xhci->shared_hcd->state != HC_STATE_SUSPENDED)
890 return -EINVAL;
891
892
893 if (!do_wakeup)
894 xhci_disable_port_wake_on_bits(xhci);
895
896
897 xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
898 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
899 del_timer_sync(&hcd->rh_timer);
900 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
901 del_timer_sync(&xhci->shared_hcd->rh_timer);
902
903 spin_lock_irq(&xhci->lock);
904 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
905 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
906
907
908
909
910 command = readl(&xhci->op_regs->command);
911 command &= ~CMD_RUN;
912 writel(command, &xhci->op_regs->command);
913
914
915 delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
916
917 if (xhci_handshake(&xhci->op_regs->status,
918 STS_HALT, STS_HALT, delay)) {
919 xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
920 spin_unlock_irq(&xhci->lock);
921 return -ETIMEDOUT;
922 }
923 xhci_clear_command_ring(xhci);
924
925
926 xhci_save_registers(xhci);
927
928
929 command = readl(&xhci->op_regs->command);
930 command |= CMD_CSS;
931 writel(command, &xhci->op_regs->command);
932 if (xhci_handshake(&xhci->op_regs->status,
933 STS_SAVE, 0, 10 * 1000)) {
934 xhci_warn(xhci, "WARN: xHC save state timeout\n");
935 spin_unlock_irq(&xhci->lock);
936 return -ETIMEDOUT;
937 }
938 spin_unlock_irq(&xhci->lock);
939
940
941
942
943
944 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
945 (!(xhci_all_ports_seen_u0(xhci)))) {
946 del_timer_sync(&xhci->comp_mode_recovery_timer);
947 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
948 "%s: compliance mode recovery timer deleted",
949 __func__);
950 }
951
952
953
954 xhci_msix_sync_irqs(xhci);
955
956 return rc;
957}
958EXPORT_SYMBOL_GPL(xhci_suspend);
959
960
961
962
963
964
965
966int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
967{
968 u32 command, temp = 0, status;
969 struct usb_hcd *hcd = xhci_to_hcd(xhci);
970 struct usb_hcd *secondary_hcd;
971 int retval = 0;
972 bool comp_timer_running = false;
973
974 if (!hcd->state)
975 return 0;
976
977
978
979
980 if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
981 time_before(jiffies,
982 xhci->bus_state[1].next_statechange))
983 msleep(100);
984
985 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
986 set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
987
988 spin_lock_irq(&xhci->lock);
989 if (xhci->quirks & XHCI_RESET_ON_RESUME)
990 hibernated = true;
991
992 if (!hibernated) {
993
994 xhci_restore_registers(xhci);
995
996 xhci_set_cmd_ring_deq(xhci);
997
998
999 command = readl(&xhci->op_regs->command);
1000 command |= CMD_CRS;
1001 writel(command, &xhci->op_regs->command);
1002 if (xhci_handshake(&xhci->op_regs->status,
1003 STS_RESTORE, 0, 10 * 1000)) {
1004 xhci_warn(xhci, "WARN: xHC restore state timeout\n");
1005 spin_unlock_irq(&xhci->lock);
1006 return -ETIMEDOUT;
1007 }
1008 temp = readl(&xhci->op_regs->status);
1009 }
1010
1011
1012 if ((temp & STS_SRE) || hibernated) {
1013
1014 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
1015 !(xhci_all_ports_seen_u0(xhci))) {
1016 del_timer_sync(&xhci->comp_mode_recovery_timer);
1017 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1018 "Compliance Mode Recovery Timer deleted!");
1019 }
1020
1021
1022 usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
1023 usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
1024
1025 xhci_dbg(xhci, "Stop HCD\n");
1026 xhci_halt(xhci);
1027 xhci_reset(xhci);
1028 spin_unlock_irq(&xhci->lock);
1029 xhci_cleanup_msix(xhci);
1030
1031 xhci_dbg(xhci, "// Disabling event ring interrupts\n");
1032 temp = readl(&xhci->op_regs->status);
1033 writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
1034 temp = readl(&xhci->ir_set->irq_pending);
1035 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
1036 xhci_print_ir_set(xhci, 0);
1037
1038 xhci_dbg(xhci, "cleaning up memory\n");
1039 xhci_mem_cleanup(xhci);
1040 xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
1041 readl(&xhci->op_regs->status));
1042
1043
1044
1045
1046
1047 if (!usb_hcd_is_primary_hcd(hcd))
1048 secondary_hcd = hcd;
1049 else
1050 secondary_hcd = xhci->shared_hcd;
1051
1052 xhci_dbg(xhci, "Initialize the xhci_hcd\n");
1053 retval = xhci_init(hcd->primary_hcd);
1054 if (retval)
1055 return retval;
1056 comp_timer_running = true;
1057
1058 xhci_dbg(xhci, "Start the primary HCD\n");
1059 retval = xhci_run(hcd->primary_hcd);
1060 if (!retval) {
1061 xhci_dbg(xhci, "Start the secondary HCD\n");
1062 retval = xhci_run(secondary_hcd);
1063 }
1064 hcd->state = HC_STATE_SUSPENDED;
1065 xhci->shared_hcd->state = HC_STATE_SUSPENDED;
1066 goto done;
1067 }
1068
1069
1070 command = readl(&xhci->op_regs->command);
1071 command |= CMD_RUN;
1072 writel(command, &xhci->op_regs->command);
1073 xhci_handshake(&xhci->op_regs->status, STS_HALT,
1074 0, 250 * 1000);
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085 spin_unlock_irq(&xhci->lock);
1086
1087 done:
1088 if (retval == 0) {
1089
1090 status = readl(&xhci->op_regs->status);
1091 if (status & STS_EINT) {
1092 usb_hcd_resume_root_hub(xhci->shared_hcd);
1093 usb_hcd_resume_root_hub(hcd);
1094 }
1095 }
1096
1097
1098
1099
1100
1101
1102
1103 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
1104 compliance_mode_recovery_timer_init(xhci);
1105
1106 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
1107 usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));
1108
1109
1110 xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1111 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
1112 usb_hcd_poll_rh_status(xhci->shared_hcd);
1113 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1114 usb_hcd_poll_rh_status(hcd);
1115
1116 return retval;
1117}
1118EXPORT_SYMBOL_GPL(xhci_resume);
1119#endif
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
1134{
1135 unsigned int index;
1136 if (usb_endpoint_xfer_control(desc))
1137 index = (unsigned int) (usb_endpoint_num(desc)*2);
1138 else
1139 index = (unsigned int) (usb_endpoint_num(desc)*2) +
1140 (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
1141 return index;
1142}
1143
1144
1145
1146
1147unsigned int xhci_get_endpoint_address(unsigned int ep_index)
1148{
1149 unsigned int number = DIV_ROUND_UP(ep_index, 2);
1150 unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
1151 return direction | number;
1152}
1153
1154
1155
1156
1157
1158static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
1159{
1160 return 1 << (xhci_get_endpoint_index(desc) + 1);
1161}
1162
1163
1164
1165
1166
1167static unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
1168{
1169 return 1 << (ep_index + 1);
1170}
1171
1172
1173
1174
1175
1176
1177
1178unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
1179{
1180 return fls(added_ctxs) - 1;
1181}
1182
1183
1184
1185
1186static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
1187 struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
1188 const char *func) {
1189 struct xhci_hcd *xhci;
1190 struct xhci_virt_device *virt_dev;
1191
1192 if (!hcd || (check_ep && !ep) || !udev) {
1193 pr_debug("xHCI %s called with invalid args\n", func);
1194 return -EINVAL;
1195 }
1196 if (!udev->parent) {
1197 pr_debug("xHCI %s called for root hub\n", func);
1198 return 0;
1199 }
1200
1201 xhci = hcd_to_xhci(hcd);
1202 if (check_virt_dev) {
1203 if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
1204 xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
1205 func);
1206 return -EINVAL;
1207 }
1208
1209 virt_dev = xhci->devs[udev->slot_id];
1210 if (virt_dev->udev != udev) {
1211 xhci_dbg(xhci, "xHCI %s called with udev and "
1212 "virt_dev does not match\n", func);
1213 return -EINVAL;
1214 }
1215 }
1216
1217 if (xhci->xhc_state & XHCI_STATE_HALTED)
1218 return -ENODEV;
1219
1220 return 1;
1221}
1222
1223static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1224 struct usb_device *udev, struct xhci_command *command,
1225 bool ctx_change, bool must_succeed);
1226
1227
1228
1229
1230
1231
1232
1233static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
1234 unsigned int ep_index, struct urb *urb)
1235{
1236 struct xhci_container_ctx *out_ctx;
1237 struct xhci_input_control_ctx *ctrl_ctx;
1238 struct xhci_ep_ctx *ep_ctx;
1239 struct xhci_command *command;
1240 int max_packet_size;
1241 int hw_max_packet_size;
1242 int ret = 0;
1243
1244 out_ctx = xhci->devs[slot_id]->out_ctx;
1245 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1246 hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
1247 max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
1248 if (hw_max_packet_size != max_packet_size) {
1249 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1250 "Max Packet Size for ep 0 changed.");
1251 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1252 "Max packet size in usb_device = %d",
1253 max_packet_size);
1254 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1255 "Max packet size in xHCI HW = %d",
1256 hw_max_packet_size);
1257 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1258 "Issuing evaluate context command.");
1259
1260
1261
1262
1263
1264
1265 command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
1266 if (!command)
1267 return -ENOMEM;
1268
1269 command->in_ctx = xhci->devs[slot_id]->in_ctx;
1270 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
1271 if (!ctrl_ctx) {
1272 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1273 __func__);
1274 ret = -ENOMEM;
1275 goto command_cleanup;
1276 }
1277
1278 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
1279 xhci->devs[slot_id]->out_ctx, ep_index);
1280
1281 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
1282 ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
1283 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
1284
1285 ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
1286 ctrl_ctx->drop_flags = 0;
1287
1288 ret = xhci_configure_endpoint(xhci, urb->dev, command,
1289 true, false);
1290
1291
1292
1293
1294 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
1295command_cleanup:
1296 kfree(command->completion);
1297 kfree(command);
1298 }
1299 return ret;
1300}
1301
1302
1303
1304
1305
1306static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1307{
1308 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1309 unsigned long flags;
1310 int ret = 0;
1311 unsigned int slot_id, ep_index, ep_state;
1312 struct urb_priv *urb_priv;
1313 int num_tds;
1314
1315 if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
1316 true, true, __func__) <= 0)
1317 return -EINVAL;
1318
1319 slot_id = urb->dev->slot_id;
1320 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1321
1322 if (!HCD_HW_ACCESSIBLE(hcd)) {
1323 if (!in_interrupt())
1324 xhci_dbg(xhci, "urb submitted during PCI suspend\n");
1325 return -ESHUTDOWN;
1326 }
1327
1328 if (usb_endpoint_xfer_isoc(&urb->ep->desc))
1329 num_tds = urb->number_of_packets;
1330 else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
1331 urb->transfer_buffer_length > 0 &&
1332 urb->transfer_flags & URB_ZERO_PACKET &&
1333 !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
1334 num_tds = 2;
1335 else
1336 num_tds = 1;
1337
1338 urb_priv = kzalloc(sizeof(struct urb_priv) +
1339 num_tds * sizeof(struct xhci_td), mem_flags);
1340 if (!urb_priv)
1341 return -ENOMEM;
1342
1343 urb_priv->num_tds = num_tds;
1344 urb_priv->num_tds_done = 0;
1345 urb->hcpriv = urb_priv;
1346
1347 trace_xhci_urb_enqueue(urb);
1348
1349 if (usb_endpoint_xfer_control(&urb->ep->desc)) {
1350
1351
1352
1353 if (urb->dev->speed == USB_SPEED_FULL) {
1354 ret = xhci_check_maxpacket(xhci, slot_id,
1355 ep_index, urb);
1356 if (ret < 0) {
1357 xhci_urb_free_priv(urb_priv);
1358 urb->hcpriv = NULL;
1359 return ret;
1360 }
1361 }
1362 }
1363
1364 spin_lock_irqsave(&xhci->lock, flags);
1365
1366 if (xhci->xhc_state & XHCI_STATE_DYING) {
1367 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
1368 urb->ep->desc.bEndpointAddress, urb);
1369 ret = -ESHUTDOWN;
1370 goto free_priv;
1371 }
1372
1373 switch (usb_endpoint_type(&urb->ep->desc)) {
1374
1375 case USB_ENDPOINT_XFER_CONTROL:
1376 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
1377 slot_id, ep_index);
1378 break;
1379 case USB_ENDPOINT_XFER_BULK:
1380 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
1381 if (ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
1382 xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
1383 ep_state);
1384 ret = -EINVAL;
1385 break;
1386 }
1387 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
1388 slot_id, ep_index);
1389 break;
1390
1391
1392 case USB_ENDPOINT_XFER_INT:
1393 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
1394 slot_id, ep_index);
1395 break;
1396
1397 case USB_ENDPOINT_XFER_ISOC:
1398 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
1399 slot_id, ep_index);
1400 }
1401
1402 if (ret) {
1403free_priv:
1404 xhci_urb_free_priv(urb_priv);
1405 urb->hcpriv = NULL;
1406 }
1407 spin_unlock_irqrestore(&xhci->lock, flags);
1408 return ret;
1409}
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1443{
1444 unsigned long flags;
1445 int ret, i;
1446 u32 temp;
1447 struct xhci_hcd *xhci;
1448 struct urb_priv *urb_priv;
1449 struct xhci_td *td;
1450 unsigned int ep_index;
1451 struct xhci_ring *ep_ring;
1452 struct xhci_virt_ep *ep;
1453 struct xhci_command *command;
1454 struct xhci_virt_device *vdev;
1455
1456 xhci = hcd_to_xhci(hcd);
1457 spin_lock_irqsave(&xhci->lock, flags);
1458
1459 trace_xhci_urb_dequeue(urb);
1460
1461
1462 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1463 if (ret)
1464 goto done;
1465
1466
1467 vdev = xhci->devs[urb->dev->slot_id];
1468 urb_priv = urb->hcpriv;
1469 if (!vdev || !urb_priv)
1470 goto err_giveback;
1471
1472 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1473 ep = &vdev->eps[ep_index];
1474 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1475 if (!ep || !ep_ring)
1476 goto err_giveback;
1477
1478
1479 temp = readl(&xhci->op_regs->status);
1480 if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
1481 xhci_hc_died(xhci);
1482 goto done;
1483 }
1484
1485 if (xhci->xhc_state & XHCI_STATE_HALTED) {
1486 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1487 "HC halted, freeing TD manually.");
1488 for (i = urb_priv->num_tds_done;
1489 i < urb_priv->num_tds;
1490 i++) {
1491 td = &urb_priv->td[i];
1492 if (!list_empty(&td->td_list))
1493 list_del_init(&td->td_list);
1494 if (!list_empty(&td->cancelled_td_list))
1495 list_del_init(&td->cancelled_td_list);
1496 }
1497 goto err_giveback;
1498 }
1499
1500 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1501 ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
1502 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1503 if (!ep_ring) {
1504 ret = -EINVAL;
1505 goto done;
1506 }
1507
1508
1509 if ((xhci->quirks & XHCI_STREAM_QUIRK) && (urb->stream_id > 0))
1510 del_timer(&ep_ring->stream_timer);
1511
1512 i = urb_priv->num_tds_done;
1513 if (i < urb_priv->num_tds)
1514
1515 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1516 "Cancel URB %p, dev %s, ep 0x%x, "
1517 "starting at offset 0x%llx",
1518 urb, urb->dev->devpath,
1519 urb->ep->desc.bEndpointAddress,
1520 (unsigned long long) xhci_trb_virt_to_dma(
1521 urb_priv->td[i].start_seg,
1522 urb_priv->td[i].first_trb));
1523
1524 for (; i < urb_priv->num_tds; i++) {
1525 td = &urb_priv->td[i];
1526 list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
1527 }
1528
1529
1530
1531
1532 if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
1533 command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
1534 if (!command) {
1535 ret = -ENOMEM;
1536 goto done;
1537 }
1538 ep->ep_state |= EP_STOP_CMD_PENDING;
1539 ep->stop_cmd_timer.expires = jiffies +
1540 XHCI_STOP_EP_CMD_TIMEOUT * HZ;
1541 add_timer(&ep->stop_cmd_timer);
1542 xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
1543 ep_index, 0);
1544 xhci_ring_cmd_db(xhci);
1545 }
1546done:
1547 spin_unlock_irqrestore(&xhci->lock, flags);
1548 return ret;
1549
1550err_giveback:
1551 if (urb_priv)
1552 xhci_urb_free_priv(urb_priv);
1553 usb_hcd_unlink_urb_from_ep(hcd, urb);
1554 spin_unlock_irqrestore(&xhci->lock, flags);
1555 usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1556 return ret;
1557}
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572static int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1573 struct usb_host_endpoint *ep)
1574{
1575 struct xhci_hcd *xhci;
1576 struct xhci_container_ctx *in_ctx, *out_ctx;
1577 struct xhci_input_control_ctx *ctrl_ctx;
1578 unsigned int ep_index;
1579 struct xhci_ep_ctx *ep_ctx;
1580 u32 drop_flag;
1581 u32 new_add_flags, new_drop_flags;
1582 int ret;
1583
1584 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1585 if (ret <= 0)
1586 return ret;
1587 xhci = hcd_to_xhci(hcd);
1588 if (xhci->xhc_state & XHCI_STATE_DYING)
1589 return -ENODEV;
1590
1591 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1592 drop_flag = xhci_get_endpoint_flag(&ep->desc);
1593 if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
1594 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1595 __func__, drop_flag);
1596 return 0;
1597 }
1598
1599 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1600 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1601 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
1602 if (!ctrl_ctx) {
1603 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1604 __func__);
1605 return 0;
1606 }
1607
1608 ep_index = xhci_get_endpoint_index(&ep->desc);
1609 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1610
1611
1612
1613 if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) ||
1614 le32_to_cpu(ctrl_ctx->drop_flags) &
1615 xhci_get_endpoint_flag(&ep->desc)) {
1616
1617 if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
1618 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1619 __func__, ep);
1620 return 0;
1621 }
1622
1623 ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
1624 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1625
1626 ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
1627 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1628
1629 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1630
1631 if (xhci->quirks & XHCI_MTK_HOST)
1632 xhci_mtk_drop_ep_quirk(hcd, udev, ep);
1633
1634 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1635 (unsigned int) ep->desc.bEndpointAddress,
1636 udev->slot_id,
1637 (unsigned int) new_drop_flags,
1638 (unsigned int) new_add_flags);
1639 return 0;
1640}
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1656 struct usb_host_endpoint *ep)
1657{
1658 struct xhci_hcd *xhci;
1659 struct xhci_container_ctx *in_ctx;
1660 unsigned int ep_index;
1661 struct xhci_input_control_ctx *ctrl_ctx;
1662 u32 added_ctxs;
1663 u32 new_add_flags, new_drop_flags;
1664 struct xhci_virt_device *virt_dev;
1665 int ret = 0;
1666
1667 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1668 if (ret <= 0) {
1669
1670 ep->hcpriv = NULL;
1671 return ret;
1672 }
1673 xhci = hcd_to_xhci(hcd);
1674 if (xhci->xhc_state & XHCI_STATE_DYING)
1675 return -ENODEV;
1676
1677 added_ctxs = xhci_get_endpoint_flag(&ep->desc);
1678 if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
1679
1680
1681
1682
1683 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1684 __func__, added_ctxs);
1685 return 0;
1686 }
1687
1688 virt_dev = xhci->devs[udev->slot_id];
1689 in_ctx = virt_dev->in_ctx;
1690 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
1691 if (!ctrl_ctx) {
1692 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1693 __func__);
1694 return 0;
1695 }
1696
1697 ep_index = xhci_get_endpoint_index(&ep->desc);
1698
1699
1700
1701 if (virt_dev->eps[ep_index].ring &&
1702 !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
1703 xhci_warn(xhci, "Trying to add endpoint 0x%x "
1704 "without dropping it.\n",
1705 (unsigned int) ep->desc.bEndpointAddress);
1706 return -EINVAL;
1707 }
1708
1709
1710
1711
1712 if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
1713 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1714 __func__, ep);
1715 return 0;
1716 }
1717
1718
1719
1720
1721
1722
1723 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
1724 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1725 __func__, ep->desc.bEndpointAddress);
1726 return -ENOMEM;
1727 }
1728
1729 if (xhci->quirks & XHCI_MTK_HOST) {
1730 ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
1731 if (ret < 0) {
1732 xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring);
1733 virt_dev->eps[ep_index].new_ring = NULL;
1734 return ret;
1735 }
1736 }
1737
1738 ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
1739 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1740
1741
1742
1743
1744
1745
1746
1747 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1748
1749
1750 ep->hcpriv = udev;
1751
1752 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1753 (unsigned int) ep->desc.bEndpointAddress,
1754 udev->slot_id,
1755 (unsigned int) new_drop_flags,
1756 (unsigned int) new_add_flags);
1757 return 0;
1758}
1759
1760static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
1761{
1762 struct xhci_input_control_ctx *ctrl_ctx;
1763 struct xhci_ep_ctx *ep_ctx;
1764 struct xhci_slot_ctx *slot_ctx;
1765 int i;
1766
1767 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
1768 if (!ctrl_ctx) {
1769 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1770 __func__);
1771 return;
1772 }
1773
1774
1775
1776
1777
1778
1779 ctrl_ctx->drop_flags = 0;
1780 ctrl_ctx->add_flags = 0;
1781 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1782 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1783
1784 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
1785 for (i = 1; i < 31; i++) {
1786 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
1787 ep_ctx->ep_info = 0;
1788 ep_ctx->ep_info2 = 0;
1789 ep_ctx->deq = 0;
1790 ep_ctx->tx_info = 0;
1791 }
1792}
1793
1794static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1795 struct usb_device *udev, u32 *cmd_status)
1796{
1797 int ret;
1798
1799 switch (*cmd_status) {
1800 case COMP_COMMAND_ABORTED:
1801 case COMP_COMMAND_RING_STOPPED:
1802 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
1803 ret = -ETIME;
1804 break;
1805 case COMP_RESOURCE_ERROR:
1806 dev_warn(&udev->dev,
1807 "Not enough host controller resources for new device state.\n");
1808 ret = -ENOMEM;
1809
1810 break;
1811 case COMP_BANDWIDTH_ERROR:
1812 case COMP_SECONDARY_BANDWIDTH_ERROR:
1813 dev_warn(&udev->dev,
1814 "Not enough bandwidth for new device state.\n");
1815 ret = -ENOSPC;
1816
1817 break;
1818 case COMP_TRB_ERROR:
1819
1820 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
1821 "add flag = 1, "
1822 "and endpoint is not disabled.\n");
1823 ret = -EINVAL;
1824 break;
1825 case COMP_INCOMPATIBLE_DEVICE_ERROR:
1826 dev_warn(&udev->dev,
1827 "ERROR: Incompatible device for endpoint configure command.\n");
1828 ret = -ENODEV;
1829 break;
1830 case COMP_SUCCESS:
1831 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1832 "Successful Endpoint Configure command");
1833 ret = 0;
1834 break;
1835 default:
1836 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
1837 *cmd_status);
1838 ret = -EINVAL;
1839 break;
1840 }
1841 return ret;
1842}
1843
1844static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
1845 struct usb_device *udev, u32 *cmd_status)
1846{
1847 int ret;
1848
1849 switch (*cmd_status) {
1850 case COMP_COMMAND_ABORTED:
1851 case COMP_COMMAND_RING_STOPPED:
1852 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
1853 ret = -ETIME;
1854 break;
1855 case COMP_PARAMETER_ERROR:
1856 dev_warn(&udev->dev,
1857 "WARN: xHCI driver setup invalid evaluate context command.\n");
1858 ret = -EINVAL;
1859 break;
1860 case COMP_SLOT_NOT_ENABLED_ERROR:
1861 dev_warn(&udev->dev,
1862 "WARN: slot not enabled for evaluate context command.\n");
1863 ret = -EINVAL;
1864 break;
1865 case COMP_CONTEXT_STATE_ERROR:
1866 dev_warn(&udev->dev,
1867 "WARN: invalid context state for evaluate context command.\n");
1868 ret = -EINVAL;
1869 break;
1870 case COMP_INCOMPATIBLE_DEVICE_ERROR:
1871 dev_warn(&udev->dev,
1872 "ERROR: Incompatible device for evaluate context command.\n");
1873 ret = -ENODEV;
1874 break;
1875 case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
1876
1877 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
1878 ret = -EINVAL;
1879 break;
1880 case COMP_SUCCESS:
1881 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1882 "Successful evaluate context command");
1883 ret = 0;
1884 break;
1885 default:
1886 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
1887 *cmd_status);
1888 ret = -EINVAL;
1889 break;
1890 }
1891 return ret;
1892}
1893
1894static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
1895 struct xhci_input_control_ctx *ctrl_ctx)
1896{
1897 u32 valid_add_flags;
1898 u32 valid_drop_flags;
1899
1900
1901
1902
1903
1904 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
1905 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
1906
1907
1908
1909
1910
1911 return hweight32(valid_add_flags) -
1912 hweight32(valid_add_flags & valid_drop_flags);
1913}
1914
1915static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
1916 struct xhci_input_control_ctx *ctrl_ctx)
1917{
1918 u32 valid_add_flags;
1919 u32 valid_drop_flags;
1920
1921 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
1922 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
1923
1924 return hweight32(valid_drop_flags) -
1925 hweight32(valid_add_flags & valid_drop_flags);
1926}
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
1942 struct xhci_input_control_ctx *ctrl_ctx)
1943{
1944 u32 added_eps;
1945
1946 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
1947 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
1948 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1949 "Not enough ep ctxs: "
1950 "%u active, need to add %u, limit is %u.",
1951 xhci->num_active_eps, added_eps,
1952 xhci->limit_active_eps);
1953 return -ENOMEM;
1954 }
1955 xhci->num_active_eps += added_eps;
1956 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1957 "Adding %u ep ctxs, %u now active.", added_eps,
1958 xhci->num_active_eps);
1959 return 0;
1960}
1961
1962
1963
1964
1965
1966
1967
1968static void xhci_free_host_resources(struct xhci_hcd *xhci,
1969 struct xhci_input_control_ctx *ctrl_ctx)
1970{
1971 u32 num_failed_eps;
1972
1973 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
1974 xhci->num_active_eps -= num_failed_eps;
1975 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1976 "Removing %u failed ep ctxs, %u now active.",
1977 num_failed_eps,
1978 xhci->num_active_eps);
1979}
1980
1981
1982
1983
1984
1985
1986
1987static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
1988 struct xhci_input_control_ctx *ctrl_ctx)
1989{
1990 u32 num_dropped_eps;
1991
1992 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
1993 xhci->num_active_eps -= num_dropped_eps;
1994 if (num_dropped_eps)
1995 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1996 "Removing %u dropped ep ctxs, %u now active.",
1997 num_dropped_eps,
1998 xhci->num_active_eps);
1999}
2000
2001static unsigned int xhci_get_block_size(struct usb_device *udev)
2002{
2003 switch (udev->speed) {
2004 case USB_SPEED_LOW:
2005 case USB_SPEED_FULL:
2006 return FS_BLOCK;
2007 case USB_SPEED_HIGH:
2008 return HS_BLOCK;
2009 case USB_SPEED_SUPER:
2010 case USB_SPEED_SUPER_PLUS:
2011 return SS_BLOCK;
2012 case USB_SPEED_UNKNOWN:
2013 case USB_SPEED_WIRELESS:
2014 default:
2015
2016 return 1;
2017 }
2018}
2019
2020static unsigned int
2021xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
2022{
2023 if (interval_bw->overhead[LS_OVERHEAD_TYPE])
2024 return LS_OVERHEAD;
2025 if (interval_bw->overhead[FS_OVERHEAD_TYPE])
2026 return FS_OVERHEAD;
2027 return HS_OVERHEAD;
2028}
2029
2030
2031
2032
2033
2034static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2035 struct xhci_virt_device *virt_dev,
2036 int old_active_eps)
2037{
2038 struct xhci_interval_bw_table *bw_table;
2039 struct xhci_tt_bw_info *tt_info;
2040
2041
2042 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
2043 tt_info = virt_dev->tt_info;
2044
2045
2046
2047
2048 if (old_active_eps)
2049 return 0;
2050 if (old_active_eps == 0 && tt_info->active_eps != 0) {
2051 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
2052 return -ENOMEM;
2053 return 0;
2054 }
2055
2056
2057
2058
2059
2060
2061 return 0;
2062}
2063
2064static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2065 struct xhci_virt_device *virt_dev)
2066{
2067 unsigned int bw_reserved;
2068
2069 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
2070 if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
2071 return -ENOMEM;
2072
2073 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
2074 if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
2075 return -ENOMEM;
2076
2077 return 0;
2078}
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121static int xhci_check_bw_table(struct xhci_hcd *xhci,
2122 struct xhci_virt_device *virt_dev,
2123 int old_active_eps)
2124{
2125 unsigned int bw_reserved;
2126 unsigned int max_bandwidth;
2127 unsigned int bw_used;
2128 unsigned int block_size;
2129 struct xhci_interval_bw_table *bw_table;
2130 unsigned int packet_size = 0;
2131 unsigned int overhead = 0;
2132 unsigned int packets_transmitted = 0;
2133 unsigned int packets_remaining = 0;
2134 unsigned int i;
2135
2136 if (virt_dev->udev->speed >= USB_SPEED_SUPER)
2137 return xhci_check_ss_bw(xhci, virt_dev);
2138
2139 if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2140 max_bandwidth = HS_BW_LIMIT;
2141
2142 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
2143 } else {
2144 max_bandwidth = FS_BW_LIMIT;
2145 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2146 }
2147
2148 bw_table = virt_dev->bw_table;
2149
2150
2151
2152 block_size = xhci_get_block_size(virt_dev->udev);
2153
2154
2155
2156
2157 if (virt_dev->tt_info) {
2158 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2159 "Recalculating BW for rootport %u",
2160 virt_dev->real_port);
2161 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2162 xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2163 "newly activated TT.\n");
2164 return -ENOMEM;
2165 }
2166 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2167 "Recalculating BW for TT slot %u port %u",
2168 virt_dev->tt_info->slot_id,
2169 virt_dev->tt_info->ttport);
2170 } else {
2171 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2172 "Recalculating BW for rootport %u",
2173 virt_dev->real_port);
2174 }
2175
2176
2177
2178
2179 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2180 bw_table->interval_bw[0].num_packets *
2181 xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2182
2183 for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2184 unsigned int bw_added;
2185 unsigned int largest_mps;
2186 unsigned int interval_overhead;
2187
2188
2189
2190
2191
2192
2193 packets_remaining = 2 * packets_remaining +
2194 bw_table->interval_bw[i].num_packets;
2195
2196
2197
2198
2199 if (list_empty(&bw_table->interval_bw[i].endpoints))
2200 largest_mps = 0;
2201 else {
2202 struct xhci_virt_ep *virt_ep;
2203 struct list_head *ep_entry;
2204
2205 ep_entry = bw_table->interval_bw[i].endpoints.next;
2206 virt_ep = list_entry(ep_entry,
2207 struct xhci_virt_ep, bw_endpoint_list);
2208
2209 largest_mps = DIV_ROUND_UP(
2210 virt_ep->bw_info.max_packet_size,
2211 block_size);
2212 }
2213 if (largest_mps > packet_size)
2214 packet_size = largest_mps;
2215
2216
2217 interval_overhead = xhci_get_largest_overhead(
2218 &bw_table->interval_bw[i]);
2219 if (interval_overhead > overhead)
2220 overhead = interval_overhead;
2221
2222
2223
2224
2225 packets_transmitted = packets_remaining >> (i + 1);
2226
2227
2228 bw_added = packets_transmitted * (overhead + packet_size);
2229
2230
2231 packets_remaining = packets_remaining % (1 << (i + 1));
2232
2233
2234
2235
2236
2237 if (packets_remaining == 0) {
2238 packet_size = 0;
2239 overhead = 0;
2240 } else if (packets_transmitted > 0) {
2241
2242
2243
2244
2245
2246 packet_size = largest_mps;
2247 overhead = interval_overhead;
2248 }
2249
2250
2251
2252 bw_used += bw_added;
2253 if (bw_used > max_bandwidth) {
2254 xhci_warn(xhci, "Not enough bandwidth. "
2255 "Proposed: %u, Max: %u\n",
2256 bw_used, max_bandwidth);
2257 return -ENOMEM;
2258 }
2259 }
2260
2261
2262
2263
2264
2265
2266 if (packets_remaining > 0)
2267 bw_used += overhead + packet_size;
2268
2269 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2270 unsigned int port_index = virt_dev->real_port - 1;
2271
2272
2273
2274
2275
2276 bw_used += TT_HS_OVERHEAD *
2277 xhci->rh_bw[port_index].num_active_tts;
2278 }
2279
2280 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2281 "Final bandwidth: %u, Limit: %u, Reserved: %u, "
2282 "Available: %u " "percent",
2283 bw_used, max_bandwidth, bw_reserved,
2284 (max_bandwidth - bw_used - bw_reserved) * 100 /
2285 max_bandwidth);
2286
2287 bw_used += bw_reserved;
2288 if (bw_used > max_bandwidth) {
2289 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2290 bw_used, max_bandwidth);
2291 return -ENOMEM;
2292 }
2293
2294 bw_table->bw_used = bw_used;
2295 return 0;
2296}
2297
2298static bool xhci_is_async_ep(unsigned int ep_type)
2299{
2300 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2301 ep_type != ISOC_IN_EP &&
2302 ep_type != INT_IN_EP);
2303}
2304
2305static bool xhci_is_sync_in_ep(unsigned int ep_type)
2306{
2307 return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2308}
2309
2310static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2311{
2312 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2313
2314 if (ep_bw->ep_interval == 0)
2315 return SS_OVERHEAD_BURST +
2316 (ep_bw->mult * ep_bw->num_packets *
2317 (SS_OVERHEAD + mps));
2318 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2319 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2320 1 << ep_bw->ep_interval);
2321
2322}
2323
2324static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2325 struct xhci_bw_info *ep_bw,
2326 struct xhci_interval_bw_table *bw_table,
2327 struct usb_device *udev,
2328 struct xhci_virt_ep *virt_ep,
2329 struct xhci_tt_bw_info *tt_info)
2330{
2331 struct xhci_interval_bw *interval_bw;
2332 int normalized_interval;
2333
2334 if (xhci_is_async_ep(ep_bw->type))
2335 return;
2336
2337 if (udev->speed >= USB_SPEED_SUPER) {
2338 if (xhci_is_sync_in_ep(ep_bw->type))
2339 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2340 xhci_get_ss_bw_consumed(ep_bw);
2341 else
2342 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2343 xhci_get_ss_bw_consumed(ep_bw);
2344 return;
2345 }
2346
2347
2348
2349
2350 if (list_empty(&virt_ep->bw_endpoint_list))
2351 return;
2352
2353
2354
2355 if (udev->speed == USB_SPEED_HIGH)
2356 normalized_interval = ep_bw->ep_interval;
2357 else
2358 normalized_interval = ep_bw->ep_interval - 3;
2359
2360 if (normalized_interval == 0)
2361 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2362 interval_bw = &bw_table->interval_bw[normalized_interval];
2363 interval_bw->num_packets -= ep_bw->num_packets;
2364 switch (udev->speed) {
2365 case USB_SPEED_LOW:
2366 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2367 break;
2368 case USB_SPEED_FULL:
2369 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2370 break;
2371 case USB_SPEED_HIGH:
2372 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2373 break;
2374 case USB_SPEED_SUPER:
2375 case USB_SPEED_SUPER_PLUS:
2376 case USB_SPEED_UNKNOWN:
2377 case USB_SPEED_WIRELESS:
2378
2379
2380
2381 return;
2382 }
2383 if (tt_info)
2384 tt_info->active_eps -= 1;
2385 list_del_init(&virt_ep->bw_endpoint_list);
2386}
2387
2388static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2389 struct xhci_bw_info *ep_bw,
2390 struct xhci_interval_bw_table *bw_table,
2391 struct usb_device *udev,
2392 struct xhci_virt_ep *virt_ep,
2393 struct xhci_tt_bw_info *tt_info)
2394{
2395 struct xhci_interval_bw *interval_bw;
2396 struct xhci_virt_ep *smaller_ep;
2397 int normalized_interval;
2398
2399 if (xhci_is_async_ep(ep_bw->type))
2400 return;
2401
2402 if (udev->speed == USB_SPEED_SUPER) {
2403 if (xhci_is_sync_in_ep(ep_bw->type))
2404 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2405 xhci_get_ss_bw_consumed(ep_bw);
2406 else
2407 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2408 xhci_get_ss_bw_consumed(ep_bw);
2409 return;
2410 }
2411
2412
2413
2414
2415 if (udev->speed == USB_SPEED_HIGH)
2416 normalized_interval = ep_bw->ep_interval;
2417 else
2418 normalized_interval = ep_bw->ep_interval - 3;
2419
2420 if (normalized_interval == 0)
2421 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2422 interval_bw = &bw_table->interval_bw[normalized_interval];
2423 interval_bw->num_packets += ep_bw->num_packets;
2424 switch (udev->speed) {
2425 case USB_SPEED_LOW:
2426 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2427 break;
2428 case USB_SPEED_FULL:
2429 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2430 break;
2431 case USB_SPEED_HIGH:
2432 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2433 break;
2434 case USB_SPEED_SUPER:
2435 case USB_SPEED_SUPER_PLUS:
2436 case USB_SPEED_UNKNOWN:
2437 case USB_SPEED_WIRELESS:
2438
2439
2440
2441 return;
2442 }
2443
2444 if (tt_info)
2445 tt_info->active_eps += 1;
2446
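	/* Keep the interval's endpoint list sorted by max packet size,
	 * largest first.
	 */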
2447 list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2448 bw_endpoint_list) {
2449 if (ep_bw->max_packet_size >=
2450 smaller_ep->bw_info.max_packet_size) {
2451
2452 list_add_tail(&virt_ep->bw_endpoint_list,
2453 &smaller_ep->bw_endpoint_list);
2454 return;
2455 }
2456 }
2457
2458 list_add_tail(&virt_ep->bw_endpoint_list,
2459 &interval_bw->endpoints);
2460}
2461
2462void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2463 struct xhci_virt_device *virt_dev,
2464 int old_active_eps)
2465{
2466 struct xhci_root_port_bw_info *rh_bw_info;
2467 if (!virt_dev->tt_info)
2468 return;
2469
2470 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2471 if (old_active_eps == 0 &&
2472 virt_dev->tt_info->active_eps != 0) {
2473 rh_bw_info->num_active_tts += 1;
2474 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2475 } else if (old_active_eps != 0 &&
2476 virt_dev->tt_info->active_eps == 0) {
2477 rh_bw_info->num_active_tts -= 1;
2478 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2479 }
2480}
2481
2482static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2483 struct xhci_virt_device *virt_dev,
2484 struct xhci_container_ctx *in_ctx)
2485{
2486 struct xhci_bw_info ep_bw_info[31];
2487 int i;
2488 struct xhci_input_control_ctx *ctrl_ctx;
2489 int old_active_eps = 0;
2490
2491 if (virt_dev->tt_info)
2492 old_active_eps = virt_dev->tt_info->active_eps;
2493
2494 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2495 if (!ctrl_ctx) {
2496 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2497 __func__);
2498 return -ENOMEM;
2499 }
2500
2501 for (i = 0; i < 31; i++) {
2502 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2503 continue;
2504
2505
2506 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2507 sizeof(ep_bw_info[i]));
2508
2509
2510
2511 if (EP_IS_DROPPED(ctrl_ctx, i))
2512 xhci_drop_ep_from_interval_table(xhci,
2513 &virt_dev->eps[i].bw_info,
2514 virt_dev->bw_table,
2515 virt_dev->udev,
2516 &virt_dev->eps[i],
2517 virt_dev->tt_info);
2518 }
2519
2520 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2521 for (i = 0; i < 31; i++) {
2522
2523 if (EP_IS_ADDED(ctrl_ctx, i))
2524 xhci_add_ep_to_interval_table(xhci,
2525 &virt_dev->eps[i].bw_info,
2526 virt_dev->bw_table,
2527 virt_dev->udev,
2528 &virt_dev->eps[i],
2529 virt_dev->tt_info);
2530 }
2531
2532 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2533
2534
2535
2536 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2537 return 0;
2538 }
2539
2540
2541 for (i = 0; i < 31; i++) {
2542 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2543 continue;
2544
2545
2546
2547
2548 if (EP_IS_ADDED(ctrl_ctx, i)) {
2549 xhci_drop_ep_from_interval_table(xhci,
2550 &virt_dev->eps[i].bw_info,
2551 virt_dev->bw_table,
2552 virt_dev->udev,
2553 &virt_dev->eps[i],
2554 virt_dev->tt_info);
2555 }
2556
2557 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2558 sizeof(ep_bw_info[i]));
2559
2560 if (EP_IS_DROPPED(ctrl_ctx, i))
2561 xhci_add_ep_to_interval_table(xhci,
2562 &virt_dev->eps[i].bw_info,
2563 virt_dev->bw_table,
2564 virt_dev->udev,
2565 &virt_dev->eps[i],
2566 virt_dev->tt_info);
2567 }
2568 return -ENOMEM;
2569}
2570
2571
2572
2573
2574
2575static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2576 struct usb_device *udev,
2577 struct xhci_command *command,
2578 bool ctx_change, bool must_succeed)
2579{
2580 int ret;
2581 unsigned long flags;
2582 struct xhci_input_control_ctx *ctrl_ctx;
2583 struct xhci_virt_device *virt_dev;
2584
2585 if (!command)
2586 return -EINVAL;
2587
2588 spin_lock_irqsave(&xhci->lock, flags);
2589
2590 if (xhci->xhc_state & XHCI_STATE_DYING) {
2591 spin_unlock_irqrestore(&xhci->lock, flags);
2592 return -ESHUTDOWN;
2593 }
2594
2595 virt_dev = xhci->devs[udev->slot_id];
2596
2597 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2598 if (!ctrl_ctx) {
2599 spin_unlock_irqrestore(&xhci->lock, flags);
2600 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2601 __func__);
2602 return -ENOMEM;
2603 }
2604
2605 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2606 xhci_reserve_host_resources(xhci, ctrl_ctx)) {
2607 spin_unlock_irqrestore(&xhci->lock, flags);
2608 xhci_warn(xhci, "Not enough host resources, "
2609 "active endpoint contexts = %u\n",
2610 xhci->num_active_eps);
2611 return -ENOMEM;
2612 }
2613 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2614 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2615 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2616 xhci_free_host_resources(xhci, ctrl_ctx);
2617 spin_unlock_irqrestore(&xhci->lock, flags);
2618 xhci_warn(xhci, "Not enough bandwidth\n");
2619 return -ENOMEM;
2620 }
2621
2622 if (!ctx_change)
2623 ret = xhci_queue_configure_endpoint(xhci, command,
2624 command->in_ctx->dma,
2625 udev->slot_id, must_succeed);
2626 else
2627 ret = xhci_queue_evaluate_context(xhci, command,
2628 command->in_ctx->dma,
2629 udev->slot_id, must_succeed);
2630 if (ret < 0) {
2631 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2632 xhci_free_host_resources(xhci, ctrl_ctx);
2633 spin_unlock_irqrestore(&xhci->lock, flags);
2634 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2635 "FIXME allocate a new ring segment");
2636 return -ENOMEM;
2637 }
2638 xhci_ring_cmd_db(xhci);
2639 spin_unlock_irqrestore(&xhci->lock, flags);
2640
2641
2642 wait_for_completion(command->completion);
2643
2644 if (!ctx_change)
2645 ret = xhci_configure_endpoint_result(xhci, udev,
2646 &command->status);
2647 else
2648 ret = xhci_evaluate_context_result(xhci, udev,
2649 &command->status);
2650
2651 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2652 spin_lock_irqsave(&xhci->lock, flags);
2653
2654
2655
2656 if (ret)
2657 xhci_free_host_resources(xhci, ctrl_ctx);
2658 else
2659 xhci_finish_resource_reservation(xhci, ctrl_ctx);
2660 spin_unlock_irqrestore(&xhci->lock, flags);
2661 }
2662 return ret;
2663}
2664
2665static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
2666 struct xhci_virt_device *vdev, int i)
2667{
2668 struct xhci_virt_ep *ep = &vdev->eps[i];
2669
2670 if (ep->ep_state & EP_HAS_STREAMS) {
2671 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
2672 xhci_get_endpoint_address(i));
2673 xhci_free_stream_info(xhci, ep->stream_info);
2674 ep->stream_info = NULL;
2675 ep->ep_state &= ~EP_HAS_STREAMS;
2676 }
2677}
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
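/*
 * Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint().  Issues a Configure Endpoint command for the pending
 * changes; if it fails, the USB core is expected to call
 * xhci_reset_bandwidth() to throw away the staged endpoint rings.
 */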
2689static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2690{
2691 int i;
2692 int ret = 0;
2693 struct xhci_hcd *xhci;
2694 struct xhci_virt_device *virt_dev;
2695 struct xhci_input_control_ctx *ctrl_ctx;
2696 struct xhci_slot_ctx *slot_ctx;
2697 struct xhci_command *command;
2698
2699 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2700 if (ret <= 0)
2701 return ret;
2702 xhci = hcd_to_xhci(hcd);
2703 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
2704 (xhci->xhc_state & XHCI_STATE_REMOVING))
2705 return -ENODEV;
2706
2707 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2708 virt_dev = xhci->devs[udev->slot_id];
2709
2710 command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
2711 if (!command)
2712 return -ENOMEM;
2713
2714 command->in_ctx = virt_dev->in_ctx;
2715
2716
2717 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2718 if (!ctrl_ctx) {
2719 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2720 __func__);
2721 ret = -ENOMEM;
2722 goto command_cleanup;
2723 }
2724 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2725 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2726 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
2727
2728
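	/* Don't issue the command if there are no endpoints to add or drop. */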
2729 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2730 ctrl_ctx->drop_flags == 0) {
2731 ret = 0;
2732 goto command_cleanup;
2733 }
2734
2735 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2736 for (i = 31; i >= 1; i--) {
2737 __le32 le32 = cpu_to_le32(BIT(i));
2738
2739 if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
2740 || (ctrl_ctx->add_flags & le32) || i == 1) {
2741 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2742 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
2743 break;
2744 }
2745 }
2746
2747 ret = xhci_configure_endpoint(xhci, udev, command,
2748 false, false);
2749 if (ret)
2750
2751 goto command_cleanup;
2752
2753
2754 for (i = 1; i < 31; i++) {
2755 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2756 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
2757 xhci_free_endpoint_ring(xhci, virt_dev, i);
2758 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2759 }
2760 }
2761 xhci_zero_in_ctx(xhci, virt_dev);
2762
2763
2764
2765
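	/* Install the new endpoint rings and free the old ones they replace. */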
2766 for (i = 1; i < 31; i++) {
2767 if (!virt_dev->eps[i].new_ring)
2768 continue;
2769
2770
2771
2772 if (virt_dev->eps[i].ring) {
2773 xhci_free_endpoint_ring(xhci, virt_dev, i);
2774 }
2775 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2776 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2777 virt_dev->eps[i].new_ring = NULL;
2778 }
2779command_cleanup:
2780 kfree(command->completion);
2781 kfree(command);
2782
2783 return ret;
2784}
2785
2786static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2787{
2788 struct xhci_hcd *xhci;
2789 struct xhci_virt_device *virt_dev;
2790 int i, ret;
2791
2792 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2793 if (ret <= 0)
2794 return;
2795 xhci = hcd_to_xhci(hcd);
2796
2797 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2798 virt_dev = xhci->devs[udev->slot_id];
2799
2800 for (i = 0; i < 31; i++) {
2801 if (virt_dev->eps[i].new_ring) {
2802 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
2803 virt_dev->eps[i].new_ring = NULL;
2804 }
2805 }
2806 xhci_zero_in_ctx(xhci, virt_dev);
2807}
2808
2809static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
2810 struct xhci_container_ctx *in_ctx,
2811 struct xhci_container_ctx *out_ctx,
2812 struct xhci_input_control_ctx *ctrl_ctx,
2813 u32 add_flags, u32 drop_flags)
2814{
2815 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
2816 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
2817 xhci_slot_copy(xhci, in_ctx, out_ctx);
2818 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2819}
2820
2821static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
2822 unsigned int slot_id, unsigned int ep_index,
2823 struct xhci_dequeue_state *deq_state)
2824{
2825 struct xhci_input_control_ctx *ctrl_ctx;
2826 struct xhci_container_ctx *in_ctx;
2827 struct xhci_ep_ctx *ep_ctx;
2828 u32 added_ctxs;
2829 dma_addr_t addr;
2830
2831 in_ctx = xhci->devs[slot_id]->in_ctx;
2832 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2833 if (!ctrl_ctx) {
2834 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2835 __func__);
2836 return;
2837 }
2838
2839 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
2840 xhci->devs[slot_id]->out_ctx, ep_index);
2841 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
2842 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
2843 deq_state->new_deq_ptr);
2844 if (addr == 0) {
2845 xhci_warn(xhci, "WARN Cannot submit config ep after "
2846 "reset ep command\n");
2847 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
2848 deq_state->new_deq_seg,
2849 deq_state->new_deq_ptr);
2850 return;
2851 }
2852 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
2853
2854 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
2855 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
2856 xhci->devs[slot_id]->out_ctx, ctrl_ctx,
2857 added_ctxs, added_ctxs);
2858}
2859
2860void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
2861 unsigned int stream_id, struct xhci_td *td)
2862{
2863 struct xhci_dequeue_state deq_state;
2864 struct xhci_virt_ep *ep;
2865 struct usb_device *udev = td->urb->dev;
2866
2867 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2868 "Cleaning up stalled endpoint ring");
2869 ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2870
2871
2872
2873 xhci_find_new_dequeue_state(xhci, udev->slot_id,
2874 ep_index, stream_id, td, &deq_state);
2875
2876 if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
2877 return;
2878
2879
2880
2881
2882 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
2883 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2884 "Queueing new dequeue state");
2885 xhci_queue_new_dequeue_state(xhci, udev->slot_id,
2886 ep_index, &deq_state);
2887 } else {
2888
2889
2890
2891
2892
2893 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2894 "Setting up input context for "
2895 "configure endpoint command");
2896 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
2897 ep_index, &deq_state);
2898 }
2899}
2900
2901
2902
2903
2904
2905
2906
2907
2908
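/*
 * Endpoint reset callback.  Halted endpoints are already recovered by the
 * driver when the offending transfer is cleaned up (see
 * xhci_cleanup_stalled_ring() above), so this callback only logs the event.
 */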
2909static void xhci_endpoint_reset(struct usb_hcd *hcd,
2910 struct usb_host_endpoint *ep)
2911{
2912 struct xhci_hcd *xhci;
2913
2914 xhci = hcd_to_xhci(hcd);
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926 xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n",
2927 ep->desc.bEndpointAddress);
2928}
2929
2930static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
2931 struct usb_device *udev, struct usb_host_endpoint *ep,
2932 unsigned int slot_id)
2933{
2934 int ret;
2935 unsigned int ep_index;
2936 unsigned int ep_state;
2937
2938 if (!ep)
2939 return -EINVAL;
2940 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
2941 if (ret <= 0)
2942 return -EINVAL;
2943 if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
2944 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
2945 " descriptor for ep 0x%x does not support streams\n",
2946 ep->desc.bEndpointAddress);
2947 return -EINVAL;
2948 }
2949
2950 ep_index = xhci_get_endpoint_index(&ep->desc);
2951 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
2952 if (ep_state & EP_HAS_STREAMS ||
2953 ep_state & EP_GETTING_STREAMS) {
2954 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
2955 "already has streams set up.\n",
2956 ep->desc.bEndpointAddress);
2957 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
2958 "dynamic stream context array reallocation.\n");
2959 return -EINVAL;
2960 }
2961 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
2962 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
2963 "endpoint 0x%x; URBs are pending.\n",
2964 ep->desc.bEndpointAddress);
2965 return -EINVAL;
2966 }
2967 return 0;
2968}
2969
2970static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
2971 unsigned int *num_streams, unsigned int *num_stream_ctxs)
2972{
2973 unsigned int max_streams;
2974
2975
2976 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
2977
2978
2979
2980
2981
2982
2983 max_streams = HCC_MAX_PSA(xhci->hcc_params);
2984 if (*num_stream_ctxs > max_streams) {
2985 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
2986 max_streams);
2987 *num_stream_ctxs = max_streams;
2988 *num_streams = max_streams;
2989 }
2990}
2991
2992
2993
2994
2995
2996static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
2997 struct usb_device *udev,
2998 struct usb_host_endpoint **eps, unsigned int num_eps,
2999 unsigned int *num_streams, u32 *changed_ep_bitmask)
3000{
3001 unsigned int max_streams;
3002 unsigned int endpoint_flag;
3003 int i;
3004 int ret;
3005
3006 for (i = 0; i < num_eps; i++) {
3007 ret = xhci_check_streams_endpoint(xhci, udev,
3008 eps[i], udev->slot_id);
3009 if (ret < 0)
3010 return ret;
3011
3012 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
3013 if (max_streams < (*num_streams - 1)) {
3014 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3015 eps[i]->desc.bEndpointAddress,
3016 max_streams);
3017 *num_streams = max_streams+1;
3018 }
3019
3020 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
3021 if (*changed_ep_bitmask & endpoint_flag)
3022 return -EINVAL;
3023 *changed_ep_bitmask |= endpoint_flag;
3024 }
3025 return 0;
3026}
3027
3028static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3029 struct usb_device *udev,
3030 struct usb_host_endpoint **eps, unsigned int num_eps)
3031{
3032 u32 changed_ep_bitmask = 0;
3033 unsigned int slot_id;
3034 unsigned int ep_index;
3035 unsigned int ep_state;
3036 int i;
3037
3038 slot_id = udev->slot_id;
3039 if (!xhci->devs[slot_id])
3040 return 0;
3041
3042 for (i = 0; i < num_eps; i++) {
3043 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3044 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3045
3046 if (ep_state & EP_GETTING_NO_STREAMS) {
3047 xhci_warn(xhci, "WARN Can't disable streams for "
3048 "endpoint 0x%x, "
3049 "streams are being disabled already\n",
3050 eps[i]->desc.bEndpointAddress);
3051 return 0;
3052 }
3053
3054 if (!(ep_state & EP_HAS_STREAMS) &&
3055 !(ep_state & EP_GETTING_STREAMS)) {
3056 xhci_warn(xhci, "WARN Can't disable streams for "
3057 "endpoint 0x%x, "
3058 "streams are already disabled!\n",
3059 eps[i]->desc.bEndpointAddress);
3060 xhci_warn(xhci, "WARN xhci_free_streams() called "
3061 "with non-streams endpoint\n");
3062 return 0;
3063 }
3064 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3065 }
3066 return changed_ep_bitmask;
3067}
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
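/*
 * Set up streams for the given bulk endpoints.  Counts stream 0 internally,
 * allocates stream contexts and rings for every endpoint, then issues a
 * Configure Endpoint command to switch the endpoints over to stream rings.
 * Returns the number of stream IDs the caller may use (num_streams - 1),
 * or a negative error code.
 */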
3085static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3086 struct usb_host_endpoint **eps, unsigned int num_eps,
3087 unsigned int num_streams, gfp_t mem_flags)
3088{
3089 int i, ret;
3090 struct xhci_hcd *xhci;
3091 struct xhci_virt_device *vdev;
3092 struct xhci_command *config_cmd;
3093 struct xhci_input_control_ctx *ctrl_ctx;
3094 unsigned int ep_index;
3095 unsigned int num_stream_ctxs;
3096 unsigned int max_packet;
3097 unsigned long flags;
3098 u32 changed_ep_bitmask = 0;
3099
3100 if (!eps)
3101 return -EINVAL;
3102
3103
3104
3105
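	/* Add one to account for stream 0, which drivers cannot use. */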
3106 num_streams += 1;
3107 xhci = hcd_to_xhci(hcd);
3108 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3109 num_streams);
3110
3111
3112 if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
3113 HCC_MAX_PSA(xhci->hcc_params) < 4) {
3114 xhci_dbg(xhci, "xHCI controller does not support streams.\n");
3115 return -ENOSYS;
3116 }
3117
3118 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
3119 if (!config_cmd)
3120 return -ENOMEM;
3121
3122 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
3123 if (!ctrl_ctx) {
3124 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3125 __func__);
3126 xhci_free_command(xhci, config_cmd);
3127 return -ENOMEM;
3128 }
3129
3130
3131
3132
3133
3134 spin_lock_irqsave(&xhci->lock, flags);
3135 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3136 num_eps, &num_streams, &changed_ep_bitmask);
3137 if (ret < 0) {
3138 xhci_free_command(xhci, config_cmd);
3139 spin_unlock_irqrestore(&xhci->lock, flags);
3140 return ret;
3141 }
3142 if (num_streams <= 1) {
3143 xhci_warn(xhci, "WARN: endpoints can't handle "
3144 "more than one stream.\n");
3145 xhci_free_command(xhci, config_cmd);
3146 spin_unlock_irqrestore(&xhci->lock, flags);
3147 return -EINVAL;
3148 }
3149 vdev = xhci->devs[udev->slot_id];
3150
3151
3152
3153 for (i = 0; i < num_eps; i++) {
3154 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3155 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3156 }
3157 spin_unlock_irqrestore(&xhci->lock, flags);
3158
3159
3160
3161
3162
3163 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3164 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3165 num_stream_ctxs, num_streams);
3166
3167 for (i = 0; i < num_eps; i++) {
3168 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3169 max_packet = usb_endpoint_maxp(&eps[i]->desc);
3170 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3171 num_stream_ctxs,
3172 num_streams,
3173 max_packet, mem_flags);
3174 if (!vdev->eps[ep_index].stream_info)
3175 goto cleanup;
3176
3177
3178
3179 }
3180
3181
3182 for (i = 0; i < num_eps; i++) {
3183 struct xhci_ep_ctx *ep_ctx;
3184
3185 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3186 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3187
3188 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3189 vdev->out_ctx, ep_index);
3190 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3191 vdev->eps[ep_index].stream_info);
3192 }
3193
3194
3195
3196 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3197 vdev->out_ctx, ctrl_ctx,
3198 changed_ep_bitmask, changed_ep_bitmask);
3199
3200
3201 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3202 false, false);
3203
3204
3205
3206
3207
3208 if (ret < 0)
3209 goto cleanup;
3210
3211 spin_lock_irqsave(&xhci->lock, flags);
3212 for (i = 0; i < num_eps; i++) {
3213 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3214 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3215 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3216 udev->slot_id, ep_index);
3217 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3218 }
3219 xhci_free_command(xhci, config_cmd);
3220 spin_unlock_irqrestore(&xhci->lock, flags);
3221
3222
3223 return num_streams - 1;
3224
3225cleanup:
3226
3227 for (i = 0; i < num_eps; i++) {
3228 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3229 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3230 vdev->eps[ep_index].stream_info = NULL;
3231
3232
3233
3234 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3235 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3236 xhci_endpoint_zero(xhci, vdev, eps[i]);
3237 }
3238 xhci_free_command(xhci, config_cmd);
3239 return -ENOMEM;
3240}
3241
3242
3243
3244
3245
3246
3247
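/*
 * Transition the endpoints back to using ordinary endpoint rings instead of
 * stream rings, and free the stream contexts once the Configure Endpoint
 * command completes.
 */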
3248static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3249 struct usb_host_endpoint **eps, unsigned int num_eps,
3250 gfp_t mem_flags)
3251{
3252 int i, ret;
3253 struct xhci_hcd *xhci;
3254 struct xhci_virt_device *vdev;
3255 struct xhci_command *command;
3256 struct xhci_input_control_ctx *ctrl_ctx;
3257 unsigned int ep_index;
3258 unsigned long flags;
3259 u32 changed_ep_bitmask;
3260
3261 xhci = hcd_to_xhci(hcd);
3262 vdev = xhci->devs[udev->slot_id];
3263
3264
3265 spin_lock_irqsave(&xhci->lock, flags);
3266 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3267 udev, eps, num_eps);
3268 if (changed_ep_bitmask == 0) {
3269 spin_unlock_irqrestore(&xhci->lock, flags);
3270 return -EINVAL;
3271 }
3272
3273
3274
3275
3276
3277 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3278 command = vdev->eps[ep_index].stream_info->free_streams_command;
3279 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3280 if (!ctrl_ctx) {
3281 spin_unlock_irqrestore(&xhci->lock, flags);
3282 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3283 __func__);
3284 return -EINVAL;
3285 }
3286
3287 for (i = 0; i < num_eps; i++) {
3288 struct xhci_ep_ctx *ep_ctx;
3289
3290 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3291 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3292 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3293 EP_GETTING_NO_STREAMS;
3294
3295 xhci_endpoint_copy(xhci, command->in_ctx,
3296 vdev->out_ctx, ep_index);
3297 xhci_setup_no_streams_ep_input_ctx(ep_ctx,
3298 &vdev->eps[ep_index]);
3299 }
3300 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3301 vdev->out_ctx, ctrl_ctx,
3302 changed_ep_bitmask, changed_ep_bitmask);
3303 spin_unlock_irqrestore(&xhci->lock, flags);
3304
3305
3306
3307
3308 ret = xhci_configure_endpoint(xhci, udev, command,
3309 false, true);
3310
3311
3312
3313
3314 if (ret < 0)
3315 return ret;
3316
3317 spin_lock_irqsave(&xhci->lock, flags);
3318 for (i = 0; i < num_eps; i++) {
3319 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3320 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3321 vdev->eps[ep_index].stream_info = NULL;
3322
3323
3324
3325 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3326 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3327 }
3328 spin_unlock_irqrestore(&xhci->lock, flags);
3329
3330 return 0;
3331}
3332
3333
3334
3335
3336
3337
3338
3339
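/*
 * Releases the host controller endpoint-context resources for every endpoint
 * of @virt_dev that still has a ring, optionally including the default
 * control endpoint.  Used with XHCI_EP_LIMIT_QUIRK; caller holds xhci->lock.
 */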
3340void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3341 struct xhci_virt_device *virt_dev, bool drop_control_ep)
3342{
3343 int i;
3344 unsigned int num_dropped_eps = 0;
3345 unsigned int drop_flags = 0;
3346
3347 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3348 if (virt_dev->eps[i].ring) {
3349 drop_flags |= 1 << i;
3350 num_dropped_eps++;
3351 }
3352 }
3353 xhci->num_active_eps -= num_dropped_eps;
3354 if (num_dropped_eps)
3355 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3356 "Dropped %u ep ctxs, flags = 0x%x, "
3357 "%u now active.",
3358 num_dropped_eps, drop_flags,
3359 xhci->num_active_eps);
3360}
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
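/*
 * Issues a Reset Device command for the slot behind @udev.  If the xHC's
 * virt device is missing or no longer matches @udev, the slot is simply
 * re-allocated instead.  On success the old endpoint rings, stream info,
 * and bandwidth bookkeeping for endpoints 1-30 are released.
 */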
3380static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
3381 struct usb_device *udev)
3382{
3383 int ret, i;
3384 unsigned long flags;
3385 struct xhci_hcd *xhci;
3386 unsigned int slot_id;
3387 struct xhci_virt_device *virt_dev;
3388 struct xhci_command *reset_device_cmd;
3390 struct xhci_slot_ctx *slot_ctx;
3391 int old_active_eps = 0;
3392
3393 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3394 if (ret <= 0)
3395 return ret;
3396 xhci = hcd_to_xhci(hcd);
3397 slot_id = udev->slot_id;
3398 virt_dev = xhci->devs[slot_id];
3399 if (!virt_dev) {
3400 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3401 "not exist. Re-allocate the device\n", slot_id);
3402 ret = xhci_alloc_dev(hcd, udev);
3403 if (ret == 1)
3404 return 0;
3405 else
3406 return -EINVAL;
3407 }
3408
3409 if (virt_dev->tt_info)
3410 old_active_eps = virt_dev->tt_info->active_eps;
3411
3412 if (virt_dev->udev != udev) {
3413
3414
3415
3416
3417 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3418 "not match the udev. Re-allocate the device\n",
3419 slot_id);
3420 ret = xhci_alloc_dev(hcd, udev);
3421 if (ret == 1)
3422 return 0;
3423 else
3424 return -EINVAL;
3425 }
3426
3427
3428 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3429 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3430 SLOT_STATE_DISABLED)
3431 return 0;
3432
3433 trace_xhci_discover_or_reset_device(slot_ctx);
3434
3435 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3436
3437
3438
3439
3440
3441
3442 reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
3443 if (!reset_device_cmd) {
3444 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3445 return -ENOMEM;
3446 }
3447
3448
3449 spin_lock_irqsave(&xhci->lock, flags);
3450
3451 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
3452 if (ret) {
3453 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3454 spin_unlock_irqrestore(&xhci->lock, flags);
3455 goto command_cleanup;
3456 }
3457 xhci_ring_cmd_db(xhci);
3458 spin_unlock_irqrestore(&xhci->lock, flags);
3459
3460
3461 wait_for_completion(reset_device_cmd->completion);
3462
3463
3464
3465
3466
3467 ret = reset_device_cmd->status;
3468 switch (ret) {
3469 case COMP_COMMAND_ABORTED:
3470 case COMP_COMMAND_RING_STOPPED:
3471 xhci_warn(xhci, "Timeout waiting for reset device command\n");
3472 ret = -ETIME;
3473 goto command_cleanup;
3474 case COMP_SLOT_NOT_ENABLED_ERROR:
3475 case COMP_CONTEXT_STATE_ERROR:
3476 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
3477 slot_id,
3478 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3479 xhci_dbg(xhci, "Not freeing device rings.\n");
3480
3481 ret = 0;
3482 goto command_cleanup;
3483 case COMP_SUCCESS:
3484 xhci_dbg(xhci, "Successful reset device command.\n");
3485 break;
3486 default:
3487 if (xhci_is_vendor_info_code(xhci, ret))
3488 break;
3489 xhci_warn(xhci, "Unknown completion code %u for "
3490 "reset device command.\n", ret);
3491 ret = -EINVAL;
3492 goto command_cleanup;
3493 }
3494
3495
3496 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3497 spin_lock_irqsave(&xhci->lock, flags);
3498
3499 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3500 spin_unlock_irqrestore(&xhci->lock, flags);
3501 }
3502
3503
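	/* Everything except endpoint 0 is now disabled, so free those rings. */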
3505 for (i = 1; i < 31; i++) {
3506 struct xhci_virt_ep *ep = &virt_dev->eps[i];
3507
3508 if (ep->ep_state & EP_HAS_STREAMS) {
3509 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
3510 xhci_get_endpoint_address(i));
3511 xhci_free_stream_info(xhci, ep->stream_info);
3512 ep->stream_info = NULL;
3513 ep->ep_state &= ~EP_HAS_STREAMS;
3514 }
3515
3516 if (ep->ring)
3517 xhci_free_endpoint_ring(xhci, virt_dev, i);
3520 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3521 xhci_drop_ep_from_interval_table(xhci,
3522 &virt_dev->eps[i].bw_info,
3523 virt_dev->bw_table,
3524 udev,
3525 &virt_dev->eps[i],
3526 virt_dev->tt_info);
3527 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3528 }
3529
3530 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3531 ret = 0;
3532
3533command_cleanup:
3534 xhci_free_command(xhci, reset_device_cmd);
3535 return ret;
3536}
3537
3538
3539
3540
3541
3542
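/*
 * Called when the USB core is done with a device.  Stops the per-endpoint
 * stop-command timers and asks xhci_disable_slot() to issue a Disable Slot
 * command for the device's slot.
 */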
3543static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3544{
3545 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3546 struct xhci_virt_device *virt_dev;
3547 struct xhci_slot_ctx *slot_ctx;
3548 int i, ret;
3549 struct xhci_command *command;
3550
3551 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
3552 if (!command)
3553 return;
3554
3555#ifndef CONFIG_USB_DEFAULT_PERSIST
3556
3557
3558
3559
3560
3561 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3562 pm_runtime_put_noidle(hcd->self.controller);
3563#endif
3564
3565 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3566
3567
3568
3569 if (ret <= 0 && ret != -ENODEV) {
3570 kfree(command);
3571 return;
3572 }
3573
3574 virt_dev = xhci->devs[udev->slot_id];
3575 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3576 trace_xhci_free_dev(slot_ctx);
3577
3578
3579 for (i = 0; i < 31; i++) {
3580 virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
3581 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
3582 }
3583
3584 xhci_disable_slot(xhci, command, udev->slot_id);
3585
3586
3587
3588
3589}
3590
3591int xhci_disable_slot(struct xhci_hcd *xhci, struct xhci_command *command,
3592 u32 slot_id)
3593{
3594 unsigned long flags;
3595 u32 state;
3596 int ret = 0;
3597 struct xhci_virt_device *virt_dev;
3598
3599 virt_dev = xhci->devs[slot_id];
3600 if (!virt_dev)
3601 return -EINVAL;
3602 if (!command)
3603 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
3604 if (!command)
3605 return -ENOMEM;
3606
3607 spin_lock_irqsave(&xhci->lock, flags);
3608
3609 state = readl(&xhci->op_regs->status);
3610 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3611 (xhci->xhc_state & XHCI_STATE_HALTED)) {
3612 xhci_free_virt_device(xhci, slot_id);
3613 spin_unlock_irqrestore(&xhci->lock, flags);
3614 kfree(command);
3615 return ret;
3616 }
3617
3618 ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
3619 slot_id);
3620 if (ret) {
3621 spin_unlock_irqrestore(&xhci->lock, flags);
3622 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3623 return ret;
3624 }
3625 xhci_ring_cmd_db(xhci);
3626 spin_unlock_irqrestore(&xhci->lock, flags);
3627 return ret;
3628}
3629
3630
3631
3632
3633
3634
3635
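/*
 * Checks whether the host has room for one more endpoint context (the
 * default control endpoint of a newly enabled slot) under
 * XHCI_EP_LIMIT_QUIRK.  Caller must hold xhci->lock.
 */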
3636static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3637{
3638 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
3639 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3640 "Not enough ep ctxs: "
3641 "%u active, need to add 1, limit is %u.",
3642 xhci->num_active_eps, xhci->limit_active_eps);
3643 return -ENOMEM;
3644 }
3645 xhci->num_active_eps += 1;
3646 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3647 "Adding 1 ep ctx, %u now active.",
3648 xhci->num_active_eps);
3649 return 0;
3650}
3651
3652
3653
3654
3655
3656
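/*
 * Issues an Enable Slot command and allocates the driver's per-slot data
 * structures.  Returns 1 on success and 0 on failure.
 */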
3657int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3658{
3659 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3660 struct xhci_virt_device *vdev;
3661 struct xhci_slot_ctx *slot_ctx;
3662 unsigned long flags;
3663 int ret, slot_id;
3664 struct xhci_command *command;
3665
3666 command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
3667 if (!command)
3668 return 0;
3669
3670
3671 mutex_lock(&xhci->mutex);
3672 spin_lock_irqsave(&xhci->lock, flags);
3673 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
3674 if (ret) {
3675 spin_unlock_irqrestore(&xhci->lock, flags);
3676 mutex_unlock(&xhci->mutex);
3677 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3678 xhci_free_command(xhci, command);
3679 return 0;
3680 }
3681 xhci_ring_cmd_db(xhci);
3682 spin_unlock_irqrestore(&xhci->lock, flags);
3683
3684 wait_for_completion(command->completion);
3685 slot_id = command->slot_id;
3686 mutex_unlock(&xhci->mutex);
3687
3688 if (!slot_id || command->status != COMP_SUCCESS) {
3689 xhci_err(xhci, "Error while assigning device slot ID\n");
3690 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
3691 HCS_MAX_SLOTS(
3692 readl(&xhci->cap_regs->hcs_params1)));
3693 xhci_free_command(xhci, command);
3694 return 0;
3695 }
3696
3697 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3698 spin_lock_irqsave(&xhci->lock, flags);
3699 ret = xhci_reserve_host_control_ep_resources(xhci);
3700 if (ret) {
3701 spin_unlock_irqrestore(&xhci->lock, flags);
3702 xhci_warn(xhci, "Not enough host resources, "
3703 "active endpoint contexts = %u\n",
3704 xhci->num_active_eps);
3705 goto disable_slot;
3706 }
3707 spin_unlock_irqrestore(&xhci->lock, flags);
3708 }
3709
3710
3711
3712
3713 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
3714 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
3715 goto disable_slot;
3716 }
3717 vdev = xhci->devs[slot_id];
3718 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
3719 trace_xhci_alloc_dev(slot_ctx);
3720
3721 udev->slot_id = slot_id;
3722
3723#ifndef CONFIG_USB_DEFAULT_PERSIST
3724
3725
3726
3727
3728 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3729 pm_runtime_get_noresume(hcd->self.controller);
3730#endif
3731
3732
3733 xhci_free_command(xhci, command);
3734
3735
3736 return 1;
3737
3738disable_slot:
3739
3740 kfree(command->completion);
3741 command->completion = NULL;
3742 command->status = 0;
3743 return xhci_disable_slot(xhci, command, udev->slot_id);
3744}
3745
3746
3747
3748
3749
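/*
 * Issues an Address Device command for @udev's slot.  With
 * SETUP_CONTEXT_ONLY the slot is only moved to the Default state; with
 * SETUP_CONTEXT_ADDRESS the device is also assigned a USB address.
 */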
3750static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
3751 enum xhci_setup_dev setup)
3752{
3753 const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
3754 unsigned long flags;
3755 struct xhci_virt_device *virt_dev;
3756 int ret = 0;
3757 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3758 struct xhci_slot_ctx *slot_ctx;
3759 struct xhci_input_control_ctx *ctrl_ctx;
3760 u64 temp_64;
3761 struct xhci_command *command = NULL;
3762
3763 mutex_lock(&xhci->mutex);
3764
3765 if (xhci->xhc_state) {
3766 ret = -ESHUTDOWN;
3767 goto out;
3768 }
3769
3770 if (!udev->slot_id) {
3771 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3772 "Bad Slot ID %d", udev->slot_id);
3773 ret = -EINVAL;
3774 goto out;
3775 }
3776
3777 virt_dev = xhci->devs[udev->slot_id];
3778
3779 if (WARN_ON(!virt_dev)) {
3780
3781
3782
3783
3784
3785 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
3786 udev->slot_id);
3787 ret = -EINVAL;
3788 goto out;
3789 }
3790 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3791 trace_xhci_setup_device_slot(slot_ctx);
3792
3793 if (setup == SETUP_CONTEXT_ONLY) {
3794 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3795 SLOT_STATE_DEFAULT) {
3796 xhci_dbg(xhci, "Slot already in default state\n");
3797 goto out;
3798 }
3799 }
3800
3801 command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
3802 if (!command) {
3803 ret = -ENOMEM;
3804 goto out;
3805 }
3806
3807 command->in_ctx = virt_dev->in_ctx;
3808
3809 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
3810 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
3811 if (!ctrl_ctx) {
3812 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3813 __func__);
3814 ret = -EINVAL;
3815 goto out;
3816 }
3817
3818
3819
3820
3821
3822 if (!slot_ctx->dev_info)
3823 xhci_setup_addressable_virt_dev(xhci, udev);
3824
3825 else
3826 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
3827 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
3828 ctrl_ctx->drop_flags = 0;
3829
3830 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
3831 le32_to_cpu(slot_ctx->dev_info) >> 27);
3832
3833 spin_lock_irqsave(&xhci->lock, flags);
3834 trace_xhci_setup_device(virt_dev);
3835 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
3836 udev->slot_id, setup);
3837 if (ret) {
3838 spin_unlock_irqrestore(&xhci->lock, flags);
3839 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3840 "FIXME: allocate a command ring segment");
3841 goto out;
3842 }
3843 xhci_ring_cmd_db(xhci);
3844 spin_unlock_irqrestore(&xhci->lock, flags);
3845
3846
3847 wait_for_completion(command->completion);
3848
3849
3850
3851
3852
3853 switch (command->status) {
3854 case COMP_COMMAND_ABORTED:
3855 case COMP_COMMAND_RING_STOPPED:
3856 xhci_warn(xhci, "Timeout while waiting for setup device command\n");
3857 ret = -ETIME;
3858 break;
3859 case COMP_CONTEXT_STATE_ERROR:
3860 case COMP_SLOT_NOT_ENABLED_ERROR:
3861 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
3862 act, udev->slot_id);
3863 ret = -EINVAL;
3864 break;
3865 case COMP_USB_TRANSACTION_ERROR:
3866 dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
3867 ret = -EPROTO;
3868 break;
3869 case COMP_INCOMPATIBLE_DEVICE_ERROR:
3870 dev_warn(&udev->dev,
3871 "ERROR: Incompatible device for setup %s command\n", act);
3872 ret = -ENODEV;
3873 break;
3874 case COMP_SUCCESS:
3875 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3876 "Successful setup %s command", act);
3877 break;
3878 default:
3879 xhci_err(xhci,
3880 "ERROR: unexpected setup %s command completion code 0x%x.\n",
3881 act, command->status);
3882 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
3883 ret = -EINVAL;
3884 break;
3885 }
3886 if (ret)
3887 goto out;
3888 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
3889 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3890 "Op regs DCBAA ptr = %#016llx", temp_64);
3891 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3892 "Slot ID %d dcbaa entry @%p = %#016llx",
3893 udev->slot_id,
3894 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
3895 (unsigned long long)
3896 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
3897 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3898 "Output Context DMA address = %#08llx",
3899 (unsigned long long)virt_dev->out_ctx->dma);
3900 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
3901 le32_to_cpu(slot_ctx->dev_info) >> 27);
3902
3903
3904
3905
3906 trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
3907 le32_to_cpu(slot_ctx->dev_info) >> 27);
3908
3909 ctrl_ctx->add_flags = 0;
3910 ctrl_ctx->drop_flags = 0;
3911
3912 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3913 "Internal device address = %d",
3914 le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
3915out:
3916 mutex_unlock(&xhci->mutex);
3917 if (command) {
3918 kfree(command->completion);
3919 kfree(command);
3920 }
3921 return ret;
3922}
3923
3924static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3925{
3926 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
3927}
3928
3929static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
3930{
3931 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
3932}
3933
3934
3935
3936
3937
3938
3939
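/*
 * Translate a hub-relative port number (port1, 1-based, on the USB2 or USB3
 * root hub) into the xHC's raw port register index.
 */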
3940int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
3941{
3942 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3943 __le32 __iomem *base_addr = &xhci->op_regs->port_status_base;
3944 __le32 __iomem *addr;
3945 int raw_port;
3946
3947 if (hcd->speed < HCD_USB3)
3948 addr = xhci->usb2_ports[port1 - 1];
3949 else
3950 addr = xhci->usb3_ports[port1 - 1];
3951
3952 raw_port = (addr - base_addr)/NUM_PORT_REGS + 1;
3953 return raw_port;
3954}
3955
3956
3957
3958
3959
3960static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
3961 struct usb_device *udev, u16 max_exit_latency)
3962{
3963 struct xhci_virt_device *virt_dev;
3964 struct xhci_command *command;
3965 struct xhci_input_control_ctx *ctrl_ctx;
3966 struct xhci_slot_ctx *slot_ctx;
3967 unsigned long flags;
3968 int ret;
3969
3970 spin_lock_irqsave(&xhci->lock, flags);
3971
3972 virt_dev = xhci->devs[udev->slot_id];
3973
3974
3975
3976
3977
3978
3979
3980 if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
3981 spin_unlock_irqrestore(&xhci->lock, flags);
3982 return 0;
3983 }
3984
3985
3986 command = xhci->lpm_command;
3987 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3988 if (!ctrl_ctx) {
3989 spin_unlock_irqrestore(&xhci->lock, flags);
3990 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3991 __func__);
3992 return -ENOMEM;
3993 }
3994
3995 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
3996 spin_unlock_irqrestore(&xhci->lock, flags);
3997
3998 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
3999 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4000 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4001 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4002 slot_ctx->dev_state = 0;
4003
4004 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
4005 "Set up evaluate context for LPM MEL change.");
4006
4007
4008 ret = xhci_configure_endpoint(xhci, udev, command,
4009 true, true);
4010
4011 if (!ret) {
4012 spin_lock_irqsave(&xhci->lock, flags);
4013 virt_dev->current_mel = max_exit_latency;
4014 spin_unlock_irqrestore(&xhci->lock, flags);
4015 }
4016 return ret;
4017}
4018
4019#ifdef CONFIG_PM
4020
4021
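/* BESL to HIRD encoding table for USB 2.0 LPM, values in microseconds. */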
4022static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
4023 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
4024
4025
4026static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
4027 struct usb_device *udev)
4028{
4029 int u2del, besl, besl_host;
4030 int besl_device = 0;
4031 u32 field;
4032
4033 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
4034 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4035
4036 if (field & USB_BESL_SUPPORT) {
4037 for (besl_host = 0; besl_host < 16; besl_host++) {
4038 if (xhci_besl_encoding[besl_host] >= u2del)
4039 break;
4040 }
4041
4042 if (field & USB_BESL_BASELINE_VALID)
4043 besl_device = USB_GET_BESL_BASELINE(field);
4044 else if (field & USB_BESL_DEEP_VALID)
4045 besl_device = USB_GET_BESL_DEEP(field);
4046 } else {
4047 if (u2del <= 50)
4048 besl_host = 0;
4049 else
4050 besl_host = (u2del - 51) / 75 + 1;
4051 }
4052
4053 besl = besl_host + besl_device;
4054 if (besl > 15)
4055 besl = 15;
4056
4057 return besl;
4058}
4059
4060
4061static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
4062{
4063 u32 field;
4064 int l1;
4065 int besld = 0;
4066 int hirdm = 0;
4067
4068 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4069
4070
4071 l1 = udev->l1_params.timeout / 256;
4072
4073
4074 if (field & USB_BESL_DEEP_VALID) {
4075 besld = USB_GET_BESL_DEEP(field);
4076 hirdm = 1;
4077 }
4078
4079 return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
4080}
4081
4082static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4083 struct usb_device *udev, int enable)
4084{
4085 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4086 __le32 __iomem **port_array;
4087 __le32 __iomem *pm_addr, *hlpm_addr;
4088 u32 pm_val, hlpm_val, field;
4089 unsigned int port_num;
4090 unsigned long flags;
4091 int hird, exit_latency;
4092 int ret;
4093
4094 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
4095 !udev->lpm_capable)
4096 return -EPERM;
4097
4098 if (!udev->parent || udev->parent->parent ||
4099 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4100 return -EPERM;
4101
4102 if (udev->usb2_hw_lpm_capable != 1)
4103 return -EPERM;
4104
4105 spin_lock_irqsave(&xhci->lock, flags);
4106
4107 port_array = xhci->usb2_ports;
4108 port_num = udev->portnum - 1;
4109 pm_addr = port_array[port_num] + PORTPMSC;
4110 pm_val = readl(pm_addr);
4111 hlpm_addr = port_array[port_num] + PORTHLPMC;
4112 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4113
4114 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
4115 enable ? "enable" : "disable", port_num + 1);
4116
4117 if (enable) {
4118
4119 if (udev->usb2_hw_lpm_besl_capable) {
4120
4121
4122
4123
4124 if ((field & USB_BESL_SUPPORT) &&
4125 (field & USB_BESL_BASELINE_VALID))
4126 hird = USB_GET_BESL_BASELINE(field);
4127 else
4128 hird = udev->l1_params.besl;
4129
4130 exit_latency = xhci_besl_encoding[hird];
4131 spin_unlock_irqrestore(&xhci->lock, flags);
4132
4133
4134
4135
4136
4137
4138
4139
4140 mutex_lock(hcd->bandwidth_mutex);
4141 ret = xhci_change_max_exit_latency(xhci, udev,
4142 exit_latency);
4143 mutex_unlock(hcd->bandwidth_mutex);
4144
4145 if (ret < 0)
4146 return ret;
4147 spin_lock_irqsave(&xhci->lock, flags);
4148
4149 hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
4150 writel(hlpm_val, hlpm_addr);
4151
4152 readl(hlpm_addr);
4153 } else {
4154 hird = xhci_calculate_hird_besl(xhci, udev);
4155 }
4156
4157 pm_val &= ~PORT_HIRD_MASK;
4158 pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
4159 writel(pm_val, pm_addr);
4160 pm_val = readl(pm_addr);
4161 pm_val |= PORT_HLE;
4162 writel(pm_val, pm_addr);
4163
4164 readl(pm_addr);
4165 } else {
4166 pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
4167 writel(pm_val, pm_addr);
4168
4169 readl(pm_addr);
4170 if (udev->usb2_hw_lpm_besl_capable) {
4171 spin_unlock_irqrestore(&xhci->lock, flags);
4172 mutex_lock(hcd->bandwidth_mutex);
4173 xhci_change_max_exit_latency(xhci, udev, 0);
4174 mutex_unlock(hcd->bandwidth_mutex);
4175 return 0;
4176 }
4177 }
4178
4179 spin_unlock_irqrestore(&xhci->lock, flags);
4180 return 0;
4181}
4182
4183
4184
4185
4186
4187static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
4188 unsigned capability)
4189{
4190 u32 port_offset, port_count;
4191 int i;
4192
4193 for (i = 0; i < xhci->num_ext_caps; i++) {
4194 if (xhci->ext_caps[i] & capability) {
4195
4196 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
4197 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
4198 if (port >= port_offset &&
4199 port < port_offset + port_count)
4200 return 1;
4201 }
4202 }
4203 return 0;
4204}
4205
4206static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4207{
4208 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4209 int portnum = udev->portnum - 1;
4210
4211 if (hcd->speed >= HCD_USB3 || !xhci->sw_lpm_support ||
4212 !udev->lpm_capable)
4213 return 0;
4214
4215
4216 if (!udev->parent || udev->parent->parent ||
4217 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4218 return 0;
4219
4220 if (xhci->hw_lpm_support == 1 &&
4221 xhci_check_usb2_port_capability(
4222 xhci, portnum, XHCI_HLC)) {
4223 udev->usb2_hw_lpm_capable = 1;
4224 udev->l1_params.timeout = XHCI_L1_TIMEOUT;
4225 udev->l1_params.besl = XHCI_DEFAULT_BESL;
4226 if (xhci_check_usb2_port_capability(xhci, portnum,
4227 XHCI_BLC))
4228 udev->usb2_hw_lpm_besl_capable = 1;
4229 }
4230
4231 return 0;
4232}
4233
4234
4235
4236
4237static unsigned long long xhci_service_interval_to_ns(
4238 struct usb_endpoint_descriptor *desc)
4239{
4240 return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
4241}
4242
4243static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
4244 enum usb3_link_state state)
4245{
4246 unsigned long long sel;
4247 unsigned long long pel;
4248 unsigned int max_sel_pel;
4249 char *state_name;
4250
4251 switch (state) {
4252 case USB3_LPM_U1:
4253
4254 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
4255 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
4256 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
4257 state_name = "U1";
4258 break;
4259 case USB3_LPM_U2:
4260 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
4261 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
4262 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
4263 state_name = "U2";
4264 break;
4265 default:
4266 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
4267 __func__);
4268 return USB3_LPM_DISABLED;
4269 }
4270
4271 if (sel <= max_sel_pel && pel <= max_sel_pel)
4272 return USB3_LPM_DEVICE_INITIATED;
4273
4274 if (sel > max_sel_pel)
4275 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4276 "due to long SEL %llu ms\n",
4277 state_name, sel);
4278 else
4279 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4280 "due to long PEL %llu ms\n",
4281 state_name, pel);
4282 return USB3_LPM_DISABLED;
4283}
4284
4285
4286
4287
4288
4289
4290
4291
4292
4293static unsigned long long xhci_calculate_intel_u1_timeout(
4294 struct usb_device *udev,
4295 struct usb_endpoint_descriptor *desc)
4296{
4297 unsigned long long timeout_ns;
4298 int ep_type;
4299 int intr_type;
4300
4301 ep_type = usb_endpoint_type(desc);
4302 switch (ep_type) {
4303 case USB_ENDPOINT_XFER_CONTROL:
4304 timeout_ns = udev->u1_params.sel * 3;
4305 break;
4306 case USB_ENDPOINT_XFER_BULK:
4307 timeout_ns = udev->u1_params.sel * 5;
4308 break;
4309 case USB_ENDPOINT_XFER_INT:
4310 intr_type = usb_endpoint_interrupt_type(desc);
4311 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
4312 timeout_ns = udev->u1_params.sel * 3;
4313 break;
4314 }
4315
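		/* Otherwise the timeout is calculated the same way as for
		 * isochronous endpoints; fall through.
		 */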
4316 case USB_ENDPOINT_XFER_ISOC:
4317 timeout_ns = xhci_service_interval_to_ns(desc);
4318 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
4319 if (timeout_ns < udev->u1_params.sel * 2)
4320 timeout_ns = udev->u1_params.sel * 2;
4321 break;
4322 default:
4323 return 0;
4324 }
4325
4326 return timeout_ns;
4327}
4328
4329
4330static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
4331 struct usb_device *udev,
4332 struct usb_endpoint_descriptor *desc)
4333{
4334 unsigned long long timeout_ns;
4335
4336 if (xhci->quirks & XHCI_INTEL_HOST)
4337 timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
4338 else
4339 timeout_ns = udev->u1_params.sel;
4340
4341
4342
4343
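	/* The hub-encoded U1 timeout is in microseconds.  Never return 0,
	 * since that value means USB3_LPM_DISABLED.
	 */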
4344 if (timeout_ns == USB3_LPM_DISABLED)
4345 timeout_ns = 1;
4346 else
4347 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
4348
4349
4350
4351
4352 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
4353 return timeout_ns;
4354 dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
4355 "due to long timeout %llu ms\n", timeout_ns);
4356 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
4357}
4358
4359
4360
4361
4362
4363
4364
4365static unsigned long long xhci_calculate_intel_u2_timeout(
4366 struct usb_device *udev,
4367 struct usb_endpoint_descriptor *desc)
4368{
4369 unsigned long long timeout_ns;
4370 unsigned long long u2_del_ns;
4371
4372 timeout_ns = 10 * 1000 * 1000;
4373
4374 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4375 (xhci_service_interval_to_ns(desc) > timeout_ns))
4376 timeout_ns = xhci_service_interval_to_ns(desc);
4377
4378 u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
4379 if (u2_del_ns > timeout_ns)
4380 timeout_ns = u2_del_ns;
4381
4382 return timeout_ns;
4383}
4384
4385
4386static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
4387 struct usb_device *udev,
4388 struct usb_endpoint_descriptor *desc)
4389{
4390 unsigned long long timeout_ns;
4391
4392 if (xhci->quirks & XHCI_INTEL_HOST)
4393 timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
4394 else
4395 timeout_ns = udev->u2_params.sel;
4396
4397
4398 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
4399
4400
4401
4402 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
4403 return timeout_ns;
4404 dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
4405 "due to long timeout %llu ms\n", timeout_ns);
4406 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
4407}
4408
4409static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4410 struct usb_device *udev,
4411 struct usb_endpoint_descriptor *desc,
4412 enum usb3_link_state state,
4413 u16 *timeout)
4414{
4415 if (state == USB3_LPM_U1)
4416 return xhci_calculate_u1_timeout(xhci, udev, desc);
4417 else if (state == USB3_LPM_U2)
4418 return xhci_calculate_u2_timeout(xhci, udev, desc);
4419
4420 return USB3_LPM_DISABLED;
4421}
4422
4423static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4424 struct usb_device *udev,
4425 struct usb_endpoint_descriptor *desc,
4426 enum usb3_link_state state,
4427 u16 *timeout)
4428{
4429 u16 alt_timeout;
4430
4431 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4432 desc, state, timeout);
4433
4434
4435
4436
4437
4438 if (alt_timeout == USB3_LPM_DISABLED ||
4439 alt_timeout == USB3_LPM_DEVICE_INITIATED) {
4440 *timeout = alt_timeout;
4441 return -E2BIG;
4442 }
4443 if (alt_timeout > *timeout)
4444 *timeout = alt_timeout;
4445 return 0;
4446}
4447
4448static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
4449 struct usb_device *udev,
4450 struct usb_host_interface *alt,
4451 enum usb3_link_state state,
4452 u16 *timeout)
4453{
4454 int j;
4455
4456 for (j = 0; j < alt->desc.bNumEndpoints; j++) {
4457 if (xhci_update_timeout_for_endpoint(xhci, udev,
4458 &alt->endpoint[j].desc, state, timeout))
4459 return -E2BIG;
4461 }
4462 return 0;
4463}
4464
4465static int xhci_check_intel_tier_policy(struct usb_device *udev,
4466 enum usb3_link_state state)
4467{
4468 struct usb_device *parent;
4469 unsigned int num_hubs;
4470
4471 if (state == USB3_LPM_U2)
4472 return 0;
4473
4474
4475 for (parent = udev->parent, num_hubs = 0; parent->parent;
4476 parent = parent->parent)
4477 num_hubs++;
4478
4479 if (num_hubs < 2)
4480 return 0;
4481
4482 dev_dbg(&udev->dev, "Disabling U1 link state for device"
4483 " below second-tier hub.\n");
4484 dev_dbg(&udev->dev, "Plug device into first-tier hub "
4485 "to decrease power consumption.\n");
4486 return -E2BIG;
4487}
4488
4489static int xhci_check_tier_policy(struct xhci_hcd *xhci,
4490 struct usb_device *udev,
4491 enum usb3_link_state state)
4492{
4493 if (xhci->quirks & XHCI_INTEL_HOST)
4494 return xhci_check_intel_tier_policy(udev, state);
4495 else
4496 return 0;
4497}
4498
4499
4500
4501
4502
4503
4504static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
4505 struct usb_device *udev, enum usb3_link_state state)
4506{
4507 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4508 struct usb_host_config *config;
4509 char *state_name;
4510 int i;
4511 u16 timeout = USB3_LPM_DISABLED;
4512
4513 if (state == USB3_LPM_U1)
4514 state_name = "U1";
4515 else if (state == USB3_LPM_U2)
4516 state_name = "U2";
4517 else {
4518 dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
4519 state);
4520 return timeout;
4521 }
4522
4523 if (xhci_check_tier_policy(xhci, udev, state) < 0)
4524 return timeout;
4525
4526
4527
4528
4529 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
4530 state, &timeout))
4531 return timeout;
4532
4533 config = udev->actconfig;
4534 if (!config)
4535 return timeout;
4536
4537 for (i = 0; i < config->desc.bNumInterfaces; i++) {
4538 struct usb_driver *driver;
4539 struct usb_interface *intf = config->interface[i];
4540
4541 if (!intf)
4542 continue;
4543
4544
4545
4546
4547 if (intf->dev.driver) {
4548 driver = to_usb_driver(intf->dev.driver);
4549 if (driver && driver->disable_hub_initiated_lpm) {
4550 dev_dbg(&udev->dev, "Hub-initiated %s disabled "
4551 "at request of driver %s\n",
4552 state_name, driver->name);
4553 return xhci_get_timeout_no_hub_lpm(udev, state);
4554 }
4555 }
4556
4557
4558 if (!intf->cur_altsetting)
4559 continue;
4560
4561 if (xhci_update_timeout_for_interface(xhci, udev,
4562 intf->cur_altsetting,
4563 state, &timeout))
4564 return timeout;
4565 }
4566 return timeout;
4567}

static int calculate_max_exit_latency(struct usb_device *udev,
		enum usb3_link_state state_changed,
		u16 hub_encoded_timeout)
{
	unsigned long long u1_mel_us = 0;
	unsigned long long u2_mel_us = 0;
	unsigned long long mel_us = 0;
	bool disabling_u1;
	bool disabling_u2;
	bool enabling_u1;
	bool enabling_u2;

	disabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);
	disabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);

	enabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);
	enabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);

	/* If U1 was already enabled and we're not disabling it,
	 * or we're going to enable U1, account for the U1 max exit latency.
	 */
	if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
			enabling_u1)
		u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
	if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
			enabling_u2)
		u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);

	if (u1_mel_us > u2_mel_us)
		mel_us = u1_mel_us;
	else
		mel_us = u2_mel_us;

	if (mel_us > MAX_EXIT) {
		dev_warn(&udev->dev, "Link PM max exit latency of %lluus is too big.\n",
				mel_us);
		return -E2BIG;
	}
	return mel_us;
}
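/*
 * Example of the arithmetic above (illustrative values): usbcore tracks the
 * per-state exit latency in udev->u1_params.mel / u2_params.mel in
 * nanoseconds, so a U2 exit latency of 4200 ns becomes
 * DIV_ROUND_UP(4200, 1000) = 5 us. The larger of the U1/U2 figures is what
 * gets programmed, provided it does not exceed the MAX_EXIT limit checked
 * above.
 */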

static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci;
	u16 hub_encoded_timeout;
	int mel;
	int ret;

	xhci = hcd_to_xhci(hcd);
	/* Refuse hub-initiated USB3 LPM unless this host advertises LPM
	 * support and the device has a slot allocated.
	 */
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return USB3_LPM_DISABLED;

	hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
	if (mel < 0) {
		/* Max Exit Latency is too big, disable LPM entirely. */
		hub_encoded_timeout = USB3_LPM_DISABLED;
		mel = 0;
	}

	ret = xhci_change_max_exit_latency(xhci, udev, mel);
	if (ret)
		return ret;
	return hub_encoded_timeout;
}

static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci;
	u16 mel;

	xhci = hcd_to_xhci(hcd);
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return 0;

	mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
	return xhci_change_max_exit_latency(xhci, udev, mel);
}
#else

/* Link power management support is compiled out: provide no-op stubs so the
 * hc_driver table below can be populated unconditionally.
 */
static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
		struct usb_device *udev, int enable)
{
	return 0;
}

static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return 0;
}

static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	return USB3_LPM_DISABLED;
}

static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	return 0;
}
#endif

/*
 * Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device tree.
 */
static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
			struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}

	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd)
		return -ENOMEM;

	ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		xhci_free_command(xhci, config_cmd);
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (hdev->speed == USB_SPEED_HIGH &&
			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENOMEM;
	}

	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
	/*
	 * MTT should be 0 for a full-speed hub, but it may already have been
	 * set when the virtual device was created, so clear it explicitly.
	 */
	if (tt->multi)
		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	else if (hdev->speed == USB_SPEED_FULL)
		slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);

	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 *
		 * xHCI 1.0: this field shall be 0 if the device
		 * is not a high-speed hub.
		 */
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
			slot_ctx->tt_info |=
				cpu_to_le32(TT_THINK_TIME(think_time));
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_free_command(xhci, config_cmd);
	return ret;
}
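/*
 * Illustrative arithmetic for the think-time conversion above: usbcore
 * reports tt->think_time in nanoseconds, with 8 FS bit times corresponding
 * to roughly 666 ns. A hub reporting 666 ns therefore encodes as
 * (666 / 666) - 1 = 0, 1332 ns as 1, and so on, matching the 0..3 TT Think
 * Time encoding written into the slot context.
 */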

static int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	return readl(&xhci->run_regs->microframe_index) >> 3;
}
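/*
 * The MFINDEX register counts 125 us microframes; shifting right by three
 * divides by eight to give the 1 ms frame number usbcore expects. For
 * example (illustrative), a raw MFINDEX value of 0x123 corresponds to frame
 * 0x24.
 */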

int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
	struct xhci_hcd *xhci;
	/* The sysdev pointer names the device that actually does DMA; it is
	 * used for the DMA mask setup below.
	 */
	struct device *dev = hcd->self.sysdev;
	int retval;

	/* Accept arbitrarily long scatter-gather lists */
	hcd->self.sg_tablesize = ~0;

	/* Support building packets from discontinuous buffers */
	hcd->self.no_sg_constraint = 1;

	/* xHCI controllers don't stop the ep queue on short packets */
	hcd->self.no_stop_on_short = 1;

	xhci = hcd_to_xhci(hcd);

	if (usb_hcd_is_primary_hcd(hcd)) {
		xhci->main_hcd = hcd;
		/* Mark the first roothub as being USB 2.0.
		 * The USB 3.x roothub is registered separately.
		 */
		hcd->speed = HCD_USB2;
		hcd->self.root_hub->speed = USB_SPEED_HIGH;
		/*
		 * The USB 2.0 roothub under xHCI has an integrated TT
		 * (rate matching hub) as opposed to having an OHCI/UHCI
		 * companion controller.
		 */
		hcd->has_tt = 1;
	} else {
		/* Some USB 3.1 hosts return sbrn 0x30, so don't rely on sbrn alone */
		if (xhci->sbrn == 0x31 || xhci->usb3_rhub.min_rev >= 1) {
			xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n");
			hcd->speed = HCD_USB31;
			hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
		}
		/* The xhci private state was already set up when the primary
		 * hcd was registered; nothing more to do for the shared hcd.
		 */
		return 0;
	}

	mutex_init(&xhci->mutex);
	xhci->cap_regs = hcd->regs;
	xhci->op_regs = hcd->regs +
		HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
	xhci->run_regs = hcd->regs +
		(readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);

	/* Cache read-only capability registers. hcc_params is briefly used as
	 * scratch for the capability base register so HCIVERSION can be
	 * extracted, then overwritten with the real HCCPARAMS value.
	 */
	xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
	xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase);
	xhci->hci_version = HC_VERSION(xhci->hcc_params);
	xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
	if (xhci->hci_version > 0x100)
		xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
	xhci_print_registers(xhci);

	xhci->quirks |= quirks;

	get_quirks(dev, xhci);

	/* Hosts newer than 0.96 have been seen to generate a spurious success
	 * event after a short transfer; set the quirk so such events are
	 * ignored.
	 */
	if (xhci->hci_version > 0x96)
		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;

	/* Make sure the HC is halted. */
	retval = xhci_halt(xhci);
	if (retval)
		return retval;

	xhci_dbg(xhci, "Resetting HCD\n");
	/* Reset the internal HC memory state and registers. */
	retval = xhci_reset(xhci);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Reset complete\n");

	/*
	 * Some controllers set the AC64 bit in HCCPARAMS even though they
	 * cannot actually address 64 bits; clear the bit for those hosts so
	 * the 32-bit DMA mask is used below.
	 */
	if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
		xhci->hcc_params &= ~BIT(0);

	/* Set dma_mask and coherent_dma_mask to 64 bits
	 * if the xHC supports 64-bit addressing.
	 */
	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
			!dma_set_mask(dev, DMA_BIT_MASK(64))) {
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
	} else {
		/*
		 * Explicitly set the 32-bit mask to avoid errors when a
		 * 32-bit-only controller sits in a 64-bit capable system.
		 */
		retval = dma_set_mask(dev, DMA_BIT_MASK(32));
		if (retval)
			return retval;
		xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	}

	xhci_dbg(xhci, "Calling HCD init\n");
	/* Initialize HCD and host controller data structures. */
	retval = xhci_init(hcd);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Called HCD init\n");

	xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%08x\n",
			xhci->hcc_params, xhci->hci_version, xhci->quirks);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_gen_setup);

static const struct hc_driver xhci_hc_driver = {
	.description = "xhci-hcd",
	.product_desc = "xHCI Host Controller",
	.hcd_priv_size = sizeof(struct xhci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq = xhci_irq,
	.flags = HCD_MEMORY | HCD_USB3 | HCD_SHARED,

	/*
	 * basic lifecycle operations
	 */
	.reset = NULL, /* set in xhci_init_driver() */
	.start = xhci_run,
	.stop = xhci_stop,
	.shutdown = xhci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue = xhci_urb_enqueue,
	.urb_dequeue = xhci_urb_dequeue,
	.alloc_dev = xhci_alloc_dev,
	.free_dev = xhci_free_dev,
	.alloc_streams = xhci_alloc_streams,
	.free_streams = xhci_free_streams,
	.add_endpoint = xhci_add_endpoint,
	.drop_endpoint = xhci_drop_endpoint,
	.endpoint_reset = xhci_endpoint_reset,
	.check_bandwidth = xhci_check_bandwidth,
	.reset_bandwidth = xhci_reset_bandwidth,
	.address_device = xhci_address_device,
	.enable_device = xhci_enable_device,
	.update_hub_device = xhci_update_hub_device,
	.reset_device = xhci_discover_or_reset_device,

	/*
	 * scheduling support
	 */
	.get_frame_number = xhci_get_frame,

	/*
	 * root hub support
	 */
	.hub_control = xhci_hub_control,
	.hub_status_data = xhci_hub_status_data,
	.bus_suspend = xhci_bus_suspend,
	.bus_resume = xhci_bus_resume,

	/*
	 * call back when device connected and addressed
	 */
	.update_device = xhci_update_device,
	.set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm,
	.enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout,
	.disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout,
	.find_raw_port_number = xhci_find_raw_port_number,
};

void xhci_init_driver(struct hc_driver *drv,
		const struct xhci_driver_overrides *over)
{
	BUG_ON(!over);

	/* Copy the generic table, then apply any vendor overrides. */
	*drv = xhci_hc_driver;

	if (over) {
		drv->hcd_priv_size += over->extra_priv_size;
		if (over->reset)
			drv->reset = over->reset;
		if (over->start)
			drv->start = over->start;
	}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);
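/*
 * Usage sketch (illustrative, not part of this file): a bus-glue driver
 * typically keeps its overrides in a static table and calls
 * xhci_init_driver() once before registering its hc_driver, e.g.:
 *
 *	static const struct xhci_driver_overrides my_overrides = {
 *		.extra_priv_size = sizeof(struct my_priv),	// hypothetical
 *		.reset = my_setup,				// hypothetical
 *	};
 *	static struct hc_driver my_hc_driver;
 *
 *	xhci_init_driver(&my_hc_driver, &my_overrides);
 *
 * The my_* names above are placeholders; the in-tree PCI and platform glue
 * are the real users of this helper.
 */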

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
	/*
	 * Check the compiler-generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);

	if (usb_disabled())
		return -ENODEV;

	return 0;
}

/*
 * If an init function is provided, an exit function must also be provided
 * to allow the module to be unloaded.
 */
static void __exit xhci_hcd_fini(void) { }

module_init(xhci_hcd_init);
module_exit(xhci_hcd_fini);