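// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 */
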
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"
#include "xhci-debugfs.h"
#include "xhci-dbgcap.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned long long quirks;
module_param(quirks, ullong, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
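
/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" have passed (major
 * hardware flakeout), or the register reads as all-f's (hardware removed).
 */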
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = readl(ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
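
/*
 * Disable interrupts and begin the xHCI halting process.
 */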
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = readl(&xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = readl(&xhci->op_regs->command);
	cmd &= mask;
	writel(cmd, &xhci->op_regs->command);
}
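
/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */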
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
	xhci_quiesce(xhci);

	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (ret) {
		xhci_warn(xhci, "Host halt failed, %d\n", ret);
		return ret;
	}
	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	return ret;
}
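
/*
 * Set the run bit and wait for the host to be running.
 */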
int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
			temp);
	writel(temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, waited %u microseconds.\n",
			 XHCI_MAX_HALT_USEC);
	if (!ret)
		/* clear state flags. Including dying, halted or removing */
		xhci->xhc_state = 0;

	return ret;
}
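
/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */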
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret, i;

	state = readl(&xhci->op_regs->status);

	if (state == ~(u32)0) {
		xhci_warn(xhci, "Host not accessible, reset failed.\n");
		return -ENODEV;
	}

	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
	command = readl(&xhci->op_regs->command);
	command |= CMD_RESET;
	writel(command, &xhci->op_regs->command);

	/*
	 * Existing Intel xHCI controllers require a delay of 1 ms,
	 * after setting the CMD_RESET bit, and before accessing any HC
	 * registers. This allows the HC to complete the reset operation
	 * and be ready for HC register access.  Without this delay, the
	 * subsequent HC register access may result in a system hang.
	 */
	if (xhci->quirks & XHCI_INTEL_HOST)
		udelay(1000);

	ret = xhci_handshake(&xhci->op_regs->command,
			CMD_RESET, 0, 10 * 1000 * 1000);
	if (ret)
		return ret;

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			 "Wait for controller to be ready for doorbell rings");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_CNR, 0, 10 * 1000 * 1000);

	for (i = 0; i < 2; i++) {
		xhci->bus_state[i].port_c_suspend = 0;
		xhci->bus_state[i].suspended_ports = 0;
		xhci->bus_state[i].resuming_ports = 0;
	}

	return ret;
}
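
/*
 * Quirk: zero stale 64-bit register halves left behind by firmware.
 *
 * With the XHCI_ZERO_64B_REGS quirk set and the device behind an IOMMU,
 * clear any 64-bit pointer register whose upper half is non-zero.  The
 * writes are expected to fault, so the Host System Error interrupt is
 * masked and any pending fault status is cleared first; the resulting
 * STS_FATAL is then waited for and logged.
 */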
static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	int err, i;
	u64 val;

	if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !dev->iommu_group)
		return;

	xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");

	/* Clear HSEIE so that faults do not get signaled */
	val = readl(&xhci->op_regs->command);
	val &= ~CMD_HSEIE;
	writel(val, &xhci->op_regs->command);

	/* Clear HSE (aka FATAL) */
	val = readl(&xhci->op_regs->status);
	val |= STS_FATAL;
	writel(val, &xhci->op_regs->status);

	/* Now zero the registers, and brace for impact */
	val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);

	for (i = 0; i < HCS_MAX_INTRS(xhci->hcs_params1); i++) {
		struct xhci_intr_reg __iomem *ir;

		ir = &xhci->run_regs->ir_set[i];
		val = xhci_read_64(xhci, &ir->erst_base);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_base);
		val = xhci_read_64(xhci, &ir->erst_dequeue);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_dequeue);
	}

	/* Wait for the fault to appear. It will be cleared on reset */
	err = xhci_handshake(&xhci->op_regs->status,
			STS_FATAL, STS_FATAL,
			XHCI_MAX_HALT_USEC);
	if (!err)
		xhci_info(xhci, "Fault detected\n");
}

#ifdef CONFIG_USB_PCI
/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	/*
	 * TODO: check with MSI SoC for sysdev
	 */
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret < 0) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"failed to allocate MSI entry");
		return ret;
	}

	ret = request_irq(pdev->irq, xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"disable MSI interrupt");
		pci_free_irq_vectors(pdev);
	}

	return ret;
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors supported:
	 * - HCS_MAX_INTRS: the max number of interrupters the host supports,
	 *   from the xHCI HCSPARAMS1 register.
	 * - num_online_cpus: one MSI-X vector per CPU core, plus one extra
	 *   vector to ensure an interrupt is always available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));

	ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count,
			PCI_IRQ_MSIX);
	if (ret < 0) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Failed to enable MSI-X");
		return ret;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(pci_irq_vector(pdev, i), xhci_msi_irq, 0,
				"xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
	pci_free_irq_vectors(pdev);
	return ret;
}
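
/* Free any IRQs and disable MSI-X */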
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	if (xhci->quirks & XHCI_PLAT)
		return;

	/* return if using legacy interrupt */
	if (hcd->irq > 0)
		return;

	if (hcd->msix_enabled) {
		int i;

		for (i = 0; i < xhci->msix_count; i++)
			free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
	} else {
		free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci));
	}

	pci_free_irq_vectors(pdev);
	hcd->msix_enabled = 0;
}

static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);

	if (hcd->msix_enabled) {
		struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
		int i;

		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(pci_irq_vector(pdev, i));
	}
}

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev;
	int ret;

	/* The xhci platform device has set up IRQs through usb_add_hcd. */
	if (xhci->quirks & XHCI_PLAT)
		return 0;

	pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		goto legacy_irq;

	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = 0;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

	if (!ret) {
		hcd->msi_enabled = 1;
		return 0;
	}

	if (!pdev->irq) {
		xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
		return -EINVAL;
	}

 legacy_irq:
	if (!strlen(hcd->irq_descr))
		snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
			 hcd->driver->description, hcd->self.busnum);

	/* fall back to legacy interrupt */
	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);
	if (ret) {
		xhci_err(xhci, "request interrupt %d failed\n",
			 pdev->irq);
		return ret;
	}
	hcd->irq = pdev->irq;
	return 0;
}

#else

static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	return 0;
}

static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif

static void compliance_mode_recovery(struct timer_list *t)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	struct xhci_hub *rhub;
	u32 temp;
	int i;

	xhci = from_timer(xhci, t, comp_mode_recovery_timer);
	rhub = &xhci->usb3_rhub;

	for (i = 0; i < rhub->num_ports; i++) {
		temp = readl(rhub->ports[i]->addr);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Compliance mode detected->port %d",
					i + 1);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Attempting compliance mode recovery");
			hcd = xhci->shared_hcd;

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}
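
/*
 * Quirk to work around an issue generated by the SN65LVPE502CP USB3.0
 * re-driver, which causes ports behind it to enter compliance mode.
 * Therefore we need to check if a port is in compliance mode in a periodic
 * polling timer, and if so notify the USB core so it can get the port out
 * of compliance mode (as per xHCI Spec section 4.19.1.1.4).
 */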
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
		    0);
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Compliance mode recovery timer initialized");
}
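
/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620, Z820 and
 * Z1 Workstation
 */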
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
			strstr(dmi_product_name, "Z620") ||
			strstr(dmi_product_name, "Z820") ||
			strstr(dmi_product_name, "Z1 Workstation"))
		return true;

	return false;
}

static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
}
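
/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device contexts array, set up
 * a command ring segment (or more), create event and transfer rings.
 */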
static int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"QUIRK: Not clearing Link TRB chain bits.");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI doesn't need link TRB QUIRK");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

	/* Initializing Compliance Mode Recovery Data If Needed */
	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	return retval;
}

/*-------------------------------------------------------------------------*/
static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;
	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB3 roothub");
	return 0;
}
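
/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */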
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Start the xHCI host controller running only after the USB 2.0
	 * roothub is setup.
	 */
	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

	ret = xhci_try_enable_msi(hcd);
	if (ret)
		return ret;

	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"ERST deq = 64'h%0lx", (long unsigned int) temp_64);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set the interrupt modulation register");
	temp = readl(&xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
	writel(temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enable interrupts, cmd = 0x%x.", temp);
	writel(temp, &xhci->op_regs->command);

	temp = readl(&xhci->ir_set->irq_pending);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);

	if (xhci->quirks & XHCI_NEC_HOST) {
		struct xhci_command *command;

		command = xhci_alloc_command(xhci, false, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));
		if (ret)
			xhci_free_command(xhci, command);
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB2 roothub");

	xhci_dbc_init(xhci);

	xhci_debugfs_init(xhci);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);
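
/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */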
static void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	mutex_lock(&xhci->mutex);

	/* Only halt host and free memory after both hcds are removed */
	if (!usb_hcd_is_primary_hcd(hcd)) {
		/* usb core will free this hcd shortly, unset pointer */
		xhci->shared_hcd = NULL;
		mutex_unlock(&xhci->mutex);
		return;
	}

	xhci_dbc_exit(xhci);

	spin_lock_irq(&xhci->lock);
	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	/* Deleting Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Disabling event ring interrupts");
	temp = readl(&xhci->op_regs->status);
	writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
	temp = readl(&xhci->ir_set->irq_pending);
	writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
	xhci_mem_cleanup(xhci);
	xhci_debugfs_exit(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_stop completed - status = %x",
			readl(&xhci->op_regs->status));
	mutex_unlock(&xhci->mutex);
}
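
/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that
 * the machine will be powered off, and the HC's internal state machines
 * will be reset when it loses power.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */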
static void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/* Workaround for spurious wakeups at shutdown with HSW */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_shutdown completed - status = %x",
			readl(&xhci->op_regs->status));

	/* Yet another workaround for spurious wakeups at shutdown with HSW */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		pci_set_power_state(to_pci_dev(hcd->self.sysdev), PCI_D3hot);
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = readl(&xhci->op_regs->command);
	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
	xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
	xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	writel(xhci->s3.command, &xhci->op_regs->command);
	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
	writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
	writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				      xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting command ring address to 0x%llx",
			(unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}
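
/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, so we can't just use the normal ring enqueue/dequeue pointers.
 */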
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0,
			sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and we want to make sure the hardware doesn't access random memory
	 * if it's halted, but the OS doesn't actually stop the controller.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
{
	struct xhci_port **ports;
	int port_index;
	unsigned long flags;
	u32 t1, t2;

	spin_lock_irqsave(&xhci->lock, flags);

	/* disable usb3 ports Wake bits */
	port_index = xhci->usb3_rhub.num_ports;
	ports = xhci->usb3_rhub.ports;
	while (port_index--) {
		t1 = readl(ports[port_index]->addr);
		t1 = xhci_port_state_to_neutral(t1);
		t2 = t1 & ~PORT_WAKE_BITS;
		if (t1 != t2)
			writel(t2, ports[port_index]->addr);
	}

	/* disable usb2 ports Wake bits */
	port_index = xhci->usb2_rhub.num_ports;
	ports = xhci->usb2_rhub.ports;
	while (port_index--) {
		t1 = readl(ports[port_index]->addr);
		t1 = xhci_port_state_to_neutral(t1);
		t2 = t1 & ~PORT_WAKE_BITS;
		if (t1 != t2)
			writel(t2, ports[port_index]->addr);
	}

	spin_unlock_irqrestore(&xhci->lock, flags);
}

static bool xhci_pending_portevent(struct xhci_hcd *xhci)
{
	struct xhci_port **ports;
	int port_index;
	u32 status;
	u32 portsc;

	status = readl(&xhci->op_regs->status);
	if (status & STS_EINT)
		return true;
	/*
	 * Checking STS_EINT is not enough as there is a lag between a change
	 * bit being set and the Port Status Change Event that it generated
	 * being written to the Event Ring.  See note in xhci 1.1 section
	 * 4.19.2.
	 */
	port_index = xhci->usb2_rhub.num_ports;
	ports = xhci->usb2_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & PORT_CHANGE_MASK ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	port_index = xhci->usb3_rhub.num_ports;
	ports = xhci->usb3_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & PORT_CHANGE_MASK ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	return false;
}
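
/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transition into S3/S4 mode.
 */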
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
	int rc = 0;
	unsigned int delay = XHCI_MAX_HALT_USEC;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;

	if (!hcd->state)
		return 0;

	if (hcd->state != HC_STATE_SUSPENDED ||
			xhci->shared_hcd->state != HC_STATE_SUSPENDED)
		return -EINVAL;

	xhci_dbc_suspend(xhci);

	/* Clear root port wake on bits if wakeup not allowed. */
	if (!do_wakeup)
		xhci_disable_port_wake_on_bits(xhci);

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);
	clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	del_timer_sync(&xhci->shared_hcd->rh_timer);

	if (xhci->quirks & XHCI_SUSPEND_DELAY)
		usleep_range(1000, 1500);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped assuming that port suspend has done */

	/* step 2: clear Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command &= ~CMD_RUN;
	writel(command, &xhci->op_regs->command);

	/* Some chips from Fresco Logic need an extraordinary delay */
	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

	if (xhci_handshake(&xhci->op_regs->status,
		      STS_HALT, STS_HALT, delay)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = readl(&xhci->op_regs->command);
	command |= CMD_CSS;
	writel(command, &xhci->op_regs->command);
	if (xhci_handshake(&xhci->op_regs->status,
				STS_SAVE, 0, 10 * 1000)) {
		xhci_warn(xhci, "WARN: xHC save state timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	xhci_msix_sync_irqs(xhci);

	return rc;
}
EXPORT_SYMBOL_GPL(xhci_suspend);
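
/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transition from S3/S4 mode.
 */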
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval = 0;
	bool comp_timer_running = false;

	if (!hcd->state)
		return 0;

	/* Wait a bit if either of the roothubs need to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
	    time_before(jiffies,
			xhci->bus_state[1].next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		hibernated = true;

	if (!hibernated) {
		/* step 1: restore register */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state */
		/* step 3: set CRS flag */
		command = readl(&xhci->op_regs->command);
		command |= CMD_CRS;
		writel(command, &xhci->op_regs->command);
		/*
		 * Some controllers take up to 55+ ms to complete the controller
		 * restore, so set the timeout to 100 ms.  The xHCI
		 * specification doesn't mention any timeout value.
		 */
		if (xhci_handshake(&xhci->op_regs->status,
			      STS_RESTORE, 0, 100 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = readl(&xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {

		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
				!(xhci_all_ports_seen_u0(xhci))) {
			del_timer_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Compliance Mode Recovery Timer deleted!");
		}

		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_zero_64b_regs(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = readl(&xhci->op_regs->status);
		writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
		temp = readl(&xhci->ir_set->irq_pending);
		writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_debugfs_exit(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			    readl(&xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		comp_timer_running = true;

		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (!retval) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(secondary_hcd);
		}
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command |= CMD_RUN;
	writel(command, &xhci->op_regs->command);
	xhci_handshake(&xhci->op_regs->status, STS_HALT,
		  0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints.  Ring the doorbell, or
	 * delay if an endpoint has been stopped.
	 */
	/* this is done in bus_resume */

	spin_unlock_irq(&xhci->lock);

	xhci_dbc_resume(xhci);

 done:
	if (retval == 0) {
		/* Resume root hubs only when have pending events. */
		if (xhci_pending_portevent(xhci)) {
			usb_hcd_resume_root_hub(xhci->shared_hcd);
			usb_hcd_resume_root_hub(hcd);
		}
	}

	/*
	 * If system is subject to the Quirk, Compliance Mode Timer needs to
	 * be re-initialized Always after a system resume.  Ports are subject
	 * to suffer the Compliance Mode issue again.  It doesn't matter if
	 * ports have entered previously to U0 before system's suspension.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
		compliance_mode_recovery_timer_init(xhci);

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));

	/* Re-enable port polling. */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	usb_hcd_poll_rh_status(xhci->shared_hcd);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);

	return retval;
}
EXPORT_SYMBOL_GPL(xhci_resume);
#endif	/* CONFIG_PM */

/*-------------------------------------------------------------------------*/
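
/*
 * xhci_get_endpoint_index - Find the index for an endpoint given its
 * descriptor.  Use the return value to right shift 1 for the bitmask.
 *
 * Index  = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 *
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */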
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}

/* The reverse operation to xhci_get_endpoint_index.  Calculate the USB
 * endpoint address from the xHCI endpoint index.
 */
unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
	unsigned int number = DIV_ROUND_UP(ep_index, 2);
	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;

	return direction | number;
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0
 * is bit 1, etc.
 */
static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0
 * is bit 1, etc.
 */
static unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		pr_debug("xHCI %s called with invalid args\n", func);
		return -EINVAL;
	}
	if (!udev->parent) {
		pr_debug("xHCI %s called for root hub\n", func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
					func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with udev and virt_dev that do not match\n",
					func);
			return -EINVAL;
		}
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);
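
/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */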
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_command *command;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max Packet Size for ep 0 changed.");
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in usb_device = %d",
				max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in xHCI HW = %d",
				hw_max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Issuing evaluate context command.");

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		command = xhci_alloc_command(xhci, true, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		command->in_ctx = xhci->devs[slot_id]->in_ctx;
		ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
		if (!ctrl_ctx) {
			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
					__func__);
			ret = -ENOMEM;
			goto command_cleanup;
		}
		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);

		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		ret = xhci_configure_endpoint(xhci, urb->dev, command,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
command_cleanup:
		kfree(command->completion);
		kfree(command);
	}
	return ret;
}
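
/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */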
static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	unsigned int *ep_state;
	struct urb_priv	*urb_priv;
	int num_tds;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		return -ESHUTDOWN;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		num_tds = urb->number_of_packets;
	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
	    urb->transfer_buffer_length > 0 &&
	    urb->transfer_flags & URB_ZERO_PACKET &&
	    !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
		num_tds = 2;
	else
		num_tds = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
			   num_tds * sizeof(struct xhci_td), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	urb_priv->num_tds = num_tds;
	urb_priv->num_tds_done = 0;
	urb->hcpriv = urb_priv;

	trace_xhci_urb_enqueue(urb);

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}
	}

	spin_lock_irqsave(&xhci->lock, flags);

	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
			 urb->ep->desc.bEndpointAddress, urb);
		ret = -ESHUTDOWN;
		goto free_priv;
	}
	if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
		xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
			  *ep_state);
		ret = -EINVAL;
		goto free_priv;
	}
	if (*ep_state & EP_SOFT_CLEAR_TOGGLE) {
		xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
		ret = -EINVAL;
		goto free_priv;
	}

	switch (usb_endpoint_type(&urb->ep->desc)) {

	case USB_ENDPOINT_XFER_CONTROL:
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
					 slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					 slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_INT:
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
	}

	if (ret) {
free_priv:
		xhci_urb_free_priv(urb_priv);
		urb->hcpriv = NULL;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
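
/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed"
 * from the ring.  Since the ring is a contiguous structure, they can't be
 * physically removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     assuming the HC doesn't stall on one of the TDs to be removed.
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs, so the HC skips over them without
 *     transferring anything.
 *
 * This function only queues the Stop Endpoint command and marks the URB's
 * TDs as canceled; the URBs are given back to the USB core from the Stop
 * Endpoint command completion handler.
 */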
static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv	*urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_command *command;
	struct xhci_virt_device *vdev;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);

	trace_xhci_urb_dequeue(urb);

	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	/* give back URB now if we can't queue it for cancel */
	vdev = xhci->devs[urb->dev->slot_id];
	urb_priv = urb->hcpriv;
	if (!vdev || !urb_priv)
		goto err_giveback;

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &vdev->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep || !ep_ring)
		goto err_giveback;

	/* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */
	temp = readl(&xhci->op_regs->status);
	if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_hc_died(xhci);
		goto done;
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"HC halted, freeing TD manually.");
		for (i = urb_priv->num_tds_done;
		     i < urb_priv->num_tds;
		     i++) {
			td = &urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}
		goto err_giveback;
	}

	i = urb_priv->num_tds_done;
	if (i < urb_priv->num_tds)
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Cancel URB %p, dev %s, ep 0x%x, starting at offset 0x%llx",
				urb, urb->dev->devpath,
				urb->ep->desc.bEndpointAddress,
				(unsigned long long) xhci_trb_virt_to_dma(
					urb_priv->td[i].start_seg,
					urb_priv->td[i].first_trb));

	for (; i < urb_priv->num_tds; i++) {
		td = &urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
		command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
		if (!command) {
			ret = -ENOMEM;
			goto done;
		}
		ep->ep_state |= EP_STOP_CMD_PENDING;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
					 ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;

err_giveback:
	if (urb_priv)
		xhci_urb_free_priv(urb_priv);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock_irqrestore(&xhci->lock, flags);
	usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
	return ret;
}
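
/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint()
 * will add the endpoint to the schedule with possibly new parameters denoted
 * by a different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 */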
static int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the xHCI driver has noted it is disabled, ignore this request
	 */
	if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) ||
	    le32_to_cpu(ctrl_ctx->drop_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		/* Do not warn when called after a usb_device_reset */
		if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
			xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				  __func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	if (xhci->quirks & XHCI_MTK_HOST)
		xhci_mtk_drop_ep_quirk(hcd, udev, ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags);
	return 0;
}
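
/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint()
 * will add the endpoint to the schedule with possibly new parameters denoted
 * by a different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 */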
static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx;
	unsigned int ep_index;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	u32 new_add_flags, new_drop_flags;
	struct xhci_virt_device *virt_dev;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	virt_dev = xhci->devs[udev->slot_id];
	in_ctx = virt_dev->in_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	/* If this endpoint is already in use, and the upper layers are trying
	 * to add it again without dropping it, reject the addition.
	 */
	if (virt_dev->eps[ep_index].ring &&
	    !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
		xhci_warn(xhci, "Trying to add endpoint 0x%x without dropping it.\n",
				(unsigned int) ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	if (xhci->quirks & XHCI_MTK_HOST) {
		ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
		if (ret < 0) {
			xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring);
			virt_dev->eps[ep_index].new_ring = NULL;
			return ret;
		}
	}

	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	/* An endpoint is never both added and dropped in the same call, so
	 * leave the drop flags untouched here.
	 */
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index);

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags);
	return 0;
}

static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return;
	}

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
	for (i = 1; i < 31; i++) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}
static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;

	switch (*cmd_status) {
	case COMP_COMMAND_ABORTED:
	case COMP_COMMAND_RING_STOPPED:
		xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
		ret = -ETIME;
		break;
	case COMP_RESOURCE_ERROR:
		dev_warn(&udev->dev,
			 "Not enough host controller resources for new device state.\n");
		ret = -ENOMEM;
		/* FIXME: can we allocate more resources for the HC? */
		break;
	case COMP_BANDWIDTH_ERROR:
	case COMP_SECONDARY_BANDWIDTH_ERROR:
		dev_warn(&udev->dev,
			 "Not enough bandwidth for new device state.\n");
		ret = -ENOSPC;
		/* FIXME: can we go back to the old state? */
		break;
	case COMP_TRB_ERROR:
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, add flag = 1, and endpoint is not disabled.\n");
		ret = -EINVAL;
		break;
	case COMP_INCOMPATIBLE_DEVICE_ERROR:
		dev_warn(&udev->dev,
			 "ERROR: Incompatible device for endpoint configure command.\n");
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Successful Endpoint Configure command");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
				*cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;

	switch (*cmd_status) {
	case COMP_COMMAND_ABORTED:
	case COMP_COMMAND_RING_STOPPED:
		xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
		ret = -ETIME;
		break;
	case COMP_PARAMETER_ERROR:
		dev_warn(&udev->dev,
			 "WARN: xHCI driver setup invalid evaluate context command.\n");
		ret = -EINVAL;
		break;
	case COMP_SLOT_NOT_ENABLED_ERROR:
		dev_warn(&udev->dev,
			"WARN: slot not enabled for evaluate context command.\n");
		ret = -EINVAL;
		break;
	case COMP_CONTEXT_STATE_ERROR:
		dev_warn(&udev->dev,
			"WARN: invalid context state for evaluate context command.\n");
		ret = -EINVAL;
		break;
	case COMP_INCOMPATIBLE_DEVICE_ERROR:
		dev_warn(&udev->dev,
			"ERROR: Incompatible device for evaluate context command.\n");
		ret = -ENODEV;
		break;
	case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
		/* Max Exit Latency too large error */
		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Successful evaluate context command");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
			*cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 valid_add_flags;
	u32 valid_drop_flags;

	/* Ignore the slot flag (bit 0), and the default control endpoint flag
	 * (bit 1).  The default control endpoint is added during the Address
	 * Device command and is never removed until the slot is disabled.
	 */
	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	/* Use hweight32 to count the number of ones in the add flags, or
	 * number of endpoints added.  Don't count endpoints that are changed
	 * (both added and dropped).
	 */
	return hweight32(valid_add_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}

static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 valid_add_flags;
	u32 valid_drop_flags;

	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	return hweight32(valid_drop_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}
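
/*
 * We need to reserve the new number of endpoints before the configure endpoint
 * command completes.  We can't subtract the dropped endpoints from the number
 * of active endpoints until the command completes because we can oversubscribe
 * the host in this case:
 *
 *  - the first configure endpoint command drops more endpoints than it adds
 *  - a second configure endpoint command that adds more endpoints is queued
 *  - the first configure endpoint command fails, so the config is unchanged
 *  - the second command may succeed, even though there isn't enough resources
 *
 * Must be called with xhci->lock held.
 */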
static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 added_eps;

	added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Not enough ep ctxs: %u active, need to add %u, limit is %u.",
				xhci->num_active_eps, added_eps,
				xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += added_eps;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Adding %u ep ctxs, %u now active.", added_eps,
			xhci->num_active_eps);
	return 0;
}

/*
 * The configure endpoint was failed by the xHC for some other reason, so we
 * need to revert the resources that failed configuration would have used.
 *
 * Must be called with xhci->lock held.
 */
static void xhci_free_host_resources(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 num_failed_eps;

	num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
	xhci->num_active_eps -= num_failed_eps;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Removing %u failed ep ctxs, %u now active.",
			num_failed_eps,
			xhci->num_active_eps);
}

/*
 * Now that the command has completed, clean up the active endpoint count by
 * subtracting out the endpoints that were dropped (but not changed).
 *
 * Must be called with xhci->lock held.
 */
static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 num_dropped_eps;

	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Removing %u dropped ep ctxs, %u now active.",
				num_dropped_eps,
				xhci->num_active_eps);
}

static unsigned int xhci_get_block_size(struct usb_device *udev)
{
	switch (udev->speed) {
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
		return FS_BLOCK;
	case USB_SPEED_HIGH:
		return HS_BLOCK;
	case USB_SPEED_SUPER:
	case USB_SPEED_SUPER_PLUS:
		return SS_BLOCK;
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
	default:
		/* Should never happen */
		return 1;
	}
}

static unsigned int
xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
{
	if (interval_bw->overhead[LS_OVERHEAD_TYPE])
		return LS_OVERHEAD;
	if (interval_bw->overhead[FS_OVERHEAD_TYPE])
		return FS_OVERHEAD;
	return HS_OVERHEAD;
}

/* If we are changing a LS/FS device under a HS hub,
 * make sure (if we are activating a new TT) that the HS bus has enough
 * bandwidth for this new TT.
 */
static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	struct xhci_interval_bw_table *bw_table;
	struct xhci_tt_bw_info *tt_info;

	/* Find the bandwidth table for the root port this TT is attached to. */
	bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
	tt_info = virt_dev->tt_info;
	/* If this TT already had active endpoints, the bandwidth for this TT
	 * has already been added.  Removing all periodic endpoints (and thus
	 * making the TT inactive) will only decrease the bandwidth used.
	 */
	if (old_active_eps)
		return 0;
	if (old_active_eps == 0 && tt_info->active_eps != 0) {
		if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
			return -ENOMEM;
		return 0;
	}
	/* Not sure why we would have no new active endpoints...
	 *
	 * Maybe because of an Evaluate Context change for a hub update or a
	 * control endpoint 0 max packet size change?
	 * FIXME: skip the bandwidth calculation in that case.
	 */
	return 0;
}

static int xhci_check_ss_bw(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev)
{
	unsigned int bw_reserved;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
	if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
		return -ENOMEM;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
	if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
		return -ENOMEM;

	return 0;
}
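
/*
 * This algorithm is a very conservative estimate of the worst-case scheduling
 * scenario for any one interval.  The hardware dynamically schedules the
 * packets, so we can't tell which microframe could be the limiting factor in
 * the bandwidth scheduling.  This only takes into account periodic endpoints.
 *
 * Obviously, we can't solve an NP complete problem to find the minimum worst
 * case scenario.  Instead, we come up with an estimate that is no less than
 * the worst case bandwidth used for any one microframe, but may be an
 * over-estimate.
 *
 * We walk the requirements for each endpoint by interval, starting with the
 * smallest interval, and place packets in the schedule where there is only
 * one possible way to schedule packets for that interval.  To simplify the
 * algorithm, we record the largest max packet size for each interval, and
 * assume all packets will be that size.
 *
 * For interval 0, the bandwidth is simply the amount of data to be
 * transmitted (the sum of all max ESIT payload sizes, plus any overhead per
 * packet times the number of packets).
 *
 * For interval 1, we have two possible microframes to schedule those packets
 * in.  If we can schedule the same number of packets in each possible
 * scheduling opportunity (each microframe), we do so; the remaining packets
 * are saved to be transmitted in the gaps of the next interval's scheduling
 * sequence.  As remaining packets are carried to the next interval, the
 * number of packets to transmit is doubled, because intervals are powers of
 * two and the previous interval's packets would be transmitted twice within
 * the larger interval.  We also carry the largest max packet size and
 * overhead seen so far.
 *
 * The estimate accumulates, for each interval, the number of schedulable
 * packets times (largest overhead + largest max packet size), and fails if
 * the total ever exceeds the bus limit minus the reserved bandwidth.
 */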
2201static int xhci_check_bw_table(struct xhci_hcd *xhci,
2202 struct xhci_virt_device *virt_dev,
2203 int old_active_eps)
2204{
2205 unsigned int bw_reserved;
2206 unsigned int max_bandwidth;
2207 unsigned int bw_used;
2208 unsigned int block_size;
2209 struct xhci_interval_bw_table *bw_table;
2210 unsigned int packet_size = 0;
2211 unsigned int overhead = 0;
2212 unsigned int packets_transmitted = 0;
2213 unsigned int packets_remaining = 0;
2214 unsigned int i;
2215
2216 if (virt_dev->udev->speed >= USB_SPEED_SUPER)
2217 return xhci_check_ss_bw(xhci, virt_dev);
2218
2219 if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2220 max_bandwidth = HS_BW_LIMIT;
2221
2222 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
2223 } else {
2224 max_bandwidth = FS_BW_LIMIT;
2225 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2226 }
2227
2228 bw_table = virt_dev->bw_table;
2229
2230
2231
2232 block_size = xhci_get_block_size(virt_dev->udev);
2233
2234
2235
2236
2237 if (virt_dev->tt_info) {
2238 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2239 "Recalculating BW for rootport %u",
2240 virt_dev->real_port);
2241 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2242 xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2243 "newly activated TT.\n");
2244 return -ENOMEM;
2245 }
2246 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2247 "Recalculating BW for TT slot %u port %u",
2248 virt_dev->tt_info->slot_id,
2249 virt_dev->tt_info->ttport);
2250 } else {
2251 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2252 "Recalculating BW for rootport %u",
2253 virt_dev->real_port);
2254 }
2255
2256
2257
2258
2259 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2260 bw_table->interval_bw[0].num_packets *
2261 xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2262
2263 for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2264 unsigned int bw_added;
2265 unsigned int largest_mps;
2266 unsigned int interval_overhead;
2267
2268
2269
2270
2271
2272
2273 packets_remaining = 2 * packets_remaining +
2274 bw_table->interval_bw[i].num_packets;
2275
2276
2277
2278
2279 if (list_empty(&bw_table->interval_bw[i].endpoints))
2280 largest_mps = 0;
2281 else {
2282 struct xhci_virt_ep *virt_ep;
2283 struct list_head *ep_entry;
2284
2285 ep_entry = bw_table->interval_bw[i].endpoints.next;
2286 virt_ep = list_entry(ep_entry,
2287 struct xhci_virt_ep, bw_endpoint_list);
2288
2289 largest_mps = DIV_ROUND_UP(
2290 virt_ep->bw_info.max_packet_size,
2291 block_size);
2292 }
2293 if (largest_mps > packet_size)
2294 packet_size = largest_mps;
2295
2296
2297 interval_overhead = xhci_get_largest_overhead(
2298 &bw_table->interval_bw[i]);
2299 if (interval_overhead > overhead)
2300 overhead = interval_overhead;
2301
2302
2303
2304
2305 packets_transmitted = packets_remaining >> (i + 1);
2306
2307
2308 bw_added = packets_transmitted * (overhead + packet_size);
2309
2310
2311 packets_remaining = packets_remaining % (1 << (i + 1));
2312
2313
2314
2315
2316
2317 if (packets_remaining == 0) {
2318 packet_size = 0;
2319 overhead = 0;
2320 } else if (packets_transmitted > 0) {
2321
2322
2323
2324
2325
2326 packet_size = largest_mps;
2327 overhead = interval_overhead;
2328 }
2329
2330
2331
2332 bw_used += bw_added;
2333 if (bw_used > max_bandwidth) {
2334 xhci_warn(xhci, "Not enough bandwidth. "
2335 "Proposed: %u, Max: %u\n",
2336 bw_used, max_bandwidth);
2337 return -ENOMEM;
2338 }
2339 }
2340
2341
2342
2343
2344
2345
2346 if (packets_remaining > 0)
2347 bw_used += overhead + packet_size;
2348
2349 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2350 unsigned int port_index = virt_dev->real_port - 1;
2351
2352
2353
2354
2355
2356 bw_used += TT_HS_OVERHEAD *
2357 xhci->rh_bw[port_index].num_active_tts;
2358 }
2359
2360 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2361 "Final bandwidth: %u, Limit: %u, Reserved: %u, "
2362 "Available: %u " "percent",
2363 bw_used, max_bandwidth, bw_reserved,
2364 (max_bandwidth - bw_used - bw_reserved) * 100 /
2365 max_bandwidth);
2366
2367 bw_used += bw_reserved;
2368 if (bw_used > max_bandwidth) {
2369 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2370 bw_used, max_bandwidth);
2371 return -ENOMEM;
2372 }
2373
2374 bw_table->bw_used = bw_used;
2375 return 0;
2376}
2377
2378static bool xhci_is_async_ep(unsigned int ep_type)
2379{
2380 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2381 ep_type != ISOC_IN_EP &&
2382 ep_type != INT_IN_EP);
2383}
2384
2385static bool xhci_is_sync_in_ep(unsigned int ep_type)
2386{
2387 return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2388}
2389
2390static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2391{
2392 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2393
2394 if (ep_bw->ep_interval == 0)
2395 return SS_OVERHEAD_BURST +
2396 (ep_bw->mult * ep_bw->num_packets *
2397 (SS_OVERHEAD + mps));
2398 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2399 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2400 1 << ep_bw->ep_interval);
2401
2402}
2403
2404static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2405 struct xhci_bw_info *ep_bw,
2406 struct xhci_interval_bw_table *bw_table,
2407 struct usb_device *udev,
2408 struct xhci_virt_ep *virt_ep,
2409 struct xhci_tt_bw_info *tt_info)
2410{
2411 struct xhci_interval_bw *interval_bw;
2412 int normalized_interval;
2413
2414 if (xhci_is_async_ep(ep_bw->type))
2415 return;
2416
2417 if (udev->speed >= USB_SPEED_SUPER) {
2418 if (xhci_is_sync_in_ep(ep_bw->type))
2419 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2420 xhci_get_ss_bw_consumed(ep_bw);
2421 else
2422 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2423 xhci_get_ss_bw_consumed(ep_bw);
2424 return;
2425 }
2426
2427
2428
2429
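	/* Nothing to do if this endpoint was never added to the table. */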
2430 if (list_empty(&virt_ep->bw_endpoint_list))
2431 return;
2432
2433
2434
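	/*
	 * For LS/FS devices, translate the interval expressed in microframes
	 * to frames.
	 */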
2435 if (udev->speed == USB_SPEED_HIGH)
2436 normalized_interval = ep_bw->ep_interval;
2437 else
2438 normalized_interval = ep_bw->ep_interval - 3;
2439
2440 if (normalized_interval == 0)
2441 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2442 interval_bw = &bw_table->interval_bw[normalized_interval];
2443 interval_bw->num_packets -= ep_bw->num_packets;
2444 switch (udev->speed) {
2445 case USB_SPEED_LOW:
2446 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2447 break;
2448 case USB_SPEED_FULL:
2449 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2450 break;
2451 case USB_SPEED_HIGH:
2452 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2453 break;
2454 case USB_SPEED_SUPER:
2455 case USB_SPEED_SUPER_PLUS:
2456 case USB_SPEED_UNKNOWN:
2457 case USB_SPEED_WIRELESS:
2458
2459
2460
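		/*
		 * Should never happen: only LS/FS/HS endpoints are ever
		 * added to the interval table.
		 */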
2461 return;
2462 }
2463 if (tt_info)
2464 tt_info->active_eps -= 1;
2465 list_del_init(&virt_ep->bw_endpoint_list);
2466}
2467
2468static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2469 struct xhci_bw_info *ep_bw,
2470 struct xhci_interval_bw_table *bw_table,
2471 struct usb_device *udev,
2472 struct xhci_virt_ep *virt_ep,
2473 struct xhci_tt_bw_info *tt_info)
2474{
2475 struct xhci_interval_bw *interval_bw;
2476 struct xhci_virt_ep *smaller_ep;
2477 int normalized_interval;
2478
2479 if (xhci_is_async_ep(ep_bw->type))
2480 return;
2481
	if (udev->speed >= USB_SPEED_SUPER) {
2483 if (xhci_is_sync_in_ep(ep_bw->type))
2484 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2485 xhci_get_ss_bw_consumed(ep_bw);
2486 else
2487 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2488 xhci_get_ss_bw_consumed(ep_bw);
2489 return;
2490 }
2491
2492
2493
2494
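	/*
	 * For LS/FS devices, translate the interval expressed in microframes
	 * to frames.
	 */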
2495 if (udev->speed == USB_SPEED_HIGH)
2496 normalized_interval = ep_bw->ep_interval;
2497 else
2498 normalized_interval = ep_bw->ep_interval - 3;
2499
2500 if (normalized_interval == 0)
2501 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2502 interval_bw = &bw_table->interval_bw[normalized_interval];
2503 interval_bw->num_packets += ep_bw->num_packets;
2504 switch (udev->speed) {
2505 case USB_SPEED_LOW:
2506 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2507 break;
2508 case USB_SPEED_FULL:
2509 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2510 break;
2511 case USB_SPEED_HIGH:
2512 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2513 break;
2514 case USB_SPEED_SUPER:
2515 case USB_SPEED_SUPER_PLUS:
2516 case USB_SPEED_UNKNOWN:
2517 case USB_SPEED_WIRELESS:
2518
2519
2520
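		/*
		 * Should never happen: only LS/FS/HS endpoints are ever
		 * added to the interval table.
		 */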
2521 return;
2522 }
2523
2524 if (tt_info)
2525 tt_info->active_eps += 1;
2526
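	/*
	 * Insert the endpoint so the list stays sorted by descending max
	 * packet size.
	 */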
2527 list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2528 bw_endpoint_list) {
2529 if (ep_bw->max_packet_size >=
2530 smaller_ep->bw_info.max_packet_size) {
2531
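			/* Add the new endpoint before the first smaller one. */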
2532 list_add_tail(&virt_ep->bw_endpoint_list,
2533 &smaller_ep->bw_endpoint_list);
2534 return;
2535 }
2536 }
2537
2538 list_add_tail(&virt_ep->bw_endpoint_list,
2539 &interval_bw->endpoints);
2540}
2541
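/*
 * Keep the root port's TT bookkeeping up to date: charge the TT overhead
 * when a TT gains its first active endpoint, and release it when the TT
 * loses its last one.
 */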
2542void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2543 struct xhci_virt_device *virt_dev,
2544 int old_active_eps)
2545{
2546 struct xhci_root_port_bw_info *rh_bw_info;
2547 if (!virt_dev->tt_info)
2548 return;
2549
2550 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2551 if (old_active_eps == 0 &&
2552 virt_dev->tt_info->active_eps != 0) {
2553 rh_bw_info->num_active_tts += 1;
2554 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2555 } else if (old_active_eps != 0 &&
2556 virt_dev->tt_info->active_eps == 0) {
2557 rh_bw_info->num_active_tts -= 1;
2558 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2559 }
2560}
2561
2562static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2563 struct xhci_virt_device *virt_dev,
2564 struct xhci_container_ctx *in_ctx)
2565{
2566 struct xhci_bw_info ep_bw_info[31];
2567 int i;
2568 struct xhci_input_control_ctx *ctrl_ctx;
2569 int old_active_eps = 0;
2570
2571 if (virt_dev->tt_info)
2572 old_active_eps = virt_dev->tt_info->active_eps;
2573
2574 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2575 if (!ctrl_ctx) {
2576 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2577 __func__);
2578 return -ENOMEM;
2579 }
2580
2581 for (i = 0; i < 31; i++) {
2582 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2583 continue;
2584
2585
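		/* Save a copy of the BW info in case we need to revert. */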
2586 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2587 sizeof(ep_bw_info[i]));
2588
2589
2590
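		/*
		 * Drop the endpoint from the interval table if it is being
		 * dropped or changed.
		 */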
2591 if (EP_IS_DROPPED(ctrl_ctx, i))
2592 xhci_drop_ep_from_interval_table(xhci,
2593 &virt_dev->eps[i].bw_info,
2594 virt_dev->bw_table,
2595 virt_dev->udev,
2596 &virt_dev->eps[i],
2597 virt_dev->tt_info);
2598 }
2599
2600 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2601 for (i = 0; i < 31; i++) {
2602
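		/* Add any changed or added endpoints to the interval table. */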
2603 if (EP_IS_ADDED(ctrl_ctx, i))
2604 xhci_add_ep_to_interval_table(xhci,
2605 &virt_dev->eps[i].bw_info,
2606 virt_dev->bw_table,
2607 virt_dev->udev,
2608 &virt_dev->eps[i],
2609 virt_dev->tt_info);
2610 }
2611
2612 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2613
2614
2615
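		/*
		 * This fits in the bandwidth budget, so update the TT
		 * bookkeeping and keep the new table contents.
		 */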
2616 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2617 return 0;
2618 }
2619
2620
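	/* Not enough bandwidth: revert the interval table and BW info. */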
2621 for (i = 0; i < 31; i++) {
2622 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2623 continue;
2624
2625
2626
2627
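		/*
		 * Drop the new copies of any added or changed endpoints from
		 * the interval table.
		 */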
2628 if (EP_IS_ADDED(ctrl_ctx, i)) {
2629 xhci_drop_ep_from_interval_table(xhci,
2630 &virt_dev->eps[i].bw_info,
2631 virt_dev->bw_table,
2632 virt_dev->udev,
2633 &virt_dev->eps[i],
2634 virt_dev->tt_info);
2635 }
2636
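		/* Revert the endpoint back to its old information. */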
2637 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2638 sizeof(ep_bw_info[i]));
2639
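		/* Add any dropped or changed endpoints back to the table. */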
2640 if (EP_IS_DROPPED(ctrl_ctx, i))
2641 xhci_add_ep_to_interval_table(xhci,
2642 &virt_dev->eps[i].bw_info,
2643 virt_dev->bw_table,
2644 virt_dev->udev,
2645 &virt_dev->eps[i],
2646 virt_dev->tt_info);
2647 }
2648 return -ENOMEM;
2649}
2650
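/*
 * Issue a configure endpoint command or evaluate context command
 * and wait for it to finish.
 */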
2655static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2656 struct usb_device *udev,
2657 struct xhci_command *command,
2658 bool ctx_change, bool must_succeed)
2659{
2660 int ret;
2661 unsigned long flags;
2662 struct xhci_input_control_ctx *ctrl_ctx;
2663 struct xhci_virt_device *virt_dev;
2664 struct xhci_slot_ctx *slot_ctx;
2665
2666 if (!command)
2667 return -EINVAL;
2668
2669 spin_lock_irqsave(&xhci->lock, flags);
2670
2671 if (xhci->xhc_state & XHCI_STATE_DYING) {
2672 spin_unlock_irqrestore(&xhci->lock, flags);
2673 return -ESHUTDOWN;
2674 }
2675
2676 virt_dev = xhci->devs[udev->slot_id];
2677
2678 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2679 if (!ctrl_ctx) {
2680 spin_unlock_irqrestore(&xhci->lock, flags);
2681 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2682 __func__);
2683 return -ENOMEM;
2684 }
2685
2686 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2687 xhci_reserve_host_resources(xhci, ctrl_ctx)) {
2688 spin_unlock_irqrestore(&xhci->lock, flags);
2689 xhci_warn(xhci, "Not enough host resources, "
2690 "active endpoint contexts = %u\n",
2691 xhci->num_active_eps);
2692 return -ENOMEM;
2693 }
2694 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2695 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2696 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2697 xhci_free_host_resources(xhci, ctrl_ctx);
2698 spin_unlock_irqrestore(&xhci->lock, flags);
2699 xhci_warn(xhci, "Not enough bandwidth\n");
2700 return -ENOMEM;
2701 }
2702
2703 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
2704 trace_xhci_configure_endpoint(slot_ctx);
2705
2706 if (!ctx_change)
2707 ret = xhci_queue_configure_endpoint(xhci, command,
2708 command->in_ctx->dma,
2709 udev->slot_id, must_succeed);
2710 else
2711 ret = xhci_queue_evaluate_context(xhci, command,
2712 command->in_ctx->dma,
2713 udev->slot_id, must_succeed);
2714 if (ret < 0) {
2715 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2716 xhci_free_host_resources(xhci, ctrl_ctx);
2717 spin_unlock_irqrestore(&xhci->lock, flags);
2718 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2719 "FIXME allocate a new ring segment");
2720 return -ENOMEM;
2721 }
2722 xhci_ring_cmd_db(xhci);
2723 spin_unlock_irqrestore(&xhci->lock, flags);
2724
2725
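	/* Wait for the command to complete. */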
2726 wait_for_completion(command->completion);
2727
2728 if (!ctx_change)
2729 ret = xhci_configure_endpoint_result(xhci, udev,
2730 &command->status);
2731 else
2732 ret = xhci_evaluate_context_result(xhci, udev,
2733 &command->status);
2734
2735 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2736 spin_lock_irqsave(&xhci->lock, flags);
2737
2738
2739
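		/*
		 * If the command failed, free the reserved resources;
		 * otherwise finalize the reservation to cover dropped eps.
		 */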
2740 if (ret)
2741 xhci_free_host_resources(xhci, ctrl_ctx);
2742 else
2743 xhci_finish_resource_reservation(xhci, ctrl_ctx);
2744 spin_unlock_irqrestore(&xhci->lock, flags);
2745 }
2746 return ret;
2747}
2748
2749static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
2750 struct xhci_virt_device *vdev, int i)
2751{
2752 struct xhci_virt_ep *ep = &vdev->eps[i];
2753
2754 if (ep->ep_state & EP_HAS_STREAMS) {
2755 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
2756 xhci_get_endpoint_address(i));
2757 xhci_free_stream_info(xhci, ep->stream_info);
2758 ep->stream_info = NULL;
2759 ep->ep_state &= ~EP_HAS_STREAMS;
2760 }
2761}
2762
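/*
 * Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint().  Issues a configure endpoint command to apply the
 * accumulated endpoint changes; if this fails, the USB core is expected to
 * call xhci_reset_bandwidth().
 */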
2773static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2774{
2775 int i;
2776 int ret = 0;
2777 struct xhci_hcd *xhci;
2778 struct xhci_virt_device *virt_dev;
2779 struct xhci_input_control_ctx *ctrl_ctx;
2780 struct xhci_slot_ctx *slot_ctx;
2781 struct xhci_command *command;
2782
2783 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2784 if (ret <= 0)
2785 return ret;
2786 xhci = hcd_to_xhci(hcd);
2787 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
2788 (xhci->xhc_state & XHCI_STATE_REMOVING))
2789 return -ENODEV;
2790
2791 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2792 virt_dev = xhci->devs[udev->slot_id];
2793
2794 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
2795 if (!command)
2796 return -ENOMEM;
2797
2798 command->in_ctx = virt_dev->in_ctx;
2799
2800
2801 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2802 if (!ctrl_ctx) {
2803 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2804 __func__);
2805 ret = -ENOMEM;
2806 goto command_cleanup;
2807 }
2808 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2809 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2810 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
2811
2812
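	/* Don't issue the command if there's no endpoints to update. */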
2813 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2814 ctrl_ctx->drop_flags == 0) {
2815 ret = 0;
2816 goto command_cleanup;
2817 }
2818
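	/*
	 * Fix up the Context Entries field of the slot context: point it at
	 * the last valid endpoint context.  EP0 (i == 1) is the minimum.
	 */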
2819 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2820 for (i = 31; i >= 1; i--) {
2821 __le32 le32 = cpu_to_le32(BIT(i));
2822
2823 if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
2824 || (ctrl_ctx->add_flags & le32) || i == 1) {
2825 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2826 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
2827 break;
2828 }
2829 }
2830
2831 ret = xhci_configure_endpoint(xhci, udev, command,
2832 false, false);
2833 if (ret)
2834
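		/* Callee should call reset_bandwidth(). */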
2835 goto command_cleanup;
2836
2837
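	/* Free any rings that were dropped, but not changed. */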
2838 for (i = 1; i < 31; i++) {
2839 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2840 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
2841 xhci_free_endpoint_ring(xhci, virt_dev, i);
2842 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2843 }
2844 }
2845 xhci_zero_in_ctx(xhci, virt_dev);
2846
2847
2848
2849
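	/* Install any rings for completely new endpoints. */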
2850 for (i = 1; i < 31; i++) {
2851 if (!virt_dev->eps[i].new_ring)
2852 continue;
2853
2854
2855
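		/*
		 * The endpoint was changed, so free the old ring before
		 * installing the new one.
		 */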
2856 if (virt_dev->eps[i].ring) {
2857 xhci_free_endpoint_ring(xhci, virt_dev, i);
2858 }
2859 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2860 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2861 virt_dev->eps[i].new_ring = NULL;
2862 }
2863command_cleanup:
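	/*
	 * command->in_ctx is borrowed from the virt device, so free only the
	 * completion and the command itself, not the context.
	 */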
2864 kfree(command->completion);
2865 kfree(command);
2866
2867 return ret;
2868}
2869
2870static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2871{
2872 struct xhci_hcd *xhci;
2873 struct xhci_virt_device *virt_dev;
2874 int i, ret;
2875
2876 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2877 if (ret <= 0)
2878 return;
2879 xhci = hcd_to_xhci(hcd);
2880
2881 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2882 virt_dev = xhci->devs[udev->slot_id];
2883
2884 for (i = 0; i < 31; i++) {
2885 if (virt_dev->eps[i].new_ring) {
2886 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
2887 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
2888 virt_dev->eps[i].new_ring = NULL;
2889 }
2890 }
2891 xhci_zero_in_ctx(xhci, virt_dev);
2892}
2893
2894static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
2895 struct xhci_container_ctx *in_ctx,
2896 struct xhci_container_ctx *out_ctx,
2897 struct xhci_input_control_ctx *ctrl_ctx,
2898 u32 add_flags, u32 drop_flags)
2899{
2900 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
2901 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
2902 xhci_slot_copy(xhci, in_ctx, out_ctx);
2903 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2904}
2905
2906static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
2907 unsigned int slot_id, unsigned int ep_index,
2908 struct xhci_dequeue_state *deq_state)
2909{
2910 struct xhci_input_control_ctx *ctrl_ctx;
2911 struct xhci_container_ctx *in_ctx;
2912 struct xhci_ep_ctx *ep_ctx;
2913 u32 added_ctxs;
2914 dma_addr_t addr;
2915
2916 in_ctx = xhci->devs[slot_id]->in_ctx;
2917 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2918 if (!ctrl_ctx) {
2919 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2920 __func__);
2921 return;
2922 }
2923
2924 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
2925 xhci->devs[slot_id]->out_ctx, ep_index);
2926 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
2927 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
2928 deq_state->new_deq_ptr);
2929 if (addr == 0) {
2930 xhci_warn(xhci, "WARN Cannot submit config ep after "
2931 "reset ep command\n");
2932 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
2933 deq_state->new_deq_seg,
2934 deq_state->new_deq_ptr);
2935 return;
2936 }
2937 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
2938
2939 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
2940 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
2941 xhci->devs[slot_id]->out_ctx, ctrl_ctx,
2942 added_ctxs, added_ctxs);
2943}
2944
2945void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
2946 unsigned int stream_id, struct xhci_td *td)
2947{
2948 struct xhci_dequeue_state deq_state;
2949 struct usb_device *udev = td->urb->dev;
2950
2951 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2952 "Cleaning up stalled endpoint ring");
2953
2954
2955
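	/*
	 * Find where to move the hardware dequeue pointer so this TD is
	 * skipped instead of being resent on the next doorbell ring.
	 */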
2956 xhci_find_new_dequeue_state(xhci, udev->slot_id,
2957 ep_index, stream_id, td, &deq_state);
2958
2959 if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
2960 return;
2961
2962
2963
2964
2965 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
2966 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2967 "Queueing new dequeue state");
2968 xhci_queue_new_dequeue_state(xhci, udev->slot_id,
2969 ep_index, &deq_state);
2970 } else {
2971
2972
2973
2974
2975
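		/*
		 * For hosts with the reset endpoint quirk, set the new
		 * dequeue state via the input context of a configure
		 * endpoint command instead.
		 */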
2976 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2977 "Setting up input context for "
2978 "configure endpoint command");
2979 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
2980 ep_index, &deq_state);
2981 }
2982}
2983
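/*
 * Called after the USB core issues a clear halt control message.  The host
 * side of the halt should already be cleared by a reset endpoint command
 * issued when the STALL event was received.
 *
 * For an endpoint that isn't halted, this function instead resets the data
 * toggle and sequence number by stopping the endpoint and then issuing a
 * configure endpoint command with the Drop and Add bits set for the target
 * endpoint.
 */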
2996static void xhci_endpoint_reset(struct usb_hcd *hcd,
2997 struct usb_host_endpoint *host_ep)
2998{
2999 struct xhci_hcd *xhci;
3000 struct usb_device *udev;
3001 struct xhci_virt_device *vdev;
3002 struct xhci_virt_ep *ep;
3003 struct xhci_input_control_ctx *ctrl_ctx;
3004 struct xhci_command *stop_cmd, *cfg_cmd;
3005 unsigned int ep_index;
3006 unsigned long flags;
3007 u32 ep_flag;
3008
3009 xhci = hcd_to_xhci(hcd);
3010 if (!host_ep->hcpriv)
3011 return;
3012 udev = (struct usb_device *) host_ep->hcpriv;
3013 vdev = xhci->devs[udev->slot_id];
3014 ep_index = xhci_get_endpoint_index(&host_ep->desc);
3015 ep = &vdev->eps[ep_index];
3016
3017
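	/* Bail out if the toggle is already being cleared by an endpoint reset. */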
3018 if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
3019 ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
3020 return;
3021 }
3022
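	/* Only interrupt and bulk endpoints have a data toggle to clear. */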
3023 if (usb_endpoint_xfer_control(&host_ep->desc) ||
3024 usb_endpoint_xfer_isoc(&host_ep->desc))
3025 return;
3026
3027 ep_flag = xhci_get_endpoint_flag(&host_ep->desc);
3028
3029 if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
3030 return;
3031
3032 stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
3033 if (!stop_cmd)
3034 return;
3035
3036 cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
3037 if (!cfg_cmd)
3038 goto cleanup;
3039
3040 spin_lock_irqsave(&xhci->lock, flags);
3041
3042
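	/* Block queuing new TRBs and ringing the endpoint doorbell. */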
3043 ep->ep_state |= EP_SOFT_CLEAR_TOGGLE;
3044
3045
3046
3047
3048
3049
3050
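	/*
	 * Make sure the endpoint ring is empty before resetting the toggle
	 * and sequence number.  Stopping the endpoint forces the xHC to
	 * update its dequeue pointer.
	 */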
3051 if (!list_empty(&ep->ring->td_list)) {
3052 dev_err(&udev->dev, "EP not empty, refuse reset\n");
3053 spin_unlock_irqrestore(&xhci->lock, flags);
3054 xhci_free_command(xhci, cfg_cmd);
3055 goto cleanup;
3056 }
3057 xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
3058 xhci_ring_cmd_db(xhci);
3059 spin_unlock_irqrestore(&xhci->lock, flags);
3060
3061 wait_for_completion(stop_cmd->completion);
3062
3063 spin_lock_irqsave(&xhci->lock, flags);
3064
3065
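	/*
	 * A configure endpoint command with the same add and drop flags
	 * clears the endpoint's toggle and sequence number.
	 */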
3066 ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
3067 xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
3068 ctrl_ctx, ep_flag, ep_flag);
3069 xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
3070
3071 xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
3072 udev->slot_id, false);
3073 xhci_ring_cmd_db(xhci);
3074 spin_unlock_irqrestore(&xhci->lock, flags);
3075
3076 wait_for_completion(cfg_cmd->completion);
3077
3078 ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
3079 xhci_free_command(xhci, cfg_cmd);
3080cleanup:
3081 xhci_free_command(xhci, stop_cmd);
3082}
3083
3084static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
3085 struct usb_device *udev, struct usb_host_endpoint *ep,
3086 unsigned int slot_id)
3087{
3088 int ret;
3089 unsigned int ep_index;
3090 unsigned int ep_state;
3091
3092 if (!ep)
3093 return -EINVAL;
3094 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
3095 if (ret <= 0)
3096 return -EINVAL;
3097 if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
3098 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
3099 " descriptor for ep 0x%x does not support streams\n",
3100 ep->desc.bEndpointAddress);
3101 return -EINVAL;
3102 }
3103
3104 ep_index = xhci_get_endpoint_index(&ep->desc);
3105 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3106 if (ep_state & EP_HAS_STREAMS ||
3107 ep_state & EP_GETTING_STREAMS) {
3108 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
3109 "already has streams set up.\n",
3110 ep->desc.bEndpointAddress);
3111 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
3112 "dynamic stream context array reallocation.\n");
3113 return -EINVAL;
3114 }
3115 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
3116 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
3117 "endpoint 0x%x; URBs are pending.\n",
3118 ep->desc.bEndpointAddress);
3119 return -EINVAL;
3120 }
3121 return 0;
3122}
3123
3124static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
3125 unsigned int *num_streams, unsigned int *num_stream_ctxs)
3126{
3127 unsigned int max_streams;
3128
3129
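	/* The stream context array size must be a power of two. */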
3130 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
3131
3132
3133
3134
3135
3136
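	/*
	 * Clamp to the number of primary stream array entries the host
	 * controller supports.
	 */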
3137 max_streams = HCC_MAX_PSA(xhci->hcc_params);
3138 if (*num_stream_ctxs > max_streams) {
3139 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
3140 max_streams);
3141 *num_stream_ctxs = max_streams;
3142 *num_streams = max_streams;
3143 }
3144}
3145
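/*
 * Returns an error code if one of the endpoints already has streams.
 * This does not change any data structures, it only checks and gathers
 * information.
 */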
3150static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
3151 struct usb_device *udev,
3152 struct usb_host_endpoint **eps, unsigned int num_eps,
3153 unsigned int *num_streams, u32 *changed_ep_bitmask)
3154{
3155 unsigned int max_streams;
3156 unsigned int endpoint_flag;
3157 int i;
3158 int ret;
3159
3160 for (i = 0; i < num_eps; i++) {
3161 ret = xhci_check_streams_endpoint(xhci, udev,
3162 eps[i], udev->slot_id);
3163 if (ret < 0)
3164 return ret;
3165
3166 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
3167 if (max_streams < (*num_streams - 1)) {
3168 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3169 eps[i]->desc.bEndpointAddress,
3170 max_streams);
3171 *num_streams = max_streams+1;
3172 }
3173
3174 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
3175 if (*changed_ep_bitmask & endpoint_flag)
3176 return -EINVAL;
3177 *changed_ep_bitmask |= endpoint_flag;
3178 }
3179 return 0;
3180}
3181
3182static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3183 struct usb_device *udev,
3184 struct usb_host_endpoint **eps, unsigned int num_eps)
3185{
3186 u32 changed_ep_bitmask = 0;
3187 unsigned int slot_id;
3188 unsigned int ep_index;
3189 unsigned int ep_state;
3190 int i;
3191
3192 slot_id = udev->slot_id;
3193 if (!xhci->devs[slot_id])
3194 return 0;
3195
3196 for (i = 0; i < num_eps; i++) {
3197 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3198 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3199
3200 if (ep_state & EP_GETTING_NO_STREAMS) {
3201 xhci_warn(xhci, "WARN Can't disable streams for "
3202 "endpoint 0x%x, "
3203 "streams are being disabled already\n",
3204 eps[i]->desc.bEndpointAddress);
3205 return 0;
3206 }
3207
3208 if (!(ep_state & EP_HAS_STREAMS) &&
3209 !(ep_state & EP_GETTING_STREAMS)) {
3210 xhci_warn(xhci, "WARN Can't disable streams for "
3211 "endpoint 0x%x, "
3212 "streams are already disabled!\n",
3213 eps[i]->desc.bEndpointAddress);
3214 xhci_warn(xhci, "WARN xhci_free_streams() called "
3215 "with non-streams endpoint\n");
3216 return 0;
3217 }
3218 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3219 }
3220 return changed_ep_bitmask;
3221}
3222
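/*
 * The USB device drivers use this function (through the HCD interface in USB
 * core) to prepare a set of bulk endpoints to use streams.  Streams are used
 * to have multiple transfers pending in a hardware queue instead of one.
 *
 * Returns the number of streams the driver may actually use (which can be
 * less than requested), or a negative error code.
 */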
3239static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3240 struct usb_host_endpoint **eps, unsigned int num_eps,
3241 unsigned int num_streams, gfp_t mem_flags)
3242{
3243 int i, ret;
3244 struct xhci_hcd *xhci;
3245 struct xhci_virt_device *vdev;
3246 struct xhci_command *config_cmd;
3247 struct xhci_input_control_ctx *ctrl_ctx;
3248 unsigned int ep_index;
3249 unsigned int num_stream_ctxs;
3250 unsigned int max_packet;
3251 unsigned long flags;
3252 u32 changed_ep_bitmask = 0;
3253
3254 if (!eps)
3255 return -EINVAL;
3256
3257
3258
3259
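	/*
	 * Add one to the number of streams requested to account for stream 0,
	 * which is reserved for xHCI internal use.
	 */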
3260 num_streams += 1;
3261 xhci = hcd_to_xhci(hcd);
3262 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3263 num_streams);
3264
3265
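	/*
	 * Hosts with a broken-streams quirk or a max primary stream array
	 * size below 4 don't support streams at all.
	 */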
3266 if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
3267 HCC_MAX_PSA(xhci->hcc_params) < 4) {
3268 xhci_dbg(xhci, "xHCI controller does not support streams.\n");
3269 return -ENOSYS;
3270 }
3271
3272 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
3273 if (!config_cmd)
3274 return -ENOMEM;
3275
3276 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
3277 if (!ctrl_ctx) {
3278 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3279 __func__);
3280 xhci_free_command(xhci, config_cmd);
3281 return -ENOMEM;
3282 }
3283
3284
3285
3286
3287
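	/*
	 * Check that none of the endpoints is already configured for streams,
	 * find the number of streams they can all support, and reject
	 * duplicate endpoints.
	 */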
3288 spin_lock_irqsave(&xhci->lock, flags);
3289 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3290 num_eps, &num_streams, &changed_ep_bitmask);
3291 if (ret < 0) {
3292 xhci_free_command(xhci, config_cmd);
3293 spin_unlock_irqrestore(&xhci->lock, flags);
3294 return ret;
3295 }
3296 if (num_streams <= 1) {
3297 xhci_warn(xhci, "WARN: endpoints can't handle "
3298 "more than one stream.\n");
3299 xhci_free_command(xhci, config_cmd);
3300 spin_unlock_irqrestore(&xhci->lock, flags);
3301 return -EINVAL;
3302 }
3303 vdev = xhci->devs[udev->slot_id];
3304
3305
3306
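	/*
	 * Mark each endpoint as being in transition, so xhci_urb_enqueue()
	 * will reject all URBs.
	 */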
3307 for (i = 0; i < num_eps; i++) {
3308 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3309 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3310 }
3311 spin_unlock_irqrestore(&xhci->lock, flags);
3312
3313
3314
3315
3316
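	/*
	 * Set up internal data structures and allocate HW data structures for
	 * streams, but don't install anything in the input context until all
	 * memory allocations have succeeded.
	 */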
3317 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3318 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3319 num_stream_ctxs, num_streams);
3320
3321 for (i = 0; i < num_eps; i++) {
3322 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3323 max_packet = usb_endpoint_maxp(&eps[i]->desc);
3324 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3325 num_stream_ctxs,
3326 num_streams,
3327 max_packet, mem_flags);
3328 if (!vdev->eps[ep_index].stream_info)
3329 goto cleanup;
3330
3331
3332
3333 }
3334
3335
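	/* Set up the input context for a configure endpoint command. */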
3336 for (i = 0; i < num_eps; i++) {
3337 struct xhci_ep_ctx *ep_ctx;
3338
3339 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3340 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3341
3342 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3343 vdev->out_ctx, ep_index);
3344 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3345 vdev->eps[ep_index].stream_info);
3346 }
3347
3348
3349
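	/*
	 * Tell the HW to drop its old copy of the endpoint context info
	 * and add the updated copy from the input context.
	 */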
3350 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3351 vdev->out_ctx, ctrl_ctx,
3352 changed_ep_bitmask, changed_ep_bitmask);
3353
3354
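	/* Issue and wait for the configure endpoint command. */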
3355 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3356 false, false);
3357
3358
3359
3360
3361
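	/*
	 * The xHC rejected the configure endpoint command, so leave the old
	 * rings intact and free our internal streams data structures.
	 */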
3362 if (ret < 0)
3363 goto cleanup;
3364
3365 spin_lock_irqsave(&xhci->lock, flags);
3366 for (i = 0; i < num_eps; i++) {
3367 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3368 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3369 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3370 udev->slot_id, ep_index);
3371 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3372 }
3373 xhci_free_command(xhci, config_cmd);
3374 spin_unlock_irqrestore(&xhci->lock, flags);
3375
3376
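	/* Subtract 1 for stream 0, which drivers can't use. */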
3377 return num_streams - 1;
3378
3379cleanup:
3380
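	/* Something went wrong, so free any streams that were allocated. */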
3381 for (i = 0; i < num_eps; i++) {
3382 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3383 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3384 vdev->eps[ep_index].stream_info = NULL;
3385
3386
3387
3388 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3389 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3390 xhci_endpoint_zero(xhci, vdev, eps[i]);
3391 }
3392 xhci_free_command(xhci, config_cmd);
3393 return -ENOMEM;
3394}
3395
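/*
 * Transition the endpoint from using streams to being a "normal" endpoint
 * without streams.
 *
 * Modify the endpoint context state, submit a configure endpoint command,
 * and free all endpoint rings for streams if that completes successfully.
 */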
3402static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3403 struct usb_host_endpoint **eps, unsigned int num_eps,
3404 gfp_t mem_flags)
3405{
3406 int i, ret;
3407 struct xhci_hcd *xhci;
3408 struct xhci_virt_device *vdev;
3409 struct xhci_command *command;
3410 struct xhci_input_control_ctx *ctrl_ctx;
3411 unsigned int ep_index;
3412 unsigned long flags;
3413 u32 changed_ep_bitmask;
3414
3415 xhci = hcd_to_xhci(hcd);
3416 vdev = xhci->devs[udev->slot_id];
3417
3418
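	/* Set up a configure endpoint command to remove the streams rings. */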
3419 spin_lock_irqsave(&xhci->lock, flags);
3420 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3421 udev, eps, num_eps);
3422 if (changed_ep_bitmask == 0) {
3423 spin_unlock_irqrestore(&xhci->lock, flags);
3424 return -EINVAL;
3425 }
3426
3427
3428
3429
3430
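	/*
	 * Use the xhci_command structure from the first endpoint.  The driver
	 * may call xhci_free_streams() once for each endpoint it grouped into
	 * one call to xhci_alloc_streams().
	 */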
3431 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3432 command = vdev->eps[ep_index].stream_info->free_streams_command;
3433 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3434 if (!ctrl_ctx) {
3435 spin_unlock_irqrestore(&xhci->lock, flags);
3436 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3437 __func__);
3438 return -EINVAL;
3439 }
3440
3441 for (i = 0; i < num_eps; i++) {
3442 struct xhci_ep_ctx *ep_ctx;
3443
3444 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3445 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3446 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3447 EP_GETTING_NO_STREAMS;
3448
3449 xhci_endpoint_copy(xhci, command->in_ctx,
3450 vdev->out_ctx, ep_index);
3451 xhci_setup_no_streams_ep_input_ctx(ep_ctx,
3452 &vdev->eps[ep_index]);
3453 }
3454 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3455 vdev->out_ctx, ctrl_ctx,
3456 changed_ep_bitmask, changed_ep_bitmask);
3457 spin_unlock_irqrestore(&xhci->lock, flags);
3458
3459
3460
3461
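	/*
	 * Issue and wait for the configure endpoint command,
	 * which must succeed.
	 */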
3462 ret = xhci_configure_endpoint(xhci, udev, command,
3463 false, true);
3464
3465
3466
3467
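	/*
	 * The xHC rejected the configure endpoint command for some reason, so
	 * leave the streams rings intact.
	 */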
3468 if (ret < 0)
3469 return ret;
3470
3471 spin_lock_irqsave(&xhci->lock, flags);
3472 for (i = 0; i < num_eps; i++) {
3473 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3474 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3475 vdev->eps[ep_index].stream_info = NULL;
3476
3477
3478
3479 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3480 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3481 }
3482 spin_unlock_irqrestore(&xhci->lock, flags);
3483
3484 return 0;
3485}
3486
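/*
 * Deletes endpoint resources for endpoints that were active before a Reset
 * Device command or a Disable Slot command.  The Reset Device command leaves
 * the control endpoint intact, whereas the Disable Slot command deletes it.
 *
 * Must be called with xhci->lock held.
 */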
3494void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3495 struct xhci_virt_device *virt_dev, bool drop_control_ep)
3496{
3497 int i;
3498 unsigned int num_dropped_eps = 0;
3499 unsigned int drop_flags = 0;
3500
3501 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3502 if (virt_dev->eps[i].ring) {
3503 drop_flags |= 1 << i;
3504 num_dropped_eps++;
3505 }
3506 }
3507 xhci->num_active_eps -= num_dropped_eps;
3508 if (num_dropped_eps)
3509 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3510 "Dropped %u ep ctxs, flags = 0x%x, "
3511 "%u now active.",
3512 num_dropped_eps, drop_flags,
3513 xhci->num_active_eps);
3514}
3515
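/*
 * Submits a Reset Device command, which sets the device state to 0, sets the
 * device address to 0, and disables all the endpoints except the default
 * control endpoint.  The USB core should come back and catch up on the
 * device state by issuing a new Address Device command and then re-installing
 * the configuration.
 *
 * If the virt_dev to be reset does not exist or does not match the udev,
 * the device is presumed lost (for example across an xHC restore error
 * during S3/S4), and is re-allocated instead.
 */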
3534static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
3535 struct usb_device *udev)
3536{
3537 int ret, i;
3538 unsigned long flags;
3539 struct xhci_hcd *xhci;
3540 unsigned int slot_id;
3541 struct xhci_virt_device *virt_dev;
3542 struct xhci_command *reset_device_cmd;
3543 struct xhci_slot_ctx *slot_ctx;
3544 int old_active_eps = 0;
3545
3546 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3547 if (ret <= 0)
3548 return ret;
3549 xhci = hcd_to_xhci(hcd);
3550 slot_id = udev->slot_id;
3551 virt_dev = xhci->devs[slot_id];
3552 if (!virt_dev) {
3553 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3554 "not exist. Re-allocate the device\n", slot_id);
3555 ret = xhci_alloc_dev(hcd, udev);
3556 if (ret == 1)
3557 return 0;
3558 else
3559 return -EINVAL;
3560 }
3561
3562 if (virt_dev->tt_info)
3563 old_active_eps = virt_dev->tt_info->active_eps;
3564
3565 if (virt_dev->udev != udev) {
3566
3567
3568
3569
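		/*
		 * The virt_dev structure and udev do not match, so this
		 * virt_dev may belong to another udev.  Re-allocate it.
		 */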
3570 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3571 "not match the udev. Re-allocate the device\n",
3572 slot_id);
3573 ret = xhci_alloc_dev(hcd, udev);
3574 if (ret == 1)
3575 return 0;
3576 else
3577 return -EINVAL;
3578 }
3579
3580
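	/* If the device is not set up, there is nothing to reset. */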
3581 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3582 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3583 SLOT_STATE_DISABLED)
3584 return 0;
3585
3586 trace_xhci_discover_or_reset_device(slot_ctx);
3587
3588 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3589
3590
3591
3592
3593
3594
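	/*
	 * Allocate the command structure that holds the struct completion.
	 * Assume we're in process context, since the normal device reset
	 * process has to wait for the device anyway.
	 */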
3595 reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
3596 if (!reset_device_cmd) {
3597 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3598 return -ENOMEM;
3599 }
3600
3601
3602 spin_lock_irqsave(&xhci->lock, flags);
3603
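	/* Attempt to submit the Reset Device command to the command ring. */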
3604 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
3605 if (ret) {
3606 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3607 spin_unlock_irqrestore(&xhci->lock, flags);
3608 goto command_cleanup;
3609 }
3610 xhci_ring_cmd_db(xhci);
3611 spin_unlock_irqrestore(&xhci->lock, flags);
3612
3613
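	/* Wait for the Reset Device command to finish. */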
3614 wait_for_completion(reset_device_cmd->completion);
3615
3616
3617
3618
3619
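	/*
	 * The Reset Device command can't fail unless we tried to reset a slot
	 * ID that wasn't enabled, or the device wasn't in the addressed or
	 * configured state.
	 */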
3620 ret = reset_device_cmd->status;
3621 switch (ret) {
3622 case COMP_COMMAND_ABORTED:
3623 case COMP_COMMAND_RING_STOPPED:
3624 xhci_warn(xhci, "Timeout waiting for reset device command\n");
3625 ret = -ETIME;
3626 goto command_cleanup;
3627 case COMP_SLOT_NOT_ENABLED_ERROR:
3628 case COMP_CONTEXT_STATE_ERROR:
3629 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
3630 slot_id,
3631 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3632 xhci_dbg(xhci, "Not freeing device rings.\n");
3633
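		/* Don't treat this as an error. */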
3634 ret = 0;
3635 goto command_cleanup;
3636 case COMP_SUCCESS:
3637 xhci_dbg(xhci, "Successful reset device command.\n");
3638 break;
3639 default:
3640 if (xhci_is_vendor_info_code(xhci, ret))
3641 break;
3642 xhci_warn(xhci, "Unknown completion code %u for "
3643 "reset device command.\n", ret);
3644 ret = -EINVAL;
3645 goto command_cleanup;
3646 }
3647
3648
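	/* Free up host controller endpoint resources. */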
3649 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3650 spin_lock_irqsave(&xhci->lock, flags);
3651
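		/* Don't delete the default control endpoint resources. */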
3652 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3653 spin_unlock_irqrestore(&xhci->lock, flags);
3654 }
3655
3656
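	/* Everything but endpoint 0 is disabled, so free the rings. */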
3657 for (i = 1; i < 31; i++) {
3658 struct xhci_virt_ep *ep = &virt_dev->eps[i];
3659
3660 if (ep->ep_state & EP_HAS_STREAMS) {
3661 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
3662 xhci_get_endpoint_address(i));
3663 xhci_free_stream_info(xhci, ep->stream_info);
3664 ep->stream_info = NULL;
3665 ep->ep_state &= ~EP_HAS_STREAMS;
3666 }
3667
3668 if (ep->ring) {
3669 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3670 xhci_free_endpoint_ring(xhci, virt_dev, i);
3671 }
3672 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3673 xhci_drop_ep_from_interval_table(xhci,
3674 &virt_dev->eps[i].bw_info,
3675 virt_dev->bw_table,
3676 udev,
3677 &virt_dev->eps[i],
3678 virt_dev->tt_info);
3679 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3680 }
3681
3682 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3683 ret = 0;
3684
3685command_cleanup:
3686 xhci_free_command(xhci, reset_device_cmd);
3687 return ret;
3688}
3689
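/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, all traffic has been stopped, and the endpoints have been
 * disabled.  Free any HC data structures associated with that device.
 */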
3695static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3696{
3697 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3698 struct xhci_virt_device *virt_dev;
3699 struct xhci_slot_ctx *slot_ctx;
3700 int i, ret;
3701
3702#ifndef CONFIG_USB_DEFAULT_PERSIST
3703
3704
3705
3706
3707
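	/*
	 * We called pm_runtime_get_noresume() when the device was attached;
	 * drop that reference here so the controller can runtime suspend if
	 * no devices remain.
	 */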
3708 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3709 pm_runtime_put_noidle(hcd->self.controller);
3710#endif
3711
3712 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3713
3714
3715
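	/*
	 * If the host is halted due to driver unload, we still need to free
	 * the device.
	 */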
3716 if (ret <= 0 && ret != -ENODEV)
3717 return;
3718
3719 virt_dev = xhci->devs[udev->slot_id];
3720 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3721 trace_xhci_free_dev(slot_ctx);
3722
3723
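	/* Stop any wayward timer functions (which may grab the lock). */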
3724 for (i = 0; i < 31; i++) {
3725 virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
3726 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
3727 }
3728 xhci_debugfs_remove_slot(xhci, udev->slot_id);
3729 virt_dev->udev = NULL;
3730 ret = xhci_disable_slot(xhci, udev->slot_id);
3731 if (ret)
3732 xhci_free_virt_device(xhci, udev->slot_id);
3733}
3734
3735int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
3736{
3737 struct xhci_command *command;
3738 unsigned long flags;
3739 u32 state;
3740 int ret = 0;
3741
3742 command = xhci_alloc_command(xhci, false, GFP_KERNEL);
3743 if (!command)
3744 return -ENOMEM;
3745
3746 spin_lock_irqsave(&xhci->lock, flags);
3747
3748 state = readl(&xhci->op_regs->status);
3749 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3750 (xhci->xhc_state & XHCI_STATE_HALTED)) {
3751 spin_unlock_irqrestore(&xhci->lock, flags);
3752 kfree(command);
3753 return -ENODEV;
3754 }
3755
3756 ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
3757 slot_id);
3758 if (ret) {
3759 spin_unlock_irqrestore(&xhci->lock, flags);
3760 kfree(command);
3761 return ret;
3762 }
3763 xhci_ring_cmd_db(xhci);
3764 spin_unlock_irqrestore(&xhci->lock, flags);
3765 return ret;
3766}
3767
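/*
 * Checks if we have enough host controller resources for the default control
 * endpoint.
 *
 * Must be called with xhci->lock held.
 */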
3774static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3775{
3776 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
3777 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3778 "Not enough ep ctxs: "
3779 "%u active, need to add 1, limit is %u.",
3780 xhci->num_active_eps, xhci->limit_active_eps);
3781 return -ENOMEM;
3782 }
3783 xhci->num_active_eps += 1;
3784 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3785 "Adding 1 ep ctx, %u now active.",
3786 xhci->num_active_eps);
3787 return 0;
3788}
3789
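/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed.  Returns 1 on success.
 */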
3795int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3796{
3797 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3798 struct xhci_virt_device *vdev;
3799 struct xhci_slot_ctx *slot_ctx;
3800 unsigned long flags;
3801 int ret, slot_id;
3802 struct xhci_command *command;
3803
3804 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
3805 if (!command)
3806 return 0;
3807
3808 spin_lock_irqsave(&xhci->lock, flags);
3809 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
3810 if (ret) {
3811 spin_unlock_irqrestore(&xhci->lock, flags);
3812 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3813 xhci_free_command(xhci, command);
3814 return 0;
3815 }
3816 xhci_ring_cmd_db(xhci);
3817 spin_unlock_irqrestore(&xhci->lock, flags);
3818
3819 wait_for_completion(command->completion);
3820 slot_id = command->slot_id;
3821
3822 if (!slot_id || command->status != COMP_SUCCESS) {
3823 xhci_err(xhci, "Error while assigning device slot ID\n");
3824 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
3825 HCS_MAX_SLOTS(
3826 readl(&xhci->cap_regs->hcs_params1)));
3827 xhci_free_command(xhci, command);
3828 return 0;
3829 }
3830
3831 xhci_free_command(xhci, command);
3832
3833 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3834 spin_lock_irqsave(&xhci->lock, flags);
3835 ret = xhci_reserve_host_control_ep_resources(xhci);
3836 if (ret) {
3837 spin_unlock_irqrestore(&xhci->lock, flags);
3838 xhci_warn(xhci, "Not enough host resources, "
3839 "active endpoint contexts = %u\n",
3840 xhci->num_active_eps);
3841 goto disable_slot;
3842 }
3843 spin_unlock_irqrestore(&xhci->lock, flags);
3844 }
3845
3846
3847
3848
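	/*
	 * Use GFP_NOIO, since this function can be called from
	 * xhci_discover_or_reset_device(), which may be called as part of
	 * mass storage driver error handling.
	 */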
3849 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
3850 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
3851 goto disable_slot;
3852 }
3853 vdev = xhci->devs[slot_id];
3854 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
3855 trace_xhci_alloc_dev(slot_ctx);
3856
3857 udev->slot_id = slot_id;
3858
3859 xhci_debugfs_create_slot(xhci, slot_id);
3860
3861#ifndef CONFIG_USB_DEFAULT_PERSIST
3862
3863
3864
3865
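	/*
	 * If resetting upon resume, we can't put the controller into runtime
	 * suspend if there is a device attached.
	 */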
3866 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3867 pm_runtime_get_noresume(hcd->self.controller);
3868#endif
3869
3870
3871
3872 return 1;
3873
3874disable_slot:
3875 ret = xhci_disable_slot(xhci, udev->slot_id);
3876 if (ret)
3877 xhci_free_virt_device(xhci, udev->slot_id);
3878
3879 return 0;
3880}
3881
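/*
 * Issue an Address Device command, with the chosen setup context mode, and
 * wait for it to finish.
 */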
3886static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
3887 enum xhci_setup_dev setup)
3888{
3889 const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
3890 unsigned long flags;
3891 struct xhci_virt_device *virt_dev;
3892 int ret = 0;
3893 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3894 struct xhci_slot_ctx *slot_ctx;
3895 struct xhci_input_control_ctx *ctrl_ctx;
3896 u64 temp_64;
3897 struct xhci_command *command = NULL;
3898
3899 mutex_lock(&xhci->mutex);
3900
3901 if (xhci->xhc_state) {
3902 ret = -ESHUTDOWN;
3903 goto out;
3904 }
3905
3906 if (!udev->slot_id) {
3907 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3908 "Bad Slot ID %d", udev->slot_id);
3909 ret = -EINVAL;
3910 goto out;
3911 }
3912
3913 virt_dev = xhci->devs[udev->slot_id];
3914
3915 if (WARN_ON(!virt_dev)) {
3916
3917
3918
3919
3920
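		/* virt_dev should exist here; warn rather than crash. */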
3921 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
3922 udev->slot_id);
3923 ret = -EINVAL;
3924 goto out;
3925 }
3926 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3927 trace_xhci_setup_device_slot(slot_ctx);
3928
3929 if (setup == SETUP_CONTEXT_ONLY) {
3930 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3931 SLOT_STATE_DEFAULT) {
3932 xhci_dbg(xhci, "Slot already in default state\n");
3933 goto out;
3934 }
3935 }
3936
3937 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
3938 if (!command) {
3939 ret = -ENOMEM;
3940 goto out;
3941 }
3942
3943 command->in_ctx = virt_dev->in_ctx;
3944
3945 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
3946 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
3947 if (!ctrl_ctx) {
3948 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3949 __func__);
3950 ret = -EINVAL;
3951 goto out;
3952 }
3953
3954
3955
3956
3957
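	/*
	 * If this is the first Set Address since device plug-in, set up the
	 * slot context.
	 */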
3958 if (!slot_ctx->dev_info)
3959 xhci_setup_addressable_virt_dev(xhci, udev);
3960
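	/* Otherwise, update the control endpoint ring enqueue pointer. */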
3961 else
3962 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
3963 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
3964 ctrl_ctx->drop_flags = 0;
3965
3966 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
3967 le32_to_cpu(slot_ctx->dev_info) >> 27);
3968
3969 spin_lock_irqsave(&xhci->lock, flags);
3970 trace_xhci_setup_device(virt_dev);
3971 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
3972 udev->slot_id, setup);
3973 if (ret) {
3974 spin_unlock_irqrestore(&xhci->lock, flags);
3975 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3976 "FIXME: allocate a command ring segment");
3977 goto out;
3978 }
3979 xhci_ring_cmd_db(xhci);
3980 spin_unlock_irqrestore(&xhci->lock, flags);
3981
3982
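	/* Wait for the Address Device command to finish. */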
3983 wait_for_completion(command->completion);
3984
3985
3986
3987
3988
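	/* Sort out the completion status of the Address Device command. */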
3989 switch (command->status) {
3990 case COMP_COMMAND_ABORTED:
3991 case COMP_COMMAND_RING_STOPPED:
3992 xhci_warn(xhci, "Timeout while waiting for setup device command\n");
3993 ret = -ETIME;
3994 break;
3995 case COMP_CONTEXT_STATE_ERROR:
3996 case COMP_SLOT_NOT_ENABLED_ERROR:
3997 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
3998 act, udev->slot_id);
3999 ret = -EINVAL;
4000 break;
4001 case COMP_USB_TRANSACTION_ERROR:
4002 dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
4003
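		/*
		 * Disable the slot and try to re-enable it, so the device
		 * gets a fresh slot state.
		 */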
4004 mutex_unlock(&xhci->mutex);
4005 ret = xhci_disable_slot(xhci, udev->slot_id);
4006 if (!ret)
4007 xhci_alloc_dev(hcd, udev);
4008 kfree(command->completion);
4009 kfree(command);
4010 return -EPROTO;
4011 case COMP_INCOMPATIBLE_DEVICE_ERROR:
4012 dev_warn(&udev->dev,
4013 "ERROR: Incompatible device for setup %s command\n", act);
4014 ret = -ENODEV;
4015 break;
4016 case COMP_SUCCESS:
4017 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4018 "Successful setup %s command", act);
4019 break;
4020 default:
4021 xhci_err(xhci,
4022 "ERROR: unexpected setup %s command completion code 0x%x.\n",
4023 act, command->status);
4024 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
4025 ret = -EINVAL;
4026 break;
4027 }
4028 if (ret)
4029 goto out;
4030 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
4031 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4032 "Op regs DCBAA ptr = %#016llx", temp_64);
4033 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4034 "Slot ID %d dcbaa entry @%p = %#016llx",
4035 udev->slot_id,
4036 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
4037 (unsigned long long)
4038 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
4039 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4040 "Output Context DMA address = %#08llx",
4041 (unsigned long long)virt_dev->out_ctx->dma);
4042 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4043 le32_to_cpu(slot_ctx->dev_info) >> 27);
4044
4045
4046
4047
4048 trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
4049 le32_to_cpu(slot_ctx->dev_info) >> 27);
4050
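	/* Zero the input context control for later use. */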
4051 ctrl_ctx->add_flags = 0;
4052 ctrl_ctx->drop_flags = 0;
4053
4054 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4055 "Internal device address = %d",
4056 le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4057out:
4058 mutex_unlock(&xhci->mutex);
4059 if (command) {
4060 kfree(command->completion);
4061 kfree(command);
4062 }
4063 return ret;
4064}
4065
4066static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
4067{
4068 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
4069}
4070
4071static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
4072{
4073 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
4074}
4075
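/*
 * Translate a 1-based port index into the real index used by the HW port
 * status registers.
 */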
4082int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
4083{
4084 struct xhci_hub *rhub;
4085
4086 rhub = xhci_get_rhub(hcd);
4087 return rhub->ports[port1 - 1]->hw_portnum + 1;
4088}
4089
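/*
 * Change a device's Max Exit Latency (MEL) in the slot context, using an
 * Evaluate Context command.
 */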
4094static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4095 struct usb_device *udev, u16 max_exit_latency)
4096{
4097 struct xhci_virt_device *virt_dev;
4098 struct xhci_command *command;
4099 struct xhci_input_control_ctx *ctrl_ctx;
4100 struct xhci_slot_ctx *slot_ctx;
4101 unsigned long flags;
4102 int ret;
4103
4104 spin_lock_irqsave(&xhci->lock, flags);
4105
4106 virt_dev = xhci->devs[udev->slot_id];
4107
4108
4109
4110
4111
4112
4113
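	/*
	 * virt_dev might not exist yet if the xHC was re-initialized after a
	 * resume with power loss; nothing to do if the MEL is unchanged.
	 */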
4114 if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
4115 spin_unlock_irqrestore(&xhci->lock, flags);
4116 return 0;
4117 }
4118
4119
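	/* Attempt to issue an Evaluate Context command to change the MEL. */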
4120 command = xhci->lpm_command;
4121 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
4122 if (!ctrl_ctx) {
4123 spin_unlock_irqrestore(&xhci->lock, flags);
4124 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4125 __func__);
4126 return -ENOMEM;
4127 }
4128
4129 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4130 spin_unlock_irqrestore(&xhci->lock, flags);
4131
4132 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4133 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4134 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4135 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4136 slot_ctx->dev_state = 0;
4137
4138 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
4139 "Set up evaluate context for LPM MEL change.");
4140
4141
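	/* Issue and wait for the Evaluate Context command. */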
4142 ret = xhci_configure_endpoint(xhci, udev, command,
4143 true, true);
4144
4145 if (!ret) {
4146 spin_lock_irqsave(&xhci->lock, flags);
4147 virt_dev->current_mel = max_exit_latency;
4148 spin_unlock_irqrestore(&xhci->lock, flags);
4149 }
4150 return ret;
4151}
4152
4153#ifdef CONFIG_PM
4154
4155
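/* BESL to HIRD Encoding array for USB2 LPM */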
4156static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
4157 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
4158
4159
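/* Calculate HIRD/BESL for USB2 PORTPMSC */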
4160static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
4161 struct usb_device *udev)
4162{
4163 int u2del, besl, besl_host;
4164 int besl_device = 0;
4165 u32 field;
4166
4167 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
4168 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4169
4170 if (field & USB_BESL_SUPPORT) {
4171 for (besl_host = 0; besl_host < 16; besl_host++) {
4172 if (xhci_besl_encoding[besl_host] >= u2del)
4173 break;
4174 }
4175
4176 if (field & USB_BESL_BASELINE_VALID)
4177 besl_device = USB_GET_BESL_BASELINE(field);
4178 else if (field & USB_BESL_DEEP_VALID)
4179 besl_device = USB_GET_BESL_DEEP(field);
4180 } else {
4181 if (u2del <= 50)
4182 besl_host = 0;
4183 else
4184 besl_host = (u2del - 51) / 75 + 1;
4185 }
4186
4187 besl = besl_host + besl_device;
4188 if (besl > 15)
4189 besl = 15;
4190
4191 return besl;
4192}
4193
4194
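/* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */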
4195static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
4196{
4197 u32 field;
4198 int l1;
4199 int besld = 0;
4200 int hirdm = 0;
4201
4202 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4203
4204
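	/* The xHCI L1 timeout is set in steps of 256 microseconds. */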
4205 l1 = udev->l1_params.timeout / 256;
4206
4207
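	/* Use BESL Deep if the device advertises one. */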
4208 if (field & USB_BESL_DEEP_VALID) {
4209 besld = USB_GET_BESL_DEEP(field);
4210 hirdm = 1;
4211 }
4212
4213 return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
4214}
4215
4216static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4217 struct usb_device *udev, int enable)
4218{
4219 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4220 struct xhci_port **ports;
4221 __le32 __iomem *pm_addr, *hlpm_addr;
4222 u32 pm_val, hlpm_val, field;
4223 unsigned int port_num;
4224 unsigned long flags;
4225 int hird, exit_latency;
4226 int ret;
4227
4228 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
4229 !udev->lpm_capable)
4230 return -EPERM;
4231
4232 if (!udev->parent || udev->parent->parent ||
4233 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4234 return -EPERM;
4235
4236 if (udev->usb2_hw_lpm_capable != 1)
4237 return -EPERM;
4238
4239 spin_lock_irqsave(&xhci->lock, flags);
4240
4241 ports = xhci->usb2_rhub.ports;
4242 port_num = udev->portnum - 1;
4243 pm_addr = ports[port_num]->addr + PORTPMSC;
4244 pm_val = readl(pm_addr);
4245 hlpm_addr = ports[port_num]->addr + PORTHLPMC;
4246 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4247
4248 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
4249 enable ? "enable" : "disable", port_num + 1);
4250
4251 if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) {
4252
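		/* Host supports BESL timeout instead of HIRD. */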
4253 if (udev->usb2_hw_lpm_besl_capable) {
4254
4255
4256
4257
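			/*
			 * Use the device's baseline BESL if it advertises
			 * one; otherwise fall back to the value the USB core
			 * negotiated.
			 */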
4258 if ((field & USB_BESL_SUPPORT) &&
4259 (field & USB_BESL_BASELINE_VALID))
4260 hird = USB_GET_BESL_BASELINE(field);
4261 else
4262 hird = udev->l1_params.besl;
4263
4264 exit_latency = xhci_besl_encoding[hird];
4265 spin_unlock_irqrestore(&xhci->lock, flags);
4266
4267
4268
4269
4270
4271
4272
4273
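			/*
			 * Changing the max exit latency shares the LPM
			 * evaluate context command with the USB3 LPM code,
			 * so take the bandwidth mutex that protects it.
			 */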
4274 mutex_lock(hcd->bandwidth_mutex);
4275 ret = xhci_change_max_exit_latency(xhci, udev,
4276 exit_latency);
4277 mutex_unlock(hcd->bandwidth_mutex);
4278
4279 if (ret < 0)
4280 return ret;
4281 spin_lock_irqsave(&xhci->lock, flags);
4282
4283 hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
4284 writel(hlpm_val, hlpm_addr);
4285
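			/* flush write */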
4286 readl(hlpm_addr);
4287 } else {
4288 hird = xhci_calculate_hird_besl(xhci, udev);
4289 }
4290
4291 pm_val &= ~PORT_HIRD_MASK;
4292 pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
4293 writel(pm_val, pm_addr);
4294 pm_val = readl(pm_addr);
4295 pm_val |= PORT_HLE;
4296 writel(pm_val, pm_addr);
4297
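		/* flush write */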
4298 readl(pm_addr);
4299 } else {
4300 pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
4301 writel(pm_val, pm_addr);
4302
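		/* flush write */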
4303 readl(pm_addr);
4304 if (udev->usb2_hw_lpm_besl_capable) {
4305 spin_unlock_irqrestore(&xhci->lock, flags);
4306 mutex_lock(hcd->bandwidth_mutex);
4307 xhci_change_max_exit_latency(xhci, udev, 0);
4308 mutex_unlock(hcd->bandwidth_mutex);
4309 return 0;
4310 }
4311 }
4312
4313 spin_unlock_irqrestore(&xhci->lock, flags);
4314 return 0;
4315}
4316
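/*
 * Check whether a USB2 port supports a given extended capability protocol.
 * Only USB2 ports' extended protocol capability values are cached.
 * Returns 1 if the capability is supported.
 */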
4321static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
4322 unsigned capability)
4323{
4324 u32 port_offset, port_count;
4325 int i;
4326
4327 for (i = 0; i < xhci->num_ext_caps; i++) {
4328 if (xhci->ext_caps[i] & capability) {
4329
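			/* Port offsets are 1-based. */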
4330 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
4331 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
4332 if (port >= port_offset &&
4333 port < port_offset + port_count)
4334 return 1;
4335 }
4336 }
4337 return 0;
4338}
4339
4340static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4341{
4342 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4343 int portnum = udev->portnum - 1;
4344
4345 if (hcd->speed >= HCD_USB3 || !xhci->sw_lpm_support ||
4346 !udev->lpm_capable)
4347 return 0;
4348
4349
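	/* We only support LPM for non-hub devices connected to a root hub. */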
4350 if (!udev->parent || udev->parent->parent ||
4351 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4352 return 0;
4353
4354 if (xhci->hw_lpm_support == 1 &&
4355 xhci_check_usb2_port_capability(
4356 xhci, portnum, XHCI_HLC)) {
4357 udev->usb2_hw_lpm_capable = 1;
4358 udev->l1_params.timeout = XHCI_L1_TIMEOUT;
4359 udev->l1_params.besl = XHCI_DEFAULT_BESL;
4360 if (xhci_check_usb2_port_capability(xhci, portnum,
4361 XHCI_BLC))
4362 udev->usb2_hw_lpm_besl_capable = 1;
4363 }
4364
4365 return 0;
4366}
4367
4368
4369
4370
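/* Service interval in ns = 2^(bInterval - 1) * 125us * 1000ns/us */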
4371static unsigned long long xhci_service_interval_to_ns(
4372 struct usb_endpoint_descriptor *desc)
4373{
4374 return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
4375}
4376
4377static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
4378 enum usb3_link_state state)
4379{
4380 unsigned long long sel;
4381 unsigned long long pel;
4382 unsigned int max_sel_pel;
4383 char *state_name;
4384
4385 switch (state) {
4386 case USB3_LPM_U1:
4387
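		/* Convert SEL and PEL stored in nanoseconds to microseconds. */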
4388 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
4389 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
4390 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
4391 state_name = "U1";
4392 break;
4393 case USB3_LPM_U2:
4394 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
4395 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
4396 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
4397 state_name = "U2";
4398 break;
4399 default:
4400 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
4401 __func__);
4402 return USB3_LPM_DISABLED;
4403 }
4404
4405 if (sel <= max_sel_pel && pel <= max_sel_pel)
4406 return USB3_LPM_DEVICE_INITIATED;
4407
4408 if (sel > max_sel_pel)
4409 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4410 "due to long SEL %llu ms\n",
4411 state_name, sel);
4412 else
4413 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4414 "due to long PEL %llu ms\n",
4415 state_name, pel);
4416 return USB3_LPM_DISABLED;
4417}
4418
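/*
 * Intel-specific U1 timeout heuristics: scale the device's U1 System Exit
 * Latency (SEL) by transfer type, and give periodic endpoints at least their
 * service interval plus 5%.
 */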
4427static unsigned long long xhci_calculate_intel_u1_timeout(
4428 struct usb_device *udev,
4429 struct usb_endpoint_descriptor *desc)
4430{
4431 unsigned long long timeout_ns;
4432 int ep_type;
4433 int intr_type;
4434
4435 ep_type = usb_endpoint_type(desc);
4436 switch (ep_type) {
4437 case USB_ENDPOINT_XFER_CONTROL:
4438 timeout_ns = udev->u1_params.sel * 3;
4439 break;
4440 case USB_ENDPOINT_XFER_BULK:
4441 timeout_ns = udev->u1_params.sel * 5;
4442 break;
4443 case USB_ENDPOINT_XFER_INT:
4444 intr_type = usb_endpoint_interrupt_type(desc);
4445 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
4446 timeout_ns = udev->u1_params.sel * 3;
4447 break;
4448 }
4449
4450
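		/* Otherwise the calculation is the same as for isoc eps. */
		/* fall through */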
4451 case USB_ENDPOINT_XFER_ISOC:
4452 timeout_ns = xhci_service_interval_to_ns(desc);
4453 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
4454 if (timeout_ns < udev->u1_params.sel * 2)
4455 timeout_ns = udev->u1_params.sel * 2;
4456 break;
4457 default:
4458 return 0;
4459 }
4460
4461 return timeout_ns;
4462}
4463
4464
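/* Returns the hub-encoded U1 timeout value. */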
4465static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
4466 struct usb_device *udev,
4467 struct usb_endpoint_descriptor *desc)
4468{
4469 unsigned long long timeout_ns;
4470
4471 if (xhci->quirks & XHCI_INTEL_HOST)
4472 timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
4473 else
4474 timeout_ns = udev->u1_params.sel;
4475
4476
4477
4478
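	/*
	 * The U1 timeout is encoded in 1us intervals.  Don't return a timeout
	 * of zero, because that's USB3_LPM_DISABLED.
	 */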
4479 if (timeout_ns == USB3_LPM_DISABLED)
4480 timeout_ns = 1;
4481 else
4482 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
4483
4484
4485
4486
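	/*
	 * If the necessary timeout value is bigger than what we can set in
	 * the USB 3.0 hub, we have to disable hub-initiated U1.
	 */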
4487 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
4488 return timeout_ns;
4489 dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
4490 "due to long timeout %llu ms\n", timeout_ns);
4491 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
4492}
4493
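/*
 * The U2 timeout should be the maximum of:
 *  - 10 ms (to avoid the bandwidth impact on the scheduler),
 *  - the largest service interval of any periodic endpoint, and
 *  - the U2 Exit Latency of the device.
 */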
4500static unsigned long long xhci_calculate_intel_u2_timeout(
4501 struct usb_device *udev,
4502 struct usb_endpoint_descriptor *desc)
4503{
4504 unsigned long long timeout_ns;
4505 unsigned long long u2_del_ns;
4506
4507 timeout_ns = 10 * 1000 * 1000;
4508
4509 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4510 (xhci_service_interval_to_ns(desc) > timeout_ns))
4511 timeout_ns = xhci_service_interval_to_ns(desc);
4512
4513 u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
4514 if (u2_del_ns > timeout_ns)
4515 timeout_ns = u2_del_ns;
4516
4517 return timeout_ns;
4518}
4519
4520
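/* Returns the hub-encoded U2 timeout value. */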
4521static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
4522 struct usb_device *udev,
4523 struct usb_endpoint_descriptor *desc)
4524{
4525 unsigned long long timeout_ns;
4526
4527 if (xhci->quirks & XHCI_INTEL_HOST)
4528 timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
4529 else
4530 timeout_ns = udev->u2_params.sel;
4531
4532
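	/* The U2 timeout is encoded in 256us intervals. */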
4533 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
4534
4535
4536
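	/*
	 * If the necessary timeout value is bigger than what we can set in
	 * the USB 3.0 hub, we have to disable hub-initiated U2.
	 */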
4537 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
4538 return timeout_ns;
4539 dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
4540 "due to long timeout %llu ms\n", timeout_ns);
4541 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
4542}
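
/* Continuing the example above: 16,000,000 ns / 256,000 rounds up to 63,
 * i.e. 63 * 256 us ~= 16.1 ms; 63 fits under USB3_LPM_U2_MAX_TIMEOUT
 * (0xFE, 254 intervals), so 63 is what gets programmed into the hub.
 */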

static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc,
		enum usb3_link_state state,
		u16 *timeout)
{
	if (state == USB3_LPM_U1)
		return xhci_calculate_u1_timeout(xhci, udev, desc);
	else if (state == USB3_LPM_U2)
		return xhci_calculate_u2_timeout(xhci, udev, desc);

	return USB3_LPM_DISABLED;
}

static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc,
		enum usb3_link_state state,
		u16 *timeout)
{
	u16 alt_timeout;

	alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
		desc, state, timeout);

	/* If we found we can't enable hub-initiated LPM, or
	 * the U1 or U2 exit latency was too high to allow
	 * device-initiated LPM as well, just stop searching.
	 */
	if (alt_timeout == USB3_LPM_DISABLED ||
			alt_timeout == USB3_LPM_DEVICE_INITIATED) {
		*timeout = alt_timeout;
		return -E2BIG;
	}
	if (alt_timeout > *timeout)
		*timeout = alt_timeout;
	return 0;
}

static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_interface *alt,
		enum usb3_link_state state,
		u16 *timeout)
{
	int j;

	for (j = 0; j < alt->desc.bNumEndpoints; j++) {
		if (xhci_update_timeout_for_endpoint(xhci, udev,
				&alt->endpoint[j].desc, state, timeout))
			return -E2BIG;
	}
	return 0;
}
4599
4600static int xhci_check_intel_tier_policy(struct usb_device *udev,
4601 enum usb3_link_state state)
4602{
4603 struct usb_device *parent;
4604 unsigned int num_hubs;
4605
4606 if (state == USB3_LPM_U2)
4607 return 0;
4608
4609
4610 for (parent = udev->parent, num_hubs = 0; parent->parent;
4611 parent = parent->parent)
4612 num_hubs++;
4613
4614 if (num_hubs < 2)
4615 return 0;
4616
4617 dev_dbg(&udev->dev, "Disabling U1 link state for device"
4618 " below second-tier hub.\n");
4619 dev_dbg(&udev->dev, "Plug device into first-tier hub "
4620 "to decrease power consumption.\n");
4621 return -E2BIG;
4622}
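
/* Illustration of the hub-counting loop above: for a device plugged
 * straight into a root port, udev->parent is the root hub and the loop
 * body never runs (num_hubs = 0); one external hub gives num_hubs = 1
 * (U1 still allowed); two chained external hubs give num_hubs = 2, and
 * U1 is refused with -E2BIG.
 */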

static int xhci_check_tier_policy(struct xhci_hcd *xhci,
		struct usb_device *udev,
		enum usb3_link_state state)
{
	if (xhci->quirks & XHCI_INTEL_HOST)
		return xhci_check_intel_tier_policy(udev, state);
	else
		return 0;
}

/* Returns the U1 or U2 timeout that should be enabled.
 * If the tier check or timeout setting functions return with a non-zero exit
 * code, that means the timeout value has been finalized and we shouldn't look
 * at any more endpoints.
 */
static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct usb_host_config *config;
	char *state_name;
	int i;
	u16 timeout = USB3_LPM_DISABLED;

	if (state == USB3_LPM_U1)
		state_name = "U1";
	else if (state == USB3_LPM_U2)
		state_name = "U2";
	else {
		dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
				state);
		return timeout;
	}

	if (xhci_check_tier_policy(xhci, udev, state) < 0)
		return timeout;

	/* Gather some information about the currently installed configuration
	 * and alternate interface settings.
	 */
	if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
			state, &timeout))
		return timeout;

	config = udev->actconfig;
	if (!config)
		return timeout;

	for (i = 0; i < config->desc.bNumInterfaces; i++) {
		struct usb_driver *driver;
		struct usb_interface *intf = config->interface[i];

		if (!intf)
			continue;

		/* Check if any currently bound drivers want hub-initiated LPM
		 * disabled.
		 */
		if (intf->dev.driver) {
			driver = to_usb_driver(intf->dev.driver);
			if (driver && driver->disable_hub_initiated_lpm) {
				dev_dbg(&udev->dev, "Hub-initiated %s disabled "
						"at request of driver %s\n",
						state_name, driver->name);
				return xhci_get_timeout_no_hub_lpm(udev, state);
			}
		}

		/* Not sure how this could happen... */
		if (!intf->cur_altsetting)
			continue;

		if (xhci_update_timeout_for_interface(xhci, udev,
					intf->cur_altsetting,
					state, &timeout))
			return timeout;
	}
	return timeout;
}

static int calculate_max_exit_latency(struct usb_device *udev,
		enum usb3_link_state state_changed,
		u16 hub_encoded_timeout)
{
	unsigned long long u1_mel_us = 0;
	unsigned long long u2_mel_us = 0;
	unsigned long long mel_us = 0;
	bool disabling_u1;
	bool disabling_u2;
	bool enabling_u1;
	bool enabling_u2;

	disabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);
	disabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);

	enabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);
	enabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);

	/* If U1 was already enabled and we're not disabling it,
	 * or we're going to enable U1, account for the U1 max exit latency.
	 */
	if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
			enabling_u1)
		u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
	if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
			enabling_u2)
		u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);

	if (u1_mel_us > u2_mel_us)
		mel_us = u1_mel_us;
	else
		mel_us = u2_mel_us;

	if (mel_us > MAX_EXIT) {
		dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
				"is too big.\n", mel_us);
		return -E2BIG;
	}
	return mel_us;
}
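
/* Example (illustrative values): with U1 MEL = 3,000 ns and U2 MEL =
 * 10,240 ns, the per-state exit latencies round up to 3 us and 11 us;
 * enabling U2 while U1 stays enabled therefore reports mel_us = 11,
 * which is then checked against MAX_EXIT.
 */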

/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci;
	u16 hub_encoded_timeout;
	int mel;
	int ret;

	xhci = hcd_to_xhci(hcd);
	/* The LPM timeout values are pretty host-controller specific, so don't
	 * enable hub-initiated timeouts unless the vendor has provided
	 * information about their timeout algorithm.
	 */
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return USB3_LPM_DISABLED;

	hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
	if (mel < 0) {
		/* Max Exit Latency is too big, disable LPM. */
		hub_encoded_timeout = USB3_LPM_DISABLED;
		mel = 0;
	}

	ret = xhci_change_max_exit_latency(xhci, udev, mel);
	if (ret)
		return ret;
	return hub_encoded_timeout;
}

static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci;
	u16 mel;

	xhci = hcd_to_xhci(hcd);
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return 0;

	mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
	return xhci_change_max_exit_latency(xhci, udev, mel);
}
#else /* CONFIG_PM */

static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
		struct usb_device *udev, int enable)
{
	return 0;
}

static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return 0;
}

static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	return USB3_LPM_DISABLED;
}

static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	return 0;
}
#endif /* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */
static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}

	config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
	if (!config_cmd)
		return -ENOMEM;

	ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		xhci_free_command(xhci, config_cmd);
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (hdev->speed == USB_SPEED_HIGH &&
			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENOMEM;
	}

	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
	/*
	 * Per xHCI section 6.2.2, MTT should be 0 for full speed hubs,
	 * but it may already be set to 1 when the xHCI virtual device
	 * was set up, so clear it in that case.
	 */
	if (tt->multi)
		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	else if (hdev->speed == USB_SPEED_FULL)
		slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);

	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub "
				"TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 *
		 * xHCI 1.0: this field shall be 0 if the device
		 * is not a high-speed hub.
		 */
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
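		/* For illustration: usbcore stores tt->think_time in ns as a
		 * multiple of 666 (8 FS bit times ~= 666 ns), so a hub
		 * descriptor value of 16 FS bit times arrives here as 1332 ns
		 * and encodes to (1332 / 666) - 1 = 1.
		 */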
		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
			slot_ctx->tt_info |=
				cpu_to_le32(TT_THINK_TIME(think_time));
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
				"TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_free_command(xhci, config_cmd);
	return ret;
}

static int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	return readl(&xhci->run_regs->microframe_index) >> 3;
}
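
/* Example: MFINDEX counts 125 us microframes, so a raw reading of 80
 * (0x50) >> 3 yields frame number 10; the low three bits are the
 * microframe within that 1 ms frame.
 */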

int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
	struct xhci_hcd *xhci;
	/*
	 * TODO: Check with DWC3 clients for sysdev according to
	 * quirks
	 */
	struct device *dev = hcd->self.sysdev;
	unsigned int minor_rev;
	int retval;

	/* Accept arbitrarily long scatter-gather lists */
	hcd->self.sg_tablesize = ~0;

	/* support to build packet from discontinuous buffers */
	hcd->self.no_sg_constraint = 1;

	/* xHCI controllers don't stop the ep queue on short packets */
	hcd->self.no_stop_on_short = 1;

	xhci = hcd_to_xhci(hcd);

	if (usb_hcd_is_primary_hcd(hcd)) {
		xhci->main_hcd = hcd;
		xhci->usb2_rhub.hcd = hcd;
		/* Mark the first roothub as being USB 2.0.
		 * The xHCI driver will register the USB 3.0 roothub.
		 */
		hcd->speed = HCD_USB2;
		hcd->self.root_hub->speed = USB_SPEED_HIGH;
		/*
		 * USB 2.0 roothub under xHCI has an integrated TT,
		 * (rate matching hub) as opposed to having an OHCI/UHCI
		 * companion controller.
		 */
		hcd->has_tt = 1;
	} else {
		/*
		 * Some 3.1 hosts return sbrn 0x30, use xhci supported protocol
		 * minor revision instead of sbrn.
		 */
		minor_rev = xhci->usb3_rhub.min_rev;
		if (minor_rev) {
			hcd->speed = HCD_USB31;
			hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
		}
		xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
				minor_rev,
				minor_rev ? "Enhanced " : "");

		xhci->usb3_rhub.hcd = hcd;
		/* xHCI private pointer was set in xhci_pci_probe for the second
		 * registered roothub.
		 */
		return 0;
	}

	mutex_init(&xhci->mutex);
	xhci->cap_regs = hcd->regs;
	xhci->op_regs = hcd->regs +
		HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
	xhci->run_regs = hcd->regs +
		(readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
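	/* For illustration: HC_LENGTH() extracts CAPLENGTH from the low byte
	 * of the first capability register, so with CAPLENGTH = 0x20 the
	 * operational registers start at hcd->regs + 0x20; RTSOFF_MASK
	 * similarly keeps only the 32-byte-aligned runtime register offset.
	 */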

	/* Cache read-only capability registers */
	xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
	xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase);
	xhci->hci_version = HC_VERSION(xhci->hcc_params);
	xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
	if (xhci->hci_version > 0x100)
		xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);

	xhci->quirks |= quirks;

	get_quirks(dev, xhci);

	/* xHCI controllers that follow the xHCI 1.0 spec give a spurious
	 * success event after a short transfer. Ignore such spurious
	 * events.
	 */
	if (xhci->hci_version > 0x96)
		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;

	/* Make sure the HC is halted. */
	retval = xhci_halt(xhci);
	if (retval)
		return retval;

	xhci_zero_64b_regs(xhci);

	xhci_dbg(xhci, "Resetting HCD\n");
	/* Reset the internal HC memory state and registers. */
	retval = xhci_reset(xhci);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Reset complete\n");

	/*
	 * Some xHCI controllers (e.g. R-Car SoCs) indicate 64-bit addressing
	 * support in the AC64 bit (bit 0) of HCCPARAMS1 but don't actually
	 * support 64-bit address memory pointers. Clear the AC64 bit of
	 * xhci->hcc_params here so that a 32-bit DMA mask is chosen below.
	 */
	if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
		xhci->hcc_params &= ~BIT(0);

	/* Set dma_mask and coherent_dma_mask to 64-bits,
	 * if xHC supports 64-bit addressing */
	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
			!dma_set_mask(dev, DMA_BIT_MASK(64))) {
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
	} else {
		/*
		 * This is to avoid error in cases where a 32-bit USB
		 * controller is used on a 64-bit capable system.
		 */
		retval = dma_set_mask(dev, DMA_BIT_MASK(32));
		if (retval)
			return retval;
		xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	}

	xhci_dbg(xhci, "Calling HCD init\n");
	/* Initialize HCD and host controller data structures. */
	retval = xhci_init(hcd);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Called HCD init\n");

	xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
			xhci->hcc_params, xhci->hci_version, xhci->quirks);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_gen_setup);

static const struct hc_driver xhci_hc_driver = {
	.description = "xhci-hcd",
	.product_desc = "xHCI Host Controller",
	.hcd_priv_size = sizeof(struct xhci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq = xhci_irq,
	.flags = HCD_MEMORY | HCD_USB3 | HCD_SHARED,

	/*
	 * basic lifecycle operations
	 */
	.reset = NULL, /* set in xhci_init_driver() */
	.start = xhci_run,
	.stop = xhci_stop,
	.shutdown = xhci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue = xhci_urb_enqueue,
	.urb_dequeue = xhci_urb_dequeue,
	.alloc_dev = xhci_alloc_dev,
	.free_dev = xhci_free_dev,
	.alloc_streams = xhci_alloc_streams,
	.free_streams = xhci_free_streams,
	.add_endpoint = xhci_add_endpoint,
	.drop_endpoint = xhci_drop_endpoint,
	.endpoint_reset = xhci_endpoint_reset,
	.check_bandwidth = xhci_check_bandwidth,
	.reset_bandwidth = xhci_reset_bandwidth,
	.address_device = xhci_address_device,
	.enable_device = xhci_enable_device,
	.update_hub_device = xhci_update_hub_device,
	.reset_device = xhci_discover_or_reset_device,

	/*
	 * scheduling support
	 */
	.get_frame_number = xhci_get_frame,

	/*
	 * root hub support
	 */
	.hub_control = xhci_hub_control,
	.hub_status_data = xhci_hub_status_data,
	.bus_suspend = xhci_bus_suspend,
	.bus_resume = xhci_bus_resume,

	/*
	 * call back when device connected and addressed
	 */
	.update_device = xhci_update_device,
	.set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm,
	.enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout,
	.disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout,
	.find_raw_port_number = xhci_find_raw_port_number,
};

void xhci_init_driver(struct hc_driver *drv,
		const struct xhci_driver_overrides *over)
{
	BUG_ON(!over);

	/* Copy the generic table to drv then apply the overrides */
	*drv = xhci_hc_driver;

	if (over) {
		drv->hcd_priv_size += over->extra_priv_size;
		if (over->reset)
			drv->reset = over->reset;
		if (over->start)
			drv->start = over->start;
	}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);
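
/*
 * Usage sketch (illustrative, modeled on xhci-plat.c): a glue driver keeps
 * its own hc_driver, describes its deviations in xhci_driver_overrides, and
 * calls xhci_init_driver() once at module init. The xhci_plat_* names below
 * belong to that driver and are shown here only as an example of the API.
 *
 *	static struct hc_driver __read_mostly xhci_plat_hc_driver;
 *
 *	static const struct xhci_driver_overrides xhci_plat_overrides __initconst = {
 *		.extra_priv_size = sizeof(struct xhci_plat_priv),
 *		.reset = xhci_plat_setup,
 *		.start = xhci_plat_start,
 *	};
 *
 *	xhci_init_driver(&xhci_plat_hc_driver, &xhci_plat_overrides);
 */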

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_reg */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);

	if (usb_disabled())
		return -ENODEV;

	xhci_debugfs_create_root();

	return 0;
}

/*
 * If an init function is provided, an exit function must also be provided
 * to allow module unload.
 */
static void __exit xhci_hcd_fini(void)
{
	xhci_debugfs_remove_root();
}

module_init(xhci_hcd_init);
module_exit(xhci_hcd_fini);