/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned int quirks;
module_param(quirks, uint, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
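
/*
 * Both parameters appear under /sys/module/xhci_hcd/parameters/.  For example
 * (illustrative only), loading the module with "quirks=0x10" ORs that bit
 * into the quirk flags of every host controller probed afterwards.
 */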

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There is a bug in the original code, which happens to
 * be how the hardware works as well.
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = readl(ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = readl(&xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = readl(&xhci->op_regs->command);
	cmd &= mask;
	writel(cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is done.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
	xhci_quiesce(xhci);

	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (!ret) {
		xhci->xhc_state |= XHCI_STATE_HALTED;
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	} else
		xhci_warn(xhci, "Host not halted after %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
			temp);
	writel(temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		/* clear state flags. Including dying, halted or removing */
		xhci->xhc_state = 0;

	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret, i;

	state = readl(&xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
	command = readl(&xhci->op_regs->command);
	command |= CMD_RESET;
	writel(command, &xhci->op_regs->command);

	/* Existing Intel xHCI controllers require a delay of 1 ms,
	 * after setting the CMD_RESET bit, and before accessing any HC
	 * registers.  This allows the HC to complete the reset operation
	 * and be ready for HC register access.  Without this delay, the
	 * subsequent HC register access may result in a system hang,
	 * very rarely.
	 */
	if (xhci->quirks & XHCI_INTEL_HOST)
		udelay(1000);

	ret = xhci_handshake(&xhci->op_regs->command,
			CMD_RESET, 0, 10 * 1000 * 1000);
	if (ret)
		return ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Wait for controller to be ready for doorbell rings");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_CNR, 0, 10 * 1000 * 1000);

	for (i = 0; i < 2; ++i) {
		/* Clear per-bus suspend/resume bookkeeping on both roothubs */
		xhci->bus_state[i].port_c_suspend = 0;
		xhci->bus_state[i].suspended_ports = 0;
		xhci->bus_state[i].resuming_ports = 0;
	}

	return ret;
}
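
/*
 * Interrupt setup: on PCI hosts the driver prefers MSI-X (up to one vector
 * per online CPU plus one, capped by HCSPARAMS1), falls back to MSI, and
 * finally to the shared legacy INTx line.  The helpers below manage that
 * life cycle; the !CONFIG_PCI stubs further down make them no-ops elsewhere.
 */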
#ifdef CONFIG_PCI
static int xhci_free_msi(struct xhci_hcd *xhci)
{
	int i;

	if (!xhci->msix_entries)
		return -EINVAL;

	for (i = 0; i < xhci->msix_count; i++)
		if (xhci->msix_entries[i].vector)
			free_irq(xhci->msix_entries[i].vector,
					xhci_to_hcd(xhci));
	return 0;
}

/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"failed to allocate MSI entry");
		return ret;
	}

	ret = request_irq(pdev->irq, xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"disable MSI interrupt");
		pci_disable_msi(pdev);
	}

	return ret;
}

/*
 * Free IRQs
 * free all IRQs request
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq > 0)
		return;

	ret = xhci_free_msi(xhci);
	if (!ret)
		return;
	if (pdev->irq > 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * calculate number of msi-x vectors supported.
	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
	 *   with max number of interrupters based on the xhci HCSPARAMS1.
	 * - num_online_cpus: maximum msi-x vectors per CPUs core.
	 *   Add additional 1 vector to ensure always available interrupt.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
				GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix_exact(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Failed to enable MSI-X");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	if (xhci->quirks & XHCI_PLAT)
		return;

	xhci_free_irq(xhci);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	hcd->msix_enabled = 0;
}

static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
	int i;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
	}
}

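/*
 * Pick the best available interrupt mechanism for this host: try MSI-X
 * first, fall back to MSI, and finally to the shared legacy PCI interrupt,
 * honoring quirks for controllers with broken MSI support.
 */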
static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev;
	int ret;

	/* The xhci platform device has set up IRQs through usb_add_hcd. */
	if (xhci->quirks & XHCI_PLAT)
		return 0;

	pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		goto legacy_irq;

	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = 0;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to msi */
		ret = xhci_setup_msi(xhci);

	if (!ret)
		/* hcd->irq is 0, we have MSI */
		return 0;

	if (!pdev->irq) {
		xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
		return -EINVAL;
	}

 legacy_irq:
	if (!strlen(hcd->irq_descr))
		snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
				hcd->driver->description, hcd->self.busnum);

	/* fall back to legacy interrupt */
	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);
	if (ret) {
		xhci_err(xhci, "request interrupt %d failed\n",
				pdev->irq);
		return ret;
	}
	hcd->irq = pdev->irq;
	return 0;
}

#else

static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	return 0;
}

static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif
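
/*
 * Timer callback for the compliance mode recovery quirk: poll each USB3 root
 * port and, if a port is stuck in compliance mode, wake the shared (USB3)
 * roothub so the USB core can issue the warm reset that exits that state.
 */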
static void compliance_mode_recovery(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	u32 temp;
	int i;

	xhci = (struct xhci_hcd *)arg;

	for (i = 0; i < xhci->num_usb3_ports; i++) {
		temp = readl(xhci->usb3_ports[i]);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Compliance mode detected->port %d",
					i + 1);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Attempting compliance mode recovery");
			hcd = xhci->shared_hcd;

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around an issue seen with the SN65LVPE502CP USB3.0 re-driver:
 * ports behind it can get stuck in compliance mode.  As a workaround, a timer
 * periodically polls every USB3 port's link state; ports found in compliance
 * mode are handed to the USB core for a warm reset.  The timer is re-armed
 * until all ports have been observed in U0 at least once.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	setup_timer(&xhci->comp_mode_recovery_timer,
			compliance_mode_recovery, (unsigned long)xhci);
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Compliance mode recovery timer initialized");
}

/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620, Z820 and Z1
 */
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
			strstr(dmi_product_name, "Z620") ||
			strstr(dmi_product_name, "Z820") ||
			strstr(dmi_product_name, "Z1 Workstation"))
		return true;

	return false;
}

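/* Returns nonzero once every USB3 root port has been observed in U0 at least
 * once; the compliance mode recovery timer is no longer re-armed after that.
 */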
static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
}

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts, set up a command ring segment, and create the event ring
 * (one ring for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"QUIRK: Not clearing Link TRB chain bits.");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI doesn't need link TRB QUIRK");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

	/* Initializing Compliance Mode Recovery Data If Needed */
	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	return retval;
}

/*-------------------------------------------------------------------------*/

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;
	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB3 roothub");
	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Start the xHCI host controller running only after the USB 2.0
	 * roothub is setup.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

	ret = xhci_try_enable_msi(hcd);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"ERST deq = 64'h%0lx", (long unsigned int) temp_64);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set the interrupt modulation register");
	temp = readl(&xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	/*
	 * the increment interval is 8 times as much as that defined
	 * in xHCI spec on MTK's controller
	 */
	temp |= (u32) ((xhci->quirks & XHCI_MTK_HOST) ? 20 : 160);
	writel(temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enable interrupts, cmd = 0x%x.", temp);
	writel(temp, &xhci->op_regs->command);

	temp = readl(&xhci->ir_set->irq_pending);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	if (xhci->quirks & XHCI_NEC_HOST) {
		struct xhci_command *command;

		command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
		if (!command)
			return -ENOMEM;
		xhci_queue_vendor_command(xhci, command, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB2 roothub");
	return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	mutex_lock(&xhci->mutex);

	if (!(xhci->xhc_state & XHCI_STATE_HALTED)) {
		spin_lock_irq(&xhci->lock);

		xhci->xhc_state |= XHCI_STATE_HALTED;
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
		xhci_halt(xhci);
		xhci_reset(xhci);

		spin_unlock_irq(&xhci->lock);
	}

	if (!usb_hcd_is_primary_hcd(hcd)) {
		mutex_unlock(&xhci->mutex);
		return;
	}

	xhci_cleanup_msix(xhci);

	/* Deleting Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Disabling event ring interrupts");
	temp = readl(&xhci->op_regs->status);
	writel(temp & ~STS_EINT, &xhci->op_regs->status);
	temp = readl(&xhci->ir_set->irq_pending);
	writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
	xhci_mem_cleanup(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_stop completed - status = %x",
			readl(&xhci->op_regs->status));
	mutex_unlock(&xhci->mutex);
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	/* Workaround for spurious wakeups at shutdown with HSW */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_shutdown completed - status = %x",
			readl(&xhci->op_regs->status));

	/* Yet another workaround for spurious wakeups at shutdown with HSW */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = readl(&xhci->op_regs->command);
	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
	xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
	xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	writel(xhci->s3.command, &xhci->op_regs->command);
	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
	writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
	writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				xhci->cmd_ring->dequeue) &
		(u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting command ring address to 0x%llx",
			(long unsigned long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register.  Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0,
			sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

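/*
 * Clear the port wake-on bits (wake on connect, disconnect and overcurrent)
 * on every USB3 and USB2 root port, so a host suspended with wakeup disabled
 * does not resume on port events.
 */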
static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
{
	int port_index;
	__le32 __iomem **port_array;
	unsigned long flags;
	u32 t1, t2;

	spin_lock_irqsave(&xhci->lock, flags);

	/* disable usb3 ports Wake bits */
	port_index = xhci->num_usb3_ports;
	port_array = xhci->usb3_ports;
	while (port_index--) {
		t1 = readl(port_array[port_index]);
		t1 = xhci_port_state_to_neutral(t1);
		t2 = t1 & ~PORT_WAKE_BITS;
		if (t1 != t2)
			writel(t2, port_array[port_index]);
	}

	/* disable usb2 ports Wake bits */
	port_index = xhci->num_usb2_ports;
	port_array = xhci->usb2_ports;
	while (port_index--) {
		t1 = readl(port_array[port_index]);
		t1 = xhci_port_state_to_neutral(t1);
		t2 = t1 & ~PORT_WAKE_BITS;
		if (t1 != t2)
			writel(t2, port_array[port_index]);
	}

	spin_unlock_irqrestore(&xhci->lock, flags);
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
	int rc = 0;
	unsigned int delay = XHCI_MAX_HALT_USEC;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;

	if (!hcd->state)
		return 0;

	if (hcd->state != HC_STATE_SUSPENDED ||
			xhci->shared_hcd->state != HC_STATE_SUSPENDED)
		return -EINVAL;

	/* Clear root port wake on bits if wakeup not allowed. */
	if (!do_wakeup)
		xhci_disable_port_wake_on_bits(xhci);

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);
	clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	del_timer_sync(&xhci->shared_hcd->rh_timer);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped assuming that port suspend has done */

	/* step 2: clear Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command &= ~CMD_RUN;
	writel(command, &xhci->op_regs->command);

	/* Some chips from Fresco Logic need an extraordinary delay */
	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

	if (xhci_handshake(&xhci->op_regs->status,
			STS_HALT, STS_HALT, delay)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = readl(&xhci->op_regs->command);
	command |= CMD_CSS;
	writel(command, &xhci->op_regs->command);
	if (xhci_handshake(&xhci->op_regs->status,
			STS_SAVE, 0, 10 * 1000)) {
		xhci_warn(xhci, "WARN: xHC save state timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	xhci_msix_sync_irqs(xhci);

	return rc;
}
EXPORT_SYMBOL_GPL(xhci_suspend);

/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions from S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0, status;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval = 0;
	bool comp_timer_running = false;

	if (!hcd->state)
		return 0;

	/* Wait a bit if either of the roothubs need to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		hibernated = true;

	if (!hibernated) {
		/* step 1: restore register */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: set the Controller Restore State (CRS) flag */
		command = readl(&xhci->op_regs->command);
		command |= CMD_CRS;
		writel(command, &xhci->op_regs->command);
		if (xhci_handshake(&xhci->op_regs->status,
				STS_RESTORE, 0, 10 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = readl(&xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {

		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
				!(xhci_all_ports_seen_u0(xhci))) {
			del_timer_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Compliance Mode Recovery Timer deleted!");
		}

		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = readl(&xhci->op_regs->status);
		writel(temp & ~STS_EINT, &xhci->op_regs->status);
		temp = readl(&xhci->ir_set->irq_pending);
		writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			    readl(&xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		comp_timer_running = true;

		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (!retval) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(secondary_hcd);
		}
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command |= CMD_RUN;
	writel(command, &xhci->op_regs->command);
	xhci_handshake(&xhci->op_regs->status, STS_HALT,
			0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints.  Ring the doorbell
	 */
	/* this is done in bus_resume */

	spin_unlock_irq(&xhci->lock);

 done:
	if (retval == 0) {
		/* Resume root hubs only when have pending events. */
		status = readl(&xhci->op_regs->status);
		if (status & STS_EINT) {
			usb_hcd_resume_root_hub(xhci->shared_hcd);
			usb_hcd_resume_root_hub(hcd);
		}
	}

	/*
	 * If system is subject to the Quirk, Compliance Mode Timer needs to
	 * be re-initialized Always after a system resume. Ports are subject
	 * to suffer the Compliance Mode issue again. It doesn't matter if
	 * ports have entered previously to U0 before system's suspension.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
		compliance_mode_recovery_timer_init(xhci);

	/* Re-enable port polling. */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	usb_hcd_poll_rh_status(xhci->shared_hcd);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);

	return retval;
}
EXPORT_SYMBOL_GPL(xhci_resume);
#endif	/* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Find the index for an endpoint given its
 * descriptor.  Use the return value to right shift 1 for the bitmask.
 *
 * Index  = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}

/* The reverse operation to xhci_get_endpoint_index.  Calculate the USB
 * endpoint address from the xHCI endpoint index.
 */
unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
	unsigned int number = DIV_ROUND_UP(ep_index, 2);
	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;

	return direction | number;
}
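
/*
 * Example for xhci_get_endpoint_address(): ep_index 2 -> number 1, IN, i.e.
 * bEndpointAddress 0x81; ep_index 3 -> number 2, OUT, i.e. 0x02.  This is
 * consistent with xhci_get_endpoint_index() above.
 */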

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0
 * is bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0
 * is bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}
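
/*
 * Example for xhci_last_valid_endpoint(): added_ctxs 0b1100 flags endpoint
 * indices 1 and 2 (flag bit n maps to endpoint index n - 1), and
 * fls(0b1100) - 1 = 3 is the last valid context entry, since the slot
 * context occupies bit 0.
 */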

/* Returns 1 if the arguments are OK;
 * returns 0 this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		pr_debug("xHCI %s called with invalid args\n", func);
		return -EINVAL;
	}
	if (!udev->parent) {
		pr_debug("xHCI %s called for root hub\n", func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
					func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with udev and virt_dev that do not match\n",
					func);
			return -EINVAL;
		}
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_command *command;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max Packet Size for ep 0 changed.");
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in usb_device = %d",
				max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in xHCI HW = %d",
				hw_max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Issuing evaluate context command.");

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */

		command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		command->in_ctx = xhci->devs[slot_id]->in_ctx;
		ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
		if (!ctrl_ctx) {
			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
					__func__);
			ret = -ENOMEM;
			goto command_cleanup;
		}
		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);

		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, command->in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, command,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
command_cleanup:
		kfree(command->completion);
		kfree(command);
	}
	return ret;
}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_td *buffer;
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv *urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
	    urb->transfer_buffer_length > 0 &&
	    urb->transfer_flags & URB_ZERO_PACKET &&
	    !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
		size = 2;
	else
		size = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
				size * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
	if (!buffer) {
		kfree(urb_priv);
		return -ENOMEM;
	}

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = buffer;
		buffer++;
	}

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep is transitioning to not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
exit:
	return ret;
dying:
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	ret = -ESHUTDOWN;
free_priv:
	xhci_urb_free_priv(urb_priv);
	urb->hcpriv = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed"
 * from the ring.  Since the ring is a contiguous structure, they can't be
 * physically removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to
 * complete.  It also needs to account for the fact that cancellation is
 * never instantaneous, since the hardware caches ring state.
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_command *command;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = readl(&xhci->op_regs->status);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"HW died, freeing TD.");
		urb_priv = urb->hcpriv;
		for (i = urb_priv->td_cnt;
		     i < urb_priv->length && xhci->devs[urb->dev->slot_id];
		     i++) {
			td = urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
		xhci_urb_free_priv(urb_priv);
		return ret;
	}
	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Ep 0x%x: URB %p to be canceled on non-responsive xHCI host.",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists.  We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */
		goto done;
	}

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	urb_priv = urb->hcpriv;
	i = urb_priv->td_cnt;
	if (i < urb_priv->length)
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Cancel URB %p, dev %s, ep 0x%x, starting at offset 0x%llx",
				urb, urb->dev->devpath,
				urb->ep->desc.bEndpointAddress,
				(unsigned long long) xhci_trb_virt_to_dma(
					urb_priv->td[i]->start_seg,
					urb_priv->td[i]->first_trb));

	for (; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
		if (!command) {
			ret = -ENOMEM;
			goto done;
		}
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
				ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint()
 * will add the endpoint to the schedule with possibly new parameters denoted
 * by a different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
	     cpu_to_le32(EP_STATE_DISABLED)) ||
	    le32_to_cpu(ctrl_ctx->drop_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		/* Do not warn when called after a usb_device_reset */
		if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
			xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
					__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	if (xhci->quirks & XHCI_MTK_HOST)
		xhci_mtk_drop_ep_quirk(hcd, udev, ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags);
	return 0;
}

/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint()
 * will add the endpoint to the schedule with possibly new parameters denoted
 * by a different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx;
	unsigned int ep_index;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	u32 new_add_flags, new_drop_flags;
	struct xhci_virt_device *virt_dev;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate context command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	virt_dev = xhci->devs[udev->slot_id];
	in_ctx = virt_dev->in_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	/* If this endpoint is already in use, and the upper layers are trying
	 * to add it again without dropping it, reject the addition.
	 */
	if (virt_dev->eps[ep_index].ring &&
			!(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
		xhci_warn(xhci, "Trying to add endpoint 0x%x without dropping it.\n",
				(unsigned int) ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	if (xhci->quirks & XHCI_MTK_HOST) {
		ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
		if (ret < 0) {
			xhci_free_or_cache_endpoint_ring(xhci,
					virt_dev, ep_index);
			return ret;
		}
	}

	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors.  We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags);
	return 0;
}

static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return;
	}

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
	for (i = 1; i < 31; ++i) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}

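/*
 * Translate a Configure Endpoint command completion code into an errno for
 * the caller and log a human-readable reason; the COMP_* codes come from the
 * xHCI specification's command completion codes.
 */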
static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;

	switch (*cmd_status) {
	case COMP_CMD_ABORT:
	case COMP_CMD_STOP:
		xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
		ret = -ETIME;
		break;
	case COMP_ENOMEM:
		dev_warn(&udev->dev,
			 "Not enough host controller resources for new device state.\n");
		ret = -ENOMEM;
		/* FIXME: can we allocate more resources for the HC? */
		break;
	case COMP_BW_ERR:
	case COMP_2ND_BW_ERR:
		dev_warn(&udev->dev,
			 "Not enough bandwidth for new device state.\n");
		ret = -ENOSPC;
		/* FIXME: can we go back to the old state? */
		break;
	case COMP_TRB_ERR:
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, add flag = 1, and endpoint is not disabled.\n");
		ret = -EINVAL;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev,
			 "ERROR: Incompatible device for endpoint configure command.\n");
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Successful Endpoint Configure command");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
				*cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;
	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

	switch (*cmd_status) {
	case COMP_CMD_ABORT:
	case COMP_CMD_STOP:
		xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
		ret = -ETIME;
		break;
	case COMP_EINVAL:
		dev_warn(&udev->dev,
			 "WARN: xHCI driver setup invalid evaluate context command.\n");
		ret = -EINVAL;
		break;
	case COMP_EBADSLT:
		dev_warn(&udev->dev,
			"WARN: slot not enabled for evaluate context command.\n");
		ret = -EINVAL;
		break;
	case COMP_CTX_STATE:
		dev_warn(&udev->dev,
			"WARN: invalid context state for evaluate context command.\n");
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
		ret = -EINVAL;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev,
			"ERROR: Incompatible device for evaluate context command.\n");
		ret = -ENODEV;
		break;
	case COMP_MEL_ERR:
		/* Max Exit Latency too large error */
		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Successful evaluate context command");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
				*cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 valid_add_flags;
	u32 valid_drop_flags;

	/* Ignore the slot flag (bit 0), and the default control endpoint flag
	 * (bit 1).  The default control endpoint is added during the Address
	 * Device command and is never removed until the slot is disabled.
	 */
	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	/* Use hweight32 to count the number of ones in the add flags, or
	 * number of endpoints added.  Don't count endpoints that are changed
	 * (both added and dropped).
	 */
	return hweight32(valid_add_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}

static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 valid_add_flags;
	u32 valid_drop_flags;

	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	return hweight32(valid_drop_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}

/*
 * We need to reserve the new number of endpoints before the configure endpoint
 * command completes.  We can't subtract the dropped endpoints from the number
 * of active endpoints until the command completes because we can oversubscribe
 * the host in this case:
 *
 *  - the first configure endpoint command drops more endpoints than it adds
 *  - a second configure endpoint command that adds more endpoints is queued
 *  - the first configure endpoint command fails, so the config is unchanged
 *  - the second command may succeed, even though config 1 didn't change
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 added_eps;

	added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Not enough ep ctxs: %u active, need to add %u, limit is %u.",
				xhci->num_active_eps, added_eps,
				xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += added_eps;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Adding %u ep ctxs, %u now active.", added_eps,
			xhci->num_active_eps);
	return 0;
}

/*
 * The configure endpoint was failed by the xHC for some other reason, so we
 * need to revert the resources that failed configuration would have used.
 *
 * Must be called with xhci->lock held.
 */
static void xhci_free_host_resources(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 num_failed_eps;

	num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
	xhci->num_active_eps -= num_failed_eps;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Removing %u failed ep ctxs, %u now active.",
			num_failed_eps,
			xhci->num_active_eps);
}

/*
 * Now that the command has completed, clean up the active endpoint count by
 * subtracting out the endpoints that were dropped (but not changed).
 *
 * Must be called with xhci->lock held.
 */
static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 num_dropped_eps;

	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Removing %u dropped ep ctxs, %u now active.",
				num_dropped_eps,
				xhci->num_active_eps);
}

static unsigned int xhci_get_block_size(struct usb_device *udev)
{
	switch (udev->speed) {
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
		return FS_BLOCK;
	case USB_SPEED_HIGH:
		return HS_BLOCK;
	case USB_SPEED_SUPER:
	case USB_SPEED_SUPER_PLUS:
		return SS_BLOCK;
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
	default:
		/* Should never happen */
		return 1;
	}
}

static unsigned int
xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
{
	if (interval_bw->overhead[LS_OVERHEAD_TYPE])
		return LS_OVERHEAD;
	if (interval_bw->overhead[FS_OVERHEAD_TYPE])
		return FS_OVERHEAD;
	return HS_OVERHEAD;
}

/* If we are changing a LS/FS device under a HS hub,
 * make sure (if we are activating a new TT) that the HS bus has enough
 * bandwidth for this new TT.
 */
static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	struct xhci_interval_bw_table *bw_table;
	struct xhci_tt_bw_info *tt_info;

	/* Find the bandwidth table for the root port this TT is attached to. */
	bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
	tt_info = virt_dev->tt_info;
	/* If this TT already had active endpoints, the bandwidth for this TT
	 * has already been added.  Removing all periodic endpoints (and thus
	 * making the TT inactive) will only decrease the bandwidth used.
	 */
	if (old_active_eps)
		return 0;
	if (old_active_eps == 0 && tt_info->active_eps != 0) {
		if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
			return -ENOMEM;
		return 0;
	}
	/* Not sure why we would have no new active endpoints...
	 *
	 * Maybe because of an Evaluate Context change for a hub update or a
	 * control endpoint 0 max packet size change?
	 * FIXME: skip the bandwidth calculation in that case.
	 */
	return 0;
}

static int xhci_check_ss_bw(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev)
{
	unsigned int bw_reserved;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
	if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
		return -ENOMEM;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
	if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
		return -ENOMEM;

	return 0;
}

/*
 * This algorithm is a very conservative estimate of the worst-case scheduling
 * scenario for any one interval.  The hardware dynamically schedules the
 * packets, so we can't tell which microframe could be the limiting factor in
 * the bandwidth scheduling.  This only takes into account periodic endpoints.
 *
 * Obviously, we can't solve an NP complete problem to find the minimum worst
 * case scenario.  Instead, we come up with an estimate that is no less than
 * the worst case bandwidth used for any one microframe, but may be an
 * over-estimate.
 *
 * We walk the requirements for each endpoint by interval, starting with the
 * smallest interval, and place packets in the schedule where there is only one
 * possible way to schedule packets for that interval.  In order to simplify
 * this algorithm, we record the largest max packet size for each interval, and
 * assume all packets will be that size.
 *
 * For interval 0, we obviously must schedule all packets for each interval.
 * The bandwidth for interval 0 is just the amount of data to be transmitted
 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
 * the number of packets).
 *
 * For interval 1, we have two possible microframes to schedule those packets
 * in.  For this algorithm, if we can schedule the same number of packets for
 * each possible scheduling opportunity (each microframe), we will do so.  The
 * remaining number of packets will be saved to be transmitted in the gaps in
 * the next interval's scheduling sequence.
 *
 * As we move those remaining packets to be scheduled with interval 2 packets,
 * we have to double the number of remaining packets to transmit.  This is
 * because the intervals are actually powers of 2, and we would be transmitting
 * the previous interval's packets twice in this interval.  We also have to be
 * sure that when we look at the largest max packet size for this interval, we
 * also look at the largest max packet size for the remaining packets and take
 * the greater of the two.
 *
 * The algorithm continues to evenly distribute packets in each scheduling
 * opportunity, and push the remaining packets out, until we get to the last
 * interval.  Then those packets are added to the bandwidth used for the last
 * interval.
 */
static int xhci_check_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	unsigned int bw_reserved;
	unsigned int max_bandwidth;
	unsigned int bw_used;
	unsigned int block_size;
	struct xhci_interval_bw_table *bw_table;
	unsigned int packet_size = 0;
	unsigned int overhead = 0;
	unsigned int packets_transmitted = 0;
	unsigned int packets_remaining = 0;
	unsigned int i;

	if (virt_dev->udev->speed >= USB_SPEED_SUPER)
		return xhci_check_ss_bw(xhci, virt_dev);

	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
		max_bandwidth = HS_BW_LIMIT;
		/* Convert percent of bus BW reserved to blocks reserved */
		bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
	} else {
		max_bandwidth = FS_BW_LIMIT;
		bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
	}

	bw_table = virt_dev->bw_table;

	/* We need to translate the max packet size and max ESIT payloads into
	 * the units the hardware uses.
	 */
	block_size = xhci_get_block_size(virt_dev->udev);

	/* If we are manipulating a LS/FS device under a HS hub, double check
	 * that the HS bus has enough bandwidth if we are activating a new TT.
	 */
	if (virt_dev->tt_info) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Recalculating BW for rootport %u",
				virt_dev->real_port);
		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
			xhci_warn(xhci, "Not enough bandwidth on HS bus for newly activated TT.\n");
			return -ENOMEM;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Recalculating BW for TT slot %u port %u",
				virt_dev->tt_info->slot_id,
				virt_dev->tt_info->ttport);
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Recalculating BW for rootport %u",
				virt_dev->real_port);
	}

	/* Add in how much bandwidth will be used for interval zero, or the
	 * rounded max ESIT payload + number of packets * largest overhead.
	 */
	bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
		bw_table->interval_bw[0].num_packets *
		xhci_get_largest_overhead(&bw_table->interval_bw[0]);

	for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
		unsigned int bw_added;
		unsigned int largest_mps;
		unsigned int interval_overhead;

		/*
		 * How many packets could we transmit in this interval?
		 * If packets didn't fit in the previous interval, we will need
		 * to transmit that many packets twice within this interval.
		 */
		packets_remaining = 2 * packets_remaining +
			bw_table->interval_bw[i].num_packets;

		/* Find the largest max packet size of this or the previous
		 * interval.
		 */
		if (list_empty(&bw_table->interval_bw[i].endpoints))
			largest_mps = 0;
		else {
			struct xhci_virt_ep *virt_ep;
			struct list_head *ep_entry;

			ep_entry = bw_table->interval_bw[i].endpoints.next;
			virt_ep = list_entry(ep_entry,
					struct xhci_virt_ep, bw_endpoint_list);
			/* Convert to blocks, rounding up */
			largest_mps = DIV_ROUND_UP(
					virt_ep->bw_info.max_packet_size,
					block_size);
		}
		if (largest_mps > packet_size)
			packet_size = largest_mps;

		/* Use the larger overhead of this or the previous interval. */
		interval_overhead = xhci_get_largest_overhead(
				&bw_table->interval_bw[i]);
		if (interval_overhead > overhead)
			overhead = interval_overhead;

		/* How many packets can we evenly distribute across
		 * (1 << (i + 1)) possible scheduling opportunities?
		 */
		packets_transmitted = packets_remaining >> (i + 1);

		/* Add in the bandwidth used for those scheduled packets */
		bw_added = packets_transmitted * (overhead + packet_size);

		/* How many packets do we have remaining to transmit? */
		packets_remaining = packets_remaining % (1 << (i + 1));

		/* If we've transmitted all packets, don't carry over the
		 * largest packet size.
		 */
		if (packets_remaining == 0) {
			packet_size = 0;
			overhead = 0;
		} else if (packets_transmitted > 0) {
			/* Otherwise if we do have remaining packets, and we've
			 * transmitted some packets in this interval, take the
			 * largest max packet size from endpoints with this
			 * interval.
			 */
			packet_size = largest_mps;
			overhead = interval_overhead;
		}
		/* Otherwise carry over packet_size and overhead from the last
		 * time we had a remainder.
		 */
		bw_used += bw_added;
		if (bw_used > max_bandwidth) {
			xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
					bw_used, max_bandwidth);
			return -ENOMEM;
		}
	}
	/*
	 * Ok, we know we have some packets left over after even-handedly
	 * scheduling interval 15.  We don't know which microframes they will
	 * fit into, so we over-schedule and say they will be scheduled every
	 * microframe.
	 */
	if (packets_remaining > 0)
		bw_used += overhead + packet_size;

	if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
		unsigned int port_index = virt_dev->real_port - 1;

		/* OK, we're manipulating a HS device attached to a
		 * root port bandwidth domain.  Include the number of active TTs
		 * in the bandwidth used.
		 */
		bw_used += TT_HS_OVERHEAD *
			xhci->rh_bw[port_index].num_active_tts;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Final bandwidth: %u, Limit: %u, Reserved: %u, Available: %u percent",
			bw_used, max_bandwidth, bw_reserved,
			(max_bandwidth - bw_used - bw_reserved) * 100 /
			max_bandwidth);

	bw_used += bw_reserved;
	if (bw_used > max_bandwidth) {
		xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
				bw_used, max_bandwidth);
		return -ENOMEM;
	}

	bw_table->bw_used = bw_used;
	return 0;
}
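
/*
 * Worked example for the interval loop above (illustrative numbers): with 3
 * packets at interval 1 and 1 packet at interval 2, interval 1 schedules
 * 3 >> 2 = 0 packets evenly and carries 3 % 4 = 3; interval 2 then sees
 * 2 * 3 + 1 = 7 packets, schedules 7 >> 3 = 0, and carries 7 % 8 = 7 onward.
 */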

static bool xhci_is_async_ep(unsigned int ep_type)
{
	return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
					ep_type != ISOC_IN_EP &&
					ep_type != INT_IN_EP);
}

static bool xhci_is_sync_in_ep(unsigned int ep_type)
{
	return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
}

static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
{
	unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);

	if (ep_bw->ep_interval == 0)
		return SS_OVERHEAD_BURST +
			(ep_bw->mult * ep_bw->num_packets *
					(SS_OVERHEAD + mps));
	return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
				(SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
			1 << ep_bw->ep_interval);
}
2363
2364void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2365 struct xhci_bw_info *ep_bw,
2366 struct xhci_interval_bw_table *bw_table,
2367 struct usb_device *udev,
2368 struct xhci_virt_ep *virt_ep,
2369 struct xhci_tt_bw_info *tt_info)
2370{
2371 struct xhci_interval_bw *interval_bw;
2372 int normalized_interval;
2373
2374 if (xhci_is_async_ep(ep_bw->type))
2375 return;
2376
2377 if (udev->speed >= USB_SPEED_SUPER) {
2378 if (xhci_is_sync_in_ep(ep_bw->type))
2379 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2380 xhci_get_ss_bw_consumed(ep_bw);
2381 else
2382 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2383 xhci_get_ss_bw_consumed(ep_bw);
2384 return;
2385 }
2386
2387
2388
2389
2390 if (list_empty(&virt_ep->bw_endpoint_list))
2391 return;
2392
2393
2394
2395 if (udev->speed == USB_SPEED_HIGH)
2396 normalized_interval = ep_bw->ep_interval;
2397 else
2398 normalized_interval = ep_bw->ep_interval - 3;
2399
2400 if (normalized_interval == 0)
2401 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2402 interval_bw = &bw_table->interval_bw[normalized_interval];
2403 interval_bw->num_packets -= ep_bw->num_packets;
2404 switch (udev->speed) {
2405 case USB_SPEED_LOW:
2406 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2407 break;
2408 case USB_SPEED_FULL:
2409 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2410 break;
2411 case USB_SPEED_HIGH:
2412 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2413 break;
2414 case USB_SPEED_SUPER:
2415 case USB_SPEED_SUPER_PLUS:
2416 case USB_SPEED_UNKNOWN:
2417 case USB_SPEED_WIRELESS:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the interval table.
		 */
2421 return;
2422 }
2423 if (tt_info)
2424 tt_info->active_eps -= 1;
2425 list_del_init(&virt_ep->bw_endpoint_list);
2426}
2427
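/*
 * Mirror image of xhci_drop_ep_from_interval_table(): charge the endpoint's
 * worst-case bandwidth numbers to the tables and insert it into the
 * per-interval endpoint list, which is kept sorted by descending max packet
 * size so the bandwidth checker can read the largest value off the head of
 * the list.
 */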
2428static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2429 struct xhci_bw_info *ep_bw,
2430 struct xhci_interval_bw_table *bw_table,
2431 struct usb_device *udev,
2432 struct xhci_virt_ep *virt_ep,
2433 struct xhci_tt_bw_info *tt_info)
2434{
2435 struct xhci_interval_bw *interval_bw;
2436 struct xhci_virt_ep *smaller_ep;
2437 int normalized_interval;
2438
2439 if (xhci_is_async_ep(ep_bw->type))
2440 return;
2441
	if (udev->speed >= USB_SPEED_SUPER) {
2443 if (xhci_is_sync_in_ep(ep_bw->type))
2444 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2445 xhci_get_ss_bw_consumed(ep_bw);
2446 else
2447 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2448 xhci_get_ss_bw_consumed(ep_bw);
2449 return;
2450 }
2451
	/*
	 * HS intervals are in microframes; LS/FS intervals are in frames,
	 * which are eight microframes (2^3), hence the -3 below.
	 */
2455 if (udev->speed == USB_SPEED_HIGH)
2456 normalized_interval = ep_bw->ep_interval;
2457 else
2458 normalized_interval = ep_bw->ep_interval - 3;
2459
2460 if (normalized_interval == 0)
2461 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2462 interval_bw = &bw_table->interval_bw[normalized_interval];
2463 interval_bw->num_packets += ep_bw->num_packets;
2464 switch (udev->speed) {
2465 case USB_SPEED_LOW:
2466 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2467 break;
2468 case USB_SPEED_FULL:
2469 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2470 break;
2471 case USB_SPEED_HIGH:
2472 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2473 break;
2474 case USB_SPEED_SUPER:
2475 case USB_SPEED_SUPER_PLUS:
2476 case USB_SPEED_UNKNOWN:
2477 case USB_SPEED_WIRELESS:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the interval table.
		 */
2481 return;
2482 }
2483
2484 if (tt_info)
2485 tt_info->active_eps += 1;

	/* Keep this list sorted by descending max packet size */
2487 list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2488 bw_endpoint_list) {
2489 if (ep_bw->max_packet_size >=
2490 smaller_ep->bw_info.max_packet_size) {
			/* Insert before the endpoint with the smaller MPS */
2492 list_add_tail(&virt_ep->bw_endpoint_list,
2493 &smaller_ep->bw_endpoint_list);
2494 return;
2495 }
2496 }
2497
2498 list_add_tail(&virt_ep->bw_endpoint_list,
2499 &interval_bw->endpoints);
2500}
2501
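/*
 * A transaction translator (TT) consumes high speed bus time whenever it
 * has at least one active periodic endpoint.  Watch for 0 <-> non-zero
 * transitions in the TT's active endpoint count and credit or charge
 * TT_HS_OVERHEAD against the root port's bandwidth table accordingly.
 */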
2502void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2503 struct xhci_virt_device *virt_dev,
2504 int old_active_eps)
2505{
2506 struct xhci_root_port_bw_info *rh_bw_info;
2507 if (!virt_dev->tt_info)
2508 return;
2509
2510 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2511 if (old_active_eps == 0 &&
2512 virt_dev->tt_info->active_eps != 0) {
2513 rh_bw_info->num_active_tts += 1;
2514 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2515 } else if (old_active_eps != 0 &&
2516 virt_dev->tt_info->active_eps == 0) {
2517 rh_bw_info->num_active_tts -= 1;
2518 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2519 }
2520}
2521
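/*
 * Speculatively apply the add and drop flags from the input context to the
 * bandwidth tables, then run the bandwidth check.  If the check fails,
 * every change is rolled back from the copies saved in ep_bw_info[] before
 * returning -ENOMEM, so the tables always describe a schedulable
 * configuration.
 */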
2522static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2523 struct xhci_virt_device *virt_dev,
2524 struct xhci_container_ctx *in_ctx)
2525{
2526 struct xhci_bw_info ep_bw_info[31];
2527 int i;
2528 struct xhci_input_control_ctx *ctrl_ctx;
2529 int old_active_eps = 0;
2530
2531 if (virt_dev->tt_info)
2532 old_active_eps = virt_dev->tt_info->active_eps;
2533
2534 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2535 if (!ctrl_ctx) {
2536 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2537 __func__);
2538 return -ENOMEM;
2539 }
2540
2541 for (i = 0; i < 31; i++) {
2542 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2543 continue;

		/* Save a copy of the BW info in case we have to revert it */
2546 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2547 sizeof(ep_bw_info[i]));

		/* Drop the endpoint from the interval table if it is being
		 * dropped or changed.
		 */
2551 if (EP_IS_DROPPED(ctrl_ctx, i))
2552 xhci_drop_ep_from_interval_table(xhci,
2553 &virt_dev->eps[i].bw_info,
2554 virt_dev->bw_table,
2555 virt_dev->udev,
2556 &virt_dev->eps[i],
2557 virt_dev->tt_info);
2558 }
2559
2560 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2561 for (i = 0; i < 31; i++) {
		/* Add any changed or added endpoints to the interval table */
2563 if (EP_IS_ADDED(ctrl_ctx, i))
2564 xhci_add_ep_to_interval_table(xhci,
2565 &virt_dev->eps[i].bw_info,
2566 virt_dev->bw_table,
2567 virt_dev->udev,
2568 &virt_dev->eps[i],
2569 virt_dev->tt_info);
2570 }
2571
2572 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
		/* This configuration fits in the remaining bandwidth, so
		 * update the number of active TTs.
		 */
2576 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2577 return 0;
2578 }

	/* Not enough bandwidth, so revert to the saved endpoint BW info */
2581 for (i = 0; i < 31; i++) {
2582 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2583 continue;

		/* Drop the new copies of any added or changed endpoints from
		 * the interval table.
		 */
2588 if (EP_IS_ADDED(ctrl_ctx, i)) {
2589 xhci_drop_ep_from_interval_table(xhci,
2590 &virt_dev->eps[i].bw_info,
2591 virt_dev->bw_table,
2592 virt_dev->udev,
2593 &virt_dev->eps[i],
2594 virt_dev->tt_info);
2595 }

		/* Revert the endpoint back to its old information */
2597 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2598 sizeof(ep_bw_info[i]));

		/* Add any changed or dropped endpoints back into the table */
2600 if (EP_IS_DROPPED(ctrl_ctx, i))
2601 xhci_add_ep_to_interval_table(xhci,
2602 &virt_dev->eps[i].bw_info,
2603 virt_dev->bw_table,
2604 virt_dev->udev,
2605 &virt_dev->eps[i],
2606 virt_dev->tt_info);
2607 }
2608 return -ENOMEM;
2609}
2610
/* Issue a configure endpoint command or evaluate context command
 * and wait for it to finish.
 */
2615static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2616 struct usb_device *udev,
2617 struct xhci_command *command,
2618 bool ctx_change, bool must_succeed)
2619{
2620 int ret;
2621 unsigned long flags;
2622 struct xhci_input_control_ctx *ctrl_ctx;
2623 struct xhci_virt_device *virt_dev;
2624
2625 if (!command)
2626 return -EINVAL;
2627
2628 spin_lock_irqsave(&xhci->lock, flags);
2629 virt_dev = xhci->devs[udev->slot_id];
2630
2631 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2632 if (!ctrl_ctx) {
2633 spin_unlock_irqrestore(&xhci->lock, flags);
2634 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2635 __func__);
2636 return -ENOMEM;
2637 }
2638
2639 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2640 xhci_reserve_host_resources(xhci, ctrl_ctx)) {
2641 spin_unlock_irqrestore(&xhci->lock, flags);
2642 xhci_warn(xhci, "Not enough host resources, "
2643 "active endpoint contexts = %u\n",
2644 xhci->num_active_eps);
2645 return -ENOMEM;
2646 }
2647 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2648 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2649 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2650 xhci_free_host_resources(xhci, ctrl_ctx);
2651 spin_unlock_irqrestore(&xhci->lock, flags);
2652 xhci_warn(xhci, "Not enough bandwidth\n");
2653 return -ENOMEM;
2654 }
2655
2656 if (!ctx_change)
2657 ret = xhci_queue_configure_endpoint(xhci, command,
2658 command->in_ctx->dma,
2659 udev->slot_id, must_succeed);
2660 else
2661 ret = xhci_queue_evaluate_context(xhci, command,
2662 command->in_ctx->dma,
2663 udev->slot_id, must_succeed);
2664 if (ret < 0) {
2665 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2666 xhci_free_host_resources(xhci, ctrl_ctx);
2667 spin_unlock_irqrestore(&xhci->lock, flags);
2668 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2669 "FIXME allocate a new ring segment");
2670 return -ENOMEM;
2671 }
2672 xhci_ring_cmd_db(xhci);
2673 spin_unlock_irqrestore(&xhci->lock, flags);
2674
2675
2676 wait_for_completion(command->completion);
2677
2678 if (!ctx_change)
2679 ret = xhci_configure_endpoint_result(xhci, udev,
2680 &command->status);
2681 else
2682 ret = xhci_evaluate_context_result(xhci, udev,
2683 &command->status);
2684
2685 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2686 spin_lock_irqsave(&xhci->lock, flags);
		/* If the command failed, remove the reserved resources.
		 * Otherwise, clean up the estimate to include dropped eps.
		 */
2690 if (ret)
2691 xhci_free_host_resources(xhci, ctrl_ctx);
2692 else
2693 xhci_finish_resource_reservation(xhci, ctrl_ctx);
2694 spin_unlock_irqrestore(&xhci->lock, flags);
2695 }
2696 return ret;
2697}
2698
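/*
 * A set_interface request invalidates any stream setup on the endpoints it
 * touches, so warn, free the stream info, and clear the stream state.
 */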
2699static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
2700 struct xhci_virt_device *vdev, int i)
2701{
2702 struct xhci_virt_ep *ep = &vdev->eps[i];
2703
2704 if (ep->ep_state & EP_HAS_STREAMS) {
2705 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
2706 xhci_get_endpoint_address(i));
2707 xhci_free_stream_info(xhci, ep->stream_info);
2708 ep->stream_info = NULL;
2709 ep->ep_state &= ~EP_HAS_STREAMS;
2710 }
2711}
2712
/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint().  If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 *
 * Since we are in the middle of changing either configuration or
 * installing a new alt setting, the USB core won't allow URBs to be
 * enqueued for any endpoint on the old config or interface.  Nothing
 * else should be touching the xhci->devs[slot_id] structure, so we
 * don't need to take the xhci->lock for manipulating that.
 */
2723int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2724{
2725 int i;
2726 int ret = 0;
2727 struct xhci_hcd *xhci;
2728 struct xhci_virt_device *virt_dev;
2729 struct xhci_input_control_ctx *ctrl_ctx;
2730 struct xhci_slot_ctx *slot_ctx;
2731 struct xhci_command *command;
2732
2733 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2734 if (ret <= 0)
2735 return ret;
2736 xhci = hcd_to_xhci(hcd);
2737 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
2738 (xhci->xhc_state & XHCI_STATE_REMOVING))
2739 return -ENODEV;
2740
2741 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2742 virt_dev = xhci->devs[udev->slot_id];
2743
2744 command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
2745 if (!command)
2746 return -ENOMEM;
2747
2748 command->in_ctx = virt_dev->in_ctx;

	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
2751 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2752 if (!ctrl_ctx) {
2753 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2754 __func__);
2755 ret = -ENOMEM;
2756 goto command_cleanup;
2757 }
2758 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2759 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2760 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
2761
	/* Don't issue the command if there are no endpoints to update */
2763 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2764 ctrl_ctx->drop_flags == 0) {
2765 ret = 0;
2766 goto command_cleanup;
2767 }

	/* Fix up the Context Entries field so it covers the last valid ep */
2769 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2770 for (i = 31; i >= 1; i--) {
2771 __le32 le32 = cpu_to_le32(BIT(i));
2772
2773 if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
2774 || (ctrl_ctx->add_flags & le32) || i == 1) {
2775 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2776 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
2777 break;
2778 }
2779 }
2780 xhci_dbg(xhci, "New Input Control Context:\n");
2781 xhci_dbg_ctx(xhci, virt_dev->in_ctx,
2782 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2783
2784 ret = xhci_configure_endpoint(xhci, udev, command,
2785 false, false);
2786 if (ret)
		/* Callee should call reset_bandwidth() */
2788 goto command_cleanup;
2789
2790 xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
2791 xhci_dbg_ctx(xhci, virt_dev->out_ctx,
2792 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));

	/* Free any rings that were dropped, but not changed */
2795 for (i = 1; i < 31; ++i) {
2796 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2797 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
2798 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2799 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2800 }
2801 }
2802 xhci_zero_in_ctx(xhci, virt_dev);
2803
	/*
	 * Install any rings for completely new endpoints or changed endpoints,
	 * and free or cache any old rings from changed endpoints.
	 */
2807 for (i = 1; i < 31; ++i) {
2808 if (!virt_dev->eps[i].new_ring)
2809 continue;

		/* Only free or cache the old ring if one exists; it may not
		 * if this is the first time the endpoint is being added.
		 */
2813 if (virt_dev->eps[i].ring) {
2814 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2815 }
2816 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2817 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2818 virt_dev->eps[i].new_ring = NULL;
2819 }
2820command_cleanup:
2821 kfree(command->completion);
2822 kfree(command);
2823
2824 return ret;
2825}
2826
2827void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2828{
2829 struct xhci_hcd *xhci;
2830 struct xhci_virt_device *virt_dev;
2831 int i, ret;
2832
2833 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2834 if (ret <= 0)
2835 return;
2836 xhci = hcd_to_xhci(hcd);
2837
2838 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2839 virt_dev = xhci->devs[udev->slot_id];
2840
2841 for (i = 0; i < 31; ++i) {
2842 if (virt_dev->eps[i].new_ring) {
2843 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
2844 virt_dev->eps[i].new_ring = NULL;
2845 }
2846 }
2847 xhci_zero_in_ctx(xhci, virt_dev);
2848}
2849
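/*
 * Fill in the input control context's add and drop flags and copy the slot
 * context over from the output context, so that a configure endpoint
 * command updates exactly the endpoints named in the flags.
 */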
2850static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
2851 struct xhci_container_ctx *in_ctx,
2852 struct xhci_container_ctx *out_ctx,
2853 struct xhci_input_control_ctx *ctrl_ctx,
2854 u32 add_flags, u32 drop_flags)
2855{
2856 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
2857 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
2858 xhci_slot_copy(xhci, in_ctx, out_ctx);
2859 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2860
2861 xhci_dbg(xhci, "Input Context:\n");
2862 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
2863}
2864
2865static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
2866 unsigned int slot_id, unsigned int ep_index,
2867 struct xhci_dequeue_state *deq_state)
2868{
2869 struct xhci_input_control_ctx *ctrl_ctx;
2870 struct xhci_container_ctx *in_ctx;
2871 struct xhci_ep_ctx *ep_ctx;
2872 u32 added_ctxs;
2873 dma_addr_t addr;
2874
2875 in_ctx = xhci->devs[slot_id]->in_ctx;
2876 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2877 if (!ctrl_ctx) {
2878 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2879 __func__);
2880 return;
2881 }
2882
2883 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
2884 xhci->devs[slot_id]->out_ctx, ep_index);
2885 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
2886 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
2887 deq_state->new_deq_ptr);
2888 if (addr == 0) {
2889 xhci_warn(xhci, "WARN Cannot submit config ep after "
2890 "reset ep command\n");
2891 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
2892 deq_state->new_deq_seg,
2893 deq_state->new_deq_ptr);
2894 return;
2895 }
2896 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
2897
2898 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
2899 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
2900 xhci->devs[slot_id]->out_ctx, ctrl_ctx,
2901 added_ctxs, added_ctxs);
2902}
2903
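/*
 * Clean up a stalled endpoint's ring: find a new dequeue state past the
 * offending TD and hand it to the hardware, either with a Set TR Dequeue
 * Pointer command or through the reset endpoint quirk path below.
 */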
2904void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2905 unsigned int ep_index, struct xhci_td *td)
2906{
2907 struct xhci_dequeue_state deq_state;
2908 struct xhci_virt_ep *ep;
2909 struct usb_device *udev = td->urb->dev;
2910
2911 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2912 "Cleaning up stalled endpoint ring");
2913 ep = &xhci->devs[udev->slot_id]->eps[ep_index];

	/* We need to move the HW's dequeue pointer past this TD,
	 * or it will attempt to resend it on the next doorbell ring.
	 */
2917 xhci_find_new_dequeue_state(xhci, udev->slot_id,
2918 ep_index, ep->stopped_stream, td, &deq_state);
2919
2920 if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
2921 return;

	/* Hosts without the reset endpoint quirk can take the new dequeue
	 * state directly; quirky hosts stash it in the input context for a
	 * later configure endpoint command instead.
	 */
2926 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
2927 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2928 "Queueing new dequeue state");
2929 xhci_queue_new_dequeue_state(xhci, udev->slot_id,
2930 ep_index, ep->stopped_stream, &deq_state);
2931 } else {
		/* Better hope no one uses the input context between now and the
		 * reset endpoint completion!
		 * XXX: No idea how this hardware will react when stream rings
		 * are enabled.
		 */
2937 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2938 "Setting up input context for "
2939 "configure endpoint command");
2940 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
2941 ep_index, &deq_state);
2942 }
2943}
2944
/* Deal with stalled endpoints.  The core should have sent the control message
 * to clear the halt condition.  However, we need to make the xHCI hardware
 * reset its sequence number, since a device will expect a sequence number of
 * zero after the halt condition is cleared.
 * Context: in_interrupt
 */
2953void xhci_endpoint_reset(struct usb_hcd *hcd,
2954 struct usb_host_endpoint *ep)
2955{
2956 struct xhci_hcd *xhci;
2957
2958 xhci = hcd_to_xhci(hcd);

	/*
	 * We might need to implement the config ep cmd in xhci 4.8.1 note:
	 * The Reset Endpoint Command may only be issued to endpoints in the
	 * Halted state.  If software wishes to reset the Data Toggle or
	 * Sequence Number of an endpoint that isn't in the Halted state, then
	 * software may issue a Configure Endpoint Command with the Drop and
	 * Add bits set for the target endpoint that is in the Stopped state.
	 */
2970 xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n",
2971 ep->desc.bEndpointAddress);
2972}
2973
2974static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
2975 struct usb_device *udev, struct usb_host_endpoint *ep,
2976 unsigned int slot_id)
2977{
2978 int ret;
2979 unsigned int ep_index;
2980 unsigned int ep_state;
2981
2982 if (!ep)
2983 return -EINVAL;
2984 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
2985 if (ret <= 0)
2986 return -EINVAL;
2987 if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
2988 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
2989 " descriptor for ep 0x%x does not support streams\n",
2990 ep->desc.bEndpointAddress);
2991 return -EINVAL;
2992 }
2993
2994 ep_index = xhci_get_endpoint_index(&ep->desc);
2995 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
2996 if (ep_state & EP_HAS_STREAMS ||
2997 ep_state & EP_GETTING_STREAMS) {
2998 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
2999 "already has streams set up.\n",
3000 ep->desc.bEndpointAddress);
3001 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
3002 "dynamic stream context array reallocation.\n");
3003 return -EINVAL;
3004 }
3005 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
3006 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
3007 "endpoint 0x%x; URBs are pending.\n",
3008 ep->desc.bEndpointAddress);
3009 return -EINVAL;
3010 }
3011 return 0;
3012}
3013
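/*
 * Round the requested number of streams up to the power-of-two stream
 * context array size the hardware requires, then clamp both values to what
 * the controller advertises it can support.
 */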
3014static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
3015 unsigned int *num_streams, unsigned int *num_stream_ctxs)
3016{
3017 unsigned int max_streams;

	/* The stream context array size must be a power of two */
3020 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
3021
	/*
	 * The controller advertises the largest primary stream array size it
	 * supports in HCC_MAX_PSA; clamp the request to that.
	 */
3027 max_streams = HCC_MAX_PSA(xhci->hcc_params);
3028 if (*num_stream_ctxs > max_streams) {
3029 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
3030 max_streams);
3031 *num_stream_ctxs = max_streams;
3032 *num_streams = max_streams;
3033 }
3034}
3035
/* Verify that every endpoint can take streams, clamp the requested number of
 * streams to what all the endpoints support, and build a bitmask of the
 * endpoint contexts that will change.
 */
3040static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
3041 struct usb_device *udev,
3042 struct usb_host_endpoint **eps, unsigned int num_eps,
3043 unsigned int *num_streams, u32 *changed_ep_bitmask)
3044{
3045 unsigned int max_streams;
3046 unsigned int endpoint_flag;
3047 int i;
3048 int ret;
3049
3050 for (i = 0; i < num_eps; i++) {
3051 ret = xhci_check_streams_endpoint(xhci, udev,
3052 eps[i], udev->slot_id);
3053 if (ret < 0)
3054 return ret;
3055
3056 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
3057 if (max_streams < (*num_streams - 1)) {
3058 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3059 eps[i]->desc.bEndpointAddress,
3060 max_streams);
3061 *num_streams = max_streams+1;
3062 }
3063
3064 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
3065 if (*changed_ep_bitmask & endpoint_flag)
3066 return -EINVAL;
3067 *changed_ep_bitmask |= endpoint_flag;
3068 }
3069 return 0;
3070}
3071
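/*
 * Build the bitmask of endpoint contexts to change when tearing streams
 * down.  Returns 0 (and warns) if any endpoint is already mid-transition or
 * never had streams, since that indicates a driver bug.
 */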
3072static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3073 struct usb_device *udev,
3074 struct usb_host_endpoint **eps, unsigned int num_eps)
3075{
3076 u32 changed_ep_bitmask = 0;
3077 unsigned int slot_id;
3078 unsigned int ep_index;
3079 unsigned int ep_state;
3080 int i;
3081
3082 slot_id = udev->slot_id;
3083 if (!xhci->devs[slot_id])
3084 return 0;
3085
3086 for (i = 0; i < num_eps; i++) {
3087 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3088 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3089
3090 if (ep_state & EP_GETTING_NO_STREAMS) {
3091 xhci_warn(xhci, "WARN Can't disable streams for "
3092 "endpoint 0x%x, "
3093 "streams are being disabled already\n",
3094 eps[i]->desc.bEndpointAddress);
3095 return 0;
3096 }
3097
3098 if (!(ep_state & EP_HAS_STREAMS) &&
3099 !(ep_state & EP_GETTING_STREAMS)) {
3100 xhci_warn(xhci, "WARN Can't disable streams for "
3101 "endpoint 0x%x, "
3102 "streams are already disabled!\n",
3103 eps[i]->desc.bEndpointAddress);
3104 xhci_warn(xhci, "WARN xhci_free_streams() called "
3105 "with non-streams endpoint\n");
3106 return 0;
3107 }
3108 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3109 }
3110 return changed_ep_bitmask;
3111}
3112
/*
 * The USB device drivers use this function (through the HCD interface in USB
 * core) to prepare a set of bulk endpoints to use streams.  Streams are used
 * to coordinate mass storage command queueing across multiple endpoints
 * (basically a stream ID == a task ID).
 *
 * Setting up streams involves allocating the same size stream context array
 * for each endpoint and issuing a configure endpoint command for all
 * endpoints.
 *
 * Don't allow the call to succeed if one endpoint only supports one stream
 * (which means it doesn't support streams at all).
 *
 * Drivers may get less stream IDs than they asked for, if the host controller
 * hardware or endpoints claim they can't support the number of requested
 * stream IDs.
 */
3129int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3130 struct usb_host_endpoint **eps, unsigned int num_eps,
3131 unsigned int num_streams, gfp_t mem_flags)
3132{
3133 int i, ret;
3134 struct xhci_hcd *xhci;
3135 struct xhci_virt_device *vdev;
3136 struct xhci_command *config_cmd;
3137 struct xhci_input_control_ctx *ctrl_ctx;
3138 unsigned int ep_index;
3139 unsigned int num_stream_ctxs;
3140 unsigned int max_packet;
3141 unsigned long flags;
3142 u32 changed_ep_bitmask = 0;
3143
3144 if (!eps)
3145 return -EINVAL;
3146
	/*
	 * Add one to the number of streams requested to account for
	 * stream 0 that is reserved for xHCI internal use.
	 */
3150 num_streams += 1;
3151 xhci = hcd_to_xhci(hcd);
3152 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3153 num_streams);

	/* MaxPSASize less than 4 means streams are not really supported */
3156 if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
3157 HCC_MAX_PSA(xhci->hcc_params) < 4) {
3158 xhci_dbg(xhci, "xHCI controller does not support streams.\n");
3159 return -ENOSYS;
3160 }
3161
3162 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
3163 if (!config_cmd) {
3164 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
3165 return -ENOMEM;
3166 }
3167 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
3168 if (!ctrl_ctx) {
3169 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3170 __func__);
3171 xhci_free_command(xhci, config_cmd);
3172 return -ENOMEM;
3173 }
3174
	/* Check to make sure all endpoints are not already configured for
	 * streams.  While we're at it, find the maximum number of streams that
	 * all the endpoints will support.
	 */
3179 spin_lock_irqsave(&xhci->lock, flags);
3180 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3181 num_eps, &num_streams, &changed_ep_bitmask);
3182 if (ret < 0) {
3183 xhci_free_command(xhci, config_cmd);
3184 spin_unlock_irqrestore(&xhci->lock, flags);
3185 return ret;
3186 }
3187 if (num_streams <= 1) {
3188 xhci_warn(xhci, "WARN: endpoints can't handle "
3189 "more than one stream.\n");
3190 xhci_free_command(xhci, config_cmd);
3191 spin_unlock_irqrestore(&xhci->lock, flags);
3192 return -EINVAL;
3193 }
3194 vdev = xhci->devs[udev->slot_id];
3195
3196
3197
3198 for (i = 0; i < num_eps; i++) {
3199 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3200 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3201 }
3202 spin_unlock_irqrestore(&xhci->lock, flags);
3203
	/* Set up the streams data structures for each endpoint, but don't
	 * install anything in the input context until every allocation
	 * succeeds.
	 */
3208 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3209 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3210 num_stream_ctxs, num_streams);
3211
3212 for (i = 0; i < num_eps; i++) {
3213 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3214 max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&eps[i]->desc));
3215 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3216 num_stream_ctxs,
3217 num_streams,
3218 max_packet, mem_flags);
3219 if (!vdev->eps[ep_index].stream_info)
3220 goto cleanup;
3221
3222
3223
3224 }
3225
3226
3227 for (i = 0; i < num_eps; i++) {
3228 struct xhci_ep_ctx *ep_ctx;
3229
3230 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3231 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3232
3233 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3234 vdev->out_ctx, ep_index);
3235 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3236 vdev->eps[ep_index].stream_info);
3237 }
3238
3239
3240
3241 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3242 vdev->out_ctx, ctrl_ctx,
3243 changed_ep_bitmask, changed_ep_bitmask);
3244
3245
3246 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3247 false, false);
3248
3249
3250
3251
3252
3253 if (ret < 0)
3254 goto cleanup;
3255
3256 spin_lock_irqsave(&xhci->lock, flags);
3257 for (i = 0; i < num_eps; i++) {
3258 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3259 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3260 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3261 udev->slot_id, ep_index);
3262 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3263 }
3264 xhci_free_command(xhci, config_cmd);
3265 spin_unlock_irqrestore(&xhci->lock, flags);
3266
	/* Subtract 1 for stream 0, which drivers can't use */
3268 return num_streams - 1;
3269
3270cleanup:
3271
3272 for (i = 0; i < num_eps; i++) {
3273 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3274 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3275 vdev->eps[ep_index].stream_info = NULL;
3276
3277
3278
3279 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3280 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3281 xhci_endpoint_zero(xhci, vdev, eps[i]);
3282 }
3283 xhci_free_command(xhci, config_cmd);
3284 return -ENOMEM;
3285}
3286
/* Transition the endpoint from using streams to being a "normal" endpoint
 * without streams.
 *
 * Modify the endpoint context state, submit a configure endpoint command,
 * and free all endpoint rings for streams if that completes successfully.
 */
3293int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3294 struct usb_host_endpoint **eps, unsigned int num_eps,
3295 gfp_t mem_flags)
3296{
3297 int i, ret;
3298 struct xhci_hcd *xhci;
3299 struct xhci_virt_device *vdev;
3300 struct xhci_command *command;
3301 struct xhci_input_control_ctx *ctrl_ctx;
3302 unsigned int ep_index;
3303 unsigned long flags;
3304 u32 changed_ep_bitmask;
3305
3306 xhci = hcd_to_xhci(hcd);
3307 vdev = xhci->devs[udev->slot_id];
3308
3309
3310 spin_lock_irqsave(&xhci->lock, flags);
3311 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3312 udev, eps, num_eps);
3313 if (changed_ep_bitmask == 0) {
3314 spin_unlock_irqrestore(&xhci->lock, flags);
3315 return -EINVAL;
3316 }

	/* Use the xhci_command structure from the first endpoint.  We may have
	 * allocated too many, but the driver may call xhci_free_streams() for
	 * each endpoint it grouped into one call to xhci_alloc_streams().
	 */
3322 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3323 command = vdev->eps[ep_index].stream_info->free_streams_command;
3324 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3325 if (!ctrl_ctx) {
3326 spin_unlock_irqrestore(&xhci->lock, flags);
3327 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3328 __func__);
3329 return -EINVAL;
3330 }
3331
3332 for (i = 0; i < num_eps; i++) {
3333 struct xhci_ep_ctx *ep_ctx;
3334
3335 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3336 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3337 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3338 EP_GETTING_NO_STREAMS;
3339
3340 xhci_endpoint_copy(xhci, command->in_ctx,
3341 vdev->out_ctx, ep_index);
3342 xhci_setup_no_streams_ep_input_ctx(ep_ctx,
3343 &vdev->eps[ep_index]);
3344 }
3345 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3346 vdev->out_ctx, ctrl_ctx,
3347 changed_ep_bitmask, changed_ep_bitmask);
3348 spin_unlock_irqrestore(&xhci->lock, flags);
3349
3350
3351
3352
3353 ret = xhci_configure_endpoint(xhci, udev, command,
3354 false, true);
3355
3356
3357
3358
3359 if (ret < 0)
3360 return ret;
3361
3362 spin_lock_irqsave(&xhci->lock, flags);
3363 for (i = 0; i < num_eps; i++) {
3364 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3365 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3366 vdev->eps[ep_index].stream_info = NULL;
3367
3368
3369
3370 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3371 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3372 }
3373 spin_unlock_irqrestore(&xhci->lock, flags);
3374
3375 return 0;
3376}
3377
/*
 * Deletes endpoint resources for endpoints that were active before a Reset
 * Device command, or a Disable Slot command.  The Reset Device command leaves
 * the control endpoint intact, whereas the Disable Slot command deletes it.
 *
 * Must be called with xhci->lock held.
 */
3385void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3386 struct xhci_virt_device *virt_dev, bool drop_control_ep)
3387{
3388 int i;
3389 unsigned int num_dropped_eps = 0;
3390 unsigned int drop_flags = 0;
3391
3392 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3393 if (virt_dev->eps[i].ring) {
3394 drop_flags |= 1 << i;
3395 num_dropped_eps++;
3396 }
3397 }
3398 xhci->num_active_eps -= num_dropped_eps;
3399 if (num_dropped_eps)
3400 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3401 "Dropped %u ep ctxs, flags = 0x%x, "
3402 "%u now active.",
3403 num_dropped_eps, drop_flags,
3404 xhci->num_active_eps);
3405}
3406
/*
 * This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the
 * default control endpoint.  The USB core should come back and call
 * xhci_address_device(), and then re-set up the configuration.  If this is
 * called because of a usb_reset_and_verify_device(), then the old alternate
 * settings will be re-installed through the normal bandwidth allocation
 * functions.
 *
 * Wait for the Reset Device command to finish.  Remove all structures
 * associated with the endpoints that were disabled.  Clear the input device
 * structure?  Reset the control endpoint 0 max packet size?
 *
 * If the virt_dev to be reset does not exist or does not match the udev,
 * it means the device is lost, possibly due to the xHC restore error and
 * re-initialization during S3/S4.  In this case, call xhci_alloc_dev() to
 * re-allocate the device.
 */
3425int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
3426{
3427 int ret, i;
3428 unsigned long flags;
3429 struct xhci_hcd *xhci;
3430 unsigned int slot_id;
3431 struct xhci_virt_device *virt_dev;
3432 struct xhci_command *reset_device_cmd;
3433 int last_freed_endpoint;
3434 struct xhci_slot_ctx *slot_ctx;
3435 int old_active_eps = 0;
3436
3437 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3438 if (ret <= 0)
3439 return ret;
3440 xhci = hcd_to_xhci(hcd);
3441 slot_id = udev->slot_id;
3442 virt_dev = xhci->devs[slot_id];
3443 if (!virt_dev) {
3444 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3445 "not exist. Re-allocate the device\n", slot_id);
3446 ret = xhci_alloc_dev(hcd, udev);
3447 if (ret == 1)
3448 return 0;
3449 else
3450 return -EINVAL;
3451 }
3452
3453 if (virt_dev->tt_info)
3454 old_active_eps = virt_dev->tt_info->active_eps;
3455
3456 if (virt_dev->udev != udev) {
		/* If the virt_dev and the udev do not match, this virt_dev may
		 * belong to a different udev; re-allocate the device.
		 */
3461 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3462 "not match the udev. Re-allocate the device\n",
3463 slot_id);
3464 ret = xhci_alloc_dev(hcd, udev);
3465 if (ret == 1)
3466 return 0;
3467 else
3468 return -EINVAL;
3469 }
3470
	/* If the device is not set up yet, there is nothing to reset */
3472 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3473 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3474 SLOT_STATE_DISABLED)
3475 return 0;
3476
3477 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);

	/* Allocate the command structure that holds the struct completion.
	 * Assume we're in process context, since the normal device reset
	 * process has to wait for the device anyway.  Storage devices are
	 * reset as part of error handling, so use GFP_NOIO instead of
	 * GFP_KERNEL.
	 */
3484 reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
3485 if (!reset_device_cmd) {
3486 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3487 return -ENOMEM;
3488 }
3489
3490
3491 spin_lock_irqsave(&xhci->lock, flags);
3492
3493 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
3494 if (ret) {
3495 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3496 spin_unlock_irqrestore(&xhci->lock, flags);
3497 goto command_cleanup;
3498 }
3499 xhci_ring_cmd_db(xhci);
3500 spin_unlock_irqrestore(&xhci->lock, flags);
3501
3502
3503 wait_for_completion(reset_device_cmd->completion);
3504
	/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
	 * unless we tried to reset a slot ID that wasn't enabled,
	 * or the device wasn't in the addressed or configured state.
	 */
3509 ret = reset_device_cmd->status;
3510 switch (ret) {
3511 case COMP_CMD_ABORT:
3512 case COMP_CMD_STOP:
3513 xhci_warn(xhci, "Timeout waiting for reset device command\n");
3514 ret = -ETIME;
3515 goto command_cleanup;
3516 case COMP_EBADSLT:
3517 case COMP_CTX_STATE:
3518 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
3519 slot_id,
3520 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3521 xhci_dbg(xhci, "Not freeing device rings.\n");
3522
3523 ret = 0;
3524 goto command_cleanup;
3525 case COMP_SUCCESS:
3526 xhci_dbg(xhci, "Successful reset device command.\n");
3527 break;
3528 default:
3529 if (xhci_is_vendor_info_code(xhci, ret))
3530 break;
3531 xhci_warn(xhci, "Unknown completion code %u for "
3532 "reset device command.\n", ret);
3533 ret = -EINVAL;
3534 goto command_cleanup;
3535 }
3536
3537
3538 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3539 spin_lock_irqsave(&xhci->lock, flags);
3540
3541 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3542 spin_unlock_irqrestore(&xhci->lock, flags);
3543 }

	/* Everything but endpoint 0 is disabled, so free or cache the rings */
3546 last_freed_endpoint = 1;
3547 for (i = 1; i < 31; ++i) {
3548 struct xhci_virt_ep *ep = &virt_dev->eps[i];
3549
3550 if (ep->ep_state & EP_HAS_STREAMS) {
3551 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
3552 xhci_get_endpoint_address(i));
3553 xhci_free_stream_info(xhci, ep->stream_info);
3554 ep->stream_info = NULL;
3555 ep->ep_state &= ~EP_HAS_STREAMS;
3556 }
3557
3558 if (ep->ring) {
3559 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
3560 last_freed_endpoint = i;
3561 }
3562 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3563 xhci_drop_ep_from_interval_table(xhci,
3564 &virt_dev->eps[i].bw_info,
3565 virt_dev->bw_table,
3566 udev,
3567 &virt_dev->eps[i],
3568 virt_dev->tt_info);
3569 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3570 }
3571
3572 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3573
3574 xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
3575 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
3576 ret = 0;
3577
3578command_cleanup:
3579 xhci_free_command(xhci, reset_device_cmd);
3580 return ret;
3581}
3582
/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, and all traffic has been stopped and the endpoints have been
 * disabled.  Free any HC data structures associated with that device.
 */
3588void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3589{
3590 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3591 struct xhci_virt_device *virt_dev;
3592 unsigned long flags;
3593 u32 state;
3594 int i, ret;
3595 struct xhci_command *command;
3596
3597 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
3598 if (!command)
3599 return;
3600
3601#ifndef CONFIG_USB_DEFAULT_PERSIST
	/*
	 * We called pm_runtime_get_noresume when the device was attached.
	 * Decrement the counter here to allow the controller to runtime
	 * suspend if no devices remain.
	 */
3607 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3608 pm_runtime_put_noidle(hcd->self.controller);
3609#endif
3610
3611 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	/* The host may already be halted if the driver is being unloaded;
	 * keep going so the virt_dev still gets freed, but bail out on any
	 * other argument error.
	 */
3615 if (ret <= 0 && ret != -ENODEV) {
3616 kfree(command);
3617 return;
3618 }
3619
3620 virt_dev = xhci->devs[udev->slot_id];

	/* Stop any wayward timer functions (which may grab the lock) */
3623 for (i = 0; i < 31; ++i) {
3624 virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
3625 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
3626 }
3627
3628 spin_lock_irqsave(&xhci->lock, flags);
3629
3630 state = readl(&xhci->op_regs->status);
3631 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3632 (xhci->xhc_state & XHCI_STATE_HALTED)) {
3633 xhci_free_virt_device(xhci, udev->slot_id);
3634 spin_unlock_irqrestore(&xhci->lock, flags);
3635 kfree(command);
3636 return;
3637 }
3638
	if (xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
				    udev->slot_id)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		kfree(command);	/* never queued, so it won't be freed for us */
		return;
	}
3645 xhci_ring_cmd_db(xhci);
3646 spin_unlock_irqrestore(&xhci->lock, flags);

	/*
	 * Event command completion handler will free any data structures
	 * associated with the slot.  XXX Can free sleep?
	 */
3652}
3653
/*
 * Checks if we have enough host controller resources for the default control
 * endpoint.
 *
 * Must be called with xhci->lock held.
 */
3660static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3661{
3662 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
3663 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3664 "Not enough ep ctxs: "
3665 "%u active, need to add 1, limit is %u.",
3666 xhci->num_active_eps, xhci->limit_active_eps);
3667 return -ENOMEM;
3668 }
3669 xhci->num_active_eps += 1;
3670 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3671 "Adding 1 ep ctx, %u now active.",
3672 xhci->num_active_eps);
3673 return 0;
3674}
3675
/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed.  Returns 1 on success.
 */
3681int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3682{
3683 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3684 unsigned long flags;
3685 int ret, slot_id;
3686 struct xhci_command *command;
3687
3688 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
3689 if (!command)
3690 return 0;
3691
3692
3693 mutex_lock(&xhci->mutex);
3694 spin_lock_irqsave(&xhci->lock, flags);
3695 command->completion = &xhci->addr_dev;
3696 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
3697 if (ret) {
3698 spin_unlock_irqrestore(&xhci->lock, flags);
3699 mutex_unlock(&xhci->mutex);
3700 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3701 kfree(command);
3702 return 0;
3703 }
3704 xhci_ring_cmd_db(xhci);
3705 spin_unlock_irqrestore(&xhci->lock, flags);
3706
3707 wait_for_completion(command->completion);
3708 slot_id = xhci->slot_id;
3709 mutex_unlock(&xhci->mutex);
3710
3711 if (!slot_id || command->status != COMP_SUCCESS) {
3712 xhci_err(xhci, "Error while assigning device slot ID\n");
3713 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
3714 HCS_MAX_SLOTS(
3715 readl(&xhci->cap_regs->hcs_params1)));
3716 kfree(command);
3717 return 0;
3718 }
3719
3720 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3721 spin_lock_irqsave(&xhci->lock, flags);
3722 ret = xhci_reserve_host_control_ep_resources(xhci);
3723 if (ret) {
3724 spin_unlock_irqrestore(&xhci->lock, flags);
3725 xhci_warn(xhci, "Not enough host resources, "
3726 "active endpoint contexts = %u\n",
3727 xhci->num_active_eps);
3728 goto disable_slot;
3729 }
3730 spin_unlock_irqrestore(&xhci->lock, flags);
3731 }
3732
	/* Use GFP_NOIO, since this function can be called from
	 * xhci_discover_or_reset_device(), which may be called as part of
	 * mass storage driver error handling.
	 */
3736 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
3737 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
3738 goto disable_slot;
3739 }
3740 udev->slot_id = slot_id;
3741
3742#ifndef CONFIG_USB_DEFAULT_PERSIST
	/*
	 * If resetting upon resume, we can't put the controller into runtime
	 * suspend if there is a device attached.
	 */
3747 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3748 pm_runtime_get_noresume(hcd->self.controller);
3749#endif
3750
3751
3752 kfree(command);
3753
3754
3755 return 1;
3756
3757disable_slot:
	/* Disable the slot, if we can do it without a memory allocation */
3759 spin_lock_irqsave(&xhci->lock, flags);
3760 command->completion = NULL;
3761 command->status = 0;
	if (!xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
				     udev->slot_id))
		xhci_ring_cmd_db(xhci);
	else
		kfree(command);	/* never queued, so free it here */
	spin_unlock_irqrestore(&xhci->lock, flags);
3766 return 0;
3767}
3768
/*
 * Issue an Address Device command and optionally send a corresponding
 * SetAddress request to the device.
 */
3773static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
3774 enum xhci_setup_dev setup)
3775{
3776 const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
3777 unsigned long flags;
3778 struct xhci_virt_device *virt_dev;
3779 int ret = 0;
3780 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3781 struct xhci_slot_ctx *slot_ctx;
3782 struct xhci_input_control_ctx *ctrl_ctx;
3783 u64 temp_64;
3784 struct xhci_command *command = NULL;
3785
3786 mutex_lock(&xhci->mutex);
3787
	if (xhci->xhc_state)	/* dying, removing or halted */
3789 goto out;
3790
3791 if (!udev->slot_id) {
3792 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3793 "Bad Slot ID %d", udev->slot_id);
3794 ret = -EINVAL;
3795 goto out;
3796 }
3797
3798 virt_dev = xhci->devs[udev->slot_id];
3799
3800 if (WARN_ON(!virt_dev)) {
		/*
		 * In plug/unplug torture test with an NEC controller,
		 * a zero-dereference was observed once due to virt_dev = 0.
		 * Print a useful debug message rather than crash if it is
		 * observed again!
		 */
3806 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
3807 udev->slot_id);
3808 ret = -EINVAL;
3809 goto out;
3810 }
3811
3812 if (setup == SETUP_CONTEXT_ONLY) {
3813 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3814 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3815 SLOT_STATE_DEFAULT) {
3816 xhci_dbg(xhci, "Slot already in default state\n");
3817 goto out;
3818 }
3819 }
3820
3821 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
3822 if (!command) {
3823 ret = -ENOMEM;
3824 goto out;
3825 }
3826
3827 command->in_ctx = virt_dev->in_ctx;
3828 command->completion = &xhci->addr_dev;
3829
3830 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
3831 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
3832 if (!ctrl_ctx) {
3833 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3834 __func__);
3835 ret = -EINVAL;
3836 goto out;
3837 }
3838
	/*
	 * If this is the first Set Address since device plug-in or
	 * virt_device reallocation after a resume with an xHCI power loss,
	 * then set up the slot context.
	 */
3843 if (!slot_ctx->dev_info)
3844 xhci_setup_addressable_virt_dev(xhci, udev);
	/* Otherwise, update the control endpoint ring enqueue pointer. */
3846 else
3847 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
3848 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
3849 ctrl_ctx->drop_flags = 0;
3850
3851 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3852 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3853 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
3854 le32_to_cpu(slot_ctx->dev_info) >> 27);
3855
3856 spin_lock_irqsave(&xhci->lock, flags);
3857 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
3858 udev->slot_id, setup);
3859 if (ret) {
3860 spin_unlock_irqrestore(&xhci->lock, flags);
3861 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3862 "FIXME: allocate a command ring segment");
3863 goto out;
3864 }
3865 xhci_ring_cmd_db(xhci);
3866 spin_unlock_irqrestore(&xhci->lock, flags);
3867
3868
3869 wait_for_completion(command->completion);
3870
3871
3872
3873
3874
3875 switch (command->status) {
3876 case COMP_CMD_ABORT:
3877 case COMP_CMD_STOP:
3878 xhci_warn(xhci, "Timeout while waiting for setup device command\n");
3879 ret = -ETIME;
3880 break;
3881 case COMP_CTX_STATE:
3882 case COMP_EBADSLT:
3883 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
3884 act, udev->slot_id);
3885 ret = -EINVAL;
3886 break;
3887 case COMP_TX_ERR:
3888 dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
3889 ret = -EPROTO;
3890 break;
3891 case COMP_DEV_ERR:
3892 dev_warn(&udev->dev,
3893 "ERROR: Incompatible device for setup %s command\n", act);
3894 ret = -ENODEV;
3895 break;
3896 case COMP_SUCCESS:
3897 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3898 "Successful setup %s command", act);
3899 break;
3900 default:
3901 xhci_err(xhci,
3902 "ERROR: unexpected setup %s command completion code 0x%x.\n",
3903 act, command->status);
3904 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3905 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
3906 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
3907 ret = -EINVAL;
3908 break;
3909 }
3910 if (ret)
3911 goto out;
3912 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
3913 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3914 "Op regs DCBAA ptr = %#016llx", temp_64);
3915 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3916 "Slot ID %d dcbaa entry @%p = %#016llx",
3917 udev->slot_id,
3918 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
3919 (unsigned long long)
3920 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
3921 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3922 "Output Context DMA address = %#08llx",
3923 (unsigned long long)virt_dev->out_ctx->dma);
3924 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3925 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3926 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
3927 le32_to_cpu(slot_ctx->dev_info) >> 27);
3928 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3929 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);

	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
3934 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3935 trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
3936 le32_to_cpu(slot_ctx->dev_info) >> 27);

	/* Zero the input context control fields for later use */
3938 ctrl_ctx->add_flags = 0;
3939 ctrl_ctx->drop_flags = 0;
3940
3941 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3942 "Internal device address = %d",
3943 le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
3944out:
3945 mutex_unlock(&xhci->mutex);
3946 kfree(command);
3947 return ret;
3948}
3949
3950int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3951{
3952 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
3953}
3954
3955int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
3956{
3957 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
3958}
3959
/*
 * Transfer the port index into real index in the HW port status
 * registers.  Calculate offset between the port's PORTSC register,
 * and port status base address.
 */
3966int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
3967{
3968 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3969 __le32 __iomem *base_addr = &xhci->op_regs->port_status_base;
3970 __le32 __iomem *addr;
3971 int raw_port;
3972
3973 if (hcd->speed < HCD_USB3)
3974 addr = xhci->usb2_ports[port1 - 1];
3975 else
3976 addr = xhci->usb3_ports[port1 - 1];
3977
3978 raw_port = (addr - base_addr)/NUM_PORT_REGS + 1;
3979 return raw_port;
3980}
3981
/*
 * Issue an Evaluate Context command to change the Maximum Exit Latency in the
 * slot context.  If successful, store the new MEL in the xhci_virt_device.
 */
3986static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
3987 struct usb_device *udev, u16 max_exit_latency)
3988{
3989 struct xhci_virt_device *virt_dev;
3990 struct xhci_command *command;
3991 struct xhci_input_control_ctx *ctrl_ctx;
3992 struct xhci_slot_ctx *slot_ctx;
3993 unsigned long flags;
3994 int ret;
3995
3996 spin_lock_irqsave(&xhci->lock, flags);
3997
3998 virt_dev = xhci->devs[udev->slot_id];
3999
	/*
	 * The virt_dev may no longer exist if the xHC was re-initialized
	 * after a resume; there is also nothing to do if the requested MEL
	 * already matches the current one.
	 */
4006 if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
4007 spin_unlock_irqrestore(&xhci->lock, flags);
4008 return 0;
4009 }
4010
4011
4012 command = xhci->lpm_command;
4013 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
4014 if (!ctrl_ctx) {
4015 spin_unlock_irqrestore(&xhci->lock, flags);
4016 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4017 __func__);
4018 return -ENOMEM;
4019 }
4020
4021 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4022 spin_unlock_irqrestore(&xhci->lock, flags);
4023
4024 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4025 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4026 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4027 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4028 slot_ctx->dev_state = 0;
4029
4030 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
4031 "Set up evaluate context for LPM MEL change.");
4032 xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
4033 xhci_dbg_ctx(xhci, command->in_ctx, 0);
4034
4035
4036 ret = xhci_configure_endpoint(xhci, udev, command,
4037 true, true);
4038 xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id);
4039 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0);
4040
4041 if (!ret) {
4042 spin_lock_irqsave(&xhci->lock, flags);
4043 virt_dev->current_mel = max_exit_latency;
4044 spin_unlock_irqrestore(&xhci->lock, flags);
4045 }
4046 return ret;
4047}
4048
4049#ifdef CONFIG_PM

/* BESL to HIRD Encoding array for USB2 LPM */
4052static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
4053 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
4054
/* Calculate HIRD/BESL for USB2 PORTPMSC */
4056static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
4057 struct usb_device *udev)
4058{
4059 int u2del, besl, besl_host;
4060 int besl_device = 0;
4061 u32 field;
4062
4063 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
4064 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4065
4066 if (field & USB_BESL_SUPPORT) {
4067 for (besl_host = 0; besl_host < 16; besl_host++) {
4068 if (xhci_besl_encoding[besl_host] >= u2del)
4069 break;
4070 }
4071
4072 if (field & USB_BESL_BASELINE_VALID)
4073 besl_device = USB_GET_BESL_BASELINE(field);
4074 else if (field & USB_BESL_DEEP_VALID)
4075 besl_device = USB_GET_BESL_DEEP(field);
4076 } else {
4077 if (u2del <= 50)
4078 besl_host = 0;
4079 else
4080 besl_host = (u2del - 51) / 75 + 1;
4081 }
4082
4083 besl = besl_host + besl_device;
4084 if (besl > 15)
4085 besl = 15;
4086
4087 return besl;
4088}
4089
/* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
4091static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
4092{
4093 u32 field;
4094 int l1;
4095 int besld = 0;
4096 int hirdm = 0;
4097
4098 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4099
	/* Convert the L1 timeout to the 256 microsecond units of PORTHLPMC */
4101 l1 = udev->l1_params.timeout / 256;
4102
4103
4104 if (field & USB_BESL_DEEP_VALID) {
4105 besld = USB_GET_BESL_DEEP(field);
4106 hirdm = 1;
4107 }
4108
4109 return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
4110}
4111
4112int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4113 struct usb_device *udev, int enable)
4114{
4115 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4116 __le32 __iomem **port_array;
4117 __le32 __iomem *pm_addr, *hlpm_addr;
4118 u32 pm_val, hlpm_val, field;
4119 unsigned int port_num;
4120 unsigned long flags;
4121 int hird, exit_latency;
4122 int ret;
4123
4124 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
4125 !udev->lpm_capable)
4126 return -EPERM;
4127
4128 if (!udev->parent || udev->parent->parent ||
4129 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4130 return -EPERM;
4131
4132 if (udev->usb2_hw_lpm_capable != 1)
4133 return -EPERM;
4134
4135 spin_lock_irqsave(&xhci->lock, flags);
4136
4137 port_array = xhci->usb2_ports;
4138 port_num = udev->portnum - 1;
4139 pm_addr = port_array[port_num] + PORTPMSC;
4140 pm_val = readl(pm_addr);
4141 hlpm_addr = port_array[port_num] + PORTHLPMC;
4142 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4143
4144 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
4145 enable ? "enable" : "disable", port_num + 1);
4146
4147 if (enable) {
4148
4149 if (udev->usb2_hw_lpm_besl_capable) {
4150
4151
4152
4153
4154 if ((field & USB_BESL_SUPPORT) &&
4155 (field & USB_BESL_BASELINE_VALID))
4156 hird = USB_GET_BESL_BASELINE(field);
4157 else
4158 hird = udev->l1_params.besl;
4159
4160 exit_latency = xhci_besl_encoding[hird];
4161 spin_unlock_irqrestore(&xhci->lock, flags);

			/* USB 3.0 code dedicates one xhci->lpm_command->in_ctx
			 * input context for link power management evaluate
			 * context commands.  It is protected by
			 * hcd->bandwidth_mutex and is shared by all devices.
			 * We need to set the max exit latency in USB 2 BESL
			 * LPM as well, so use the same mutex and
			 * xhci_change_max_exit_latency().
			 */
4170 mutex_lock(hcd->bandwidth_mutex);
4171 ret = xhci_change_max_exit_latency(xhci, udev,
4172 exit_latency);
4173 mutex_unlock(hcd->bandwidth_mutex);
4174
4175 if (ret < 0)
4176 return ret;
4177 spin_lock_irqsave(&xhci->lock, flags);
4178
4179 hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
4180 writel(hlpm_val, hlpm_addr);
4181
4182 readl(hlpm_addr);
4183 } else {
4184 hird = xhci_calculate_hird_besl(xhci, udev);
4185 }
4186
4187 pm_val &= ~PORT_HIRD_MASK;
4188 pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
4189 writel(pm_val, pm_addr);
4190 pm_val = readl(pm_addr);
4191 pm_val |= PORT_HLE;
4192 writel(pm_val, pm_addr);
4193
4194 readl(pm_addr);
4195 } else {
4196 pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
4197 writel(pm_val, pm_addr);
4198
4199 readl(pm_addr);
4200 if (udev->usb2_hw_lpm_besl_capable) {
4201 spin_unlock_irqrestore(&xhci->lock, flags);
4202 mutex_lock(hcd->bandwidth_mutex);
4203 xhci_change_max_exit_latency(xhci, udev, 0);
4204 mutex_unlock(hcd->bandwidth_mutex);
4205 return 0;
4206 }
4207 }
4208
4209 spin_unlock_irqrestore(&xhci->lock, flags);
4210 return 0;
4211}
4212
4213
4214
4215
4216
4217static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
4218 unsigned capability)
4219{
4220 u32 port_offset, port_count;
4221 int i;
4222
4223 for (i = 0; i < xhci->num_ext_caps; i++) {
4224 if (xhci->ext_caps[i] & capability) {
4225
4226 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
4227 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
4228 if (port >= port_offset &&
4229 port < port_offset + port_count)
4230 return 1;
4231 }
4232 }
4233 return 0;
4234}
4235
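/*
 * Called by the USB core once a device is enumerated: mark the device as
 * capable of USB 2.0 hardware LPM (and of BESL) if both the host and the
 * port's extended capabilities allow it.
 */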
4236int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4237{
4238 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4239 int portnum = udev->portnum - 1;
4240
4241 if (hcd->speed >= HCD_USB3 || !xhci->sw_lpm_support ||
4242 !udev->lpm_capable)
4243 return 0;
4244
	/* We only support LPM for non-hub devices attached to a root port */
4246 if (!udev->parent || udev->parent->parent ||
4247 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4248 return 0;
4249
4250 if (xhci->hw_lpm_support == 1 &&
4251 xhci_check_usb2_port_capability(
4252 xhci, portnum, XHCI_HLC)) {
4253 udev->usb2_hw_lpm_capable = 1;
4254 udev->l1_params.timeout = XHCI_L1_TIMEOUT;
4255 udev->l1_params.besl = XHCI_DEFAULT_BESL;
4256 if (xhci_check_usb2_port_capability(xhci, portnum,
4257 XHCI_BLC))
4258 udev->usb2_hw_lpm_besl_capable = 1;
4259 }
4260
4261 return 0;
4262}
4263

/*---------------------- USB 3.0 Link PM functions ------------------------*/

4267static unsigned long long xhci_service_interval_to_ns(
4268 struct usb_endpoint_descriptor *desc)
4269{
4270 return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
4271}
4272
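/*
 * When hub-initiated LPM is not usable, a device may still enter U1/U2 on
 * its own, but only if its exit latencies (SEL and PEL) are small enough;
 * otherwise the link state has to stay disabled.
 */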
4273static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
4274 enum usb3_link_state state)
4275{
4276 unsigned long long sel;
4277 unsigned long long pel;
4278 unsigned int max_sel_pel;
4279 char *state_name;
4280
4281 switch (state) {
4282 case USB3_LPM_U1:
		/* Convert SEL and PEL stored in nanoseconds to microseconds */
4284 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
4285 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
4286 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
4287 state_name = "U1";
4288 break;
4289 case USB3_LPM_U2:
4290 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
4291 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
4292 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
4293 state_name = "U2";
4294 break;
4295 default:
4296 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
4297 __func__);
4298 return USB3_LPM_DISABLED;
4299 }
4300
4301 if (sel <= max_sel_pel && pel <= max_sel_pel)
4302 return USB3_LPM_DEVICE_INITIATED;
4303
4304 if (sel > max_sel_pel)
4305 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4306 "due to long SEL %llu ms\n",
4307 state_name, sel);
4308 else
4309 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4310 "due to long PEL %llu ms\n",
4311 state_name, pel);
4312 return USB3_LPM_DISABLED;
4313}
4314
/* Returns the hub-encoded U1 timeout value.
 * The U1 timeout should be the maximum of the following values:
 *  - For control endpoints, U1 system exit latency (SEL) * 3
 *  - For bulk endpoints, U1 SEL * 5
 *  - For interrupt endpoints:
 *    - Notification EPs, U1 SEL * 3
 *    - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
 *  - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
 */
4323static unsigned long long xhci_calculate_intel_u1_timeout(
4324 struct usb_device *udev,
4325 struct usb_endpoint_descriptor *desc)
4326{
4327 unsigned long long timeout_ns;
4328 int ep_type;
4329 int intr_type;
4330
4331 ep_type = usb_endpoint_type(desc);
4332 switch (ep_type) {
4333 case USB_ENDPOINT_XFER_CONTROL:
4334 timeout_ns = udev->u1_params.sel * 3;
4335 break;
4336 case USB_ENDPOINT_XFER_BULK:
4337 timeout_ns = udev->u1_params.sel * 5;
4338 break;
4339 case USB_ENDPOINT_XFER_INT:
4340 intr_type = usb_endpoint_interrupt_type(desc);
4341 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
4342 timeout_ns = udev->u1_params.sel * 3;
4343 break;
4344 }
		/* Otherwise the calculation is the same as isoc eps */
4346 case USB_ENDPOINT_XFER_ISOC:
4347 timeout_ns = xhci_service_interval_to_ns(desc);
4348 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
4349 if (timeout_ns < udev->u1_params.sel * 2)
4350 timeout_ns = udev->u1_params.sel * 2;
4351 break;
4352 default:
4353 return 0;
4354 }
4355
4356 return timeout_ns;
4357}

/* Returns the hub-encoded U1 timeout value. */
4360static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
4361 struct usb_device *udev,
4362 struct usb_endpoint_descriptor *desc)
4363{
4364 unsigned long long timeout_ns;
4365
4366 if (xhci->quirks & XHCI_INTEL_HOST)
4367 timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
4368 else
4369 timeout_ns = udev->u1_params.sel;

	/* The U1 timeout is encoded in 1us intervals.
	 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
	 */
4374 if (timeout_ns == USB3_LPM_DISABLED)
4375 timeout_ns = 1;
4376 else
4377 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
4378
	/* If the necessary timeout value is bigger than what we can set in the
	 * USB 3.0 hub-initiated timeout, disable hub-initiated U1 instead.
	 */
4382 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
4383 return timeout_ns;
	dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
			"due to long timeout %llu us\n", timeout_ns);
4386 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
4387}
4388
/* Returns the hub-encoded U2 timeout value.
 * The U2 timeout should be the maximum of:
 *  - 10 ms (to avoid the bandwidth impact on the scheduler)
 *  - largest bInterval of any active periodic endpoint (to avoid going
 *    into lower power link states between intervals)
 *  - the U2 Exit Latency of the device
 */
4395static unsigned long long xhci_calculate_intel_u2_timeout(
4396 struct usb_device *udev,
4397 struct usb_endpoint_descriptor *desc)
4398{
4399 unsigned long long timeout_ns;
4400 unsigned long long u2_del_ns;
4401
4402 timeout_ns = 10 * 1000 * 1000;
4403
4404 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4405 (xhci_service_interval_to_ns(desc) > timeout_ns))
4406 timeout_ns = xhci_service_interval_to_ns(desc);
4407
4408 u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
4409 if (u2_del_ns > timeout_ns)
4410 timeout_ns = u2_del_ns;
4411
4412 return timeout_ns;
4413}

/* Returns the hub-encoded U2 timeout value. */
4416static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
4417 struct usb_device *udev,
4418 struct usb_endpoint_descriptor *desc)
4419{
4420 unsigned long long timeout_ns;
4421
4422 if (xhci->quirks & XHCI_INTEL_HOST)
4423 timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
4424 else
4425 timeout_ns = udev->u2_params.sel;

	/* The U2 timeout is encoded in 256us intervals */
4428 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);

	/* If the necessary timeout value is bigger than what we can set in the
	 * USB 3.0 hub-initiated timeout, disable hub-initiated U2 instead.
	 */
4432 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
4433 return timeout_ns;
4434 dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
4435 "due to long timeout %llu ms\n", timeout_ns);
4436 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
4437}
4438
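/*
 * Dispatch to the U1 or U2 timeout calculation for a single endpoint; any
 * other link state is treated as disabled.
 */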
4439static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4440 struct usb_device *udev,
4441 struct usb_endpoint_descriptor *desc,
4442 enum usb3_link_state state,
4443 u16 *timeout)
4444{
4445 if (state == USB3_LPM_U1)
4446 return xhci_calculate_u1_timeout(xhci, udev, desc);
4447 else if (state == USB3_LPM_U2)
4448 return xhci_calculate_u2_timeout(xhci, udev, desc);
4449
4450 return USB3_LPM_DISABLED;
4451}
4452
4453static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4454 struct usb_device *udev,
4455 struct usb_endpoint_descriptor *desc,
4456 enum usb3_link_state state,
4457 u16 *timeout)
4458{
4459 u16 alt_timeout;
4460
4461 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4462 desc, state, timeout);
4463
	/* If we found we can't enable hub-initiated LPM, or
	 * the U1 or U2 exit latency was too high to allow
	 * device-initiated LPM as well, then we will disable LPM
	 * for this device, so stop searching any further.
	 */
4468 if (alt_timeout == USB3_LPM_DISABLED ||
4469 alt_timeout == USB3_LPM_DEVICE_INITIATED) {
4470 *timeout = alt_timeout;
4471 return -E2BIG;
4472 }
4473 if (alt_timeout > *timeout)
4474 *timeout = alt_timeout;
4475 return 0;
4476}
4477
4478static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
4479 struct usb_device *udev,
4480 struct usb_host_interface *alt,
4481 enum usb3_link_state state,
4482 u16 *timeout)
4483{
4484 int j;
4485
4486 for (j = 0; j < alt->desc.bNumEndpoints; j++) {
4487 if (xhci_update_timeout_for_endpoint(xhci, udev,
4488 &alt->endpoint[j].desc, state, timeout))
4489 return -E2BIG;
4490 continue;
4491 }
4492 return 0;
4493}
4494
4495static int xhci_check_intel_tier_policy(struct usb_device *udev,
4496 enum usb3_link_state state)
4497{
4498 struct usb_device *parent;
4499 unsigned int num_hubs;
4500
4501 if (state == USB3_LPM_U2)
4502 return 0;
4503
	/* Don't enable U1 if the device is on a 2nd tier hub or lower */
4505 for (parent = udev->parent, num_hubs = 0; parent->parent;
4506 parent = parent->parent)
4507 num_hubs++;
4508
4509 if (num_hubs < 2)
4510 return 0;
4511
4512 dev_dbg(&udev->dev, "Disabling U1 link state for device"
4513 " below second-tier hub.\n");
4514 dev_dbg(&udev->dev, "Plug device into first-tier hub "
4515 "to decrease power consumption.\n");
4516 return -E2BIG;
4517}
4518
4519static int xhci_check_tier_policy(struct xhci_hcd *xhci,
4520 struct usb_device *udev,
4521 enum usb3_link_state state)
4522{
4523 if (xhci->quirks & XHCI_INTEL_HOST)
4524 return xhci_check_intel_tier_policy(udev, state);
4525 else
4526 return 0;
4527}
4528
/* Returns the U1 or U2 timeout that should be enabled.
 * If the tier check or timeout setting functions return with a non-zero exit
 * code, that means the timeout value has been finalized and we shouldn't look
 * at any more endpoints.
 */
4534static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
4535 struct usb_device *udev, enum usb3_link_state state)
4536{
4537 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4538 struct usb_host_config *config;
4539 char *state_name;
4540 int i;
4541 u16 timeout = USB3_LPM_DISABLED;
4542
4543 if (state == USB3_LPM_U1)
4544 state_name = "U1";
4545 else if (state == USB3_LPM_U2)
4546 state_name = "U2";
4547 else {
4548 dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
4549 state);
4550 return timeout;
4551 }
4552
4553 if (xhci_check_tier_policy(xhci, udev, state) < 0)
4554 return timeout;
4555
 /* Gather some information about the currently installed configuration
 * and alternate interface settings.
 */
4559 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
4560 state, &timeout))
4561 return timeout;
4562
4563 config = udev->actconfig;
4564 if (!config)
4565 return timeout;
4566
4567 for (i = 0; i < config->desc.bNumInterfaces; i++) {
4568 struct usb_driver *driver;
4569 struct usb_interface *intf = config->interface[i];
4570
4571 if (!intf)
4572 continue;
4573
 /* Check if any currently bound drivers want hub-initiated LPM
 * disabled.
 */
4577 if (intf->dev.driver) {
4578 driver = to_usb_driver(intf->dev.driver);
4579 if (driver && driver->disable_hub_initiated_lpm) {
4580 dev_dbg(&udev->dev, "Hub-initiated %s disabled "
4581 "at request of driver %s\n",
4582 state_name, driver->name);
4583 return xhci_get_timeout_no_hub_lpm(udev, state);
4584 }
4585 }
4586
 /* Not sure how this could happen... */
4588 if (!intf->cur_altsetting)
4589 continue;
4590
4591 if (xhci_update_timeout_for_interface(xhci, udev,
4592 intf->cur_altsetting,
4593 state, &timeout))
4594 return timeout;
4595 }
4596 return timeout;
4597}
4598
4599static int calculate_max_exit_latency(struct usb_device *udev,
4600 enum usb3_link_state state_changed,
4601 u16 hub_encoded_timeout)
4602{
4603 unsigned long long u1_mel_us = 0;
4604 unsigned long long u2_mel_us = 0;
4605 unsigned long long mel_us = 0;
4606 bool disabling_u1;
4607 bool disabling_u2;
4608 bool enabling_u1;
4609 bool enabling_u2;
4610
4611 disabling_u1 = (state_changed == USB3_LPM_U1 &&
4612 hub_encoded_timeout == USB3_LPM_DISABLED);
4613 disabling_u2 = (state_changed == USB3_LPM_U2 &&
4614 hub_encoded_timeout == USB3_LPM_DISABLED);
4615
4616 enabling_u1 = (state_changed == USB3_LPM_U1 &&
4617 hub_encoded_timeout != USB3_LPM_DISABLED);
4618 enabling_u2 = (state_changed == USB3_LPM_U2 &&
4619 hub_encoded_timeout != USB3_LPM_DISABLED);
4620
 /* If U1 was already enabled and we're not disabling it, or we're going
 * to enable U1, account for the U1 max exit latency.
 */
4624 if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
4625 enabling_u1)
4626 u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
4627 if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
4628 enabling_u2)
4629 u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
4630
4631 if (u1_mel_us > u2_mel_us)
4632 mel_us = u1_mel_us;
4633 else
4634 mel_us = u2_mel_us;
4635
4636 if (mel_us > MAX_EXIT) {
4637 dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
4638 "is too big.\n", mel_us);
4639 return -E2BIG;
4640 }
4641 return mel_us;
4642}
4643
/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
4645int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4646 struct usb_device *udev, enum usb3_link_state state)
4647{
4648 struct xhci_hcd *xhci;
4649 u16 hub_encoded_timeout;
4650 int mel;
4651 int ret;
4652
4653 xhci = hcd_to_xhci(hcd);
4654
 /* The LPM timeout values are pretty host-controller specific, so don't
 * enable hub-initiated timeouts unless the vendor has provided
 * information about their timeout algorithm.
 */
4658 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4659 !xhci->devs[udev->slot_id])
4660 return USB3_LPM_DISABLED;
4661
4662 hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
4663 mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
4664 if (mel < 0) {
 /* Max Exit Latency is too big, disable LPM. */
4666 hub_encoded_timeout = USB3_LPM_DISABLED;
4667 mel = 0;
4668 }
4669
4670 ret = xhci_change_max_exit_latency(xhci, udev, mel);
4671 if (ret)
4672 return ret;
4673 return hub_encoded_timeout;
4674}
4675
4676int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4677 struct usb_device *udev, enum usb3_link_state state)
4678{
4679 struct xhci_hcd *xhci;
4680 u16 mel;
4681
4682 xhci = hcd_to_xhci(hcd);
4683 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4684 !xhci->devs[udev->slot_id])
4685 return 0;
4686
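 /* Recompute the max exit latency as if this link state were disabled. */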
4687 mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
4688 return xhci_change_max_exit_latency(xhci, udev, mel);
4689}
#else /* CONFIG_PM */
4691
4692int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4693 struct usb_device *udev, int enable)
4694{
4695 return 0;
4696}
4697
4698int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4699{
4700 return 0;
4701}
4702
4703int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4704 struct usb_device *udev, enum usb3_link_state state)
4705{
4706 return USB3_LPM_DISABLED;
4707}
4708
4709int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4710 struct usb_device *udev, enum usb3_link_state state)
4711{
4712 return 0;
4713}
#endif /* CONFIG_PM */
4715
/*-------------------------------------------------------------------------*/

/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */
4721int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
4722 struct usb_tt *tt, gfp_t mem_flags)
4723{
4724 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4725 struct xhci_virt_device *vdev;
4726 struct xhci_command *config_cmd;
4727 struct xhci_input_control_ctx *ctrl_ctx;
4728 struct xhci_slot_ctx *slot_ctx;
4729 unsigned long flags;
4730 unsigned think_time;
4731 int ret;
4732
 /* Ignore root hubs */
4734 if (!hdev->parent)
4735 return 0;
4736
4737 vdev = xhci->devs[hdev->slot_id];
4738 if (!vdev) {
4739 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
4740 return -EINVAL;
4741 }
4742 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
4743 if (!config_cmd) {
4744 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
4745 return -ENOMEM;
4746 }
4747 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
4748 if (!ctrl_ctx) {
4749 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4750 __func__);
4751 xhci_free_command(xhci, config_cmd);
4752 return -ENOMEM;
4753 }
4754
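 /* xhci_alloc_tt_info() below uses GFP_ATOMIC because xhci->lock is
 * held with interrupts disabled while it runs.
 */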
4755 spin_lock_irqsave(&xhci->lock, flags);
4756 if (hdev->speed == USB_SPEED_HIGH &&
4757 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
4758 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
4759 xhci_free_command(xhci, config_cmd);
4760 spin_unlock_irqrestore(&xhci->lock, flags);
4761 return -ENOMEM;
4762 }
4763
4764 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
4765 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4766 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
4767 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
 /*
 * Refer to xHCI spec section 6.2.2: MTT should be 0 for a full speed
 * hub, but it may already be set to 1 when an xHCI virtual device is
 * set up, so clear it anyway.
 */
4773 if (tt->multi)
4774 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
4775 else if (hdev->speed == USB_SPEED_FULL)
4776 slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
4777
4778 if (xhci->hci_version > 0x95) {
4779 xhci_dbg(xhci, "xHCI version %x needs hub "
4780 "TT think time and number of ports\n",
4781 (unsigned int) xhci->hci_version);
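 /* Record the hub's downstream port count in the slot context */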
4782 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));

 /* Set TT think time - convert from ns to FS bit times.
 * 0 = 8 FS bit times, 1 = 16 FS bit times,
 * 2 = 24 FS bit times, 3 = 32 FS bit times.
 * One FS bit time is ~83 ns, so 8 bit times are ~666 ns.
 *
 * xHCI 1.0: this field shall be 0 if the device
 * is not a high-speed hub.
 */
4790 think_time = tt->think_time;
4791 if (think_time != 0)
4792 think_time = (think_time / 666) - 1;
4793 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
4794 slot_ctx->tt_info |=
4795 cpu_to_le32(TT_THINK_TIME(think_time));
4796 } else {
4797 xhci_dbg(xhci, "xHCI version %x doesn't need hub "
4798 "TT think time or number of ports\n",
4799 (unsigned int) xhci->hci_version);
4800 }
4801 slot_ctx->dev_state = 0;
4802 spin_unlock_irqrestore(&xhci->lock, flags);
4803
4804 xhci_dbg(xhci, "Set up %s for hub device.\n",
4805 (xhci->hci_version > 0x95) ?
4806 "configure endpoint" : "evaluate context");
4807 xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
4808 xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);

 /* Issue and wait for the configure endpoint or
 * evaluate context command.
 */
4813 if (xhci->hci_version > 0x95)
4814 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
4815 false, false);
4816 else
4817 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
4818 true, false);
4819
4820 xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
4821 xhci_dbg_ctx(xhci, vdev->out_ctx, 0);
4822
4823 xhci_free_command(xhci, config_cmd);
4824 return ret;
4825}
4826
4827int xhci_get_frame(struct usb_hcd *hcd)
4828{
4829 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4830
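 /* MFINDEX counts 125 us microframes; shift right by 3 for 1 ms frames. */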
4831 return readl(&xhci->run_regs->microframe_index) >> 3;
4832}
4833
4834int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4835{
4836 struct xhci_hcd *xhci;
4837 struct device *dev = hcd->self.controller;
4838 int retval;
4839
 /* Accept arbitrarily long scatter-gather lists */
4841 hcd->self.sg_tablesize = ~0;
4842
 /* Support building packets from discontinuous buffers */
4844 hcd->self.no_sg_constraint = 1;
4845
 /* xHCI controllers don't stop the endpoint queue on short packets */
4847 hcd->self.no_stop_on_short = 1;
4848
4849 xhci = hcd_to_xhci(hcd);
4850
4851 if (usb_hcd_is_primary_hcd(hcd)) {
4852 xhci->main_hcd = hcd;
 /* Mark the first roothub as being USB 2.0.
 * The xHCI driver will register the USB 3.0 roothub.
 */
4856 hcd->speed = HCD_USB2;
4857 hcd->self.root_hub->speed = USB_SPEED_HIGH;
 /*
 * The USB 2.0 roothub under xHCI has an integrated TT
 * (rate matching hub), as opposed to having an OHCI/UHCI
 * companion controller.
 */
4863 hcd->has_tt = 1;
4864 } else {
4865 if (xhci->sbrn == 0x31) {
4866 xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n");
4867 hcd->speed = HCD_USB31;
4868 hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
4869 }
 /* The xHCI private pointer was set in xhci_pci_probe for the second
 * registered roothub.
 */
4873 return 0;
4874 }
4875
4876 mutex_init(&xhci->mutex);
4877 xhci->cap_regs = hcd->regs;
4878 xhci->op_regs = hcd->regs +
4879 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
4880 xhci->run_regs = hcd->regs +
4881 (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
4882
4883 xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
4884 xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
4885 xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
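 /* The upper 16 bits of the capability register hold the interface
 * version, so hcc_params is used as scratch space here before it is
 * loaded with the real HCCPARAMS value.
 */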
4886 xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase);
4887 xhci->hci_version = HC_VERSION(xhci->hcc_params);
4888 xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
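 /* HCCPARAMS2 is only present on xHCI 1.1 and newer controllers */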
4889 if (xhci->hci_version > 0x100)
4890 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
4891 xhci_print_registers(xhci);
4892
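 /* Fold in quirks requested via the module parameter before asking
 * the platform code for its own.
 */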
4893 xhci->quirks |= quirks;
4894
4895 get_quirks(dev, xhci);
4896

 /* Controllers following the xHCI 1.0 spec may give a spurious success
 * event after a short transfer; this quirk ignores such events.
 */
4901 if (xhci->hci_version > 0x96)
4902 xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
4903
 /* Make sure the HC is halted. */
4905 retval = xhci_halt(xhci);
4906 if (retval)
4907 return retval;
4908
4909 xhci_dbg(xhci, "Resetting HCD\n");
4910
4911 retval = xhci_reset(xhci);
4912 if (retval)
4913 return retval;
4914 xhci_dbg(xhci, "Reset complete\n");
4915
 /* On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
 * of HCCPARAMS is set even though the controller doesn't actually
 * support 64-bit address pointers. Clearing the bit here makes the
 * 32-bit DMA mask branch below take effect.
 */
4923 if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
4924 xhci->hcc_params &= ~BIT(0);
4925
 /* Set dma_mask and coherent_dma_mask to 64-bits,
 * if the xHC supports 64-bit addressing */
4928 if (HCC_64BIT_ADDR(xhci->hcc_params) &&
4929 !dma_set_mask(dev, DMA_BIT_MASK(64))) {
4930 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
4931 dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
4932 } else {
 /*
 * This is to avoid error in cases where a 32-bit USB
 * controller is used on a 64-bit capable system.
 */
4937 retval = dma_set_mask(dev, DMA_BIT_MASK(32));
4938 if (retval)
4939 return retval;
4940 xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
4941 dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
4942 }
4943
4944 xhci_dbg(xhci, "Calling HCD init\n");
 /* Initialize HCD and host controller data structures. */
4946 retval = xhci_init(hcd);
4947 if (retval)
4948 return retval;
4949 xhci_dbg(xhci, "Called HCD init\n");
4950
4951 xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%08x\n",
4952 xhci->hcc_params, xhci->hci_version, xhci->quirks);
4953
4954 return 0;
4955}
4956EXPORT_SYMBOL_GPL(xhci_gen_setup);
4957
4958static const struct hc_driver xhci_hc_driver = {
4959 .description = "xhci-hcd",
4960 .product_desc = "xHCI Host Controller",
4961 .hcd_priv_size = sizeof(struct xhci_hcd),

 /*
 * generic hardware linkage
 */
4966 .irq = xhci_irq,
4967 .flags = HCD_MEMORY | HCD_USB3 | HCD_SHARED,
4968
 /*
 * basic lifecycle operations
 */
 .reset = NULL, /* set in xhci_init_driver() */
4973 .start = xhci_run,
4974 .stop = xhci_stop,
4975 .shutdown = xhci_shutdown,
4976
 /*
 * managing i/o requests and associated device resources
 */
4980 .urb_enqueue = xhci_urb_enqueue,
4981 .urb_dequeue = xhci_urb_dequeue,
4982 .alloc_dev = xhci_alloc_dev,
4983 .free_dev = xhci_free_dev,
4984 .alloc_streams = xhci_alloc_streams,
4985 .free_streams = xhci_free_streams,
4986 .add_endpoint = xhci_add_endpoint,
4987 .drop_endpoint = xhci_drop_endpoint,
4988 .endpoint_reset = xhci_endpoint_reset,
4989 .check_bandwidth = xhci_check_bandwidth,
4990 .reset_bandwidth = xhci_reset_bandwidth,
4991 .address_device = xhci_address_device,
4992 .enable_device = xhci_enable_device,
4993 .update_hub_device = xhci_update_hub_device,
4994 .reset_device = xhci_discover_or_reset_device,

 /*
 * scheduling support
 */
4999 .get_frame_number = xhci_get_frame,

 /*
 * root hub support
 */
5004 .hub_control = xhci_hub_control,
5005 .hub_status_data = xhci_hub_status_data,
5006 .bus_suspend = xhci_bus_suspend,
5007 .bus_resume = xhci_bus_resume,

 /*
 * call back when device connected and addressed
 */
5012 .update_device = xhci_update_device,
5013 .set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm,
5014 .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout,
5015 .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout,
5016 .find_raw_port_number = xhci_find_raw_port_number,
5017};
5018
5019void xhci_init_driver(struct hc_driver *drv,
5020 const struct xhci_driver_overrides *over)
5021{
 BUG_ON(!over);

 /* Start from the generic xHCI driver and apply the overrides */
 *drv = xhci_hc_driver;

 drv->hcd_priv_size += over->extra_priv_size;
 if (over->reset)
 drv->reset = over->reset;
 if (over->start)
 drv->start = over->start;
5034}
5035EXPORT_SYMBOL_GPL(xhci_init_driver);
5036
5037MODULE_DESCRIPTION(DRIVER_DESC);
5038MODULE_AUTHOR(DRIVER_AUTHOR);
5039MODULE_LICENSE("GPL");
5040
5041static int __init xhci_hcd_init(void)
5042{
 /*
 * Check the compiler-generated sizes of structures that must be laid
 * out in specific ways for hardware access.
 */
5047 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
5048 BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
5049 BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
 /* xhci_device_control has eight fields, and also
 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx.
 */
5053 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
5054 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
5055 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
5056 BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
5057 BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
 /* xhci_run_regs has eight fields and embeds 128 xhci_intr_reg */
5059 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
5060
5061 if (usb_disabled())
5062 return -ENODEV;
5063
5064 return 0;
5065}
5066
/*
 * If an init function is provided, an exit function must also be provided
 * to allow module unload.
 */
5071static void __exit xhci_hcd_fini(void) { }
5072
5073module_init(xhci_hcd_init);
5074module_exit(xhci_hcd_fini);
5075