// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

#include <linux/pci.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"
#include "xhci-debugfs.h"
#include "xhci-dbgcap.h"
26
#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned long long quirks;
module_param(quirks, ullong, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
40
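/*
 * Return true if the TD's start segment is part of the given ring; used to
 * detect TDs whose ring has been freed or reallocated since they were queued.
 */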
41static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
42{
43 struct xhci_segment *seg = ring->first_seg;
44
45 if (!td || !td->start_seg)
46 return false;
47 do {
48 if (seg == td->start_seg)
49 return true;
50 seg = seg->next;
51 } while (seg && seg != ring->first_seg);
52
53 return false;
54}
55
/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  If the register reads back as all ones, the host
 * controller is assumed to be gone and -ENODEV is returned.
 */
69int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
70{
71 u32 result;
72 int ret;
73
74 ret = readl_poll_timeout_atomic(ptr, result,
75 (result & mask) == done ||
76 result == U32_MAX,
77 1, usec);
78 if (result == U32_MAX)
79 return -ENODEV;
80
81 return ret;
82}
83
/*
 * Disable interrupts and begin the xHCI halting process.
 */
87void xhci_quiesce(struct xhci_hcd *xhci)
88{
89 u32 halted;
90 u32 cmd;
91 u32 mask;
92
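	/*
	 * Mask off the interrupt enable bits; additionally clear the Run/Stop
	 * bit if the controller is not already halted.
	 */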
93 mask = ~(XHCI_IRQS);
94 halted = readl(&xhci->op_regs->status) & STS_HALT;
95 if (!halted)
96 mask &= ~CMD_RUN;
97
98 cmd = readl(&xhci->op_regs->command);
99 cmd &= mask;
100 writel(cmd, &xhci->op_regs->command);
101}
102
/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete the halt and set the HCHalted bit in the status
 * register without causing an interrupt.
 */
111int xhci_halt(struct xhci_hcd *xhci)
112{
113 int ret;
114 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
115 xhci_quiesce(xhci);
116
117 ret = xhci_handshake(&xhci->op_regs->status,
118 STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
119 if (ret) {
120 xhci_warn(xhci, "Host halt failed, %d\n", ret);
121 return ret;
122 }
123 xhci->xhc_state |= XHCI_STATE_HALTED;
124 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
125 return ret;
126}
127
/*
 * Set the run bit and wait for the host to be running.
 */
131int xhci_start(struct xhci_hcd *xhci)
132{
133 u32 temp;
134 int ret;
135
136 temp = readl(&xhci->op_regs->command);
137 temp |= (CMD_RUN);
138 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
139 temp);
140 writel(temp, &xhci->op_regs->command);
141
	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
146 ret = xhci_handshake(&xhci->op_regs->status,
147 STS_HALT, 0, XHCI_MAX_HALT_USEC);
148 if (ret == -ETIMEDOUT)
149 xhci_err(xhci, "Host took too long to start, "
150 "waited %u microseconds.\n",
151 XHCI_MAX_HALT_USEC);
152 if (!ret)
153
154 xhci->xhc_state = 0;
155
156 return ret;
157}
158
/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
166int xhci_reset(struct xhci_hcd *xhci)
167{
168 u32 command;
169 u32 state;
170 int ret;
171
172 state = readl(&xhci->op_regs->status);
173
174 if (state == ~(u32)0) {
175 xhci_warn(xhci, "Host not accessible, reset failed.\n");
176 return -ENODEV;
177 }
178
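	/* The controller must be halted before it is reset; warn and bail out otherwise */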
179 if ((state & STS_HALT) == 0) {
180 xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
181 return 0;
182 }
183
184 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
185 command = readl(&xhci->op_regs->command);
186 command |= CMD_RESET;
187 writel(command, &xhci->op_regs->command);
	/*
	 * Existing Intel xHCI controllers require a delay of 1 ms after
	 * setting the CMD_RESET bit, and before accessing any registers,
	 * otherwise the controller may not behave correctly.
	 */
196 if (xhci->quirks & XHCI_INTEL_HOST)
197 udelay(1000);
198
199 ret = xhci_handshake(&xhci->op_regs->command,
200 CMD_RESET, 0, 10 * 1000 * 1000);
201 if (ret)
202 return ret;
203
204 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
205 usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));
206
207 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
208 "Wait for controller to be ready for doorbell rings");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
213 ret = xhci_handshake(&xhci->op_regs->status,
214 STS_CNR, 0, 10 * 1000 * 1000);
215
216 xhci->usb2_rhub.bus_state.port_c_suspend = 0;
217 xhci->usb2_rhub.bus_state.suspended_ports = 0;
218 xhci->usb2_rhub.bus_state.resuming_ports = 0;
219 xhci->usb3_rhub.bus_state.port_c_suspend = 0;
220 xhci->usb3_rhub.bus_state.suspended_ports = 0;
221 xhci->usb3_rhub.bus_state.resuming_ports = 0;
222
223 return ret;
224}
225
226static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
227{
228 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
229 int err, i;
230 u64 val;
231
	/*
	 * Some Renesas controllers get into a weird state if they are
	 * reset while programmed with 64bit addresses (they will preserve
	 * the top half of the address in internal, non visible
	 * registers). You end up with half the address coming from the
	 * kernel, and the other half coming from the firmware. Also,
	 * changing the programming leads to extra accesses even if the
	 * controller is supposed to be halted. The controller ends up with
	 * a fatal fault, and is then ripe for being properly reset.
	 *
	 * Special care is taken to only apply this if the device is
	 * behind an iommu. Doing anything when there is no iommu is
	 * definitely unsafe.
	 */
246 if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev))
247 return;
248
249 xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");

	/* Clear HSEIE so that faults do not get signaled */
252 val = readl(&xhci->op_regs->command);
253 val &= ~CMD_HSEIE;
254 writel(val, &xhci->op_regs->command);
255
256
257 val = readl(&xhci->op_regs->status);
258 val |= STS_FATAL;
259 writel(val, &xhci->op_regs->status);
260
261
262 val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
263 if (upper_32_bits(val))
264 xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
265 val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
266 if (upper_32_bits(val))
267 xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
268
269 for (i = 0; i < HCS_MAX_INTRS(xhci->hcs_params1); i++) {
270 struct xhci_intr_reg __iomem *ir;
271
272 ir = &xhci->run_regs->ir_set[i];
273 val = xhci_read_64(xhci, &ir->erst_base);
274 if (upper_32_bits(val))
275 xhci_write_64(xhci, 0, &ir->erst_base);
		val = xhci_read_64(xhci, &ir->erst_dequeue);
277 if (upper_32_bits(val))
278 xhci_write_64(xhci, 0, &ir->erst_dequeue);
279 }

	/* Wait for the fault to appear. It will be cleared on reset */
282 err = xhci_handshake(&xhci->op_regs->status,
283 STS_FATAL, STS_FATAL,
284 XHCI_MAX_HALT_USEC);
285 if (!err)
286 xhci_info(xhci, "Fault detected\n");
287}
288
289#ifdef CONFIG_USB_PCI
/*
 * Set up MSI
 */
293static int xhci_setup_msi(struct xhci_hcd *xhci)
294{
295 int ret;
296
297
298
299 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
300
301 ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
302 if (ret < 0) {
303 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
304 "failed to allocate MSI entry");
305 return ret;
306 }
307
308 ret = request_irq(pdev->irq, xhci_msi_irq,
309 0, "xhci_hcd", xhci_to_hcd(xhci));
310 if (ret) {
311 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
312 "disable MSI interrupt");
313 pci_free_irq_vectors(pdev);
314 }
315
316 return ret;
317}
318
/*
 * Set up MSI-X
 */
322static int xhci_setup_msix(struct xhci_hcd *xhci)
323{
324 int i, ret = 0;
325 struct usb_hcd *hcd = xhci_to_hcd(xhci);
326 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors to use:
	 * - HCS_MAX_INTRS: the max number of interrupters the host supports,
	 *   based on the xhci HCSPARAMS1 register.
	 * - num_online_cpus: one MSI-X vector per CPU core, plus one
	 *   additional vector to ensure an interrupt is always available.
	 */
335 xhci->msix_count = min(num_online_cpus() + 1,
336 HCS_MAX_INTRS(xhci->hcs_params1));
337
338 ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count,
339 PCI_IRQ_MSIX);
340 if (ret < 0) {
341 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
342 "Failed to enable MSI-X");
343 return ret;
344 }
345
346 for (i = 0; i < xhci->msix_count; i++) {
347 ret = request_irq(pci_irq_vector(pdev, i), xhci_msi_irq, 0,
348 "xhci_hcd", xhci_to_hcd(xhci));
349 if (ret)
350 goto disable_msix;
351 }
352
353 hcd->msix_enabled = 1;
354 return ret;
355
356disable_msix:
357 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
358 while (--i >= 0)
359 free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
360 pci_free_irq_vectors(pdev);
361 return ret;
362}
363
364
365static void xhci_cleanup_msix(struct xhci_hcd *xhci)
366{
367 struct usb_hcd *hcd = xhci_to_hcd(xhci);
368 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
369
370 if (xhci->quirks & XHCI_PLAT)
371 return;
372
373
374 if (hcd->irq > 0)
375 return;
376
377 if (hcd->msix_enabled) {
378 int i;
379
380 for (i = 0; i < xhci->msix_count; i++)
381 free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
382 } else {
383 free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci));
384 }
385
386 pci_free_irq_vectors(pdev);
387 hcd->msix_enabled = 0;
388}
389
390static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
391{
392 struct usb_hcd *hcd = xhci_to_hcd(xhci);
393
394 if (hcd->msix_enabled) {
395 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
396 int i;
397
398 for (i = 0; i < xhci->msix_count; i++)
399 synchronize_irq(pci_irq_vector(pdev, i));
400 }
401}
402
403static int xhci_try_enable_msi(struct usb_hcd *hcd)
404{
405 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
406 struct pci_dev *pdev;
407 int ret;
408
409
410 if (xhci->quirks & XHCI_PLAT)
411 return 0;
412
413 pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
414
	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI.
	 */
418 if (xhci->quirks & XHCI_BROKEN_MSI)
419 goto legacy_irq;
420
421
422 if (hcd->irq)
423 free_irq(hcd->irq, hcd);
424 hcd->irq = 0;
425
426 ret = xhci_setup_msix(xhci);
427 if (ret)
428
429 ret = xhci_setup_msi(xhci);
430
431 if (!ret) {
432 hcd->msi_enabled = 1;
433 return 0;
434 }
435
436 if (!pdev->irq) {
437 xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
438 return -EINVAL;
439 }
440
441 legacy_irq:
442 if (!strlen(hcd->irq_descr))
443 snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
444 hcd->driver->description, hcd->self.busnum);
445
446
447 ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
448 hcd->irq_descr, hcd);
449 if (ret) {
450 xhci_err(xhci, "request interrupt %d failed\n",
451 pdev->irq);
452 return ret;
453 }
454 hcd->irq = pdev->irq;
455 return 0;
456}
457
458#else
459
460static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
461{
462 return 0;
463}
464
465static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
466{
467}
468
469static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
470{
471}
472
473#endif
474
475static void compliance_mode_recovery(struct timer_list *t)
476{
477 struct xhci_hcd *xhci;
478 struct usb_hcd *hcd;
479 struct xhci_hub *rhub;
480 u32 temp;
481 int i;
482
483 xhci = from_timer(xhci, t, comp_mode_recovery_timer);
484 rhub = &xhci->usb3_rhub;
485
486 for (i = 0; i < rhub->num_ports; i++) {
487 temp = readl(rhub->ports[i]->addr);
488 if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
493 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
494 "Compliance mode detected->port %d",
495 i + 1);
496 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
497 "Attempting compliance mode recovery");
498 hcd = xhci->shared_hcd;
499
500 if (hcd->state == HC_STATE_SUSPENDED)
501 usb_hcd_resume_root_hub(hcd);
502
503 usb_hcd_poll_rh_status(hcd);
504 }
505 }
506
507 if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
508 mod_timer(&xhci->comp_mode_recovery_timer,
509 jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
510}

/*
 * Quirk to work around an issue seen with the SN65LVPE502CP USB3.0 re-driver,
 * which causes ports behind it to enter compliance mode.  Set up a timer that
 * polls every COMP_MODE_RCVRY_MSECS; if a USB3 root port is found in
 * compliance mode, wake and poll the root hub so the hub driver issues a warm
 * reset to recover the port.  The timer is re-armed until every port has been
 * seen in U0 (see xhci_all_ports_seen_u0()).
 */
522static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
523{
524 xhci->port_status_u0 = 0;
525 timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
526 0);
527 xhci->comp_mode_recovery_timer.expires = jiffies +
528 msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
529
530 add_timer(&xhci->comp_mode_recovery_timer);
531 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
532 "Compliance mode recovery timer initialized");
533}
534
/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620, Z820 and Z1 Workstation
 */
541static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
542{
543 const char *dmi_product_name, *dmi_sys_vendor;
544
545 dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
546 dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
547 if (!dmi_product_name || !dmi_sys_vendor)
548 return false;
549
550 if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
551 return false;
552
553 if (strstr(dmi_product_name, "Z420") ||
554 strstr(dmi_product_name, "Z620") ||
555 strstr(dmi_product_name, "Z820") ||
556 strstr(dmi_product_name, "Z1 Workstation"))
557 return true;
558
559 return false;
560}
561
562static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
563{
564 return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
565}
566

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device contexts array, create
 * device contexts, set up a command ring segment, and create the event ring
 * (one for now).
 */
575static int xhci_init(struct usb_hcd *hcd)
576{
577 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
578 int retval = 0;
579
580 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
581 spin_lock_init(&xhci->lock);
582 if (xhci->hci_version == 0x95 && link_quirk) {
583 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
584 "QUIRK: Not clearing Link TRB chain bits.");
585 xhci->quirks |= XHCI_LINK_TRB_QUIRK;
586 } else {
587 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
588 "xHCI doesn't need link TRB QUIRK");
589 }
590 retval = xhci_mem_init(xhci, GFP_KERNEL);
591 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
592
593
594 if (xhci_compliance_mode_recovery_timer_quirk_check()) {
595 xhci->quirks |= XHCI_COMP_MODE_QUIRK;
596 compliance_mode_recovery_timer_init(xhci);
597 }
598
599 return retval;
600}
601
/*
 * Called for the shared (USB3) roothub's hcd: actually start the xHC running,
 * once both hcds have been set up by xhci_run().
 */
605static int xhci_run_finished(struct xhci_hcd *xhci)
606{
607 if (xhci_start(xhci)) {
608 xhci_halt(xhci);
609 return -ENODEV;
610 }
611 xhci->shared_hcd->state = HC_STATE_RUNNING;
612 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
613
614 if (xhci->quirks & XHCI_NEC_HOST)
615 xhci_ring_cmd_db(xhci);
616
617 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
618 "Finished xhci_run for USB3 roothub");
619 return 0;
620}
621
/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
634int xhci_run(struct usb_hcd *hcd)
635{
636 u32 temp;
637 u64 temp_64;
638 int ret;
639 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
640
	/*
	 * Start the xHCI host controller running only after the USB 2.0
	 * roothub is setup.
	 */
645 hcd->uses_new_polling = 1;
646 if (!usb_hcd_is_primary_hcd(hcd))
647 return xhci_run_finished(xhci);
648
649 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");
650
651 ret = xhci_try_enable_msi(hcd);
652 if (ret)
653 return ret;
654
655 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
656 temp_64 &= ~ERST_PTR_MASK;
657 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
658 "ERST deq = 64'h%0lx", (long unsigned int) temp_64);
659
660 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
661 "// Set the interrupt modulation register");
662 temp = readl(&xhci->ir_set->irq_control);
663 temp &= ~ER_IRQ_INTERVAL_MASK;
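	/* imod_interval is given in ns; the IMOD interval field counts 250 ns ticks */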
664 temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
665 writel(temp, &xhci->ir_set->irq_control);
666
667
668 temp = readl(&xhci->op_regs->command);
669 temp |= (CMD_EIE);
670 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
671 "// Enable interrupts, cmd = 0x%x.", temp);
672 writel(temp, &xhci->op_regs->command);
673
674 temp = readl(&xhci->ir_set->irq_pending);
675 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
676 "// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
677 xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
678 writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
679
680 if (xhci->quirks & XHCI_NEC_HOST) {
681 struct xhci_command *command;
682
683 command = xhci_alloc_command(xhci, false, GFP_KERNEL);
684 if (!command)
685 return -ENOMEM;
686
687 ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
688 TRB_TYPE(TRB_NEC_GET_FW));
689 if (ret)
690 xhci_free_command(xhci, command);
691 }
692 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
693 "Finished xhci_run for USB2 roothub");
694
695 xhci_dbc_init(xhci);
696
697 xhci_debugfs_init(xhci);
698
699 return 0;
700}
701EXPORT_SYMBOL_GPL(xhci_run);
702
/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
712static void xhci_stop(struct usb_hcd *hcd)
713{
714 u32 temp;
715 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
716
717 mutex_lock(&xhci->mutex);
718
719
720 if (!usb_hcd_is_primary_hcd(hcd)) {
721 mutex_unlock(&xhci->mutex);
722 return;
723 }
724
725 xhci_dbc_exit(xhci);
726
727 spin_lock_irq(&xhci->lock);
728 xhci->xhc_state |= XHCI_STATE_HALTED;
729 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
730 xhci_halt(xhci);
731 xhci_reset(xhci);
732 spin_unlock_irq(&xhci->lock);
733
734 xhci_cleanup_msix(xhci);
735
736
737 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
738 (!(xhci_all_ports_seen_u0(xhci)))) {
739 del_timer_sync(&xhci->comp_mode_recovery_timer);
740 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
741 "%s: compliance mode recovery timer deleted",
742 __func__);
743 }
744
745 if (xhci->quirks & XHCI_AMD_PLL_FIX)
746 usb_amd_dev_put();
747
748 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
749 "// Disabling event ring interrupts");
750 temp = readl(&xhci->op_regs->status);
751 writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
752 temp = readl(&xhci->ir_set->irq_pending);
753 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
754
755 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
756 xhci_mem_cleanup(xhci);
757 xhci_debugfs_exit(xhci);
758 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
759 "xhci_stop completed - status = %x",
760 readl(&xhci->op_regs->status));
761 mutex_unlock(&xhci->mutex);
762}
763
/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
773static void xhci_shutdown(struct usb_hcd *hcd)
774{
775 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
776
777 if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
778 usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
779
780 spin_lock_irq(&xhci->lock);
781 xhci_halt(xhci);
782
783 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
784 xhci_reset(xhci);
785 spin_unlock_irq(&xhci->lock);
786
787 xhci_cleanup_msix(xhci);
788
789 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
790 "xhci_shutdown completed - status = %x",
791 readl(&xhci->op_regs->status));
792
793
794 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
795 pci_set_power_state(to_pci_dev(hcd->self.sysdev), PCI_D3hot);
796}
797
798#ifdef CONFIG_PM
799static void xhci_save_registers(struct xhci_hcd *xhci)
800{
801 xhci->s3.command = readl(&xhci->op_regs->command);
802 xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
803 xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
804 xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
805 xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
806 xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
807 xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
808 xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
809 xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
810}
811
812static void xhci_restore_registers(struct xhci_hcd *xhci)
813{
814 writel(xhci->s3.command, &xhci->op_regs->command);
815 writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
816 xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
817 writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
818 writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
819 xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
820 xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
821 writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
822 writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
823}
824
825static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
826{
827 u64 val_64;
828
829
830 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
831 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
832 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
833 xhci->cmd_ring->dequeue) &
834 (u64) ~CMD_RING_RSVD_BITS) |
835 xhci->cmd_ring->cycle_state;
836 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
837 "// Setting command ring address to 0x%llx",
838 (long unsigned long) val_64);
839 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
840}
841
/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer across suspend, so we need
 * to re-program it on resume.  The pointer must be 64-byte aligned, so we
 * can't simply restore the old dequeue position; instead the whole ring is
 * cleared and the enqueue/dequeue pointers and cycle state are reset.
 */
851static void xhci_clear_command_ring(struct xhci_hcd *xhci)
852{
853 struct xhci_ring *ring;
854 struct xhci_segment *seg;
855
856 ring = xhci->cmd_ring;
857 seg = ring->deq_seg;
858 do {
859 memset(seg->trbs, 0,
860 sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
861 seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
862 cpu_to_le32(~TRB_CYCLE);
863 seg = seg->next;
864 } while (seg != ring->deq_seg);
865
866
867 ring->deq_seg = ring->first_seg;
868 ring->dequeue = ring->first_seg->trbs;
869 ring->enq_seg = ring->deq_seg;
870 ring->enqueue = ring->dequeue;
871
872 ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;

	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
877 ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
886 xhci_set_cmd_ring_deq(xhci);
887}
888
889static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
890{
891 struct xhci_port **ports;
892 int port_index;
893 unsigned long flags;
894 u32 t1, t2, portsc;
895
896 spin_lock_irqsave(&xhci->lock, flags);
897
898
899 port_index = xhci->usb3_rhub.num_ports;
900 ports = xhci->usb3_rhub.ports;
901 while (port_index--) {
902 t1 = readl(ports[port_index]->addr);
903 portsc = t1;
904 t1 = xhci_port_state_to_neutral(t1);
905 t2 = t1 & ~PORT_WAKE_BITS;
906 if (t1 != t2) {
907 writel(t2, ports[port_index]->addr);
908 xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n",
909 xhci->usb3_rhub.hcd->self.busnum,
910 port_index + 1, portsc, t2);
911 }
912 }
913
914
915 port_index = xhci->usb2_rhub.num_ports;
916 ports = xhci->usb2_rhub.ports;
917 while (port_index--) {
918 t1 = readl(ports[port_index]->addr);
919 portsc = t1;
920 t1 = xhci_port_state_to_neutral(t1);
921 t2 = t1 & ~PORT_WAKE_BITS;
922 if (t1 != t2) {
923 writel(t2, ports[port_index]->addr);
924 xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n",
925 xhci->usb2_rhub.hcd->self.busnum,
926 port_index + 1, portsc, t2);
927 }
928 }
929 spin_unlock_irqrestore(&xhci->lock, flags);
930}
931
932static bool xhci_pending_portevent(struct xhci_hcd *xhci)
933{
934 struct xhci_port **ports;
935 int port_index;
936 u32 status;
937 u32 portsc;
938
939 status = readl(&xhci->op_regs->status);
940 if (status & STS_EINT)
941 return true;
	/*
	 * Checking STS_EINT is not enough as there is a lag between a change
	 * bit being set and the Port Status Change Event that it generated
	 * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
	 */

948 port_index = xhci->usb2_rhub.num_ports;
949 ports = xhci->usb2_rhub.ports;
950 while (port_index--) {
951 portsc = readl(ports[port_index]->addr);
952 if (portsc & PORT_CHANGE_MASK ||
953 (portsc & PORT_PLS_MASK) == XDEV_RESUME)
954 return true;
955 }
956 port_index = xhci->usb3_rhub.num_ports;
957 ports = xhci->usb3_rhub.ports;
958 while (port_index--) {
959 portsc = readl(ports[port_index]->addr);
960 if (portsc & PORT_CHANGE_MASK ||
961 (portsc & PORT_PLS_MASK) == XDEV_RESUME)
962 return true;
963 }
964 return false;
965}
966
/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 *
 */
973int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
974{
975 int rc = 0;
976 unsigned int delay = XHCI_MAX_HALT_USEC;
977 struct usb_hcd *hcd = xhci_to_hcd(xhci);
978 u32 command;
979 u32 res;
980
981 if (!hcd->state)
982 return 0;
983
984 if (hcd->state != HC_STATE_SUSPENDED ||
985 xhci->shared_hcd->state != HC_STATE_SUSPENDED)
986 return -EINVAL;
987
988 xhci_dbc_suspend(xhci);
989
990
991 if (!do_wakeup)
992 xhci_disable_port_wake_on_bits(xhci);
993
994
995 xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
996 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
997 del_timer_sync(&hcd->rh_timer);
998 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
999 del_timer_sync(&xhci->shared_hcd->rh_timer);
1000
1001 if (xhci->quirks & XHCI_SUSPEND_DELAY)
1002 usleep_range(1000, 1500);
1003
1004 spin_lock_irq(&xhci->lock);
1005 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
1006 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
1007
	/* step 1: stop endpoint */
	/* skipped assuming that port suspend has done */

	/* step 2: clear Run/Stop bit */
1011 command = readl(&xhci->op_regs->command);
1012 command &= ~CMD_RUN;
1013 writel(command, &xhci->op_regs->command);
1014
1015
1016 delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
1017
1018 if (xhci_handshake(&xhci->op_regs->status,
1019 STS_HALT, STS_HALT, delay)) {
1020 xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
1021 spin_unlock_irq(&xhci->lock);
1022 return -ETIMEDOUT;
1023 }
1024 xhci_clear_command_ring(xhci);
1025
1026
1027 xhci_save_registers(xhci);
1028
1029
1030 command = readl(&xhci->op_regs->command);
1031 command |= CMD_CSS;
1032 writel(command, &xhci->op_regs->command);
1033 xhci->broken_suspend = 0;
1034 if (xhci_handshake(&xhci->op_regs->status,
1035 STS_SAVE, 0, 10 * 1000)) {
		/*
		 * AMD SNPS xHC 3.0 occasionally does not clear the
		 * SSS bit of USBSTS and when driver tries to poll
		 * to see if the xHC clears BIT(8) which never happens
		 * and driver assumes that controller is not responding
		 * and times out. To workaround this, it's good to check
		 * if SRE and HCE bits are not set (as per xhci
		 * Section 5.4.2) and bypass the timeout.
		 */
1045 res = readl(&xhci->op_regs->status);
1046 if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
1047 (((res & STS_SRE) == 0) &&
1048 ((res & STS_HCE) == 0))) {
1049 xhci->broken_suspend = 1;
1050 } else {
1051 xhci_warn(xhci, "WARN: xHC save state timeout\n");
1052 spin_unlock_irq(&xhci->lock);
1053 return -ETIMEDOUT;
1054 }
1055 }
1056 spin_unlock_irq(&xhci->lock);
1057
	/*
	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
1062 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
1063 (!(xhci_all_ports_seen_u0(xhci)))) {
1064 del_timer_sync(&xhci->comp_mode_recovery_timer);
1065 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1066 "%s: compliance mode recovery timer deleted",
1067 __func__);
1068 }
1069
1070
1071
1072 xhci_msix_sync_irqs(xhci);
1073
1074 return rc;
1075}
1076EXPORT_SYMBOL_GPL(xhci_suspend);
1077
/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions from S3/S4 mode.
 *
 */
1084int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
1085{
1086 u32 command, temp = 0;
1087 struct usb_hcd *hcd = xhci_to_hcd(xhci);
1088 struct usb_hcd *secondary_hcd;
1089 int retval = 0;
1090 bool comp_timer_running = false;
1091
1092 if (!hcd->state)
1093 return 0;
1094
	/* Wait a bit if either of the roothubs need to settle from the
	 * transition into bus suspend.
	 */
1099 if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
1100 time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
1101 msleep(100);
1102
1103 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
1104 set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
1105
1106 spin_lock_irq(&xhci->lock);
1107 if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
1108 hibernated = true;
1109
1110 if (!hibernated) {
1111
1112 xhci_restore_registers(xhci);
1113
1114 xhci_set_cmd_ring_deq(xhci);
1115
1116
1117 command = readl(&xhci->op_regs->command);
1118 command |= CMD_CRS;
1119 writel(command, &xhci->op_regs->command);
		/*
		 * Some controllers take up to 55+ ms to complete the controller
		 * restore, so set the timeout to 100 ms. The xHCI specification
		 * doesn't mention any timeout value.
		 */
1125 if (xhci_handshake(&xhci->op_regs->status,
1126 STS_RESTORE, 0, 100 * 1000)) {
1127 xhci_warn(xhci, "WARN: xHC restore state timeout\n");
1128 spin_unlock_irq(&xhci->lock);
1129 return -ETIMEDOUT;
1130 }
1131 temp = readl(&xhci->op_regs->status);
1132 }
1133
1134
1135 if ((temp & STS_SRE) || hibernated) {
1136
1137 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
1138 !(xhci_all_ports_seen_u0(xhci))) {
1139 del_timer_sync(&xhci->comp_mode_recovery_timer);
1140 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1141 "Compliance Mode Recovery Timer deleted!");
1142 }
1143
1144
1145 usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
1146 usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
1147
1148 xhci_dbg(xhci, "Stop HCD\n");
1149 xhci_halt(xhci);
1150 xhci_zero_64b_regs(xhci);
1151 xhci_reset(xhci);
1152 spin_unlock_irq(&xhci->lock);
1153 xhci_cleanup_msix(xhci);
1154
1155 xhci_dbg(xhci, "// Disabling event ring interrupts\n");
1156 temp = readl(&xhci->op_regs->status);
1157 writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
1158 temp = readl(&xhci->ir_set->irq_pending);
1159 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
1160
1161 xhci_dbg(xhci, "cleaning up memory\n");
1162 xhci_mem_cleanup(xhci);
1163 xhci_debugfs_exit(xhci);
1164 xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
1165 readl(&xhci->op_regs->status));
1166
		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
1171 if (!usb_hcd_is_primary_hcd(hcd))
1172 secondary_hcd = hcd;
1173 else
1174 secondary_hcd = xhci->shared_hcd;
1175
1176 xhci_dbg(xhci, "Initialize the xhci_hcd\n");
1177 retval = xhci_init(hcd->primary_hcd);
1178 if (retval)
1179 return retval;
1180 comp_timer_running = true;
1181
1182 xhci_dbg(xhci, "Start the primary HCD\n");
1183 retval = xhci_run(hcd->primary_hcd);
1184 if (!retval) {
1185 xhci_dbg(xhci, "Start the secondary HCD\n");
1186 retval = xhci_run(secondary_hcd);
1187 }
1188 hcd->state = HC_STATE_SUSPENDED;
1189 xhci->shared_hcd->state = HC_STATE_SUSPENDED;
1190 goto done;
1191 }
1192
1193
1194 command = readl(&xhci->op_regs->command);
1195 command |= CMD_RUN;
1196 writel(command, &xhci->op_regs->command);
1197 xhci_handshake(&xhci->op_regs->status, STS_HALT,
1198 0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints.  Ring the doorbell for each endpoint.
	 */
	/* this is done in bus_resume */
1209 spin_unlock_irq(&xhci->lock);
1210
1211 xhci_dbc_resume(xhci);
1212
1213 done:
1214 if (retval == 0) {
1215
1216 if (xhci_pending_portevent(xhci)) {
1217 usb_hcd_resume_root_hub(xhci->shared_hcd);
1218 usb_hcd_resume_root_hub(hcd);
1219 }
1220 }
1221
	/*
	 * If the system is subject to the quirk, the Compliance Mode Timer
	 * needs to be re-initialized after every system resume, since the
	 * ports may suffer the compliance mode issue again (the timer was
	 * deleted on suspend unless it was still running).
	 */
1228 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
1229 compliance_mode_recovery_timer_init(xhci);
1230
1231 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
1232 usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));
1233
1234
1235 xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1236 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
1237 usb_hcd_poll_rh_status(xhci->shared_hcd);
1238 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1239 usb_hcd_poll_rh_status(hcd);
1240
1241 return retval;
1242}
1243EXPORT_SYMBOL_GPL(xhci_resume);
1244#endif
1245
/*
 * Bypass the DMA mapping if the URB is suitable for Immediate Data Transfer
 * (IDT): the actual data is copied into the TRB address register instead.
 * This is limited to transfers up to 8 bytes on output endpoints of any kind
 * with wMaxPacketSize >= 8 bytes.
 */
1254static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
1255 gfp_t mem_flags)
1256{
1257 if (xhci_urb_suitable_for_idt(urb))
1258 return 0;
1259
1260 return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
1261}
1262
/**
 * xhci_get_endpoint_index - Find the index for an endpoint given its
 * descriptor.  Use the return value to right shift 1 for the bitmask.
 *
 * Index  = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 *
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
1273unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
1274{
1275 unsigned int index;
1276 if (usb_endpoint_xfer_control(desc))
1277 index = (unsigned int) (usb_endpoint_num(desc)*2);
1278 else
1279 index = (unsigned int) (usb_endpoint_num(desc)*2) +
1280 (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
1281 return index;
1282}
1283
/* The reverse operation to xhci_get_endpoint_index.  Calculate the USB
 * endpoint address from the xHCI endpoint index.
 */
1287unsigned int xhci_get_endpoint_address(unsigned int ep_index)
1288{
1289 unsigned int number = DIV_ROUND_UP(ep_index, 2);
1290 unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
1291 return direction | number;
1292}
1293
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0
 * is bit 1, etc.
 */
1298static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
1299{
1300 return 1 << (xhci_get_endpoint_index(desc) + 1);
1301}
1302
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0
 * is bit 1, etc.
 */
1307static unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
1308{
1309 return 1 << (ep_index + 1);
1310}
1311
/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, and the last valid context index is 4 - 1 = 3.
 */
1318unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
1319{
1320 return fls(added_ctxs) - 1;
1321}
1322
/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
1326static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
1327 struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
1328 const char *func) {
1329 struct xhci_hcd *xhci;
1330 struct xhci_virt_device *virt_dev;
1331
1332 if (!hcd || (check_ep && !ep) || !udev) {
1333 pr_debug("xHCI %s called with invalid args\n", func);
1334 return -EINVAL;
1335 }
1336 if (!udev->parent) {
1337 pr_debug("xHCI %s called for root hub\n", func);
1338 return 0;
1339 }
1340
1341 xhci = hcd_to_xhci(hcd);
1342 if (check_virt_dev) {
1343 if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
1344 xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
1345 func);
1346 return -EINVAL;
1347 }
1348
1349 virt_dev = xhci->devs[udev->slot_id];
1350 if (virt_dev->udev != udev) {
1351 xhci_dbg(xhci, "xHCI %s called with udev and "
1352 "virt_dev does not match\n", func);
1353 return -EINVAL;
1354 }
1355 }
1356
1357 if (xhci->xhc_state & XHCI_STATE_HALTED)
1358 return -ENODEV;
1359
1360 return 1;
1361}
1362
1363static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1364 struct usb_device *udev, struct xhci_command *command,
1365 bool ctx_change, bool must_succeed);
1366
/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
1373static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
1374 unsigned int ep_index, struct urb *urb)
1375{
1376 struct xhci_container_ctx *out_ctx;
1377 struct xhci_input_control_ctx *ctrl_ctx;
1378 struct xhci_ep_ctx *ep_ctx;
1379 struct xhci_command *command;
1380 int max_packet_size;
1381 int hw_max_packet_size;
1382 int ret = 0;
1383
1384 out_ctx = xhci->devs[slot_id]->out_ctx;
1385 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1386 hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
1387 max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
1388 if (hw_max_packet_size != max_packet_size) {
1389 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1390 "Max Packet Size for ep 0 changed.");
1391 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1392 "Max packet size in usb_device = %d",
1393 max_packet_size);
1394 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1395 "Max packet size in xHCI HW = %d",
1396 hw_max_packet_size);
1397 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1398 "Issuing evaluate context command.");

		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
1405 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
1406 if (!command)
1407 return -ENOMEM;
1408
1409 command->in_ctx = xhci->devs[slot_id]->in_ctx;
1410 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
1411 if (!ctrl_ctx) {
1412 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1413 __func__);
1414 ret = -ENOMEM;
1415 goto command_cleanup;
1416 }
1417
1418 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
1419 xhci->devs[slot_id]->out_ctx, ep_index);
1420
1421 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
1422 ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
1423 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
1424
1425 ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
1426 ctrl_ctx->drop_flags = 0;
1427
1428 ret = xhci_configure_endpoint(xhci, urb->dev, command,
1429 true, false);
1430
		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
1434 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
1435command_cleanup:
1436 kfree(command->completion);
1437 kfree(command);
1438 }
1439 return ret;
1440}
1441
/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
1446static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1447{
1448 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1449 unsigned long flags;
1450 int ret = 0;
1451 unsigned int slot_id, ep_index;
1452 unsigned int *ep_state;
1453 struct urb_priv *urb_priv;
1454 int num_tds;
1455
1456 if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
1457 true, true, __func__) <= 0)
1458 return -EINVAL;
1459
1460 slot_id = urb->dev->slot_id;
1461 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1462 ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;
1463
1464 if (!HCD_HW_ACCESSIBLE(hcd)) {
1465 if (!in_interrupt())
1466 xhci_dbg(xhci, "urb submitted during PCI suspend\n");
1467 return -ESHUTDOWN;
1468 }
1469 if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
1470 xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
1471 return -ENODEV;
1472 }
1473
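	/*
	 * Isochronous URBs need one TD per packet; a bulk OUT URB whose length
	 * is a non-zero multiple of the endpoint's wMaxPacketSize and that has
	 * URB_ZERO_PACKET set needs an extra TD for the trailing zero-length
	 * packet.  Everything else uses a single TD.
	 */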
1474 if (usb_endpoint_xfer_isoc(&urb->ep->desc))
1475 num_tds = urb->number_of_packets;
1476 else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
1477 urb->transfer_buffer_length > 0 &&
1478 urb->transfer_flags & URB_ZERO_PACKET &&
1479 !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
1480 num_tds = 2;
1481 else
1482 num_tds = 1;
1483
1484 urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags);
1485 if (!urb_priv)
1486 return -ENOMEM;
1487
1488 urb_priv->num_tds = num_tds;
1489 urb_priv->num_tds_done = 0;
1490 urb->hcpriv = urb_priv;
1491
1492 trace_xhci_urb_enqueue(urb);
1493
1494 if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
1498 if (urb->dev->speed == USB_SPEED_FULL) {
1499 ret = xhci_check_maxpacket(xhci, slot_id,
1500 ep_index, urb);
1501 if (ret < 0) {
1502 xhci_urb_free_priv(urb_priv);
1503 urb->hcpriv = NULL;
1504 return ret;
1505 }
1506 }
1507 }
1508
1509 spin_lock_irqsave(&xhci->lock, flags);
1510
1511 if (xhci->xhc_state & XHCI_STATE_DYING) {
1512 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
1513 urb->ep->desc.bEndpointAddress, urb);
1514 ret = -ESHUTDOWN;
1515 goto free_priv;
1516 }
1517 if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
1518 xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
1519 *ep_state);
1520 ret = -EINVAL;
1521 goto free_priv;
1522 }
1523 if (*ep_state & EP_SOFT_CLEAR_TOGGLE) {
1524 xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
1525 ret = -EINVAL;
1526 goto free_priv;
1527 }
1528
1529 switch (usb_endpoint_type(&urb->ep->desc)) {
1530
1531 case USB_ENDPOINT_XFER_CONTROL:
1532 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
1533 slot_id, ep_index);
1534 break;
1535 case USB_ENDPOINT_XFER_BULK:
1536 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
1537 slot_id, ep_index);
1538 break;
1539 case USB_ENDPOINT_XFER_INT:
1540 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
1541 slot_id, ep_index);
1542 break;
1543 case USB_ENDPOINT_XFER_ISOC:
1544 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
1545 slot_id, ep_index);
1546 }
1547
1548 if (ret) {
1549free_priv:
1550 xhci_urb_free_priv(urb_priv);
1551 urb->hcpriv = NULL;
1552 }
1553 spin_unlock_irqrestore(&xhci->lock, flags);
1554 return ret;
1555}
1556
/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed"
 * from the ring.  Since the ring is a contiguous structure, they can't be
 * physically removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This is the common case, when a
 *     partially completed URB is canceled.
 *
 *  2) If the HC has not yet reached the canceled TD, its TRBs are turned into
 *     no-op TRBs that the hardware will skip over.
 *
 * This function queues a Stop Endpoint command (if one isn't already pending)
 * and adds the URB's TDs to the endpoint's cancelled list; the Stop Endpoint
 * command completion handler performs the actual removal and gives the URB
 * back.
 */
1588static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1589{
1590 unsigned long flags;
1591 int ret, i;
1592 u32 temp;
1593 struct xhci_hcd *xhci;
1594 struct urb_priv *urb_priv;
1595 struct xhci_td *td;
1596 unsigned int ep_index;
1597 struct xhci_ring *ep_ring;
1598 struct xhci_virt_ep *ep;
1599 struct xhci_command *command;
1600 struct xhci_virt_device *vdev;
1601
1602 xhci = hcd_to_xhci(hcd);
1603 spin_lock_irqsave(&xhci->lock, flags);
1604
1605 trace_xhci_urb_dequeue(urb);
1606
1607
1608 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1609 if (ret)
1610 goto done;
1611
1612
1613 vdev = xhci->devs[urb->dev->slot_id];
1614 urb_priv = urb->hcpriv;
1615 if (!vdev || !urb_priv)
1616 goto err_giveback;
1617
1618 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1619 ep = &vdev->eps[ep_index];
1620 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1621 if (!ep || !ep_ring)
1622 goto err_giveback;
1623
1624
1625 temp = readl(&xhci->op_regs->status);
1626 if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
1627 xhci_hc_died(xhci);
1628 goto done;
1629 }
1630
	/*
	 * Check that the ring has not been re-allocated since the URB was
	 * enqueued.  If it has, make sure none of the ring related pointers in
	 * this URB private data are touched, such as td_list, otherwise we
	 * would overwrite freed data.
	 */
1636 if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
1637 xhci_err(xhci, "Canceled URB td not found on endpoint ring");
1638 for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
1639 td = &urb_priv->td[i];
1640 if (!list_empty(&td->cancelled_td_list))
1641 list_del_init(&td->cancelled_td_list);
1642 }
1643 goto err_giveback;
1644 }
1645
1646 if (xhci->xhc_state & XHCI_STATE_HALTED) {
1647 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1648 "HC halted, freeing TD manually.");
1649 for (i = urb_priv->num_tds_done;
1650 i < urb_priv->num_tds;
1651 i++) {
1652 td = &urb_priv->td[i];
1653 if (!list_empty(&td->td_list))
1654 list_del_init(&td->td_list);
1655 if (!list_empty(&td->cancelled_td_list))
1656 list_del_init(&td->cancelled_td_list);
1657 }
1658 goto err_giveback;
1659 }
1660
1661 i = urb_priv->num_tds_done;
1662 if (i < urb_priv->num_tds)
1663 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1664 "Cancel URB %p, dev %s, ep 0x%x, "
1665 "starting at offset 0x%llx",
1666 urb, urb->dev->devpath,
1667 urb->ep->desc.bEndpointAddress,
1668 (unsigned long long) xhci_trb_virt_to_dma(
1669 urb_priv->td[i].start_seg,
1670 urb_priv->td[i].first_trb));
1671
1672 for (; i < urb_priv->num_tds; i++) {
1673 td = &urb_priv->td[i];
1674 list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
1675 }
1676
1677
1678
1679
1680 if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
1681 command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
1682 if (!command) {
1683 ret = -ENOMEM;
1684 goto done;
1685 }
1686 ep->ep_state |= EP_STOP_CMD_PENDING;
1687 ep->stop_cmd_timer.expires = jiffies +
1688 XHCI_STOP_EP_CMD_TIMEOUT * HZ;
1689 add_timer(&ep->stop_cmd_timer);
1690 xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
1691 ep_index, 0);
1692 xhci_ring_cmd_db(xhci);
1693 }
1694done:
1695 spin_unlock_irqrestore(&xhci->lock, flags);
1696 return ret;
1697
1698err_giveback:
1699 if (urb_priv)
1700 xhci_urb_free_priv(urb_priv);
1701 usb_hcd_unlink_urb_from_ep(hcd, urb);
1702 spin_unlock_irqrestore(&xhci->lock, flags);
1703 usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1704 return ret;
1705}
1706
/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 */
1720static int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1721 struct usb_host_endpoint *ep)
1722{
1723 struct xhci_hcd *xhci;
1724 struct xhci_container_ctx *in_ctx, *out_ctx;
1725 struct xhci_input_control_ctx *ctrl_ctx;
1726 unsigned int ep_index;
1727 struct xhci_ep_ctx *ep_ctx;
1728 u32 drop_flag;
1729 u32 new_add_flags, new_drop_flags;
1730 int ret;
1731
1732 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1733 if (ret <= 0)
1734 return ret;
1735 xhci = hcd_to_xhci(hcd);
1736 if (xhci->xhc_state & XHCI_STATE_DYING)
1737 return -ENODEV;
1738
1739 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1740 drop_flag = xhci_get_endpoint_flag(&ep->desc);
1741 if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
1742 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1743 __func__, drop_flag);
1744 return 0;
1745 }
1746
1747 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1748 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1749 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
1750 if (!ctrl_ctx) {
1751 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1752 __func__);
1753 return 0;
1754 }
1755
1756 ep_index = xhci_get_endpoint_index(&ep->desc);
1757 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
1761 if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) ||
1762 le32_to_cpu(ctrl_ctx->drop_flags) &
1763 xhci_get_endpoint_flag(&ep->desc)) {
1764
1765 if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
1766 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1767 __func__, ep);
1768 return 0;
1769 }
1770
1771 ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
1772 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1773
1774 ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
1775 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1776
1777 xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);
1778
1779 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1780
1781 if (xhci->quirks & XHCI_MTK_HOST)
1782 xhci_mtk_drop_ep_quirk(hcd, udev, ep);
1783
1784 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1785 (unsigned int) ep->desc.bEndpointAddress,
1786 udev->slot_id,
1787 (unsigned int) new_drop_flags,
1788 (unsigned int) new_add_flags);
1789 return 0;
1790}
1791
/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 */
1805static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1806 struct usb_host_endpoint *ep)
1807{
1808 struct xhci_hcd *xhci;
1809 struct xhci_container_ctx *in_ctx;
1810 unsigned int ep_index;
1811 struct xhci_input_control_ctx *ctrl_ctx;
1812 struct xhci_ep_ctx *ep_ctx;
1813 u32 added_ctxs;
1814 u32 new_add_flags, new_drop_flags;
1815 struct xhci_virt_device *virt_dev;
1816 int ret = 0;
1817
1818 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1819 if (ret <= 0) {
1820
1821 ep->hcpriv = NULL;
1822 return ret;
1823 }
1824 xhci = hcd_to_xhci(hcd);
1825 if (xhci->xhc_state & XHCI_STATE_DYING)
1826 return -ENODEV;
1827
1828 added_ctxs = xhci_get_endpoint_flag(&ep->desc);
1829 if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
1834 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1835 __func__, added_ctxs);
1836 return 0;
1837 }
1838
1839 virt_dev = xhci->devs[udev->slot_id];
1840 in_ctx = virt_dev->in_ctx;
1841 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
1842 if (!ctrl_ctx) {
1843 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1844 __func__);
1845 return 0;
1846 }
1847
1848 ep_index = xhci_get_endpoint_index(&ep->desc);
	/* If this endpoint is already in use, and the upper layers are trying
	 * to add it again without dropping it, reject the addition.
	 */
1852 if (virt_dev->eps[ep_index].ring &&
1853 !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
1854 xhci_warn(xhci, "Trying to add endpoint 0x%x "
1855 "without dropping it.\n",
1856 (unsigned int) ep->desc.bEndpointAddress);
1857 return -EINVAL;
1858 }
1859
1860
1861
1862
1863 if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
1864 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1865 __func__, ep);
1866 return 0;
1867 }
1868
	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
1874 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
1875 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1876 __func__, ep->desc.bEndpointAddress);
1877 return -ENOMEM;
1878 }
1879
1880 if (xhci->quirks & XHCI_MTK_HOST) {
1881 ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
1882 if (ret < 0) {
1883 xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring);
1884 virt_dev->eps[ep_index].new_ring = NULL;
1885 return ret;
1886 }
1887 }
1888
1889 ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
1890 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1891
1892
	/* If this endpoint was dropped earlier in this configuration change,
	 * leave the drop flags alone so the configure endpoint command drops
	 * and re-adds the endpoint with the new parameters.
	 */
1898 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1899
1900
1901 ep->hcpriv = udev;
1902
1903 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1904 trace_xhci_add_endpoint(ep_ctx);
1905
1906 xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index);
1907
1908 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1909 (unsigned int) ep->desc.bEndpointAddress,
1910 udev->slot_id,
1911 (unsigned int) new_drop_flags,
1912 (unsigned int) new_add_flags);
1913 return 0;
1914}
1915
1916static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
1917{
1918 struct xhci_input_control_ctx *ctrl_ctx;
1919 struct xhci_ep_ctx *ep_ctx;
1920 struct xhci_slot_ctx *slot_ctx;
1921 int i;
1922
1923 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
1924 if (!ctrl_ctx) {
1925 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1926 __func__);
1927 return;
1928 }
1929
	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
1935 ctrl_ctx->drop_flags = 0;
1936 ctrl_ctx->add_flags = 0;
1937 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1938 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1939
1940 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
1941 for (i = 1; i < 31; i++) {
1942 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
1943 ep_ctx->ep_info = 0;
1944 ep_ctx->ep_info2 = 0;
1945 ep_ctx->deq = 0;
1946 ep_ctx->tx_info = 0;
1947 }
1948}
1949
1950static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1951 struct usb_device *udev, u32 *cmd_status)
1952{
1953 int ret;
1954
1955 switch (*cmd_status) {
1956 case COMP_COMMAND_ABORTED:
1957 case COMP_COMMAND_RING_STOPPED:
1958 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
1959 ret = -ETIME;
1960 break;
1961 case COMP_RESOURCE_ERROR:
1962 dev_warn(&udev->dev,
1963 "Not enough host controller resources for new device state.\n");
1964 ret = -ENOMEM;
1965
1966 break;
1967 case COMP_BANDWIDTH_ERROR:
1968 case COMP_SECONDARY_BANDWIDTH_ERROR:
1969 dev_warn(&udev->dev,
1970 "Not enough bandwidth for new device state.\n");
1971 ret = -ENOSPC;
1972
1973 break;
1974 case COMP_TRB_ERROR:
1975
1976 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
1977 "add flag = 1, "
1978 "and endpoint is not disabled.\n");
1979 ret = -EINVAL;
1980 break;
1981 case COMP_INCOMPATIBLE_DEVICE_ERROR:
1982 dev_warn(&udev->dev,
1983 "ERROR: Incompatible device for endpoint configure command.\n");
1984 ret = -ENODEV;
1985 break;
1986 case COMP_SUCCESS:
1987 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1988 "Successful Endpoint Configure command");
1989 ret = 0;
1990 break;
1991 default:
1992 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
1993 *cmd_status);
1994 ret = -EINVAL;
1995 break;
1996 }
1997 return ret;
1998}
1999
2000static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
2001 struct usb_device *udev, u32 *cmd_status)
2002{
2003 int ret;
2004
2005 switch (*cmd_status) {
2006 case COMP_COMMAND_ABORTED:
2007 case COMP_COMMAND_RING_STOPPED:
2008 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
2009 ret = -ETIME;
2010 break;
2011 case COMP_PARAMETER_ERROR:
2012 dev_warn(&udev->dev,
2013 "WARN: xHCI driver setup invalid evaluate context command.\n");
2014 ret = -EINVAL;
2015 break;
2016 case COMP_SLOT_NOT_ENABLED_ERROR:
2017 dev_warn(&udev->dev,
2018 "WARN: slot not enabled for evaluate context command.\n");
2019 ret = -EINVAL;
2020 break;
2021 case COMP_CONTEXT_STATE_ERROR:
2022 dev_warn(&udev->dev,
2023 "WARN: invalid context state for evaluate context command.\n");
2024 ret = -EINVAL;
2025 break;
2026 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2027 dev_warn(&udev->dev,
2028 "ERROR: Incompatible device for evaluate context command.\n");
2029 ret = -ENODEV;
2030 break;
2031 case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
2032
2033 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
2034 ret = -EINVAL;
2035 break;
2036 case COMP_SUCCESS:
2037 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2038 "Successful evaluate context command");
2039 ret = 0;
2040 break;
2041 default:
2042 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2043 *cmd_status);
2044 ret = -EINVAL;
2045 break;
2046 }
2047 return ret;
2048}
2049
2050static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
2051 struct xhci_input_control_ctx *ctrl_ctx)
2052{
2053 u32 valid_add_flags;
2054 u32 valid_drop_flags;
2055
	/* Ignore the slot flag (bit 0), and the default control endpoint flag
	 * (bit 1).  The default control endpoint is added during the Address
	 * Device command and is never removed until the slot is disabled.
	 */
2060 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
2061 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
2062
	/* Use hweight32 to count the number of ones in the add flags, or
	 * number of endpoints added.  Don't count endpoints that are changed
	 * (both added and dropped).
	 */
2067 return hweight32(valid_add_flags) -
2068 hweight32(valid_add_flags & valid_drop_flags);
2069}
2070
2071static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
2072 struct xhci_input_control_ctx *ctrl_ctx)
2073{
2074 u32 valid_add_flags;
2075 u32 valid_drop_flags;
2076
2077 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
2078 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
2079
2080 return hweight32(valid_drop_flags) -
2081 hweight32(valid_add_flags & valid_drop_flags);
2082}
2083
/*
 * We need to reserve the new number of endpoints before the configure
 * endpoint command completes.  We can't subtract the dropped endpoints from
 * the number of active endpoints until the command completes, because another
 * configure endpoint command could be queued in the meantime and count the
 * dropped endpoints as free, oversubscribing the host.
 *
 * Must be called with xhci->lock held.
 */
2097static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
2098 struct xhci_input_control_ctx *ctrl_ctx)
2099{
2100 u32 added_eps;
2101
2102 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2103 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
2104 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2105 "Not enough ep ctxs: "
2106 "%u active, need to add %u, limit is %u.",
2107 xhci->num_active_eps, added_eps,
2108 xhci->limit_active_eps);
2109 return -ENOMEM;
2110 }
2111 xhci->num_active_eps += added_eps;
2112 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2113 "Adding %u ep ctxs, %u now active.", added_eps,
2114 xhci->num_active_eps);
2115 return 0;
2116}
2117
/*
 * The configure endpoint command failed in the xHC for some reason, so we
 * need to revert the resources that the failed configuration would have used.
 *
 * Must be called with xhci->lock held.
 */
2124static void xhci_free_host_resources(struct xhci_hcd *xhci,
2125 struct xhci_input_control_ctx *ctrl_ctx)
2126{
2127 u32 num_failed_eps;
2128
2129 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2130 xhci->num_active_eps -= num_failed_eps;
2131 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2132 "Removing %u failed ep ctxs, %u now active.",
2133 num_failed_eps,
2134 xhci->num_active_eps);
2135}
2136
/*
 * Now that the command has completed, clean up the active endpoint count by
 * subtracting out the endpoints that were dropped (but not changed).
 *
 * Must be called with xhci->lock held.
 */
2143static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
2144 struct xhci_input_control_ctx *ctrl_ctx)
2145{
2146 u32 num_dropped_eps;
2147
2148 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
2149 xhci->num_active_eps -= num_dropped_eps;
2150 if (num_dropped_eps)
2151 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2152 "Removing %u dropped ep ctxs, %u now active.",
2153 num_dropped_eps,
2154 xhci->num_active_eps);
2155}
2156
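/*
 * Periodic bandwidth is accounted in "blocks"; the block size depends on the
 * device speed (FS_BLOCK, HS_BLOCK or SS_BLOCK).
 */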
2157static unsigned int xhci_get_block_size(struct usb_device *udev)
2158{
2159 switch (udev->speed) {
2160 case USB_SPEED_LOW:
2161 case USB_SPEED_FULL:
2162 return FS_BLOCK;
2163 case USB_SPEED_HIGH:
2164 return HS_BLOCK;
2165 case USB_SPEED_SUPER:
2166 case USB_SPEED_SUPER_PLUS:
2167 return SS_BLOCK;
2168 case USB_SPEED_UNKNOWN:
2169 case USB_SPEED_WIRELESS:
2170 default:
2171
2172 return 1;
2173 }
2174}
2175
2176static unsigned int
2177xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
2178{
2179 if (interval_bw->overhead[LS_OVERHEAD_TYPE])
2180 return LS_OVERHEAD;
2181 if (interval_bw->overhead[FS_OVERHEAD_TYPE])
2182 return FS_OVERHEAD;
2183 return HS_OVERHEAD;
2184}
2185
/* If we are changing a LS/FS device under a HS hub,
 * make sure (if we are activating a new TT) that the HS bus has enough
 * bandwidth for this new TT.
 */
2190static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2191 struct xhci_virt_device *virt_dev,
2192 int old_active_eps)
2193{
2194 struct xhci_interval_bw_table *bw_table;
2195 struct xhci_tt_bw_info *tt_info;
2196
2197
2198 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
2199 tt_info = virt_dev->tt_info;
2200
2201
2202
2203
2204 if (old_active_eps)
2205 return 0;
2206 if (old_active_eps == 0 && tt_info->active_eps != 0) {
2207 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
2208 return -ENOMEM;
2209 return 0;
2210 }
2211
2212
2213
2214
2215
2216
2217 return 0;
2218}
2219
2220static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2221 struct xhci_virt_device *virt_dev)
2222{
2223 unsigned int bw_reserved;
2224
2225 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
2226 if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
2227 return -ENOMEM;
2228
2229 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
2230 if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
2231 return -ENOMEM;
2232
2233 return 0;
2234}
2235
/*
 * This algorithm is a very conservative estimate of the worst-case scheduling
 * scenario for any one interval.  The hardware dynamically schedules the
 * packets, so we can't tell which microframe could be the limiting factor in
 * the bandwidth scheduling.  This only takes into account periodic endpoints.
 *
 * Obviously, we can't solve an NP complete problem to find the minimum worst
 * case scenario.  Instead, we come up with an estimate that is no less than
 * the worst case bandwidth used for any one microframe, but may be an
 * over-estimate.
 *
 * We walk the requirements starting at the smallest interval, and place
 * packets in the schedule first, since those require tighter latency.  Packets
 * that don't fit into an interval are carried over to the next (larger)
 * interval, where they count twice (they could land in either half).  For each
 * interval the largest max packet size and the largest per-packet overhead are
 * used, which is where the over-estimate comes from.
 *
 * The resulting estimate (plus the bandwidth reserved for non-periodic
 * transfers) must stay below the bus bandwidth limit for the device's speed.
 */
2277static int xhci_check_bw_table(struct xhci_hcd *xhci,
2278 struct xhci_virt_device *virt_dev,
2279 int old_active_eps)
2280{
2281 unsigned int bw_reserved;
2282 unsigned int max_bandwidth;
2283 unsigned int bw_used;
2284 unsigned int block_size;
2285 struct xhci_interval_bw_table *bw_table;
2286 unsigned int packet_size = 0;
2287 unsigned int overhead = 0;
2288 unsigned int packets_transmitted = 0;
2289 unsigned int packets_remaining = 0;
2290 unsigned int i;
2291
2292 if (virt_dev->udev->speed >= USB_SPEED_SUPER)
2293 return xhci_check_ss_bw(xhci, virt_dev);
2294
2295 if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2296 max_bandwidth = HS_BW_LIMIT;
2297
2298 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
2299 } else {
2300 max_bandwidth = FS_BW_LIMIT;
2301 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2302 }
2303
2304 bw_table = virt_dev->bw_table;
2305
2306
2307
2308 block_size = xhci_get_block_size(virt_dev->udev);
2309
2310
2311
2312
2313 if (virt_dev->tt_info) {
2314 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2315 "Recalculating BW for rootport %u",
2316 virt_dev->real_port);
2317 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2318 xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2319 "newly activated TT.\n");
2320 return -ENOMEM;
2321 }
2322 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2323 "Recalculating BW for TT slot %u port %u",
2324 virt_dev->tt_info->slot_id,
2325 virt_dev->tt_info->ttport);
2326 } else {
2327 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2328 "Recalculating BW for rootport %u",
2329 virt_dev->real_port);
2330 }
2331
2332
2333
2334
2335 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2336 bw_table->interval_bw[0].num_packets *
2337 xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2338
2339 for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2340 unsigned int bw_added;
2341 unsigned int largest_mps;
2342 unsigned int interval_overhead;
2343
		/*
		 * How many packets could we transmit in this interval?
		 * If packets didn't fit in the previous interval, we will need
		 * to transmit that many packets twice within this interval.
		 */
2349 packets_remaining = 2 * packets_remaining +
2350 bw_table->interval_bw[i].num_packets;
2351
2352
2353
2354
2355 if (list_empty(&bw_table->interval_bw[i].endpoints))
2356 largest_mps = 0;
2357 else {
2358 struct xhci_virt_ep *virt_ep;
2359 struct list_head *ep_entry;
2360
2361 ep_entry = bw_table->interval_bw[i].endpoints.next;
2362 virt_ep = list_entry(ep_entry,
2363 struct xhci_virt_ep, bw_endpoint_list);
2364
2365 largest_mps = DIV_ROUND_UP(
2366 virt_ep->bw_info.max_packet_size,
2367 block_size);
2368 }
2369 if (largest_mps > packet_size)
2370 packet_size = largest_mps;
2371
2372
2373 interval_overhead = xhci_get_largest_overhead(
2374 &bw_table->interval_bw[i]);
2375 if (interval_overhead > overhead)
2376 overhead = interval_overhead;
2377
2378
2379
2380
2381 packets_transmitted = packets_remaining >> (i + 1);
2382
2383
2384 bw_added = packets_transmitted * (overhead + packet_size);
2385
2386
2387 packets_remaining = packets_remaining % (1 << (i + 1));
2388
2389
2390
2391
2392
2393 if (packets_remaining == 0) {
2394 packet_size = 0;
2395 overhead = 0;
2396 } else if (packets_transmitted > 0) {
2397
2398
2399
2400
2401
2402 packet_size = largest_mps;
2403 overhead = interval_overhead;
2404 }
2405
2406
2407
2408 bw_used += bw_added;
2409 if (bw_used > max_bandwidth) {
2410 xhci_warn(xhci, "Not enough bandwidth. "
2411 "Proposed: %u, Max: %u\n",
2412 bw_used, max_bandwidth);
2413 return -ENOMEM;
2414 }
2415 }
2416
2417
2418
2419
2420
2421
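	/*
	 * Any packets still left over after the longest interval still cost
	 * at least one more packet's worth of payload and overhead.
	 */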
2422 if (packets_remaining > 0)
2423 bw_used += overhead + packet_size;
2424
2425 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2426 unsigned int port_index = virt_dev->real_port - 1;
2427
2428
2429
2430
2431
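		/*
		 * A high-speed device shares the root port with every active
		 * TT below that port, so charge the TT overhead for each.
		 */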
2432 bw_used += TT_HS_OVERHEAD *
2433 xhci->rh_bw[port_index].num_active_tts;
2434 }
2435
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
		"Final bandwidth: %u, Limit: %u, Reserved: %u, "
		"Available: %u percent",
		bw_used, max_bandwidth, bw_reserved,
		(max_bandwidth - bw_used - bw_reserved) * 100 /
		max_bandwidth);
2442
2443 bw_used += bw_reserved;
2444 if (bw_used > max_bandwidth) {
2445 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2446 bw_used, max_bandwidth);
2447 return -ENOMEM;
2448 }
2449
2450 bw_table->bw_used = bw_used;
2451 return 0;
2452}
2453
2454static bool xhci_is_async_ep(unsigned int ep_type)
2455{
2456 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2457 ep_type != ISOC_IN_EP &&
2458 ep_type != INT_IN_EP);
2459}
2460
2461static bool xhci_is_sync_in_ep(unsigned int ep_type)
2462{
2463 return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2464}
2465
2466static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2467{
2468 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2469
2470 if (ep_bw->ep_interval == 0)
2471 return SS_OVERHEAD_BURST +
2472 (ep_bw->mult * ep_bw->num_packets *
2473 (SS_OVERHEAD + mps));
2474 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2475 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2476 1 << ep_bw->ep_interval);
2477
2478}
2479
2480static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2481 struct xhci_bw_info *ep_bw,
2482 struct xhci_interval_bw_table *bw_table,
2483 struct usb_device *udev,
2484 struct xhci_virt_ep *virt_ep,
2485 struct xhci_tt_bw_info *tt_info)
2486{
2487 struct xhci_interval_bw *interval_bw;
2488 int normalized_interval;
2489
2490 if (xhci_is_async_ep(ep_bw->type))
2491 return;
2492
2493 if (udev->speed >= USB_SPEED_SUPER) {
2494 if (xhci_is_sync_in_ep(ep_bw->type))
2495 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2496 xhci_get_ss_bw_consumed(ep_bw);
2497 else
2498 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2499 xhci_get_ss_bw_consumed(ep_bw);
2500 return;
2501 }
2502
2503
2504
2505
2506 if (list_empty(&virt_ep->bw_endpoint_list))
2507 return;
2508
2509
2510
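	/*
	 * ep_interval is encoded as log2(microframes); full-/low-speed
	 * bandwidth is tracked per frame, so shift by 3 (8 microframes per
	 * frame) for anything slower than high speed.
	 */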
2511 if (udev->speed == USB_SPEED_HIGH)
2512 normalized_interval = ep_bw->ep_interval;
2513 else
2514 normalized_interval = ep_bw->ep_interval - 3;
2515
2516 if (normalized_interval == 0)
2517 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2518 interval_bw = &bw_table->interval_bw[normalized_interval];
2519 interval_bw->num_packets -= ep_bw->num_packets;
2520 switch (udev->speed) {
2521 case USB_SPEED_LOW:
2522 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2523 break;
2524 case USB_SPEED_FULL:
2525 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2526 break;
2527 case USB_SPEED_HIGH:
2528 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2529 break;
2530 case USB_SPEED_SUPER:
2531 case USB_SPEED_SUPER_PLUS:
2532 case USB_SPEED_UNKNOWN:
2533 case USB_SPEED_WIRELESS:
2534
2535
2536
2537 return;
2538 }
2539 if (tt_info)
2540 tt_info->active_eps -= 1;
2541 list_del_init(&virt_ep->bw_endpoint_list);
2542}
2543
2544static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2545 struct xhci_bw_info *ep_bw,
2546 struct xhci_interval_bw_table *bw_table,
2547 struct usb_device *udev,
2548 struct xhci_virt_ep *virt_ep,
2549 struct xhci_tt_bw_info *tt_info)
2550{
2551 struct xhci_interval_bw *interval_bw;
2552 struct xhci_virt_ep *smaller_ep;
2553 int normalized_interval;
2554
2555 if (xhci_is_async_ep(ep_bw->type))
2556 return;
2557
2558 if (udev->speed == USB_SPEED_SUPER) {
2559 if (xhci_is_sync_in_ep(ep_bw->type))
2560 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2561 xhci_get_ss_bw_consumed(ep_bw);
2562 else
2563 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2564 xhci_get_ss_bw_consumed(ep_bw);
2565 return;
2566 }
2567
2568
2569
2570
2571 if (udev->speed == USB_SPEED_HIGH)
2572 normalized_interval = ep_bw->ep_interval;
2573 else
2574 normalized_interval = ep_bw->ep_interval - 3;
2575
2576 if (normalized_interval == 0)
2577 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2578 interval_bw = &bw_table->interval_bw[normalized_interval];
2579 interval_bw->num_packets += ep_bw->num_packets;
2580 switch (udev->speed) {
2581 case USB_SPEED_LOW:
2582 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2583 break;
2584 case USB_SPEED_FULL:
2585 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2586 break;
2587 case USB_SPEED_HIGH:
2588 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2589 break;
2590 case USB_SPEED_SUPER:
2591 case USB_SPEED_SUPER_PLUS:
2592 case USB_SPEED_UNKNOWN:
2593 case USB_SPEED_WIRELESS:
2594
2595
2596
2597 return;
2598 }
2599
2600 if (tt_info)
2601 tt_info->active_eps += 1;
2602
2603 list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2604 bw_endpoint_list) {
2605 if (ep_bw->max_packet_size >=
2606 smaller_ep->bw_info.max_packet_size) {
2607
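			/*
			 * Insert before the first endpoint with a smaller max
			 * packet size to keep the list sorted largest-first.
			 */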
2608 list_add_tail(&virt_ep->bw_endpoint_list,
2609 &smaller_ep->bw_endpoint_list);
2610 return;
2611 }
2612 }
2613
2614 list_add_tail(&virt_ep->bw_endpoint_list,
2615 &interval_bw->endpoints);
2616}
2617
2618void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2619 struct xhci_virt_device *virt_dev,
2620 int old_active_eps)
2621{
2622 struct xhci_root_port_bw_info *rh_bw_info;
2623 if (!virt_dev->tt_info)
2624 return;
2625
2626 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2627 if (old_active_eps == 0 &&
2628 virt_dev->tt_info->active_eps != 0) {
2629 rh_bw_info->num_active_tts += 1;
2630 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2631 } else if (old_active_eps != 0 &&
2632 virt_dev->tt_info->active_eps == 0) {
2633 rh_bw_info->num_active_tts -= 1;
2634 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2635 }
2636}
2637
2638static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2639 struct xhci_virt_device *virt_dev,
2640 struct xhci_container_ctx *in_ctx)
2641{
2642 struct xhci_bw_info ep_bw_info[31];
2643 int i;
2644 struct xhci_input_control_ctx *ctrl_ctx;
2645 int old_active_eps = 0;
2646
2647 if (virt_dev->tt_info)
2648 old_active_eps = virt_dev->tt_info->active_eps;
2649
2650 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2651 if (!ctrl_ctx) {
2652 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2653 __func__);
2654 return -ENOMEM;
2655 }
2656
2657 for (i = 0; i < 31; i++) {
2658 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2659 continue;
2660
2661
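		/*
		 * Save a copy of the endpoint's bandwidth info so it can be
		 * restored if the new configuration doesn't fit.
		 */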
2662 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2663 sizeof(ep_bw_info[i]));
2664
2665
2666
2667 if (EP_IS_DROPPED(ctrl_ctx, i))
2668 xhci_drop_ep_from_interval_table(xhci,
2669 &virt_dev->eps[i].bw_info,
2670 virt_dev->bw_table,
2671 virt_dev->udev,
2672 &virt_dev->eps[i],
2673 virt_dev->tt_info);
2674 }
2675
2676 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2677 for (i = 0; i < 31; i++) {
2678
2679 if (EP_IS_ADDED(ctrl_ctx, i))
2680 xhci_add_ep_to_interval_table(xhci,
2681 &virt_dev->eps[i].bw_info,
2682 virt_dev->bw_table,
2683 virt_dev->udev,
2684 &virt_dev->eps[i],
2685 virt_dev->tt_info);
2686 }
2687
2688 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2689
2690
2691
2692 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2693 return 0;
2694 }
2695
2696
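	/*
	 * The new configuration doesn't fit; put the interval table back the
	 * way it was.
	 */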
2697 for (i = 0; i < 31; i++) {
2698 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2699 continue;
2700
2701
2702
2703
2704 if (EP_IS_ADDED(ctrl_ctx, i)) {
2705 xhci_drop_ep_from_interval_table(xhci,
2706 &virt_dev->eps[i].bw_info,
2707 virt_dev->bw_table,
2708 virt_dev->udev,
2709 &virt_dev->eps[i],
2710 virt_dev->tt_info);
2711 }
2712
2713 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2714 sizeof(ep_bw_info[i]));
2715
2716 if (EP_IS_DROPPED(ctrl_ctx, i))
2717 xhci_add_ep_to_interval_table(xhci,
2718 &virt_dev->eps[i].bw_info,
2719 virt_dev->bw_table,
2720 virt_dev->udev,
2721 &virt_dev->eps[i],
2722 virt_dev->tt_info);
2723 }
2724 return -ENOMEM;
2725}
2726
/*
 * Issue a configure endpoint command (or an evaluate context command if
 * ctx_change is set) and wait for it to finish.  The caller must not hold
 * xhci->lock and must be able to sleep.
 */
2731static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2732 struct usb_device *udev,
2733 struct xhci_command *command,
2734 bool ctx_change, bool must_succeed)
2735{
2736 int ret;
2737 unsigned long flags;
2738 struct xhci_input_control_ctx *ctrl_ctx;
2739 struct xhci_virt_device *virt_dev;
2740 struct xhci_slot_ctx *slot_ctx;
2741
2742 if (!command)
2743 return -EINVAL;
2744
2745 spin_lock_irqsave(&xhci->lock, flags);
2746
2747 if (xhci->xhc_state & XHCI_STATE_DYING) {
2748 spin_unlock_irqrestore(&xhci->lock, flags);
2749 return -ESHUTDOWN;
2750 }
2751
2752 virt_dev = xhci->devs[udev->slot_id];
2753
2754 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2755 if (!ctrl_ctx) {
2756 spin_unlock_irqrestore(&xhci->lock, flags);
2757 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2758 __func__);
2759 return -ENOMEM;
2760 }
2761
2762 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2763 xhci_reserve_host_resources(xhci, ctrl_ctx)) {
2764 spin_unlock_irqrestore(&xhci->lock, flags);
2765 xhci_warn(xhci, "Not enough host resources, "
2766 "active endpoint contexts = %u\n",
2767 xhci->num_active_eps);
2768 return -ENOMEM;
2769 }
2770 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2771 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2772 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2773 xhci_free_host_resources(xhci, ctrl_ctx);
2774 spin_unlock_irqrestore(&xhci->lock, flags);
2775 xhci_warn(xhci, "Not enough bandwidth\n");
2776 return -ENOMEM;
2777 }
2778
2779 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
2780
2781 trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx);
2782 trace_xhci_configure_endpoint(slot_ctx);
2783
2784 if (!ctx_change)
2785 ret = xhci_queue_configure_endpoint(xhci, command,
2786 command->in_ctx->dma,
2787 udev->slot_id, must_succeed);
2788 else
2789 ret = xhci_queue_evaluate_context(xhci, command,
2790 command->in_ctx->dma,
2791 udev->slot_id, must_succeed);
2792 if (ret < 0) {
2793 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2794 xhci_free_host_resources(xhci, ctrl_ctx);
2795 spin_unlock_irqrestore(&xhci->lock, flags);
2796 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2797 "FIXME allocate a new ring segment");
2798 return -ENOMEM;
2799 }
2800 xhci_ring_cmd_db(xhci);
2801 spin_unlock_irqrestore(&xhci->lock, flags);
2802
2803
2804 wait_for_completion(command->completion);
2805
2806 if (!ctx_change)
2807 ret = xhci_configure_endpoint_result(xhci, udev,
2808 &command->status);
2809 else
2810 ret = xhci_evaluate_context_result(xhci, udev,
2811 &command->status);
2812
2813 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2814 spin_lock_irqsave(&xhci->lock, flags);
2815
2816
2817
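		/*
		 * On failure release the reserved endpoint resources;
		 * otherwise turn the reservation into real usage.
		 */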
2818 if (ret)
2819 xhci_free_host_resources(xhci, ctrl_ctx);
2820 else
2821 xhci_finish_resource_reservation(xhci, ctrl_ctx);
2822 spin_unlock_irqrestore(&xhci->lock, flags);
2823 }
2824 return ret;
2825}
2826
2827static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
2828 struct xhci_virt_device *vdev, int i)
2829{
2830 struct xhci_virt_ep *ep = &vdev->eps[i];
2831
2832 if (ep->ep_state & EP_HAS_STREAMS) {
2833 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
2834 xhci_get_endpoint_address(i));
2835 xhci_free_stream_info(xhci, ep->stream_info);
2836 ep->stream_info = NULL;
2837 ep->ep_state &= ~EP_HAS_STREAMS;
2838 }
2839}
2840
/*
 * Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint().  Issues a single configure endpoint command for all
 * the accumulated changes.  If this call fails, the USB core is expected to
 * call xhci_reset_bandwidth() to undo the pending changes.
 *
 * The USB core will not allow URBs to be queued to endpoints that are being
 * added or dropped while a configuration or alt-setting change is in
 * progress, so nothing else should be touching xhci->devs[slot_id] here.
 */
2851static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2852{
2853 int i;
2854 int ret = 0;
2855 struct xhci_hcd *xhci;
2856 struct xhci_virt_device *virt_dev;
2857 struct xhci_input_control_ctx *ctrl_ctx;
2858 struct xhci_slot_ctx *slot_ctx;
2859 struct xhci_command *command;
2860
2861 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2862 if (ret <= 0)
2863 return ret;
2864 xhci = hcd_to_xhci(hcd);
2865 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
2866 (xhci->xhc_state & XHCI_STATE_REMOVING))
2867 return -ENODEV;
2868
2869 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2870 virt_dev = xhci->devs[udev->slot_id];
2871
2872 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
2873 if (!command)
2874 return -ENOMEM;
2875
2876 command->in_ctx = virt_dev->in_ctx;
2877
2878
2879 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2880 if (!ctrl_ctx) {
2881 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2882 __func__);
2883 ret = -ENOMEM;
2884 goto command_cleanup;
2885 }
2886 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2887 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2888 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
2889
2890
2891 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2892 ctrl_ctx->drop_flags == 0) {
2893 ret = 0;
2894 goto command_cleanup;
2895 }
2896
2897 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
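	/*
	 * Recompute the slot context's "last valid endpoint" field from the
	 * highest endpoint that will still be configured after this change.
	 */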
2898 for (i = 31; i >= 1; i--) {
2899 __le32 le32 = cpu_to_le32(BIT(i));
2900
2901 if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
2902 || (ctrl_ctx->add_flags & le32) || i == 1) {
2903 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2904 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
2905 break;
2906 }
2907 }
2908
2909 ret = xhci_configure_endpoint(xhci, udev, command,
2910 false, false);
2911 if (ret)
		/* The USB core is expected to call xhci_reset_bandwidth() */
2913 goto command_cleanup;
2914
2915
2916 for (i = 1; i < 31; i++) {
2917 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2918 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
2919 xhci_free_endpoint_ring(xhci, virt_dev, i);
2920 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2921 }
2922 }
2923 xhci_zero_in_ctx(xhci, virt_dev);
2924
2925
2926
2927
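	/*
	 * Install any newly-allocated endpoint rings, freeing the old ring
	 * first for endpoints that were reconfigured.
	 */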
2928 for (i = 1; i < 31; i++) {
2929 if (!virt_dev->eps[i].new_ring)
2930 continue;
2931
2932
2933
2934 if (virt_dev->eps[i].ring) {
2935 xhci_free_endpoint_ring(xhci, virt_dev, i);
2936 }
2937 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2938 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2939 virt_dev->eps[i].new_ring = NULL;
2940 }
2941command_cleanup:
2942 kfree(command->completion);
2943 kfree(command);
2944
2945 return ret;
2946}
2947
2948static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2949{
2950 struct xhci_hcd *xhci;
2951 struct xhci_virt_device *virt_dev;
2952 int i, ret;
2953
2954 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2955 if (ret <= 0)
2956 return;
2957 xhci = hcd_to_xhci(hcd);
2958
2959 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2960 virt_dev = xhci->devs[udev->slot_id];
2961
2962 for (i = 0; i < 31; i++) {
2963 if (virt_dev->eps[i].new_ring) {
2964 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
2965 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
2966 virt_dev->eps[i].new_ring = NULL;
2967 }
2968 }
2969 xhci_zero_in_ctx(xhci, virt_dev);
2970}
2971
2972static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
2973 struct xhci_container_ctx *in_ctx,
2974 struct xhci_container_ctx *out_ctx,
2975 struct xhci_input_control_ctx *ctrl_ctx,
2976 u32 add_flags, u32 drop_flags)
2977{
2978 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
2979 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
2980 xhci_slot_copy(xhci, in_ctx, out_ctx);
2981 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2982}
2983
2984static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
2985 unsigned int slot_id, unsigned int ep_index,
2986 struct xhci_dequeue_state *deq_state)
2987{
2988 struct xhci_input_control_ctx *ctrl_ctx;
2989 struct xhci_container_ctx *in_ctx;
2990 struct xhci_ep_ctx *ep_ctx;
2991 u32 added_ctxs;
2992 dma_addr_t addr;
2993
2994 in_ctx = xhci->devs[slot_id]->in_ctx;
2995 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2996 if (!ctrl_ctx) {
2997 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2998 __func__);
2999 return;
3000 }
3001
3002 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
3003 xhci->devs[slot_id]->out_ctx, ep_index);
3004 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
3005 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
3006 deq_state->new_deq_ptr);
3007 if (addr == 0) {
3008 xhci_warn(xhci, "WARN Cannot submit config ep after "
3009 "reset ep command\n");
3010 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
3011 deq_state->new_deq_seg,
3012 deq_state->new_deq_ptr);
3013 return;
3014 }
3015 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
3016
3017 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
3018 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
3019 xhci->devs[slot_id]->out_ctx, ctrl_ctx,
3020 added_ctxs, added_ctxs);
3021}
3022
3023void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
3024 unsigned int stream_id, struct xhci_td *td)
3025{
3026 struct xhci_dequeue_state deq_state;
3027 struct usb_device *udev = td->urb->dev;
3028
3029 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
3030 "Cleaning up stalled endpoint ring");
3031
3032
3033
3034 xhci_find_new_dequeue_state(xhci, udev->slot_id,
3035 ep_index, stream_id, td, &deq_state);
3036
3037 if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
3038 return;
3039
3040
3041
3042
3043 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
3044 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
3045 "Queueing new dequeue state");
3046 xhci_queue_new_dequeue_state(xhci, udev->slot_id,
3047 ep_index, &deq_state);
3048 } else {
3049
3050
3051
3052
3053
3054 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3055 "Setting up input context for "
3056 "configure endpoint command");
3057 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
3058 ep_index, &deq_state);
3059 }
3060}
3061
/*
 * Called by the USB core when an endpoint needs its halt/data toggle cleared.
 *
 * The host-side halt is already cleared by the reset endpoint command issued
 * when the STALL event was received, so for halted endpoints there is nothing
 * left to do here.  For an endpoint that is not halted, the data toggle and
 * sequence number are reset by issuing a configure endpoint command with both
 * the Drop and Add bits set for that endpoint, after first stopping it and
 * making sure its ring is empty.
 */
3074static void xhci_endpoint_reset(struct usb_hcd *hcd,
3075 struct usb_host_endpoint *host_ep)
3076{
3077 struct xhci_hcd *xhci;
3078 struct usb_device *udev;
3079 struct xhci_virt_device *vdev;
3080 struct xhci_virt_ep *ep;
3081 struct xhci_input_control_ctx *ctrl_ctx;
3082 struct xhci_command *stop_cmd, *cfg_cmd;
3083 unsigned int ep_index;
3084 unsigned long flags;
3085 u32 ep_flag;
3086
3087 xhci = hcd_to_xhci(hcd);
3088 if (!host_ep->hcpriv)
3089 return;
3090 udev = (struct usb_device *) host_ep->hcpriv;
3091 vdev = xhci->devs[udev->slot_id];
3092
3093
3094
3095
3096
3097
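	/*
	 * The virt device may be gone if the xHC had to be re-initialized
	 * (for example after a failed resume); bail out if so.
	 */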
3098 if (!udev->slot_id || !vdev)
3099 return;
3100 ep_index = xhci_get_endpoint_index(&host_ep->desc);
3101 ep = &vdev->eps[ep_index];
3102 if (!ep)
3103 return;
3104
3105
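	/* Bail out if the toggle is already being cleared by an endpoint reset */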
3106 if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
3107 ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
3108 return;
3109 }
3110
3111 if (usb_endpoint_xfer_control(&host_ep->desc) ||
3112 usb_endpoint_xfer_isoc(&host_ep->desc))
3113 return;
3114
3115 ep_flag = xhci_get_endpoint_flag(&host_ep->desc);
3116
3117 if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
3118 return;
3119
3120 stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
3121 if (!stop_cmd)
3122 return;
3123
3124 cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
3125 if (!cfg_cmd)
3126 goto cleanup;
3127
3128 spin_lock_irqsave(&xhci->lock, flags);
3129
3130
3131 ep->ep_state |= EP_SOFT_CLEAR_TOGGLE;
3132
3133
3134
3135
3136
3137
3138
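	/*
	 * The ring must be empty before the toggle and sequence number can be
	 * reset; the class driver is expected to have cancelled all transfers
	 * already, so refuse the reset otherwise.
	 */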
3139 if (!list_empty(&ep->ring->td_list)) {
3140 dev_err(&udev->dev, "EP not empty, refuse reset\n");
3141 spin_unlock_irqrestore(&xhci->lock, flags);
3142 xhci_free_command(xhci, cfg_cmd);
3143 goto cleanup;
3144 }
3145 xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
3146 xhci_ring_cmd_db(xhci);
3147 spin_unlock_irqrestore(&xhci->lock, flags);
3148
3149 wait_for_completion(stop_cmd->completion);
3150
3151 spin_lock_irqsave(&xhci->lock, flags);
3152
3153
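	/*
	 * A configure endpoint command with both the drop and add flag set
	 * for this endpoint clears its toggle and sequence number.
	 */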
3154 ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
3155 xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
3156 ctrl_ctx, ep_flag, ep_flag);
3157 xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
3158
3159 xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
3160 udev->slot_id, false);
3161 xhci_ring_cmd_db(xhci);
3162 spin_unlock_irqrestore(&xhci->lock, flags);
3163
3164 wait_for_completion(cfg_cmd->completion);
3165
3166 ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
3167 xhci_free_command(xhci, cfg_cmd);
3168cleanup:
3169 xhci_free_command(xhci, stop_cmd);
3170}
3171
3172static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
3173 struct usb_device *udev, struct usb_host_endpoint *ep,
3174 unsigned int slot_id)
3175{
3176 int ret;
3177 unsigned int ep_index;
3178 unsigned int ep_state;
3179
3180 if (!ep)
3181 return -EINVAL;
3182 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
3183 if (ret <= 0)
3184 return -EINVAL;
3185 if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
3186 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
3187 " descriptor for ep 0x%x does not support streams\n",
3188 ep->desc.bEndpointAddress);
3189 return -EINVAL;
3190 }
3191
3192 ep_index = xhci_get_endpoint_index(&ep->desc);
3193 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3194 if (ep_state & EP_HAS_STREAMS ||
3195 ep_state & EP_GETTING_STREAMS) {
3196 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
3197 "already has streams set up.\n",
3198 ep->desc.bEndpointAddress);
3199 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
3200 "dynamic stream context array reallocation.\n");
3201 return -EINVAL;
3202 }
3203 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
3204 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
3205 "endpoint 0x%x; URBs are pending.\n",
3206 ep->desc.bEndpointAddress);
3207 return -EINVAL;
3208 }
3209 return 0;
3210}
3211
3212static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
3213 unsigned int *num_streams, unsigned int *num_stream_ctxs)
3214{
3215 unsigned int max_streams;
3216
3217
3218 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
3219
3220
3221
3222
3223
3224
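	/*
	 * The host controller caps the primary stream array size (MaxPSASize);
	 * clamp both the context array size and the usable stream IDs to it.
	 */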
3225 max_streams = HCC_MAX_PSA(xhci->hcc_params);
3226 if (*num_stream_ctxs > max_streams) {
3227 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
3228 max_streams);
3229 *num_stream_ctxs = max_streams;
3230 *num_streams = max_streams;
3231 }
3232}
3233
/*
 * Check that every endpoint can have streams enabled, clamp the requested
 * number of streams to what all of the endpoints support, and build a bitmask
 * of the endpoint contexts being changed.  Returns an error if any endpoint
 * already has streams set up or is otherwise unsuitable.
 */
3238static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
3239 struct usb_device *udev,
3240 struct usb_host_endpoint **eps, unsigned int num_eps,
3241 unsigned int *num_streams, u32 *changed_ep_bitmask)
3242{
3243 unsigned int max_streams;
3244 unsigned int endpoint_flag;
3245 int i;
3246 int ret;
3247
3248 for (i = 0; i < num_eps; i++) {
3249 ret = xhci_check_streams_endpoint(xhci, udev,
3250 eps[i], udev->slot_id);
3251 if (ret < 0)
3252 return ret;
3253
3254 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
3255 if (max_streams < (*num_streams - 1)) {
3256 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3257 eps[i]->desc.bEndpointAddress,
3258 max_streams);
3259 *num_streams = max_streams+1;
3260 }
3261
3262 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
3263 if (*changed_ep_bitmask & endpoint_flag)
3264 return -EINVAL;
3265 *changed_ep_bitmask |= endpoint_flag;
3266 }
3267 return 0;
3268}
3269
3270static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3271 struct usb_device *udev,
3272 struct usb_host_endpoint **eps, unsigned int num_eps)
3273{
3274 u32 changed_ep_bitmask = 0;
3275 unsigned int slot_id;
3276 unsigned int ep_index;
3277 unsigned int ep_state;
3278 int i;
3279
3280 slot_id = udev->slot_id;
3281 if (!xhci->devs[slot_id])
3282 return 0;
3283
3284 for (i = 0; i < num_eps; i++) {
3285 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3286 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3287
3288 if (ep_state & EP_GETTING_NO_STREAMS) {
3289 xhci_warn(xhci, "WARN Can't disable streams for "
3290 "endpoint 0x%x, "
3291 "streams are being disabled already\n",
3292 eps[i]->desc.bEndpointAddress);
3293 return 0;
3294 }
3295
3296 if (!(ep_state & EP_HAS_STREAMS) &&
3297 !(ep_state & EP_GETTING_STREAMS)) {
3298 xhci_warn(xhci, "WARN Can't disable streams for "
3299 "endpoint 0x%x, "
3300 "streams are already disabled!\n",
3301 eps[i]->desc.bEndpointAddress);
3302 xhci_warn(xhci, "WARN xhci_free_streams() called "
3303 "with non-streams endpoint\n");
3304 return 0;
3305 }
3306 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3307 }
3308 return changed_ep_bitmask;
3309}
3310
/*
 * Set up bulk streams on a set of endpoints.  Allocates the stream context
 * arrays and stream rings, marks the endpoints while the transition is in
 * progress, and issues a configure endpoint command to hand the stream
 * context arrays to the hardware.
 *
 * Returns the number of stream IDs the driver may use (not counting the
 * reserved stream 0) on success, or a negative error code on failure.
 */
3327static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3328 struct usb_host_endpoint **eps, unsigned int num_eps,
3329 unsigned int num_streams, gfp_t mem_flags)
3330{
3331 int i, ret;
3332 struct xhci_hcd *xhci;
3333 struct xhci_virt_device *vdev;
3334 struct xhci_command *config_cmd;
3335 struct xhci_input_control_ctx *ctrl_ctx;
3336 unsigned int ep_index;
3337 unsigned int num_stream_ctxs;
3338 unsigned int max_packet;
3339 unsigned long flags;
3340 u32 changed_ep_bitmask = 0;
3341
3342 if (!eps)
3343 return -EINVAL;
3344
3345
3346
3347
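	/*
	 * Add one to account for stream 0, which is reserved and cannot be
	 * used by drivers.
	 */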
3348 num_streams += 1;
3349 xhci = hcd_to_xhci(hcd);
3350 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3351 num_streams);
3352
3353
3354 if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
3355 HCC_MAX_PSA(xhci->hcc_params) < 4) {
3356 xhci_dbg(xhci, "xHCI controller does not support streams.\n");
3357 return -ENOSYS;
3358 }
3359
3360 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
3361 if (!config_cmd)
3362 return -ENOMEM;
3363
3364 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
3365 if (!ctrl_ctx) {
3366 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3367 __func__);
3368 xhci_free_command(xhci, config_cmd);
3369 return -ENOMEM;
3370 }
3371
3372
3373
3374
3375
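	/*
	 * Under the lock, check that none of the endpoints already has
	 * streams, and work out how many streams they can all support.
	 */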
3376 spin_lock_irqsave(&xhci->lock, flags);
3377 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3378 num_eps, &num_streams, &changed_ep_bitmask);
3379 if (ret < 0) {
3380 xhci_free_command(xhci, config_cmd);
3381 spin_unlock_irqrestore(&xhci->lock, flags);
3382 return ret;
3383 }
3384 if (num_streams <= 1) {
3385 xhci_warn(xhci, "WARN: endpoints can't handle "
3386 "more than one stream.\n");
3387 xhci_free_command(xhci, config_cmd);
3388 spin_unlock_irqrestore(&xhci->lock, flags);
3389 return -EINVAL;
3390 }
3391 vdev = xhci->devs[udev->slot_id];
3392
3393
3394
3395 for (i = 0; i < num_eps; i++) {
3396 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3397 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3398 }
3399 spin_unlock_irqrestore(&xhci->lock, flags);
3400
3401
3402
3403
3404
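	/*
	 * Figure out how many stream context entries are needed, then
	 * allocate the stream data structures for each endpoint before
	 * touching the input context.
	 */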
3405 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3406 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3407 num_stream_ctxs, num_streams);
3408
3409 for (i = 0; i < num_eps; i++) {
3410 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3411 max_packet = usb_endpoint_maxp(&eps[i]->desc);
3412 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3413 num_stream_ctxs,
3414 num_streams,
3415 max_packet, mem_flags);
3416 if (!vdev->eps[ep_index].stream_info)
3417 goto cleanup;
3418
3419
3420
3421 }
3422
3423
3424 for (i = 0; i < num_eps; i++) {
3425 struct xhci_ep_ctx *ep_ctx;
3426
3427 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3428 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3429
3430 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3431 vdev->out_ctx, ep_index);
3432 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3433 vdev->eps[ep_index].stream_info);
3434 }
3435
3436
3437
3438 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3439 vdev->out_ctx, ctrl_ctx,
3440 changed_ep_bitmask, changed_ep_bitmask);
3441
3442
3443 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3444 false, false);
3445
3446
3447
3448
3449
3450 if (ret < 0)
3451 goto cleanup;
3452
3453 spin_lock_irqsave(&xhci->lock, flags);
3454 for (i = 0; i < num_eps; i++) {
3455 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3456 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3457 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3458 udev->slot_id, ep_index);
3459 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3460 }
3461 xhci_free_command(xhci, config_cmd);
3462 spin_unlock_irqrestore(&xhci->lock, flags);
3463
3464
3465 return num_streams - 1;
3466
3467cleanup:
3468
3469 for (i = 0; i < num_eps; i++) {
3470 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3471 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3472 vdev->eps[ep_index].stream_info = NULL;
3473
3474
3475
3476 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3477 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3478 xhci_endpoint_zero(xhci, vdev, eps[i]);
3479 }
3480 xhci_free_command(xhci, config_cmd);
3481 return -ENOMEM;
3482}
3483
/*
 * Transition a set of endpoints from using streams back to being "normal"
 * endpoints without streams.  Modifies the endpoint contexts, submits a
 * configure endpoint command, and frees the stream data structures once that
 * command completes successfully.
 */
3490static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3491 struct usb_host_endpoint **eps, unsigned int num_eps,
3492 gfp_t mem_flags)
3493{
3494 int i, ret;
3495 struct xhci_hcd *xhci;
3496 struct xhci_virt_device *vdev;
3497 struct xhci_command *command;
3498 struct xhci_input_control_ctx *ctrl_ctx;
3499 unsigned int ep_index;
3500 unsigned long flags;
3501 u32 changed_ep_bitmask;
3502
3503 xhci = hcd_to_xhci(hcd);
3504 vdev = xhci->devs[udev->slot_id];
3505
3506
3507 spin_lock_irqsave(&xhci->lock, flags);
3508 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3509 udev, eps, num_eps);
3510 if (changed_ep_bitmask == 0) {
3511 spin_unlock_irqrestore(&xhci->lock, flags);
3512 return -EINVAL;
3513 }
3514
3515
3516
3517
3518
3519 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3520 command = vdev->eps[ep_index].stream_info->free_streams_command;
3521 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3522 if (!ctrl_ctx) {
3523 spin_unlock_irqrestore(&xhci->lock, flags);
3524 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3525 __func__);
3526 return -EINVAL;
3527 }
3528
3529 for (i = 0; i < num_eps; i++) {
3530 struct xhci_ep_ctx *ep_ctx;
3531
3532 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3533 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3534 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3535 EP_GETTING_NO_STREAMS;
3536
3537 xhci_endpoint_copy(xhci, command->in_ctx,
3538 vdev->out_ctx, ep_index);
3539 xhci_setup_no_streams_ep_input_ctx(ep_ctx,
3540 &vdev->eps[ep_index]);
3541 }
3542 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3543 vdev->out_ctx, ctrl_ctx,
3544 changed_ep_bitmask, changed_ep_bitmask);
3545 spin_unlock_irqrestore(&xhci->lock, flags);
3546
3547
3548
3549
3550 ret = xhci_configure_endpoint(xhci, udev, command,
3551 false, true);
3552
3553
3554
3555
3556 if (ret < 0)
3557 return ret;
3558
3559 spin_lock_irqsave(&xhci->lock, flags);
3560 for (i = 0; i < num_eps; i++) {
3561 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3562 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3563 vdev->eps[ep_index].stream_info = NULL;
3564
3565
3566
3567 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3568 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3569 }
3570 spin_unlock_irqrestore(&xhci->lock, flags);
3571
3572 return 0;
3573}

/*
 * Release the host controller endpoint resources for endpoints that were
 * active before a Reset Device or Disable Slot command.  The Reset Device
 * command leaves the default control endpoint configured, whereas Disable
 * Slot drops it as well, hence the drop_control_ep parameter.
 *
 * Must be called with xhci->lock held.
 */
3582void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3583 struct xhci_virt_device *virt_dev, bool drop_control_ep)
3584{
3585 int i;
3586 unsigned int num_dropped_eps = 0;
3587 unsigned int drop_flags = 0;
3588
3589 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3590 if (virt_dev->eps[i].ring) {
3591 drop_flags |= 1 << i;
3592 num_dropped_eps++;
3593 }
3594 }
3595 xhci->num_active_eps -= num_dropped_eps;
3596 if (num_dropped_eps)
3597 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3598 "Dropped %u ep ctxs, flags = 0x%x, "
3599 "%u now active.",
3600 num_dropped_eps, drop_flags,
3601 xhci->num_active_eps);
3602}

/*
 * Submit a Reset Device command, which moves the slot back to the default
 * state: the device address is cleared and every endpoint except the default
 * control endpoint is disabled.  The USB core is then expected to re-address
 * the device and set up its configuration again.
 *
 * If the virt device for this slot does not exist, or no longer matches the
 * udev (for example after an xHC restore error during S3/S4 resume), the
 * device is re-allocated with xhci_alloc_dev() instead of being reset.
 *
 * After the command completes, all host data structures for the disabled
 * endpoints are freed and the bandwidth bookkeeping is cleared.
 */
3622static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
3623 struct usb_device *udev)
3624{
3625 int ret, i;
3626 unsigned long flags;
3627 struct xhci_hcd *xhci;
3628 unsigned int slot_id;
3629 struct xhci_virt_device *virt_dev;
3630 struct xhci_command *reset_device_cmd;
3631 struct xhci_slot_ctx *slot_ctx;
3632 int old_active_eps = 0;
3633
3634 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3635 if (ret <= 0)
3636 return ret;
3637 xhci = hcd_to_xhci(hcd);
3638 slot_id = udev->slot_id;
3639 virt_dev = xhci->devs[slot_id];
3640 if (!virt_dev) {
3641 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3642 "not exist. Re-allocate the device\n", slot_id);
3643 ret = xhci_alloc_dev(hcd, udev);
3644 if (ret == 1)
3645 return 0;
3646 else
3647 return -EINVAL;
3648 }
3649
3650 if (virt_dev->tt_info)
3651 old_active_eps = virt_dev->tt_info->active_eps;
3652
3653 if (virt_dev->udev != udev) {
3654
3655
3656
3657
3658 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3659 "not match the udev. Re-allocate the device\n",
3660 slot_id);
3661 ret = xhci_alloc_dev(hcd, udev);
3662 if (ret == 1)
3663 return 0;
3664 else
3665 return -EINVAL;
3666 }
3667
3668
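	/* If the slot was never set up, there is nothing to reset. */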
3669 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3670 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3671 SLOT_STATE_DISABLED)
3672 return 0;
3673
3674 trace_xhci_discover_or_reset_device(slot_ctx);
3675
3676 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3677
3678
3679
3680
3681
3682
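	/*
	 * Use GFP_NOIO: a device reset may run as part of storage error
	 * handling, so the allocation must not recurse into block I/O.
	 */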
3683 reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
3684 if (!reset_device_cmd) {
3685 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3686 return -ENOMEM;
3687 }
3688
3689
3690 spin_lock_irqsave(&xhci->lock, flags);
3691
3692 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
3693 if (ret) {
3694 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3695 spin_unlock_irqrestore(&xhci->lock, flags);
3696 goto command_cleanup;
3697 }
3698 xhci_ring_cmd_db(xhci);
3699 spin_unlock_irqrestore(&xhci->lock, flags);
3700
3701
3702 wait_for_completion(reset_device_cmd->completion);
3703
3704
3705
3706
3707
3708 ret = reset_device_cmd->status;
3709 switch (ret) {
3710 case COMP_COMMAND_ABORTED:
3711 case COMP_COMMAND_RING_STOPPED:
3712 xhci_warn(xhci, "Timeout waiting for reset device command\n");
3713 ret = -ETIME;
3714 goto command_cleanup;
3715 case COMP_SLOT_NOT_ENABLED_ERROR:
3716 case COMP_CONTEXT_STATE_ERROR:
3717 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
3718 slot_id,
3719 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3720 xhci_dbg(xhci, "Not freeing device rings.\n");
3721
3722 ret = 0;
3723 goto command_cleanup;
3724 case COMP_SUCCESS:
3725 xhci_dbg(xhci, "Successful reset device command.\n");
3726 break;
3727 default:
3728 if (xhci_is_vendor_info_code(xhci, ret))
3729 break;
3730 xhci_warn(xhci, "Unknown completion code %u for "
3731 "reset device command.\n", ret);
3732 ret = -EINVAL;
3733 goto command_cleanup;
3734 }
3735
3736
3737 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3738 spin_lock_irqsave(&xhci->lock, flags);
3739
3740 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3741 spin_unlock_irqrestore(&xhci->lock, flags);
3742 }
3743
3744
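	/*
	 * Everything but the default control endpoint is disabled by the
	 * reset, so free the rings and bandwidth bookkeeping of the rest.
	 */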
3745 for (i = 1; i < 31; i++) {
3746 struct xhci_virt_ep *ep = &virt_dev->eps[i];
3747
3748 if (ep->ep_state & EP_HAS_STREAMS) {
3749 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
3750 xhci_get_endpoint_address(i));
3751 xhci_free_stream_info(xhci, ep->stream_info);
3752 ep->stream_info = NULL;
3753 ep->ep_state &= ~EP_HAS_STREAMS;
3754 }
3755
3756 if (ep->ring) {
3757 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3758 xhci_free_endpoint_ring(xhci, virt_dev, i);
3759 }
3760 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3761 xhci_drop_ep_from_interval_table(xhci,
3762 &virt_dev->eps[i].bw_info,
3763 virt_dev->bw_table,
3764 udev,
3765 &virt_dev->eps[i],
3766 virt_dev->tt_info);
3767 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3768 }
3769
3770 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3771 virt_dev->flags = 0;
3772 ret = 0;
3773
3774command_cleanup:
3775 xhci_free_command(xhci, reset_device_cmd);
3776 return ret;
3777}

/*
 * At this point the usb_device is about to go away: the device has been
 * disconnected, all traffic has been stopped and the endpoints have been
 * disabled.  Free the host controller data structures associated with it.
 */
3784static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3785{
3786 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3787 struct xhci_virt_device *virt_dev;
3788 struct xhci_slot_ctx *slot_ctx;
3789 int i, ret;
3790
3791#ifndef CONFIG_USB_DEFAULT_PERSIST
3792
3793
3794
3795
3796
3797 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3798 pm_runtime_put_noidle(hcd->self.controller);
3799#endif
3800
3801 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3802
3803
3804
3805 if (ret <= 0 && ret != -ENODEV)
3806 return;
3807
3808 virt_dev = xhci->devs[udev->slot_id];
3809 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3810 trace_xhci_free_dev(slot_ctx);
3811
3812
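	/* Stop any pending stop-endpoint timers before tearing the slot down. */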
3813 for (i = 0; i < 31; i++) {
3814 virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
3815 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
3816 }
3817 xhci_debugfs_remove_slot(xhci, udev->slot_id);
3818 virt_dev->udev = NULL;
3819 ret = xhci_disable_slot(xhci, udev->slot_id);
3820 if (ret)
3821 xhci_free_virt_device(xhci, udev->slot_id);
3822}
3823
3824int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
3825{
3826 struct xhci_command *command;
3827 unsigned long flags;
3828 u32 state;
3829 int ret = 0;
3830
3831 command = xhci_alloc_command(xhci, false, GFP_KERNEL);
3832 if (!command)
3833 return -ENOMEM;
3834
3835 spin_lock_irqsave(&xhci->lock, flags);
3836
3837 state = readl(&xhci->op_regs->status);
3838 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3839 (xhci->xhc_state & XHCI_STATE_HALTED)) {
3840 spin_unlock_irqrestore(&xhci->lock, flags);
3841 kfree(command);
3842 return -ENODEV;
3843 }
3844
3845 ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
3846 slot_id);
3847 if (ret) {
3848 spin_unlock_irqrestore(&xhci->lock, flags);
3849 kfree(command);
3850 return ret;
3851 }
3852 xhci_ring_cmd_db(xhci);
3853 spin_unlock_irqrestore(&xhci->lock, flags);
3854 return ret;
3855}

/*
 * Check whether there is one more host controller endpoint resource
 * available for the default control endpoint of a new slot, and reserve it.
 *
 * Must be called with xhci->lock held.
 */
3863static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3864{
3865 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
3866 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3867 "Not enough ep ctxs: "
3868 "%u active, need to add 1, limit is %u.",
3869 xhci->num_active_eps, xhci->limit_active_eps);
3870 return -ENOMEM;
3871 }
3872 xhci->num_active_eps += 1;
3873 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3874 "Adding 1 ep ctx, %u now active.",
3875 xhci->num_active_eps);
3876 return 0;
3877}

/*
 * Ask the hardware for a new device slot and allocate the driver's per-slot
 * data structures.  Returns 1 on success; returns 0 if the xHC ran out of
 * device slots, the Enable Slot command failed, or memory allocation failed.
 */
3884int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3885{
3886 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3887 struct xhci_virt_device *vdev;
3888 struct xhci_slot_ctx *slot_ctx;
3889 unsigned long flags;
3890 int ret, slot_id;
3891 struct xhci_command *command;
3892
3893 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
3894 if (!command)
3895 return 0;
3896
3897 spin_lock_irqsave(&xhci->lock, flags);
3898 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
3899 if (ret) {
3900 spin_unlock_irqrestore(&xhci->lock, flags);
3901 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3902 xhci_free_command(xhci, command);
3903 return 0;
3904 }
3905 xhci_ring_cmd_db(xhci);
3906 spin_unlock_irqrestore(&xhci->lock, flags);
3907
3908 wait_for_completion(command->completion);
3909 slot_id = command->slot_id;
3910
3911 if (!slot_id || command->status != COMP_SUCCESS) {
3912 xhci_err(xhci, "Error while assigning device slot ID\n");
3913 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
3914 HCS_MAX_SLOTS(
3915 readl(&xhci->cap_regs->hcs_params1)));
3916 xhci_free_command(xhci, command);
3917 return 0;
3918 }
3919
3920 xhci_free_command(xhci, command);
3921
3922 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3923 spin_lock_irqsave(&xhci->lock, flags);
3924 ret = xhci_reserve_host_control_ep_resources(xhci);
3925 if (ret) {
3926 spin_unlock_irqrestore(&xhci->lock, flags);
3927 xhci_warn(xhci, "Not enough host resources, "
3928 "active endpoint contexts = %u\n",
3929 xhci->num_active_eps);
3930 goto disable_slot;
3931 }
3932 spin_unlock_irqrestore(&xhci->lock, flags);
3933 }
3934
3935
3936
3937
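	/*
	 * Use GFP_NOIO: this can be called from storage error handling via
	 * xhci_discover_or_reset_device().
	 */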
3938 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
3939 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
3940 goto disable_slot;
3941 }
3942 vdev = xhci->devs[slot_id];
3943 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
3944 trace_xhci_alloc_dev(slot_ctx);
3945
3946 udev->slot_id = slot_id;
3947
3948 xhci_debugfs_create_slot(xhci, slot_id);
3949
3950#ifndef CONFIG_USB_DEFAULT_PERSIST
3951
3952
3953
3954
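	/*
	 * When resetting on resume, keep the controller out of runtime
	 * suspend while a device is attached.
	 */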
3955 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3956 pm_runtime_get_noresume(hcd->self.controller);
3957#endif
3958
3959
3960
3961 return 1;
3962
3963disable_slot:
3964 ret = xhci_disable_slot(xhci, udev->slot_id);
3965 if (ret)
3966 xhci_free_virt_device(xhci, udev->slot_id);
3967
3968 return 0;
3969}

/*
 * Issue an Address Device command for the slot.  With SETUP_CONTEXT_ONLY the
 * command only moves the slot to the Default state without sending a USB
 * SET_ADDRESS request to the device; with SETUP_CONTEXT_ADDRESS the device
 * is actually addressed.
 */
3975static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
3976 enum xhci_setup_dev setup)
3977{
3978 const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
3979 unsigned long flags;
3980 struct xhci_virt_device *virt_dev;
3981 int ret = 0;
3982 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3983 struct xhci_slot_ctx *slot_ctx;
3984 struct xhci_input_control_ctx *ctrl_ctx;
3985 u64 temp_64;
3986 struct xhci_command *command = NULL;
3987
3988 mutex_lock(&xhci->mutex);
3989
3990 if (xhci->xhc_state) {
3991 ret = -ESHUTDOWN;
3992 goto out;
3993 }
3994
3995 if (!udev->slot_id) {
3996 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3997 "Bad Slot ID %d", udev->slot_id);
3998 ret = -EINVAL;
3999 goto out;
4000 }
4001
4002 virt_dev = xhci->devs[udev->slot_id];
4003
4004 if (WARN_ON(!virt_dev)) {
4005
4006
4007
4008
4009
4010 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
4011 udev->slot_id);
4012 ret = -EINVAL;
4013 goto out;
4014 }
4015 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4016 trace_xhci_setup_device_slot(slot_ctx);
4017
4018 if (setup == SETUP_CONTEXT_ONLY) {
4019 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
4020 SLOT_STATE_DEFAULT) {
4021 xhci_dbg(xhci, "Slot already in default state\n");
4022 goto out;
4023 }
4024 }
4025
4026 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4027 if (!command) {
4028 ret = -ENOMEM;
4029 goto out;
4030 }
4031
4032 command->in_ctx = virt_dev->in_ctx;
4033
4034 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
4035 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
4036 if (!ctrl_ctx) {
4037 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4038 __func__);
4039 ret = -EINVAL;
4040 goto out;
4041 }
4042
4043
4044
4045
4046
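	/*
	 * On the first address of a freshly allocated slot the slot context
	 * is still empty and must be set up; otherwise only the EP0 dequeue
	 * pointer in the input context needs refreshing.
	 */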
4047 if (!slot_ctx->dev_info)
4048 xhci_setup_addressable_virt_dev(xhci, udev);
4049
4050 else
4051 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
4052 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
4053 ctrl_ctx->drop_flags = 0;
4054
4055 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4056 le32_to_cpu(slot_ctx->dev_info) >> 27);
4057
4058 trace_xhci_address_ctrl_ctx(ctrl_ctx);
4059 spin_lock_irqsave(&xhci->lock, flags);
4060 trace_xhci_setup_device(virt_dev);
4061 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
4062 udev->slot_id, setup);
4063 if (ret) {
4064 spin_unlock_irqrestore(&xhci->lock, flags);
4065 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4066 "FIXME: allocate a command ring segment");
4067 goto out;
4068 }
4069 xhci_ring_cmd_db(xhci);
4070 spin_unlock_irqrestore(&xhci->lock, flags);
4071
4072
4073 wait_for_completion(command->completion);
4074
4075
4076
4077
4078
4079 switch (command->status) {
4080 case COMP_COMMAND_ABORTED:
4081 case COMP_COMMAND_RING_STOPPED:
4082 xhci_warn(xhci, "Timeout while waiting for setup device command\n");
4083 ret = -ETIME;
4084 break;
4085 case COMP_CONTEXT_STATE_ERROR:
4086 case COMP_SLOT_NOT_ENABLED_ERROR:
4087 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
4088 act, udev->slot_id);
4089 ret = -EINVAL;
4090 break;
4091 case COMP_USB_TRANSACTION_ERROR:
4092 dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
4093
4094 mutex_unlock(&xhci->mutex);
4095 ret = xhci_disable_slot(xhci, udev->slot_id);
4096 if (!ret)
4097 xhci_alloc_dev(hcd, udev);
4098 kfree(command->completion);
4099 kfree(command);
4100 return -EPROTO;
4101 case COMP_INCOMPATIBLE_DEVICE_ERROR:
4102 dev_warn(&udev->dev,
4103 "ERROR: Incompatible device for setup %s command\n", act);
4104 ret = -ENODEV;
4105 break;
4106 case COMP_SUCCESS:
4107 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4108 "Successful setup %s command", act);
4109 break;
4110 default:
4111 xhci_err(xhci,
4112 "ERROR: unexpected setup %s command completion code 0x%x.\n",
4113 act, command->status);
4114 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
4115 ret = -EINVAL;
4116 break;
4117 }
4118 if (ret)
4119 goto out;
4120 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
4121 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4122 "Op regs DCBAA ptr = %#016llx", temp_64);
4123 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4124 "Slot ID %d dcbaa entry @%p = %#016llx",
4125 udev->slot_id,
4126 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
4127 (unsigned long long)
4128 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
4129 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4130 "Output Context DMA address = %#08llx",
4131 (unsigned long long)virt_dev->out_ctx->dma);
4132 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4133 le32_to_cpu(slot_ctx->dev_info) >> 27);
4134
4135
4136
4137
4138 trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
4139 le32_to_cpu(slot_ctx->dev_info) >> 27);
4140
4141 ctrl_ctx->add_flags = 0;
4142 ctrl_ctx->drop_flags = 0;
4143 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4144 udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4145
4146 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4147 "Internal device address = %d",
4148 le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4149out:
4150 mutex_unlock(&xhci->mutex);
4151 if (command) {
4152 kfree(command->completion);
4153 kfree(command);
4154 }
4155 return ret;
4156}
4157
4158static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
4159{
4160 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
4161}
4162
4163static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
4164{
4165 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
4166}
4167

/*
 * Translate a root hub port number (1-based, relative to the USB2 or USB3
 * bus that hcd represents) into the raw port number used to index the
 * hardware port register arrays.
 */
4174int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
4175{
4176 struct xhci_hub *rhub;
4177
4178 rhub = xhci_get_rhub(hcd);
4179 return rhub->ports[port1 - 1]->hw_portnum + 1;
4180}
4181
/*
 * Change the Max Exit Latency (MEL) field in the slot context by issuing an
 * evaluate context command, and remember the new value on success.
 */
4186static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4187 struct usb_device *udev, u16 max_exit_latency)
4188{
4189 struct xhci_virt_device *virt_dev;
4190 struct xhci_command *command;
4191 struct xhci_input_control_ctx *ctrl_ctx;
4192 struct xhci_slot_ctx *slot_ctx;
4193 unsigned long flags;
4194 int ret;
4195
4196 spin_lock_irqsave(&xhci->lock, flags);
4197
4198 virt_dev = xhci->devs[udev->slot_id];
4199
4200
4201
4202
4203
4204
4205
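	/*
	 * Nothing to do if the virt device is gone (e.g. after an xHC
	 * re-initialization) or the requested MEL is already in effect.
	 */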
4206 if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
4207 spin_unlock_irqrestore(&xhci->lock, flags);
4208 return 0;
4209 }
4210
4211
4212 command = xhci->lpm_command;
4213 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
4214 if (!ctrl_ctx) {
4215 spin_unlock_irqrestore(&xhci->lock, flags);
4216 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4217 __func__);
4218 return -ENOMEM;
4219 }
4220
4221 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4222 spin_unlock_irqrestore(&xhci->lock, flags);
4223
4224 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4225 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4226 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4227 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4228 slot_ctx->dev_state = 0;
4229
4230 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
4231 "Set up evaluate context for LPM MEL change.");
4232
4233
4234 ret = xhci_configure_endpoint(xhci, udev, command,
4235 true, true);
4236
4237 if (!ret) {
4238 spin_lock_irqsave(&xhci->lock, flags);
4239 virt_dev->current_mel = max_exit_latency;
4240 spin_unlock_irqrestore(&xhci->lock, flags);
4241 }
4242 return ret;
4243}
4244
4245#ifdef CONFIG_PM
4246
4247
4248static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
4249 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
4250
4251
4252static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
4253 struct usb_device *udev)
4254{
4255 int u2del, besl, besl_host;
4256 int besl_device = 0;
4257 u32 field;
4258
4259 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
4260 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4261
4262 if (field & USB_BESL_SUPPORT) {
4263 for (besl_host = 0; besl_host < 16; besl_host++) {
4264 if (xhci_besl_encoding[besl_host] >= u2del)
4265 break;
4266 }
4267
4268 if (field & USB_BESL_BASELINE_VALID)
4269 besl_device = USB_GET_BESL_BASELINE(field);
4270 else if (field & USB_BESL_DEEP_VALID)
4271 besl_device = USB_GET_BESL_DEEP(field);
4272 } else {
4273 if (u2del <= 50)
4274 besl_host = 0;
4275 else
4276 besl_host = (u2del - 51) / 75 + 1;
4277 }
4278
4279 besl = besl_host + besl_device;
4280 if (besl > 15)
4281 besl = 15;
4282
4283 return besl;
4284}
4285
4286
4287static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
4288{
4289 u32 field;
4290 int l1;
4291 int besld = 0;
4292 int hirdm = 0;
4293
4294 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4295
4296
4297 l1 = udev->l1_params.timeout / 256;
4298
4299
4300 if (field & USB_BESL_DEEP_VALID) {
4301 besld = USB_GET_BESL_DEEP(field);
4302 hirdm = 1;
4303 }
4304
4305 return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
4306}
4307
4308static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4309 struct usb_device *udev, int enable)
4310{
4311 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4312 struct xhci_port **ports;
4313 __le32 __iomem *pm_addr, *hlpm_addr;
4314 u32 pm_val, hlpm_val, field;
4315 unsigned int port_num;
4316 unsigned long flags;
4317 int hird, exit_latency;
4318 int ret;
4319
4320 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
4321 !udev->lpm_capable)
4322 return -EPERM;
4323
4324 if (!udev->parent || udev->parent->parent ||
4325 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4326 return -EPERM;
4327
4328 if (udev->usb2_hw_lpm_capable != 1)
4329 return -EPERM;
4330
4331 spin_lock_irqsave(&xhci->lock, flags);
4332
4333 ports = xhci->usb2_rhub.ports;
4334 port_num = udev->portnum - 1;
4335 pm_addr = ports[port_num]->addr + PORTPMSC;
4336 pm_val = readl(pm_addr);
4337 hlpm_addr = ports[port_num]->addr + PORTHLPMC;
4338
4339 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
4340 enable ? "enable" : "disable", port_num + 1);
4341
4342 if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) {
4343
4344 if (udev->usb2_hw_lpm_besl_capable) {
4345
4346
4347
4348
4349 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4350 if ((field & USB_BESL_SUPPORT) &&
4351 (field & USB_BESL_BASELINE_VALID))
4352 hird = USB_GET_BESL_BASELINE(field);
4353 else
4354 hird = udev->l1_params.besl;
4355
4356 exit_latency = xhci_besl_encoding[hird];
4357 spin_unlock_irqrestore(&xhci->lock, flags);
4358
4359
4360
4361
4362
4363
4364
4365
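			/*
			 * Changing the max exit latency issues an evaluate
			 * context command and sleeps, so it must be done with
			 * the spinlock dropped and the bandwidth mutex held.
			 */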
4366 mutex_lock(hcd->bandwidth_mutex);
4367 ret = xhci_change_max_exit_latency(xhci, udev,
4368 exit_latency);
4369 mutex_unlock(hcd->bandwidth_mutex);
4370
4371 if (ret < 0)
4372 return ret;
4373 spin_lock_irqsave(&xhci->lock, flags);
4374
4375 hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
4376 writel(hlpm_val, hlpm_addr);
4377
4378 readl(hlpm_addr);
4379 } else {
4380 hird = xhci_calculate_hird_besl(xhci, udev);
4381 }
4382
4383 pm_val &= ~PORT_HIRD_MASK;
4384 pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
4385 writel(pm_val, pm_addr);
4386 pm_val = readl(pm_addr);
4387 pm_val |= PORT_HLE;
4388 writel(pm_val, pm_addr);
4389
4390 readl(pm_addr);
4391 } else {
4392 pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
4393 writel(pm_val, pm_addr);
4394
4395 readl(pm_addr);
4396 if (udev->usb2_hw_lpm_besl_capable) {
4397 spin_unlock_irqrestore(&xhci->lock, flags);
4398 mutex_lock(hcd->bandwidth_mutex);
4399 xhci_change_max_exit_latency(xhci, udev, 0);
4400 mutex_unlock(hcd->bandwidth_mutex);
4401 return 0;
4402 }
4403 }
4404
4405 spin_unlock_irqrestore(&xhci->lock, flags);
4406 return 0;
4407}
4408
/*
 * Check whether a USB2 port (identified by its 0-based index) is covered by
 * an extended capability that advertises the given protocol capability bits.
 * Returns 1 if it is supported, 0 otherwise.
 */
4413static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
4414 unsigned capability)
4415{
4416 u32 port_offset, port_count;
4417 int i;
4418
4419 for (i = 0; i < xhci->num_ext_caps; i++) {
4420 if (xhci->ext_caps[i] & capability) {
4421
4422 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
4423 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
4424 if (port >= port_offset &&
4425 port < port_offset + port_count)
4426 return 1;
4427 }
4428 }
4429 return 0;
4430}
4431
4432static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4433{
4434 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4435 int portnum = udev->portnum - 1;
4436
4437 if (hcd->speed >= HCD_USB3 || !udev->lpm_capable)
4438 return 0;
4439
4440
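	/*
	 * Hardware LPM is only set up for non-hub devices attached directly
	 * to the root hub.
	 */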
4441 if (!udev->parent || udev->parent->parent ||
4442 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4443 return 0;
4444
4445 if (xhci->hw_lpm_support == 1 &&
4446 xhci_check_usb2_port_capability(
4447 xhci, portnum, XHCI_HLC)) {
4448 udev->usb2_hw_lpm_capable = 1;
4449 udev->l1_params.timeout = XHCI_L1_TIMEOUT;
4450 udev->l1_params.besl = XHCI_DEFAULT_BESL;
4451 if (xhci_check_usb2_port_capability(xhci, portnum,
4452 XHCI_BLC))
4453 udev->usb2_hw_lpm_besl_capable = 1;
4454 }
4455
4456 return 0;
4457}
4458
/* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns/us */
4462static unsigned long long xhci_service_interval_to_ns(
4463 struct usb_endpoint_descriptor *desc)
4464{
4465 return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
4466}
4467
4468static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
4469 enum usb3_link_state state)
4470{
4471 unsigned long long sel;
4472 unsigned long long pel;
4473 unsigned int max_sel_pel;
4474 char *state_name;
4475
4476 switch (state) {
4477 case USB3_LPM_U1:
4478
4479 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
4480 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
4481 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
4482 state_name = "U1";
4483 break;
4484 case USB3_LPM_U2:
4485 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
4486 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
4487 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
4488 state_name = "U2";
4489 break;
4490 default:
4491 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
4492 __func__);
4493 return USB3_LPM_DISABLED;
4494 }
4495
4496 if (sel <= max_sel_pel && pel <= max_sel_pel)
4497 return USB3_LPM_DEVICE_INITIATED;
4498
4499 if (sel > max_sel_pel)
4500 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4501 "due to long SEL %llu ms\n",
4502 state_name, sel);
4503 else
4504 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4505 "due to long PEL %llu ms\n",
4506 state_name, pel);
4507 return USB3_LPM_DISABLED;
4508}
4509
/*
 * Intel host specific heuristic for the U1 inactivity timeout: scale the
 * device's U1 system exit latency (SEL) by an endpoint-type dependent factor,
 * and for periodic endpoints make sure the timeout is at least a bit longer
 * than the service interval.  Returns the timeout in nanoseconds.
 */
4518static unsigned long long xhci_calculate_intel_u1_timeout(
4519 struct usb_device *udev,
4520 struct usb_endpoint_descriptor *desc)
4521{
4522 unsigned long long timeout_ns;
4523 int ep_type;
4524 int intr_type;
4525
4526 ep_type = usb_endpoint_type(desc);
4527 switch (ep_type) {
4528 case USB_ENDPOINT_XFER_CONTROL:
4529 timeout_ns = udev->u1_params.sel * 3;
4530 break;
4531 case USB_ENDPOINT_XFER_BULK:
4532 timeout_ns = udev->u1_params.sel * 5;
4533 break;
4534 case USB_ENDPOINT_XFER_INT:
4535 intr_type = usb_endpoint_interrupt_type(desc);
4536 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
4537 timeout_ns = udev->u1_params.sel * 3;
4538 break;
4539 }
4540
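		/* Otherwise interrupt endpoints use the isoc calculation
		 * below: fall through.
		 */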
4541
4542 case USB_ENDPOINT_XFER_ISOC:
4543 timeout_ns = xhci_service_interval_to_ns(desc);
4544 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
4545 if (timeout_ns < udev->u1_params.sel * 2)
4546 timeout_ns = udev->u1_params.sel * 2;
4547 break;
4548 default:
4549 return 0;
4550 }
4551
4552 return timeout_ns;
4553}
4554
4555
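/* Returns the hub-encoded U1 timeout value (1 us units), USB3_LPM_DISABLED,
 * or USB3_LPM_DEVICE_INITIATED if only device-initiated U1 is allowed.
 */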
4556static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
4557 struct usb_device *udev,
4558 struct usb_endpoint_descriptor *desc)
4559{
4560 unsigned long long timeout_ns;
4561
4562
4563 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
4564 if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
4565 dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
4566 return USB3_LPM_DISABLED;
4567 }
4568 }
4569
4570 if (xhci->quirks & XHCI_INTEL_HOST)
4571 timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
4572 else
4573 timeout_ns = udev->u1_params.sel;
4574
4575
4576
4577
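	/* The U1 timeout is encoded in 1 us intervals.  Never return a raw
	 * zero here, since that value means USB3_LPM_DISABLED.
	 */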
4578 if (timeout_ns == USB3_LPM_DISABLED)
4579 timeout_ns = 1;
4580 else
4581 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
4582
4583
4584
4585
4586 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
4587 return timeout_ns;
	dev_dbg(&udev->dev, "Hub-initiated U1 disabled due to long timeout %llu us\n",
		timeout_ns);
4590 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
4591}
4592
4593
4594
4595
4596
4597
4598
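/*
 * Intel-specific U2 timeout heuristic: the largest of 10 ms, the endpoint
 * service interval for periodic endpoints, and the device's U2 exit latency
 * (bU2DevExitLat).  Returns the timeout in nanoseconds.
 */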
4599static unsigned long long xhci_calculate_intel_u2_timeout(
4600 struct usb_device *udev,
4601 struct usb_endpoint_descriptor *desc)
4602{
4603 unsigned long long timeout_ns;
4604 unsigned long long u2_del_ns;
4605
4606 timeout_ns = 10 * 1000 * 1000;
4607
4608 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4609 (xhci_service_interval_to_ns(desc) > timeout_ns))
4610 timeout_ns = xhci_service_interval_to_ns(desc);
4611
4612 u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
4613 if (u2_del_ns > timeout_ns)
4614 timeout_ns = u2_del_ns;
4615
4616 return timeout_ns;
4617}
4618
4619
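/* Returns the hub-encoded U2 timeout value (256 us units), USB3_LPM_DISABLED,
 * or USB3_LPM_DEVICE_INITIATED if only device-initiated U2 is allowed.
 */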
4620static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
4621 struct usb_device *udev,
4622 struct usb_endpoint_descriptor *desc)
4623{
4624 unsigned long long timeout_ns;
4625
4626
4627 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
4628 if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
4629 dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
4630 return USB3_LPM_DISABLED;
4631 }
4632 }
4633
4634 if (xhci->quirks & XHCI_INTEL_HOST)
4635 timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
4636 else
4637 timeout_ns = udev->u2_params.sel;
4638
4639
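	/* The U2 timeout is encoded in 256 us intervals */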
4640 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
4641
4642
4643
4644 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
4645 return timeout_ns;
	dev_dbg(&udev->dev, "Hub-initiated U2 disabled due to long timeout %llu (256 us units)\n",
		timeout_ns);
4648 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
4649}
4650
4651static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4652 struct usb_device *udev,
4653 struct usb_endpoint_descriptor *desc,
4654 enum usb3_link_state state,
4655 u16 *timeout)
4656{
4657 if (state == USB3_LPM_U1)
4658 return xhci_calculate_u1_timeout(xhci, udev, desc);
4659 else if (state == USB3_LPM_U2)
4660 return xhci_calculate_u2_timeout(xhci, udev, desc);
4661
4662 return USB3_LPM_DISABLED;
4663}
4664
4665static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4666 struct usb_device *udev,
4667 struct usb_endpoint_descriptor *desc,
4668 enum usb3_link_state state,
4669 u16 *timeout)
4670{
4671 u16 alt_timeout;
4672
4673 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4674 desc, state, timeout);
4675
4676
4677
4678
4679
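	/* If this endpoint forces LPM off, or limits it to device-initiated
	 * only, record that verdict and tell the caller to stop scanning
	 * further endpoints.
	 */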
4680 if (alt_timeout == USB3_LPM_DISABLED ||
4681 alt_timeout == USB3_LPM_DEVICE_INITIATED) {
4682 *timeout = alt_timeout;
4683 return -E2BIG;
4684 }
4685 if (alt_timeout > *timeout)
4686 *timeout = alt_timeout;
4687 return 0;
4688}
4689
4690static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
4691 struct usb_device *udev,
4692 struct usb_host_interface *alt,
4693 enum usb3_link_state state,
4694 u16 *timeout)
4695{
4696 int j;
4697
	for (j = 0; j < alt->desc.bNumEndpoints; j++) {
		if (xhci_update_timeout_for_endpoint(xhci, udev,
				&alt->endpoint[j].desc, state, timeout))
			return -E2BIG;
	}
4704 return 0;
4705}
4706
4707static int xhci_check_intel_tier_policy(struct usb_device *udev,
4708 enum usb3_link_state state)
4709{
4710 struct usb_device *parent;
4711 unsigned int num_hubs;
4712
4713 if (state == USB3_LPM_U2)
4714 return 0;
4715
4716
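	/* Count the external hubs between the device and the root hub. */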
4717 for (parent = udev->parent, num_hubs = 0; parent->parent;
4718 parent = parent->parent)
4719 num_hubs++;
4720
4721 if (num_hubs < 2)
4722 return 0;
4723
	dev_dbg(&udev->dev, "Disabling U1 link state for device below second-tier hub.\n");
	dev_dbg(&udev->dev, "Plug device into first-tier hub to decrease power consumption.\n");
4728 return -E2BIG;
4729}
4730
4731static int xhci_check_tier_policy(struct xhci_hcd *xhci,
4732 struct usb_device *udev,
4733 enum usb3_link_state state)
4734{
4735 if (xhci->quirks & XHCI_INTEL_HOST)
4736 return xhci_check_intel_tier_policy(udev, state);
4737 else
4738 return 0;
4739}
4740
4741
4742
4743
4744
4745
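/*
 * Scan ep0 and every endpoint of the active configuration to find the largest
 * U1 or U2 timeout the device needs, or decide that hub-initiated LPM must be
 * disabled.  Returns the hub-encoded timeout, or USB3_LPM_DISABLED.
 */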
4746static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
4747 struct usb_device *udev, enum usb3_link_state state)
4748{
4749 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4750 struct usb_host_config *config;
4751 char *state_name;
4752 int i;
4753 u16 timeout = USB3_LPM_DISABLED;
4754
4755 if (state == USB3_LPM_U1)
4756 state_name = "U1";
4757 else if (state == USB3_LPM_U2)
4758 state_name = "U2";
4759 else {
4760 dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
4761 state);
4762 return timeout;
4763 }
4764
4765 if (xhci_check_tier_policy(xhci, udev, state) < 0)
4766 return timeout;
4767
4768
4769
4770
4771 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
4772 state, &timeout))
4773 return timeout;
4774
4775 config = udev->actconfig;
4776 if (!config)
4777 return timeout;
4778
4779 for (i = 0; i < config->desc.bNumInterfaces; i++) {
4780 struct usb_driver *driver;
4781 struct usb_interface *intf = config->interface[i];
4782
4783 if (!intf)
4784 continue;
4785
4786
4787
4788
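		/* A bound interface driver may veto hub-initiated LPM for the
		 * whole device.
		 */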
4789 if (intf->dev.driver) {
4790 driver = to_usb_driver(intf->dev.driver);
4791 if (driver && driver->disable_hub_initiated_lpm) {
				dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
					state_name, driver->name);
4795 return xhci_get_timeout_no_hub_lpm(udev, state);
4796 }
4797 }
4798
4799
4800 if (!intf->cur_altsetting)
4801 continue;
4802
4803 if (xhci_update_timeout_for_interface(xhci, udev,
4804 intf->cur_altsetting,
4805 state, &timeout))
4806 return timeout;
4807 }
4808 return timeout;
4809}
4810
4811static int calculate_max_exit_latency(struct usb_device *udev,
4812 enum usb3_link_state state_changed,
4813 u16 hub_encoded_timeout)
4814{
4815 unsigned long long u1_mel_us = 0;
4816 unsigned long long u2_mel_us = 0;
4817 unsigned long long mel_us = 0;
4818 bool disabling_u1;
4819 bool disabling_u2;
4820 bool enabling_u1;
4821 bool enabling_u2;
4822
4823 disabling_u1 = (state_changed == USB3_LPM_U1 &&
4824 hub_encoded_timeout == USB3_LPM_DISABLED);
4825 disabling_u2 = (state_changed == USB3_LPM_U2 &&
4826 hub_encoded_timeout == USB3_LPM_DISABLED);
4827
4828 enabling_u1 = (state_changed == USB3_LPM_U1 &&
4829 hub_encoded_timeout != USB3_LPM_DISABLED);
4830 enabling_u2 = (state_changed == USB3_LPM_U2 &&
4831 hub_encoded_timeout != USB3_LPM_DISABLED);
4832
4833
4834
4835
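	/* Include a state's exit latency if it is currently enabled and not
	 * being disabled, or if it is about to be enabled.
	 */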
4836 if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
4837 enabling_u1)
4838 u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
4839 if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
4840 enabling_u2)
4841 u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
4842
4843 if (u1_mel_us > u2_mel_us)
4844 mel_us = u1_mel_us;
4845 else
4846 mel_us = u2_mel_us;
4847
4848 if (mel_us > MAX_EXIT) {
		dev_warn(&udev->dev, "Link PM max exit latency of %lluus is too big.\n",
			mel_us);
4851 return -E2BIG;
4852 }
4853 return mel_us;
4854}
4855
4856
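/*
 * Program the new Max Exit Latency that a U1/U2 timeout change requires and
 * return the hub-encoded timeout, or USB3_LPM_DISABLED if LPM can't be used.
 */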
4857static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4858 struct usb_device *udev, enum usb3_link_state state)
4859{
4860 struct xhci_hcd *xhci;
4861 u16 hub_encoded_timeout;
4862 int mel;
4863 int ret;
4864
4865 xhci = hcd_to_xhci(hcd);
4866
4867
4868
4869
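	/* LPM timeout calculations are host controller specific, so only
	 * enable hub-initiated LPM if the vendor provided timeout support
	 * (XHCI_LPM_SUPPORT) and the device has a slot on this host.
	 */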
4870 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4871 !xhci->devs[udev->slot_id])
4872 return USB3_LPM_DISABLED;
4873
4874 hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
4875 mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
4876 if (mel < 0) {
4877
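		/* Max Exit Latency is too big; disable LPM entirely. */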
4878 hub_encoded_timeout = USB3_LPM_DISABLED;
4879 mel = 0;
4880 }
4881
4882 ret = xhci_change_max_exit_latency(xhci, udev, mel);
4883 if (ret)
4884 return ret;
4885 return hub_encoded_timeout;
4886}
4887
4888static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4889 struct usb_device *udev, enum usb3_link_state state)
4890{
4891 struct xhci_hcd *xhci;
4892 u16 mel;
4893
4894 xhci = hcd_to_xhci(hcd);
4895 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4896 !xhci->devs[udev->slot_id])
4897 return 0;
4898
4899 mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
4900 return xhci_change_max_exit_latency(xhci, udev, mel);
4901}
4902#else
4903
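/*
 * Stubs used when the link power management support above is compiled out:
 * report USB 3 LPM as disabled and treat the USB 2 LPM hooks as no-ops.
 */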
4904static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4905 struct usb_device *udev, int enable)
4906{
4907 return 0;
4908}
4909
4910static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4911{
4912 return 0;
4913}
4914
4915static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4916 struct usb_device *udev, enum usb3_link_state state)
4917{
4918 return USB3_LPM_DISABLED;
4919}
4920
4921static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4922 struct usb_device *udev, enum usb3_link_state state)
4923{
4924 return 0;
4925}
4926#endif
4927
4928
4929
4930
4931
4932
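/*
 * Once the hub descriptor has been fetched for a device, update the xHC's
 * slot context with the hub characteristics (hub flag, multi-TT, number of
 * ports and TT think time).
 */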
4933static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
4934 struct usb_tt *tt, gfp_t mem_flags)
4935{
4936 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4937 struct xhci_virt_device *vdev;
4938 struct xhci_command *config_cmd;
4939 struct xhci_input_control_ctx *ctrl_ctx;
4940 struct xhci_slot_ctx *slot_ctx;
4941 unsigned long flags;
4942 unsigned think_time;
4943 int ret;
4944
4945
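	/* Ignore root hubs */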
4946 if (!hdev->parent)
4947 return 0;
4948
4949 vdev = xhci->devs[hdev->slot_id];
4950 if (!vdev) {
4951 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
4952 return -EINVAL;
4953 }
4954
4955 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
4956 if (!config_cmd)
4957 return -ENOMEM;
4958
4959 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
4960 if (!ctrl_ctx) {
4961 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4962 __func__);
4963 xhci_free_command(xhci, config_cmd);
4964 return -ENOMEM;
4965 }
4966
4967 spin_lock_irqsave(&xhci->lock, flags);
4968 if (hdev->speed == USB_SPEED_HIGH &&
4969 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
4970 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
4971 xhci_free_command(xhci, config_cmd);
4972 spin_unlock_irqrestore(&xhci->lock, flags);
4973 return -ENOMEM;
4974 }
4975
4976 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
4977 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4978 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
4979 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
4980
4981
4982
4983
4984
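	/*
	 * The MTT flag is only meaningful for multi-TT high-speed hubs; make
	 * sure it is cleared for full-speed hubs in case it was set when the
	 * slot context was first initialized.
	 */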
4985 if (tt->multi)
4986 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
4987 else if (hdev->speed == USB_SPEED_FULL)
4988 slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
4989
4990 if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub TT think time and number of ports\n",
			(unsigned int) xhci->hci_version);
4994 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
4995
4996
4997
4998
4999
5000
5001
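		/* The slot context encodes TT think time in units of 666 ns
		 * (8 full-speed bit times): 0 means 8 FS bit times, 1 means
		 * 16, and so on.
		 */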
5002 think_time = tt->think_time;
5003 if (think_time != 0)
5004 think_time = (think_time / 666) - 1;
5005 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
5006 slot_ctx->tt_info |=
5007 cpu_to_le32(TT_THINK_TIME(think_time));
5008 } else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub TT think time or number of ports\n",
			(unsigned int) xhci->hci_version);
5012 }
5013 slot_ctx->dev_state = 0;
5014 spin_unlock_irqrestore(&xhci->lock, flags);
5015
5016 xhci_dbg(xhci, "Set up %s for hub device.\n",
5017 (xhci->hci_version > 0x95) ?
5018 "configure endpoint" : "evaluate context");
5019
5020
5021
5022
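	/* Issue and wait for the Configure Endpoint command (xHCI 0.96 and
	 * later) or the Evaluate Context command (older hosts).
	 */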
5023 if (xhci->hci_version > 0x95)
5024 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5025 false, false);
5026 else
5027 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5028 true, false);
5029
5030 xhci_free_command(xhci, config_cmd);
5031 return ret;
5032}
5033
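/* MFINDEX counts 125 us microframes; shift right by 3 for the 1 ms frame number. */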
5034static int xhci_get_frame(struct usb_hcd *hcd)
5035{
5036 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5037
5038 return readl(&xhci->run_regs->microframe_index) >> 3;
5039}
5040
5041int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
5042{
5043 struct xhci_hcd *xhci;
5044
5045
5046
5047
5048 struct device *dev = hcd->self.sysdev;
5049 unsigned int minor_rev;
5050 int retval;
5051
5052
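	/* Accept arbitrarily long scatter-gather lists */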
5053 hcd->self.sg_tablesize = ~0;
5054
5055
5056 hcd->self.no_sg_constraint = 1;
5057
5058
5059 hcd->self.no_stop_on_short = 1;
5060
5061 xhci = hcd_to_xhci(hcd);
5062
5063 if (usb_hcd_is_primary_hcd(hcd)) {
5064 xhci->main_hcd = hcd;
5065 xhci->usb2_rhub.hcd = hcd;
5066
5067
5068
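		/* Mark the first roothub as USB 2.0; the USB 3.x roothub is
		 * registered separately by the xHCI core.
		 */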
5069 hcd->speed = HCD_USB2;
5070 hcd->self.root_hub->speed = USB_SPEED_HIGH;
5071
5072
5073
5074
5075
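		/*
		 * The USB 2.0 roothub behind an xHC has an integrated TT
		 * (rate-matching hub), rather than a companion OHCI/UHCI
		 * controller.
		 */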
5076 hcd->has_tt = 1;
5077 } else {
5078
5079
5080
5081
5082
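		/* The supported-protocol capability stores the USB 3 minor
		 * revision as BCD in the upper nibble of min_rev.
		 */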
5083 minor_rev = xhci->usb3_rhub.min_rev / 0x10;
5084
5085 switch (minor_rev) {
5086 case 2:
5087 hcd->speed = HCD_USB32;
5088 hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
5089 hcd->self.root_hub->rx_lanes = 2;
5090 hcd->self.root_hub->tx_lanes = 2;
5091 break;
5092 case 1:
5093 hcd->speed = HCD_USB31;
5094 hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
5095 break;
5096 }
5097 xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
5098 minor_rev,
5099 minor_rev ? "Enhanced " : "");
5100
5101 xhci->usb3_rhub.hcd = hcd;
5102
5103
5104
5105 return 0;
5106 }
5107
5108 mutex_init(&xhci->mutex);
5109 xhci->cap_regs = hcd->regs;
5110 xhci->op_regs = hcd->regs +
5111 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
5112 xhci->run_regs = hcd->regs +
5113 (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
5114
5115 xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
5116 xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
5117 xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
5118 xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase);
5119 xhci->hci_version = HC_VERSION(xhci->hcc_params);
5120 xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
5121 if (xhci->hci_version > 0x100)
5122 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
5123
5124 xhci->quirks |= quirks;
5125
5126 get_quirks(dev, xhci);
5127
5128
5129
5130
5131
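	/* Hosts following xHCI 1.0 or later can generate a spurious success
	 * event after a short transfer; set the quirk so it is ignored.
	 */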
5132 if (xhci->hci_version > 0x96)
5133 xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
5134
5135
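	/* Make sure the HC is halted before it is reset and reinitialized. */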
5136 retval = xhci_halt(xhci);
5137 if (retval)
5138 return retval;
5139
5140 xhci_zero_64b_regs(xhci);
5141
5142 xhci_dbg(xhci, "Resetting HCD\n");
5143
5144 retval = xhci_reset(xhci);
5145 if (retval)
5146 return retval;
5147 xhci_dbg(xhci, "Reset complete\n");
5148
5149
5150
5151
5152
5153
5154
5155
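	/* Some hosts advertise 64-bit addressing (AC64) even though the
	 * platform cannot use 64-bit DMA; the XHCI_NO_64BIT_SUPPORT quirk
	 * clears the bit so the 32-bit fallback below is taken.
	 */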
5156 if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
5157 xhci->hcc_params &= ~BIT(0);
5158
5159
5160
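	/* Use 64-bit DMA and coherent masks if the controller supports it. */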
5161 if (HCC_64BIT_ADDR(xhci->hcc_params) &&
5162 !dma_set_mask(dev, DMA_BIT_MASK(64))) {
5163 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
5164 dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
5165 } else {
5166
5167
5168
5169
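		/* Explicitly set a 32-bit mask so a 32-bit-only controller
		 * still works on a 64-bit capable system.
		 */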
5170 retval = dma_set_mask(dev, DMA_BIT_MASK(32));
5171 if (retval)
5172 return retval;
5173 xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
5174 dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
5175 }
5176
5177 xhci_dbg(xhci, "Calling HCD init\n");
5178
5179 retval = xhci_init(hcd);
5180 if (retval)
5181 return retval;
5182 xhci_dbg(xhci, "Called HCD init\n");
5183
5184 xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
5185 xhci->hcc_params, xhci->hci_version, xhci->quirks);
5186
5187 return 0;
5188}
5189EXPORT_SYMBOL_GPL(xhci_gen_setup);
5190
5191static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
5192 struct usb_host_endpoint *ep)
5193{
5194 struct xhci_hcd *xhci;
5195 struct usb_device *udev;
5196 unsigned int slot_id;
5197 unsigned int ep_index;
5198 unsigned long flags;
5199
5200 xhci = hcd_to_xhci(hcd);
5201 udev = (struct usb_device *)ep->hcpriv;
5202 slot_id = udev->slot_id;
5203 ep_index = xhci_get_endpoint_index(&ep->desc);
5204
5205 spin_lock_irqsave(&xhci->lock, flags);
5206 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
5207 xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
5208 spin_unlock_irqrestore(&xhci->lock, flags);
5209}
5210
5211static const struct hc_driver xhci_hc_driver = {
5212 .description = "xhci-hcd",
5213 .product_desc = "xHCI Host Controller",
5214 .hcd_priv_size = sizeof(struct xhci_hcd),
5215
5216
5217
5218
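	/*
	 * Generic hardware linkage
	 */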
5219 .irq = xhci_irq,
5220 .flags = HCD_MEMORY | HCD_USB3 | HCD_SHARED,
5221
5222
5223
5224
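	/*
	 * Basic lifecycle operations
	 */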
	.reset = NULL, /* set in xhci_init_driver() */
5226 .start = xhci_run,
5227 .stop = xhci_stop,
5228 .shutdown = xhci_shutdown,
5229
5230
5231
5232
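	/*
	 * Managing I/O requests and associated device resources
	 */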
5233 .map_urb_for_dma = xhci_map_urb_for_dma,
5234 .urb_enqueue = xhci_urb_enqueue,
5235 .urb_dequeue = xhci_urb_dequeue,
5236 .alloc_dev = xhci_alloc_dev,
5237 .free_dev = xhci_free_dev,
5238 .alloc_streams = xhci_alloc_streams,
5239 .free_streams = xhci_free_streams,
5240 .add_endpoint = xhci_add_endpoint,
5241 .drop_endpoint = xhci_drop_endpoint,
5242 .endpoint_reset = xhci_endpoint_reset,
5243 .check_bandwidth = xhci_check_bandwidth,
5244 .reset_bandwidth = xhci_reset_bandwidth,
5245 .address_device = xhci_address_device,
5246 .enable_device = xhci_enable_device,
5247 .update_hub_device = xhci_update_hub_device,
5248 .reset_device = xhci_discover_or_reset_device,
5249
5250
5251
5252
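	/*
	 * Scheduling support
	 */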
5253 .get_frame_number = xhci_get_frame,
5254
5255
5256
5257
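	/*
	 * Root hub support
	 */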
5258 .hub_control = xhci_hub_control,
5259 .hub_status_data = xhci_hub_status_data,
5260 .bus_suspend = xhci_bus_suspend,
5261 .bus_resume = xhci_bus_resume,
5262 .get_resuming_ports = xhci_get_resuming_ports,
5263
5264
5265
5266
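	/*
	 * USB 2 hardware LPM and USB 3 U1/U2 link power management
	 */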
5267 .update_device = xhci_update_device,
5268 .set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm,
5269 .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout,
5270 .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout,
5271 .find_raw_port_number = xhci_find_raw_port_number,
5272 .clear_tt_buffer_complete = xhci_clear_tt_buffer_complete,
5273};
5274
5275void xhci_init_driver(struct hc_driver *drv,
5276 const struct xhci_driver_overrides *over)
5277{
5278 BUG_ON(!over);
5279
5280
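	/* Copy the generic hc_driver template, then apply any overrides. */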
5281 *drv = xhci_hc_driver;
5282
5283 if (over) {
5284 drv->hcd_priv_size += over->extra_priv_size;
5285 if (over->reset)
5286 drv->reset = over->reset;
5287 if (over->start)
5288 drv->start = over->start;
5289 }
5290}
5291EXPORT_SYMBOL_GPL(xhci_init_driver);
5292
5293MODULE_DESCRIPTION(DRIVER_DESC);
5294MODULE_AUTHOR(DRIVER_AUTHOR);
5295MODULE_LICENSE("GPL");
5296
5297static int __init xhci_hcd_init(void)
5298{
5299
5300
5301
5302
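	/*
	 * Check the compiler-generated sizes of structures that must be laid
	 * out in a specific way for hardware access.
	 */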
5303 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
5304 BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
5305 BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
5306
5307
5308
5309 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
5310 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
5311 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
5312 BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
5313 BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
5314
5315 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
5316
5317 if (usb_disabled())
5318 return -ENODEV;
5319
5320 xhci_debugfs_create_root();
5321
5322 return 0;
5323}
5324
5325
5326
5327
5328
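/*
 * Since an init function is provided, an exit function must also be provided
 * so the module can be unloaded.
 */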
5329static void __exit xhci_hcd_fini(void)
5330{
5331 xhci_debugfs_remove_root();
5332}
5333
5334module_init(xhci_hcd_init);
5335module_exit(xhci_hcd_fini);
5336