// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

11#include <linux/pci.h>
12#include <linux/iopoll.h>
13#include <linux/irq.h>
14#include <linux/log2.h>
15#include <linux/module.h>
16#include <linux/moduleparam.h>
17#include <linux/slab.h>
18#include <linux/dmi.h>
19#include <linux/dma-mapping.h>
20
21#include "xhci.h"
22#include "xhci-trace.h"
23#include "xhci-debugfs.h"
24#include "xhci-dbgcap.h"
25
26#define DRIVER_AUTHOR "Sarah Sharp"
27#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
28
29#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
30
31
32static int link_quirk;
33module_param(link_quirk, int, S_IRUGO | S_IWUSR);
34MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
35
36static unsigned long long quirks;
37module_param(quirks, ullong, S_IRUGO);
38MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
39
40static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
41{
42 struct xhci_segment *seg = ring->first_seg;
43
44 if (!td || !td->start_seg)
45 return false;
46 do {
47 if (seg == td->start_seg)
48 return true;
49 seg = seg->next;
50 } while (seg && seg != ring->first_seg);
51
52 return false;
53}
54
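/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success.
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  A read of all ones means the host controller has been
 * removed, in which case -ENODEV is returned.
 */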
68int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
69{
70 u32 result;
71 int ret;
72
73 ret = readl_poll_timeout_atomic(ptr, result,
74 (result & mask) == done ||
75 result == U32_MAX,
76 1, usec);
77 if (result == U32_MAX)
78 return -ENODEV;
79
80 return ret;
81}
82
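/*
 * Disable interrupts and begin the xHCI halting process by clearing the
 * run/stop bit (unless the controller already reports itself halted).
 */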
86void xhci_quiesce(struct xhci_hcd *xhci)
87{
88 u32 halted;
89 u32 cmd;
90 u32 mask;
91
92 mask = ~(XHCI_IRQS);
93 halted = readl(&xhci->op_regs->status) & STS_HALT;
94 if (!halted)
95 mask &= ~CMD_RUN;
96
97 cmd = readl(&xhci->op_regs->command);
98 cmd &= mask;
99 writel(cmd, &xhci->op_regs->command);
100}
101
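/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit, then poll the status register
 * until the HC halted bit is set.  On success the driver state is marked
 * halted and the command ring is marked stopped.
 */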
110int xhci_halt(struct xhci_hcd *xhci)
111{
112 int ret;
113 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
114 xhci_quiesce(xhci);
115
116 ret = xhci_handshake(&xhci->op_regs->status,
117 STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
118 if (ret) {
119 xhci_warn(xhci, "Host halt failed, %d\n", ret);
120 return ret;
121 }
122 xhci->xhc_state |= XHCI_STATE_HALTED;
123 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
124 return ret;
125}
126
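/*
 * Set the HCD start bit and wait for the host controller to start running.
 */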
130int xhci_start(struct xhci_hcd *xhci)
131{
132 u32 temp;
133 int ret;
134
135 temp = readl(&xhci->op_regs->command);
136 temp |= (CMD_RUN);
137 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
138 temp);
139 writel(temp, &xhci->op_regs->command);
140
141
142
143
144
145 ret = xhci_handshake(&xhci->op_regs->status,
146 STS_HALT, 0, XHCI_MAX_HALT_USEC);
147 if (ret == -ETIMEDOUT)
148 xhci_err(xhci, "Host took too long to start, "
149 "waited %u microseconds.\n",
150 XHCI_MAX_HALT_USEC);
151 if (!ret)
		/* clear state flags. Including dying, halted or removing */
153 xhci->xhc_state = 0;
154
155 return ret;
156}
157
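/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated in a controller-dependent manner and the
 * root hub ports lose their state.  The controller must already be halted.
 */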
165int xhci_reset(struct xhci_hcd *xhci)
166{
167 u32 command;
168 u32 state;
169 int ret;
170
171 state = readl(&xhci->op_regs->status);
172
173 if (state == ~(u32)0) {
174 xhci_warn(xhci, "Host not accessible, reset failed.\n");
175 return -ENODEV;
176 }
177
178 if ((state & STS_HALT) == 0) {
179 xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
180 return 0;
181 }
182
183 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
184 command = readl(&xhci->op_regs->command);
185 command |= CMD_RESET;
186 writel(command, &xhci->op_regs->command);
187
	/*
	 * Existing Intel xHCI controllers require a delay of 1 ms after
	 * setting the CMD_RESET bit, and before accessing any HC registers.
	 * This allows the HC to complete the reset operation and be ready
	 * for HC register access.  Without this delay, the subsequent HC
	 * register access may very rarely result in a system hang.
	 */
195 if (xhci->quirks & XHCI_INTEL_HOST)
196 udelay(1000);
197
198 ret = xhci_handshake(&xhci->op_regs->command,
199 CMD_RESET, 0, 10 * 1000 * 1000);
200 if (ret)
201 return ret;
202
203 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
204 usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));
205
206 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
207 "Wait for controller to be ready for doorbell rings");
208
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
212 ret = xhci_handshake(&xhci->op_regs->status,
213 STS_CNR, 0, 10 * 1000 * 1000);
214
215 xhci->usb2_rhub.bus_state.port_c_suspend = 0;
216 xhci->usb2_rhub.bus_state.suspended_ports = 0;
217 xhci->usb2_rhub.bus_state.resuming_ports = 0;
218 xhci->usb3_rhub.bus_state.port_c_suspend = 0;
219 xhci->usb3_rhub.bus_state.suspended_ports = 0;
220 xhci->usb3_rhub.bus_state.resuming_ports = 0;
221
222 return ret;
223}
224
225static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
226{
227 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
228 int err, i;
229 u64 val;
230 u32 intrs;
231
	/*
	 * Some Renesas controllers get into a weird state if they are
	 * reset while programmed with 64bit addresses (they will preserve
	 * the top half of the address in internal, non visible
	 * registers). You end up with half the address coming from the
	 * kernel, and the other half coming from the firmware. Also,
	 * changing the programming leads to extra accesses even if the
	 * controller is supposed to be halted. The controller ends up with
	 * a fatal fault, and is then ripe for being properly reset.
	 *
	 * Special care is taken to only apply this if the device is behind
	 * an iommu. Doing anything when there is no iommu is definitely
	 * unsafe...
	 */
246 if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev))
247 return;
248
249 xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");
250
251
252 val = readl(&xhci->op_regs->command);
253 val &= ~CMD_HSEIE;
254 writel(val, &xhci->op_regs->command);
255
256
257 val = readl(&xhci->op_regs->status);
258 val |= STS_FATAL;
259 writel(val, &xhci->op_regs->status);
260
261
262 val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
263 if (upper_32_bits(val))
264 xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
265 val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
266 if (upper_32_bits(val))
267 xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
268
269 intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
270 ARRAY_SIZE(xhci->run_regs->ir_set));
271
272 for (i = 0; i < intrs; i++) {
273 struct xhci_intr_reg __iomem *ir;
274
275 ir = &xhci->run_regs->ir_set[i];
276 val = xhci_read_64(xhci, &ir->erst_base);
277 if (upper_32_bits(val))
278 xhci_write_64(xhci, 0, &ir->erst_base);
		val = xhci_read_64(xhci, &ir->erst_dequeue);
280 if (upper_32_bits(val))
281 xhci_write_64(xhci, 0, &ir->erst_dequeue);
282 }
283
284
285 err = xhci_handshake(&xhci->op_regs->status,
286 STS_FATAL, STS_FATAL,
287 XHCI_MAX_HALT_USEC);
288 if (!err)
289 xhci_info(xhci, "Fault detected\n");
290}
291
292#ifdef CONFIG_USB_PCI
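/*
 * Set up MSI
 */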
296static int xhci_setup_msi(struct xhci_hcd *xhci)
297{
298 int ret;
299
300
301
302 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
303
304 ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
305 if (ret < 0) {
306 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
307 "failed to allocate MSI entry");
308 return ret;
309 }
310
311 ret = request_irq(pdev->irq, xhci_msi_irq,
312 0, "xhci_hcd", xhci_to_hcd(xhci));
313 if (ret) {
314 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
315 "disable MSI interrupt");
316 pci_free_irq_vectors(pdev);
317 }
318
319 return ret;
320}
321
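/*
 * Set up MSI-X
 */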
325static int xhci_setup_msix(struct xhci_hcd *xhci)
326{
327 int i, ret = 0;
328 struct usb_hcd *hcd = xhci_to_hcd(xhci);
329 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
330
	/*
	 * calculate number of MSI-X vectors supported.
	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
	 *   with max number of interrupters based on the xhci HCSPARAMS1.
	 * - num_online_cpus: maximum MSI-X vectors per CPU core.
	 *   Add an additional vector to ensure an interrupt is always
	 *   available.
	 */
338 xhci->msix_count = min(num_online_cpus() + 1,
339 HCS_MAX_INTRS(xhci->hcs_params1));
340
341 ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count,
342 PCI_IRQ_MSIX);
343 if (ret < 0) {
344 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
345 "Failed to enable MSI-X");
346 return ret;
347 }
348
349 for (i = 0; i < xhci->msix_count; i++) {
350 ret = request_irq(pci_irq_vector(pdev, i), xhci_msi_irq, 0,
351 "xhci_hcd", xhci_to_hcd(xhci));
352 if (ret)
353 goto disable_msix;
354 }
355
356 hcd->msix_enabled = 1;
357 return ret;
358
359disable_msix:
360 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
361 while (--i >= 0)
362 free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
363 pci_free_irq_vectors(pdev);
364 return ret;
365}
366
/* Free any IRQs and disable MSI-X */
368static void xhci_cleanup_msix(struct xhci_hcd *xhci)
369{
370 struct usb_hcd *hcd = xhci_to_hcd(xhci);
371 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
372
373 if (xhci->quirks & XHCI_PLAT)
374 return;
375
376
377 if (hcd->irq > 0)
378 return;
379
380 if (hcd->msix_enabled) {
381 int i;
382
383 for (i = 0; i < xhci->msix_count; i++)
384 free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
385 } else {
386 free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci));
387 }
388
389 pci_free_irq_vectors(pdev);
390 hcd->msix_enabled = 0;
391}
392
393static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
394{
395 struct usb_hcd *hcd = xhci_to_hcd(xhci);
396
397 if (hcd->msix_enabled) {
398 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
399 int i;
400
401 for (i = 0; i < xhci->msix_count; i++)
402 synchronize_irq(pci_irq_vector(pdev, i));
403 }
404}
405
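/* Try to enable MSI-X, fall back to MSI, then to a legacy shared IRQ. */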
406static int xhci_try_enable_msi(struct usb_hcd *hcd)
407{
408 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
409 struct pci_dev *pdev;
410 int ret;
411
412
413 if (xhci->quirks & XHCI_PLAT)
414 return 0;
415
416 pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
417
418
419
420
421 if (xhci->quirks & XHCI_BROKEN_MSI)
422 goto legacy_irq;
423
424
425 if (hcd->irq)
426 free_irq(hcd->irq, hcd);
427 hcd->irq = 0;
428
429 ret = xhci_setup_msix(xhci);
430 if (ret)
431
432 ret = xhci_setup_msi(xhci);
433
434 if (!ret) {
435 hcd->msi_enabled = 1;
436 return 0;
437 }
438
439 if (!pdev->irq) {
440 xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
441 return -EINVAL;
442 }
443
444 legacy_irq:
445 if (!strlen(hcd->irq_descr))
446 snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
447 hcd->driver->description, hcd->self.busnum);
448
449
450 ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
451 hcd->irq_descr, hcd);
452 if (ret) {
453 xhci_err(xhci, "request interrupt %d failed\n",
454 pdev->irq);
455 return ret;
456 }
457 hcd->irq = pdev->irq;
458 return 0;
459}
460
461#else
462
463static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
464{
465 return 0;
466}
467
468static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
469{
470}
471
472static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
473{
474}
475
476#endif
477
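/*
 * Compliance Mode Recovery routine: poll the USB3 root hub ports and, if a
 * port is stuck in compliance mode, wake the root hub so the port can be
 * recovered.  The timer re-arms itself until every port has been seen in U0.
 */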
478static void compliance_mode_recovery(struct timer_list *t)
479{
480 struct xhci_hcd *xhci;
481 struct usb_hcd *hcd;
482 struct xhci_hub *rhub;
483 u32 temp;
484 int i;
485
486 xhci = from_timer(xhci, t, comp_mode_recovery_timer);
487 rhub = &xhci->usb3_rhub;
488
489 for (i = 0; i < rhub->num_ports; i++) {
490 temp = readl(rhub->ports[i]->addr);
491 if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
492
493
494
495
496 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
497 "Compliance mode detected->port %d",
498 i + 1);
499 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
500 "Attempting compliance mode recovery");
501 hcd = xhci->shared_hcd;
502
503 if (hcd->state == HC_STATE_SUSPENDED)
504 usb_hcd_resume_root_hub(hcd);
505
506 usb_hcd_poll_rh_status(hcd);
507 }
508 }
509
510 if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
511 mod_timer(&xhci->comp_mode_recovery_timer,
512 jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
513}
514
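/*
 * Quirk to work around hosts whose USB3 ports can get stuck in compliance
 * mode (seen with certain re-driver/host combinations).  Arm a recovery
 * timer that periodically runs compliance_mode_recovery() until all ports
 * have reached U0.
 */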
525static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
526{
527 xhci->port_status_u0 = 0;
528 timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
529 0);
530 xhci->comp_mode_recovery_timer.expires = jiffies +
531 msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
532
533 add_timer(&xhci->comp_mode_recovery_timer);
534 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
535 "Compliance mode recovery timer initialized");
536}
537
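/*
 * Check whether this system needs the compliance mode recovery quirk, based
 * on DMI data (certain Hewlett-Packard Z-series workstations).
 */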
544static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
545{
546 const char *dmi_product_name, *dmi_sys_vendor;
547
548 dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
549 dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
550 if (!dmi_product_name || !dmi_sys_vendor)
551 return false;
552
553 if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
554 return false;
555
556 if (strstr(dmi_product_name, "Z420") ||
557 strstr(dmi_product_name, "Z620") ||
558 strstr(dmi_product_name, "Z820") ||
559 strstr(dmi_product_name, "Z1 Workstation"))
560 return true;
561
562 return false;
563}
564
565static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
566{
567 return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
568}
569
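/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Sets up the driver lock, applies the link TRB quirk for 0.95 hardware,
 * allocates all xHC data structures via xhci_mem_init(), and arms the
 * compliance mode recovery timer where needed.
 */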
578static int xhci_init(struct usb_hcd *hcd)
579{
580 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
581 int retval = 0;
582
583 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
584 spin_lock_init(&xhci->lock);
585 if (xhci->hci_version == 0x95 && link_quirk) {
586 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
587 "QUIRK: Not clearing Link TRB chain bits.");
588 xhci->quirks |= XHCI_LINK_TRB_QUIRK;
589 } else {
590 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
591 "xHCI doesn't need link TRB QUIRK");
592 }
593 retval = xhci_mem_init(xhci, GFP_KERNEL);
594 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
595
596
597 if (xhci_compliance_mode_recovery_timer_quirk_check()) {
598 xhci->quirks |= XHCI_COMP_MODE_QUIRK;
599 compliance_mode_recovery_timer_init(xhci);
600 }
601
602 return retval;
603}
604
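/*
 * Second half of xhci_run(): start the host controller and mark the command
 * ring as running.  Called when xhci_run() is invoked for the shared (USB3)
 * roothub.
 */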
608static int xhci_run_finished(struct xhci_hcd *xhci)
609{
610 if (xhci_start(xhci)) {
611 xhci_halt(xhci);
612 return -ENODEV;
613 }
614 xhci->shared_hcd->state = HC_STATE_RUNNING;
615 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
616
617 if (xhci->quirks & XHCI_NEC_HOST)
618 xhci_ring_cmd_db(xhci);
619
620 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
621 "Finished xhci_run for USB3 roothub");
622 return 0;
623}
624
/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * initialize the command ring.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
637int xhci_run(struct usb_hcd *hcd)
638{
639 u32 temp;
640 u64 temp_64;
641 int ret;
642 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
643
644
645
646
647
648 hcd->uses_new_polling = 1;
649 if (!usb_hcd_is_primary_hcd(hcd))
650 return xhci_run_finished(xhci);
651
652 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");
653
654 ret = xhci_try_enable_msi(hcd);
655 if (ret)
656 return ret;
657
658 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
659 temp_64 &= ~ERST_PTR_MASK;
660 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
661 "ERST deq = 64'h%0lx", (long unsigned int) temp_64);
662
663 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
664 "// Set the interrupt modulation register");
665 temp = readl(&xhci->ir_set->irq_control);
666 temp &= ~ER_IRQ_INTERVAL_MASK;
667 temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
668 writel(temp, &xhci->ir_set->irq_control);
669
670
671 temp = readl(&xhci->op_regs->command);
672 temp |= (CMD_EIE);
673 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
674 "// Enable interrupts, cmd = 0x%x.", temp);
675 writel(temp, &xhci->op_regs->command);
676
677 temp = readl(&xhci->ir_set->irq_pending);
678 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
679 "// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
680 xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
681 writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
682
683 if (xhci->quirks & XHCI_NEC_HOST) {
684 struct xhci_command *command;
685
686 command = xhci_alloc_command(xhci, false, GFP_KERNEL);
687 if (!command)
688 return -ENOMEM;
689
690 ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
691 TRB_TYPE(TRB_NEC_GET_FW));
692 if (ret)
693 xhci_free_command(xhci, command);
694 }
695 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
696 "Finished xhci_run for USB2 roothub");
697
698 xhci_dbc_init(xhci);
699
700 xhci_debugfs_init(xhci);
701
702 return 0;
703}
704EXPORT_SYMBOL_GPL(xhci_run);
705
/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
715static void xhci_stop(struct usb_hcd *hcd)
716{
717 u32 temp;
718 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
719
720 mutex_lock(&xhci->mutex);
721
722
723 if (!usb_hcd_is_primary_hcd(hcd)) {
724 mutex_unlock(&xhci->mutex);
725 return;
726 }
727
728 xhci_dbc_exit(xhci);
729
730 spin_lock_irq(&xhci->lock);
731 xhci->xhc_state |= XHCI_STATE_HALTED;
732 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
733 xhci_halt(xhci);
734 xhci_reset(xhci);
735 spin_unlock_irq(&xhci->lock);
736
737 xhci_cleanup_msix(xhci);
738
739
740 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
741 (!(xhci_all_ports_seen_u0(xhci)))) {
742 del_timer_sync(&xhci->comp_mode_recovery_timer);
743 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
744 "%s: compliance mode recovery timer deleted",
745 __func__);
746 }
747
748 if (xhci->quirks & XHCI_AMD_PLL_FIX)
749 usb_amd_dev_put();
750
751 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
752 "// Disabling event ring interrupts");
753 temp = readl(&xhci->op_regs->status);
754 writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
755 temp = readl(&xhci->ir_set->irq_pending);
756 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
757
758 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
759 xhci_mem_cleanup(xhci);
760 xhci_debugfs_exit(xhci);
761 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
762 "xhci_stop completed - status = %x",
763 readl(&xhci->op_regs->status));
764 mutex_unlock(&xhci->mutex);
765}
766
/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that
 * the machine will be powered off, and the HC's internal state will be
 * reset.  Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
776void xhci_shutdown(struct usb_hcd *hcd)
777{
778 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
779
780 if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
781 usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
782
783 spin_lock_irq(&xhci->lock);
784 xhci_halt(xhci);
785
786 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
787 xhci_reset(xhci);
788 spin_unlock_irq(&xhci->lock);
789
790 xhci_cleanup_msix(xhci);
791
792 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
793 "xhci_shutdown completed - status = %x",
794 readl(&xhci->op_regs->status));
795}
796EXPORT_SYMBOL_GPL(xhci_shutdown);
797
798#ifdef CONFIG_PM
799static void xhci_save_registers(struct xhci_hcd *xhci)
800{
801 xhci->s3.command = readl(&xhci->op_regs->command);
802 xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
803 xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
804 xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
805 xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
806 xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
807 xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
808 xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
809 xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
810}
811
812static void xhci_restore_registers(struct xhci_hcd *xhci)
813{
814 writel(xhci->s3.command, &xhci->op_regs->command);
815 writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
816 xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
817 writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
818 writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
819 xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
820 xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
821 writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
822 writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
823}
824
825static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
826{
827 u64 val_64;
828
829
830 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
831 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
832 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
833 xhci->cmd_ring->dequeue) &
834 (u64) ~CMD_RING_RSVD_BITS) |
835 xhci->cmd_ring->cycle_state;
836 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
837 "// Setting command ring address to 0x%llx",
838 (long unsigned long) val_64);
839 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
840}
841
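/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * Zero out all TRBs except the link TRB segment pointers (only the cycle bit
 * of the link TRB is cleared), reset the software enqueue/dequeue pointers
 * and cycle state, and write the dequeue address back to the hardware so the
 * command ring is consistent after resume.
 */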
851static void xhci_clear_command_ring(struct xhci_hcd *xhci)
852{
853 struct xhci_ring *ring;
854 struct xhci_segment *seg;
855
856 ring = xhci->cmd_ring;
857 seg = ring->deq_seg;
858 do {
859 memset(seg->trbs, 0,
860 sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
861 seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
862 cpu_to_le32(~TRB_CYCLE);
863 seg = seg->next;
864 } while (seg != ring->deq_seg);
865
866
867 ring->deq_seg = ring->first_seg;
868 ring->dequeue = ring->first_seg->trbs;
869 ring->enq_seg = ring->deq_seg;
870 ring->enqueue = ring->dequeue;
871
872 ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
873
874
875
876
877 ring->cycle_state = 1;
878
879
880
881
882
883
884
885
886 xhci_set_cmd_ring_deq(xhci);
887}
888
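/*
 * Disable port wake bits if do_wakeup is not set.  Also set PORT_CSC for
 * disconnected ports that have no pending connect change, so a stale
 * internal wake state can't immediately wake the host after suspend.
 */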
898static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
899 struct xhci_hub *rhub,
900 bool do_wakeup)
901{
902 unsigned long flags;
903 u32 t1, t2, portsc;
904 int i;
905
906 spin_lock_irqsave(&xhci->lock, flags);
907
908 for (i = 0; i < rhub->num_ports; i++) {
909 portsc = readl(rhub->ports[i]->addr);
910 t1 = xhci_port_state_to_neutral(portsc);
911 t2 = t1;
912
913
914 if (!do_wakeup)
915 t2 &= ~PORT_WAKE_BITS;
916
917
918 if (!(portsc & (PORT_CSC | PORT_CONNECT)))
919 t2 |= PORT_CSC;
920
921 if (t1 != t2) {
922 writel(t2, rhub->ports[i]->addr);
923 xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
924 rhub->hcd->self.busnum, i + 1, portsc, t2);
925 }
926 }
927 spin_unlock_irqrestore(&xhci->lock, flags);
928}
929
930static bool xhci_pending_portevent(struct xhci_hcd *xhci)
931{
932 struct xhci_port **ports;
933 int port_index;
934 u32 status;
935 u32 portsc;
936
937 status = readl(&xhci->op_regs->status);
938 if (status & STS_EINT)
939 return true;
940
941
942
943
944
945
946 port_index = xhci->usb2_rhub.num_ports;
947 ports = xhci->usb2_rhub.ports;
948 while (port_index--) {
949 portsc = readl(ports[port_index]->addr);
950 if (portsc & PORT_CHANGE_MASK ||
951 (portsc & PORT_PLS_MASK) == XDEV_RESUME)
952 return true;
953 }
954 port_index = xhci->usb3_rhub.num_ports;
955 ports = xhci->usb3_rhub.ports;
956 while (port_index--) {
957 portsc = readl(ports[port_index]->addr);
958 if (portsc & PORT_CHANGE_MASK ||
959 (portsc & PORT_PLS_MASK) == XDEV_RESUME)
960 return true;
961 }
962 return false;
963}
964
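/*
 * Stop HC (not bus-specific).
 *
 * This is called when the machine transitions into S3/S4 mode: halt the
 * controller, save its registers, and ask it to save its internal state.
 */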
971int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
972{
973 int rc = 0;
974 unsigned int delay = XHCI_MAX_HALT_USEC * 2;
975 struct usb_hcd *hcd = xhci_to_hcd(xhci);
976 u32 command;
977 u32 res;
978
979 if (!hcd->state)
980 return 0;
981
982 if (hcd->state != HC_STATE_SUSPENDED ||
983 xhci->shared_hcd->state != HC_STATE_SUSPENDED)
984 return -EINVAL;
985
986
987 xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup);
988 xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup);
989
990 if (!HCD_HW_ACCESSIBLE(hcd))
991 return 0;
992
993 xhci_dbc_suspend(xhci);
994
995
996 xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
997 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
998 del_timer_sync(&hcd->rh_timer);
999 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
1000 del_timer_sync(&xhci->shared_hcd->rh_timer);
1001
1002 if (xhci->quirks & XHCI_SUSPEND_DELAY)
1003 usleep_range(1000, 1500);
1004
1005 spin_lock_irq(&xhci->lock);
1006 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
1007 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
1008
1009
1010
1011
1012 command = readl(&xhci->op_regs->command);
1013 command &= ~CMD_RUN;
1014 writel(command, &xhci->op_regs->command);
1015
1016
1017 delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
1018
1019 if (xhci_handshake(&xhci->op_regs->status,
1020 STS_HALT, STS_HALT, delay)) {
1021 xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
1022 spin_unlock_irq(&xhci->lock);
1023 return -ETIMEDOUT;
1024 }
1025 xhci_clear_command_ring(xhci);
1026
1027
1028 xhci_save_registers(xhci);
1029
1030
1031 command = readl(&xhci->op_regs->command);
1032 command |= CMD_CSS;
1033 writel(command, &xhci->op_regs->command);
1034 xhci->broken_suspend = 0;
1035 if (xhci_handshake(&xhci->op_regs->status,
1036 STS_SAVE, 0, 20 * 1000)) {
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046 res = readl(&xhci->op_regs->status);
1047 if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
1048 (((res & STS_SRE) == 0) &&
1049 ((res & STS_HCE) == 0))) {
1050 xhci->broken_suspend = 1;
1051 } else {
1052 xhci_warn(xhci, "WARN: xHC save state timeout\n");
1053 spin_unlock_irq(&xhci->lock);
1054 return -ETIMEDOUT;
1055 }
1056 }
1057 spin_unlock_irq(&xhci->lock);
1058
1059
1060
1061
1062
1063 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
1064 (!(xhci_all_ports_seen_u0(xhci)))) {
1065 del_timer_sync(&xhci->comp_mode_recovery_timer);
1066 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1067 "%s: compliance mode recovery timer deleted",
1068 __func__);
1069 }
1070
1071
1072
1073 xhci_msix_sync_irqs(xhci);
1074
1075 return rc;
1076}
1077EXPORT_SYMBOL_GPL(xhci_suspend);
1078
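/*
 * Start the xHC after S3/S4 (not bus-specific).
 *
 * Restore the saved registers, or re-initialize the whole controller if the
 * state was lost (hibernation, reset-on-resume quirks, or a failed state
 * restore).
 */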
1085int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
1086{
1087 u32 command, temp = 0;
1088 struct usb_hcd *hcd = xhci_to_hcd(xhci);
1089 struct usb_hcd *secondary_hcd;
1090 int retval = 0;
1091 bool comp_timer_running = false;
1092 bool pending_portevent = false;
1093
1094 if (!hcd->state)
1095 return 0;
1096
1097
1098
1099
1100
1101 if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
1102 time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
1103 msleep(100);
1104
1105 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
1106 set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
1107
1108 spin_lock_irq(&xhci->lock);
1109 if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
1110 hibernated = true;
1111
1112 if (!hibernated) {
1113
1114
1115
1116
1117 retval = xhci_handshake(&xhci->op_regs->status,
1118 STS_CNR, 0, 10 * 1000 * 1000);
1119 if (retval) {
1120 xhci_warn(xhci, "Controller not ready at resume %d\n",
1121 retval);
1122 spin_unlock_irq(&xhci->lock);
1123 return retval;
1124 }
1125
1126 xhci_restore_registers(xhci);
1127
1128 xhci_set_cmd_ring_deq(xhci);
1129
1130
1131 command = readl(&xhci->op_regs->command);
1132 command |= CMD_CRS;
1133 writel(command, &xhci->op_regs->command);
1134
1135
1136
1137
1138
1139 if (xhci_handshake(&xhci->op_regs->status,
1140 STS_RESTORE, 0, 100 * 1000)) {
1141 xhci_warn(xhci, "WARN: xHC restore state timeout\n");
1142 spin_unlock_irq(&xhci->lock);
1143 return -ETIMEDOUT;
1144 }
1145 temp = readl(&xhci->op_regs->status);
1146 }
1147
1148
1149 if ((temp & STS_SRE) || hibernated) {
1150
1151 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
1152 !(xhci_all_ports_seen_u0(xhci))) {
1153 del_timer_sync(&xhci->comp_mode_recovery_timer);
1154 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1155 "Compliance Mode Recovery Timer deleted!");
1156 }
1157
1158
1159 usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
1160 usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
1161
1162 xhci_dbg(xhci, "Stop HCD\n");
1163 xhci_halt(xhci);
1164 xhci_zero_64b_regs(xhci);
1165 retval = xhci_reset(xhci);
1166 spin_unlock_irq(&xhci->lock);
1167 if (retval)
1168 return retval;
1169 xhci_cleanup_msix(xhci);
1170
1171 xhci_dbg(xhci, "// Disabling event ring interrupts\n");
1172 temp = readl(&xhci->op_regs->status);
1173 writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
1174 temp = readl(&xhci->ir_set->irq_pending);
1175 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
1176
1177 xhci_dbg(xhci, "cleaning up memory\n");
1178 xhci_mem_cleanup(xhci);
1179 xhci_debugfs_exit(xhci);
1180 xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
1181 readl(&xhci->op_regs->status));
1182
1183
1184
1185
1186
1187 if (!usb_hcd_is_primary_hcd(hcd))
1188 secondary_hcd = hcd;
1189 else
1190 secondary_hcd = xhci->shared_hcd;
1191
1192 xhci_dbg(xhci, "Initialize the xhci_hcd\n");
1193 retval = xhci_init(hcd->primary_hcd);
1194 if (retval)
1195 return retval;
1196 comp_timer_running = true;
1197
1198 xhci_dbg(xhci, "Start the primary HCD\n");
1199 retval = xhci_run(hcd->primary_hcd);
1200 if (!retval) {
1201 xhci_dbg(xhci, "Start the secondary HCD\n");
1202 retval = xhci_run(secondary_hcd);
1203 }
1204 hcd->state = HC_STATE_SUSPENDED;
1205 xhci->shared_hcd->state = HC_STATE_SUSPENDED;
1206 goto done;
1207 }
1208
1209
1210 command = readl(&xhci->op_regs->command);
1211 command |= CMD_RUN;
1212 writel(command, &xhci->op_regs->command);
1213 xhci_handshake(&xhci->op_regs->status, STS_HALT,
1214 0, 250 * 1000);
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225 spin_unlock_irq(&xhci->lock);
1226
1227 xhci_dbc_resume(xhci);
1228
1229 done:
1230 if (retval == 0) {
1231
1232
1233
1234
1235
1236 pending_portevent = xhci_pending_portevent(xhci);
1237 if (!pending_portevent) {
1238 msleep(120);
1239 pending_portevent = xhci_pending_portevent(xhci);
1240 }
1241
1242 if (pending_portevent) {
1243 usb_hcd_resume_root_hub(xhci->shared_hcd);
1244 usb_hcd_resume_root_hub(hcd);
1245 }
1246 }
1247
1248
1249
1250
1251
1252
1253 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
1254 compliance_mode_recovery_timer_init(xhci);
1255
1256 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
1257 usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));
1258
1259
1260 xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1261 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
1262 usb_hcd_poll_rh_status(xhci->shared_hcd);
1263 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1264 usb_hcd_poll_rh_status(hcd);
1265
1266 return retval;
1267}
1268EXPORT_SYMBOL_GPL(xhci_resume);
1269#endif
1270
1271
1272
1273static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
1274{
1275 void *temp;
1276 int ret = 0;
1277 unsigned int buf_len;
1278 enum dma_data_direction dir;
1279
1280 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1281 buf_len = urb->transfer_buffer_length;
1282
	temp = kzalloc_node(buf_len, GFP_ATOMIC,
			    dev_to_node(hcd->self.sysdev));
	if (!temp)
		return -ENOMEM;

1286 if (usb_urb_dir_out(urb))
1287 sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
1288 temp, buf_len, 0);
1289
1290 urb->transfer_buffer = temp;
1291 urb->transfer_dma = dma_map_single(hcd->self.sysdev,
1292 urb->transfer_buffer,
1293 urb->transfer_buffer_length,
1294 dir);
1295
1296 if (dma_mapping_error(hcd->self.sysdev,
1297 urb->transfer_dma)) {
1298 ret = -EAGAIN;
1299 kfree(temp);
1300 } else {
1301 urb->transfer_flags |= URB_DMA_MAP_SINGLE;
1302 }
1303
1304 return ret;
1305}
1306
1307static bool xhci_urb_temp_buffer_required(struct usb_hcd *hcd,
1308 struct urb *urb)
1309{
1310 bool ret = false;
1311 unsigned int i;
1312 unsigned int len = 0;
1313 unsigned int trb_size;
1314 unsigned int max_pkt;
1315 struct scatterlist *sg;
1316 struct scatterlist *tail_sg;
1317
1318 tail_sg = urb->sg;
1319 max_pkt = usb_endpoint_maxp(&urb->ep->desc);
1320
1321 if (!urb->num_sgs)
1322 return ret;
1323
1324 if (urb->dev->speed >= USB_SPEED_SUPER)
1325 trb_size = TRB_CACHE_SIZE_SS;
1326 else
1327 trb_size = TRB_CACHE_SIZE_HS;
1328
1329 if (urb->transfer_buffer_length != 0 &&
1330 !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
1331 for_each_sg(urb->sg, sg, urb->num_sgs, i) {
1332 len = len + sg->length;
1333 if (i > trb_size - 2) {
1334 len = len - tail_sg->length;
1335 if (len < max_pkt) {
1336 ret = true;
1337 break;
1338 }
1339
1340 tail_sg = sg_next(tail_sg);
1341 }
1342 }
1343 }
1344 return ret;
1345}
1346
1347static void xhci_unmap_temp_buf(struct usb_hcd *hcd, struct urb *urb)
1348{
1349 unsigned int len;
1350 unsigned int buf_len;
1351 enum dma_data_direction dir;
1352
1353 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1354
1355 buf_len = urb->transfer_buffer_length;
1356
1357 if (IS_ENABLED(CONFIG_HAS_DMA) &&
1358 (urb->transfer_flags & URB_DMA_MAP_SINGLE))
1359 dma_unmap_single(hcd->self.sysdev,
1360 urb->transfer_dma,
1361 urb->transfer_buffer_length,
1362 dir);
1363
1364 if (usb_urb_dir_in(urb)) {
1365 len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs,
1366 urb->transfer_buffer,
1367 buf_len,
1368 0);
1369 if (len != buf_len) {
1370 xhci_dbg(hcd_to_xhci(hcd),
1371 "Copy from tmp buf to urb sg list failed\n");
1372 urb->actual_length = len;
1373 }
1374 }
1375 urb->transfer_flags &= ~URB_DMA_MAP_SINGLE;
1376 kfree(urb->transfer_buffer);
1377 urb->transfer_buffer = NULL;
1378}
1379
1380
1381
1382
1383
1384
1385
1386static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
1387 gfp_t mem_flags)
1388{
1389 struct xhci_hcd *xhci;
1390
1391 xhci = hcd_to_xhci(hcd);
1392
1393 if (xhci_urb_suitable_for_idt(urb))
1394 return 0;
1395
1396 if (xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) {
1397 if (xhci_urb_temp_buffer_required(hcd, urb))
1398 return xhci_map_temp_buffer(hcd, urb);
1399 }
1400 return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
1401}
1402
1403static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
1404{
1405 struct xhci_hcd *xhci;
1406 bool unmap_temp_buf = false;
1407
1408 xhci = hcd_to_xhci(hcd);
1409
1410 if (urb->num_sgs && (urb->transfer_flags & URB_DMA_MAP_SINGLE))
1411 unmap_temp_buf = true;
1412
1413 if ((xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) && unmap_temp_buf)
1414 xhci_unmap_temp_buf(hcd, urb);
1415 else
1416 usb_hcd_unmap_urb_for_dma(hcd, urb);
1417}
1418
/*
 * xhci_get_endpoint_index - Find the index for an endpoint given its
 * descriptor.  Use the return value to right shift 1 for the bitmask.
 *
 * Index  = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 *
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
1429unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
1430{
1431 unsigned int index;
1432 if (usb_endpoint_xfer_control(desc))
1433 index = (unsigned int) (usb_endpoint_num(desc)*2);
1434 else
1435 index = (unsigned int) (usb_endpoint_num(desc)*2) +
1436 (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
1437 return index;
1438}
1439EXPORT_SYMBOL_GPL(xhci_get_endpoint_index);
1440
/* The reverse operation to xhci_get_endpoint_index.  Calculate the USB
 * endpoint address from the xHCI endpoint index.
 */
1444unsigned int xhci_get_endpoint_address(unsigned int ep_index)
1445{
1446 unsigned int number = DIV_ROUND_UP(ep_index, 2);
1447 unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
1448 return direction | number;
1449}
1450
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0
 * is bit 1, etc.
 */
1455static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
1456{
1457 return 1 << (xhci_get_endpoint_index(desc) + 1);
1458}
1459
1460
1461
1462
1463
1464
1465
1466unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
1467{
1468 return fls(added_ctxs) - 1;
1469}
1470
/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
1474static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
1475 struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
1476 const char *func) {
1477 struct xhci_hcd *xhci;
1478 struct xhci_virt_device *virt_dev;
1479
1480 if (!hcd || (check_ep && !ep) || !udev) {
1481 pr_debug("xHCI %s called with invalid args\n", func);
1482 return -EINVAL;
1483 }
1484 if (!udev->parent) {
1485 pr_debug("xHCI %s called for root hub\n", func);
1486 return 0;
1487 }
1488
1489 xhci = hcd_to_xhci(hcd);
1490 if (check_virt_dev) {
1491 if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
1492 xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
1493 func);
1494 return -EINVAL;
1495 }
1496
1497 virt_dev = xhci->devs[udev->slot_id];
1498 if (virt_dev->udev != udev) {
1499 xhci_dbg(xhci, "xHCI %s called with udev and "
1500 "virt_dev does not match\n", func);
1501 return -EINVAL;
1502 }
1503 }
1504
1505 if (xhci->xhc_state & XHCI_STATE_HALTED)
1506 return -ENODEV;
1507
1508 return 1;
1509}
1510
1511static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1512 struct usb_device *udev, struct xhci_command *command,
1513 bool ctx_change, bool must_succeed);
1514
/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
1521static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
1522 unsigned int ep_index, struct urb *urb, gfp_t mem_flags)
1523{
1524 struct xhci_container_ctx *out_ctx;
1525 struct xhci_input_control_ctx *ctrl_ctx;
1526 struct xhci_ep_ctx *ep_ctx;
1527 struct xhci_command *command;
1528 int max_packet_size;
1529 int hw_max_packet_size;
1530 int ret = 0;
1531
1532 out_ctx = xhci->devs[slot_id]->out_ctx;
1533 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1534 hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
1535 max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
1536 if (hw_max_packet_size != max_packet_size) {
1537 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1538 "Max Packet Size for ep 0 changed.");
1539 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1540 "Max packet size in usb_device = %d",
1541 max_packet_size);
1542 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1543 "Max packet size in xHCI HW = %d",
1544 hw_max_packet_size);
1545 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1546 "Issuing evaluate context command.");
1547
1548
1549
1550
1551
1552
1553 command = xhci_alloc_command(xhci, true, mem_flags);
1554 if (!command)
1555 return -ENOMEM;
1556
1557 command->in_ctx = xhci->devs[slot_id]->in_ctx;
1558 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
1559 if (!ctrl_ctx) {
1560 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1561 __func__);
1562 ret = -ENOMEM;
1563 goto command_cleanup;
1564 }
1565
1566 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
1567 xhci->devs[slot_id]->out_ctx, ep_index);
1568
1569 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
1570 ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);
1571 ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
1572 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
1573
1574 ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
1575 ctrl_ctx->drop_flags = 0;
1576
1577 ret = xhci_configure_endpoint(xhci, urb->dev, command,
1578 true, false);
1579
1580
1581
1582
1583 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
1584command_cleanup:
1585 kfree(command->completion);
1586 kfree(command);
1587 }
1588 return ret;
1589}
1590
/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
1595static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1596{
1597 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1598 unsigned long flags;
1599 int ret = 0;
1600 unsigned int slot_id, ep_index;
1601 unsigned int *ep_state;
1602 struct urb_priv *urb_priv;
1603 int num_tds;
1604
1605 if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
1606 true, true, __func__) <= 0)
1607 return -EINVAL;
1608
1609 slot_id = urb->dev->slot_id;
1610 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1611 ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;
1612
1613 if (!HCD_HW_ACCESSIBLE(hcd))
1614 return -ESHUTDOWN;
1615
1616 if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
1617 xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
1618 return -ENODEV;
1619 }
1620
1621 if (usb_endpoint_xfer_isoc(&urb->ep->desc))
1622 num_tds = urb->number_of_packets;
1623 else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
1624 urb->transfer_buffer_length > 0 &&
1625 urb->transfer_flags & URB_ZERO_PACKET &&
1626 !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
1627 num_tds = 2;
1628 else
1629 num_tds = 1;
1630
1631 urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags);
1632 if (!urb_priv)
1633 return -ENOMEM;
1634
1635 urb_priv->num_tds = num_tds;
1636 urb_priv->num_tds_done = 0;
1637 urb->hcpriv = urb_priv;
1638
1639 trace_xhci_urb_enqueue(urb);
1640
1641 if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
1645 if (urb->dev->speed == USB_SPEED_FULL) {
1646 ret = xhci_check_maxpacket(xhci, slot_id,
1647 ep_index, urb, mem_flags);
1648 if (ret < 0) {
1649 xhci_urb_free_priv(urb_priv);
1650 urb->hcpriv = NULL;
1651 return ret;
1652 }
1653 }
1654 }
1655
1656 spin_lock_irqsave(&xhci->lock, flags);
1657
1658 if (xhci->xhc_state & XHCI_STATE_DYING) {
1659 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
1660 urb->ep->desc.bEndpointAddress, urb);
1661 ret = -ESHUTDOWN;
1662 goto free_priv;
1663 }
1664 if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
1665 xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
1666 *ep_state);
1667 ret = -EINVAL;
1668 goto free_priv;
1669 }
1670 if (*ep_state & EP_SOFT_CLEAR_TOGGLE) {
1671 xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
1672 ret = -EINVAL;
1673 goto free_priv;
1674 }
1675
1676 switch (usb_endpoint_type(&urb->ep->desc)) {
1677
1678 case USB_ENDPOINT_XFER_CONTROL:
1679 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
1680 slot_id, ep_index);
1681 break;
1682 case USB_ENDPOINT_XFER_BULK:
1683 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
1684 slot_id, ep_index);
1685 break;
1686 case USB_ENDPOINT_XFER_INT:
1687 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
1688 slot_id, ep_index);
1689 break;
1690 case USB_ENDPOINT_XFER_ISOC:
1691 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
1692 slot_id, ep_index);
1693 }
1694
1695 if (ret) {
1696free_priv:
1697 xhci_urb_free_priv(urb_priv);
1698 urb->hcpriv = NULL;
1699 }
1700 spin_unlock_irqrestore(&xhci->lock, flags);
1701 return ret;
1702}
1703
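/*
 * Remove the URB's TDs from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.
 *
 * The not-yet-completed TDs are added to the endpoint's cancelled_td_list and
 * a Stop Endpoint command is queued (unless one is already pending); the
 * stop-endpoint completion handler then finishes the cancellation.  If the
 * host is dead or halted, the TDs are freed immediately and the URB is given
 * back here with -ESHUTDOWN.
 */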
1735static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1736{
1737 unsigned long flags;
1738 int ret, i;
1739 u32 temp;
1740 struct xhci_hcd *xhci;
1741 struct urb_priv *urb_priv;
1742 struct xhci_td *td;
1743 unsigned int ep_index;
1744 struct xhci_ring *ep_ring;
1745 struct xhci_virt_ep *ep;
1746 struct xhci_command *command;
1747 struct xhci_virt_device *vdev;
1748
1749 xhci = hcd_to_xhci(hcd);
1750 spin_lock_irqsave(&xhci->lock, flags);
1751
1752 trace_xhci_urb_dequeue(urb);
1753
1754
1755 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1756 if (ret)
1757 goto done;
1758
1759
1760 vdev = xhci->devs[urb->dev->slot_id];
1761 urb_priv = urb->hcpriv;
1762 if (!vdev || !urb_priv)
1763 goto err_giveback;
1764
1765 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1766 ep = &vdev->eps[ep_index];
1767 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1768 if (!ep || !ep_ring)
1769 goto err_giveback;
1770
1771
1772 temp = readl(&xhci->op_regs->status);
1773 if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
1774 xhci_hc_died(xhci);
1775 goto done;
1776 }
1777
1778
1779
1780
1781
1782
1783 if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
1784 xhci_err(xhci, "Canceled URB td not found on endpoint ring");
1785 for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
1786 td = &urb_priv->td[i];
1787 if (!list_empty(&td->cancelled_td_list))
1788 list_del_init(&td->cancelled_td_list);
1789 }
1790 goto err_giveback;
1791 }
1792
1793 if (xhci->xhc_state & XHCI_STATE_HALTED) {
1794 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1795 "HC halted, freeing TD manually.");
1796 for (i = urb_priv->num_tds_done;
1797 i < urb_priv->num_tds;
1798 i++) {
1799 td = &urb_priv->td[i];
1800 if (!list_empty(&td->td_list))
1801 list_del_init(&td->td_list);
1802 if (!list_empty(&td->cancelled_td_list))
1803 list_del_init(&td->cancelled_td_list);
1804 }
1805 goto err_giveback;
1806 }
1807
1808 i = urb_priv->num_tds_done;
1809 if (i < urb_priv->num_tds)
1810 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1811 "Cancel URB %p, dev %s, ep 0x%x, "
1812 "starting at offset 0x%llx",
1813 urb, urb->dev->devpath,
1814 urb->ep->desc.bEndpointAddress,
1815 (unsigned long long) xhci_trb_virt_to_dma(
1816 urb_priv->td[i].start_seg,
1817 urb_priv->td[i].first_trb));
1818
1819 for (; i < urb_priv->num_tds; i++) {
1820 td = &urb_priv->td[i];
1821
1822 if (list_empty(&td->cancelled_td_list)) {
1823 td->cancel_status = TD_DIRTY;
1824 list_add_tail(&td->cancelled_td_list,
1825 &ep->cancelled_td_list);
1826 }
1827 }
1828
1829
1830
1831
1832 if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
1833 command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
1834 if (!command) {
1835 ret = -ENOMEM;
1836 goto done;
1837 }
1838 ep->ep_state |= EP_STOP_CMD_PENDING;
1839 ep->stop_cmd_timer.expires = jiffies +
1840 XHCI_STOP_EP_CMD_TIMEOUT * HZ;
1841 add_timer(&ep->stop_cmd_timer);
1842 xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
1843 ep_index, 0);
1844 xhci_ring_cmd_db(xhci);
1845 }
1846done:
1847 spin_unlock_irqrestore(&xhci->lock, flags);
1848 return ret;
1849
1850err_giveback:
1851 if (urb_priv)
1852 xhci_urb_free_priv(urb_priv);
1853 usb_hcd_unlink_urb_from_ep(hcd, urb);
1854 spin_unlock_irqrestore(&xhci->lock, flags);
1855 usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1856 return ret;
1857}
1858
/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint()
 * will add the endpoint to the schedule with possibly new parameters denoted
 * by a different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
1872int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1873 struct usb_host_endpoint *ep)
1874{
1875 struct xhci_hcd *xhci;
1876 struct xhci_container_ctx *in_ctx, *out_ctx;
1877 struct xhci_input_control_ctx *ctrl_ctx;
1878 unsigned int ep_index;
1879 struct xhci_ep_ctx *ep_ctx;
1880 u32 drop_flag;
1881 u32 new_add_flags, new_drop_flags;
1882 int ret;
1883
1884 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1885 if (ret <= 0)
1886 return ret;
1887 xhci = hcd_to_xhci(hcd);
1888 if (xhci->xhc_state & XHCI_STATE_DYING)
1889 return -ENODEV;
1890
1891 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1892 drop_flag = xhci_get_endpoint_flag(&ep->desc);
1893 if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
1894 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1895 __func__, drop_flag);
1896 return 0;
1897 }
1898
1899 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1900 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1901 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
1902 if (!ctrl_ctx) {
1903 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1904 __func__);
1905 return 0;
1906 }
1907
1908 ep_index = xhci_get_endpoint_index(&ep->desc);
1909 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1910
1911
1912
1913 if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) ||
1914 le32_to_cpu(ctrl_ctx->drop_flags) &
1915 xhci_get_endpoint_flag(&ep->desc)) {
1916
1917 if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
1918 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1919 __func__, ep);
1920 return 0;
1921 }
1922
1923 ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
1924 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1925
1926 ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
1927 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1928
1929 xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);
1930
1931 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1932
1933 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1934 (unsigned int) ep->desc.bEndpointAddress,
1935 udev->slot_id,
1936 (unsigned int) new_drop_flags,
1937 (unsigned int) new_add_flags);
1938 return 0;
1939}
1940EXPORT_SYMBOL_GPL(xhci_drop_endpoint);
1941
/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint()
 * will add the endpoint to the schedule with possibly new parameters denoted
 * by a different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
1955int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1956 struct usb_host_endpoint *ep)
1957{
1958 struct xhci_hcd *xhci;
1959 struct xhci_container_ctx *in_ctx;
1960 unsigned int ep_index;
1961 struct xhci_input_control_ctx *ctrl_ctx;
1962 struct xhci_ep_ctx *ep_ctx;
1963 u32 added_ctxs;
1964 u32 new_add_flags, new_drop_flags;
1965 struct xhci_virt_device *virt_dev;
1966 int ret = 0;
1967
1968 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1969 if (ret <= 0) {
1970
1971 ep->hcpriv = NULL;
1972 return ret;
1973 }
1974 xhci = hcd_to_xhci(hcd);
1975 if (xhci->xhc_state & XHCI_STATE_DYING)
1976 return -ENODEV;
1977
1978 added_ctxs = xhci_get_endpoint_flag(&ep->desc);
1979 if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
1980
1981
1982
1983
1984 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1985 __func__, added_ctxs);
1986 return 0;
1987 }
1988
1989 virt_dev = xhci->devs[udev->slot_id];
1990 in_ctx = virt_dev->in_ctx;
1991 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
1992 if (!ctrl_ctx) {
1993 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1994 __func__);
1995 return 0;
1996 }
1997
1998 ep_index = xhci_get_endpoint_index(&ep->desc);
1999
2000
2001
2002 if (virt_dev->eps[ep_index].ring &&
2003 !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
2004 xhci_warn(xhci, "Trying to add endpoint 0x%x "
2005 "without dropping it.\n",
2006 (unsigned int) ep->desc.bEndpointAddress);
2007 return -EINVAL;
2008 }
2009
2010
2011
2012
2013 if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
2014 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
2015 __func__, ep);
2016 return 0;
2017 }
2018
2019
2020
2021
2022
2023
2024 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
2025 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
2026 __func__, ep->desc.bEndpointAddress);
2027 return -ENOMEM;
2028 }
2029
2030 ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
2031 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
2032
2033
2034
2035
2036
2037
2038
2039 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
2040
2041
2042 ep->hcpriv = udev;
2043
2044 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
2045 trace_xhci_add_endpoint(ep_ctx);
2046
2047 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
2048 (unsigned int) ep->desc.bEndpointAddress,
2049 udev->slot_id,
2050 (unsigned int) new_drop_flags,
2051 (unsigned int) new_add_flags);
2052 return 0;
2053}
2054EXPORT_SYMBOL_GPL(xhci_add_endpoint);
2055
2056static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
2057{
2058 struct xhci_input_control_ctx *ctrl_ctx;
2059 struct xhci_ep_ctx *ep_ctx;
2060 struct xhci_slot_ctx *slot_ctx;
2061 int i;
2062
2063 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
2064 if (!ctrl_ctx) {
2065 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2066 __func__);
2067 return;
2068 }
2069
2070
2071
2072
2073
2074
2075 ctrl_ctx->drop_flags = 0;
2076 ctrl_ctx->add_flags = 0;
2077 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2078 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2079
2080 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
2081 for (i = 1; i < 31; i++) {
2082 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
2083 ep_ctx->ep_info = 0;
2084 ep_ctx->ep_info2 = 0;
2085 ep_ctx->deq = 0;
2086 ep_ctx->tx_info = 0;
2087 }
2088}
2089
2090static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
2091 struct usb_device *udev, u32 *cmd_status)
2092{
2093 int ret;
2094
2095 switch (*cmd_status) {
2096 case COMP_COMMAND_ABORTED:
2097 case COMP_COMMAND_RING_STOPPED:
2098 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
2099 ret = -ETIME;
2100 break;
2101 case COMP_RESOURCE_ERROR:
2102 dev_warn(&udev->dev,
2103 "Not enough host controller resources for new device state.\n");
2104 ret = -ENOMEM;
2105
2106 break;
2107 case COMP_BANDWIDTH_ERROR:
2108 case COMP_SECONDARY_BANDWIDTH_ERROR:
2109 dev_warn(&udev->dev,
2110 "Not enough bandwidth for new device state.\n");
2111 ret = -ENOSPC;
2112
2113 break;
2114 case COMP_TRB_ERROR:
2115
2116 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
2117 "add flag = 1, "
2118 "and endpoint is not disabled.\n");
2119 ret = -EINVAL;
2120 break;
2121 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2122 dev_warn(&udev->dev,
2123 "ERROR: Incompatible device for endpoint configure command.\n");
2124 ret = -ENODEV;
2125 break;
2126 case COMP_SUCCESS:
2127 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2128 "Successful Endpoint Configure command");
2129 ret = 0;
2130 break;
2131 default:
2132 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2133 *cmd_status);
2134 ret = -EINVAL;
2135 break;
2136 }
2137 return ret;
2138}
2139
2140static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
2141 struct usb_device *udev, u32 *cmd_status)
2142{
2143 int ret;
2144
2145 switch (*cmd_status) {
2146 case COMP_COMMAND_ABORTED:
2147 case COMP_COMMAND_RING_STOPPED:
2148 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
2149 ret = -ETIME;
2150 break;
2151 case COMP_PARAMETER_ERROR:
2152 dev_warn(&udev->dev,
2153 "WARN: xHCI driver setup invalid evaluate context command.\n");
2154 ret = -EINVAL;
2155 break;
2156 case COMP_SLOT_NOT_ENABLED_ERROR:
2157 dev_warn(&udev->dev,
2158 "WARN: slot not enabled for evaluate context command.\n");
2159 ret = -EINVAL;
2160 break;
2161 case COMP_CONTEXT_STATE_ERROR:
2162 dev_warn(&udev->dev,
2163 "WARN: invalid context state for evaluate context command.\n");
2164 ret = -EINVAL;
2165 break;
2166 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2167 dev_warn(&udev->dev,
2168 "ERROR: Incompatible device for evaluate context command.\n");
2169 ret = -ENODEV;
2170 break;
2171 case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
2172
2173 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
2174 ret = -EINVAL;
2175 break;
2176 case COMP_SUCCESS:
2177 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2178 "Successful evaluate context command");
2179 ret = 0;
2180 break;
2181 default:
2182 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2183 *cmd_status);
2184 ret = -EINVAL;
2185 break;
2186 }
2187 return ret;
2188}
2189
2190static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
2191 struct xhci_input_control_ctx *ctrl_ctx)
2192{
2193 u32 valid_add_flags;
2194 u32 valid_drop_flags;
2195
2196
2197
2198
2199
2200 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
2201 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
2202
2203
2204
2205
2206
2207 return hweight32(valid_add_flags) -
2208 hweight32(valid_add_flags & valid_drop_flags);
2209}
2210
2211static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
2212 struct xhci_input_control_ctx *ctrl_ctx)
2213{
2214 u32 valid_add_flags;
2215 u32 valid_drop_flags;
2216
2217 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
2218 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
2219
2220 return hweight32(valid_drop_flags) -
2221 hweight32(valid_add_flags & valid_drop_flags);
2222}
2223
/*
 * We need to reserve the new number of endpoints before the configure
 * endpoint command completes.  We can't subtract the dropped endpoints from
 * the number of active endpoints until the command completes because we can
 * oversubscribe the host in this case:
 *
 *  - the first configure endpoint command drops more endpoints than it adds
 *  - a second configure endpoint command that adds more endpoints is queued
 *  - the first configure endpoint command fails, so the config is unchanged
 *  - the second command may succeed, even though the first failed
 *
 * Must be called with xhci->lock held.
 */
2237static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
2238 struct xhci_input_control_ctx *ctrl_ctx)
2239{
2240 u32 added_eps;
2241
2242 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2243 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
2244 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2245 "Not enough ep ctxs: "
2246 "%u active, need to add %u, limit is %u.",
2247 xhci->num_active_eps, added_eps,
2248 xhci->limit_active_eps);
2249 return -ENOMEM;
2250 }
2251 xhci->num_active_eps += added_eps;
2252 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2253 "Adding %u ep ctxs, %u now active.", added_eps,
2254 xhci->num_active_eps);
2255 return 0;
2256}
2257
/*
 * The configure endpoint command failed, so release the endpoint contexts
 * that were reserved for it.
 *
 * Must be called with xhci->lock held.
 */
2264static void xhci_free_host_resources(struct xhci_hcd *xhci,
2265 struct xhci_input_control_ctx *ctrl_ctx)
2266{
2267 u32 num_failed_eps;
2268
2269 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2270 xhci->num_active_eps -= num_failed_eps;
2271 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2272 "Removing %u failed ep ctxs, %u now active.",
2273 num_failed_eps,
2274 xhci->num_active_eps);
2275}
2276
/*
 * Now that the configure endpoint command has completed, subtract the
 * endpoints that were dropped (but not changed) from the active endpoint
 * count.
 *
 * Must be called with xhci->lock held.
 */
2283static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
2284 struct xhci_input_control_ctx *ctrl_ctx)
2285{
2286 u32 num_dropped_eps;
2287
2288 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
2289 xhci->num_active_eps -= num_dropped_eps;
2290 if (num_dropped_eps)
2291 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2292 "Removing %u dropped ep ctxs, %u now active.",
2293 num_dropped_eps,
2294 xhci->num_active_eps);
2295}
2296
2297static unsigned int xhci_get_block_size(struct usb_device *udev)
2298{
2299 switch (udev->speed) {
2300 case USB_SPEED_LOW:
2301 case USB_SPEED_FULL:
2302 return FS_BLOCK;
2303 case USB_SPEED_HIGH:
2304 return HS_BLOCK;
2305 case USB_SPEED_SUPER:
2306 case USB_SPEED_SUPER_PLUS:
2307 return SS_BLOCK;
2308 case USB_SPEED_UNKNOWN:
2309 case USB_SPEED_WIRELESS:
2310 default:
2311
2312 return 1;
2313 }
2314}
2315
2316static unsigned int
2317xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
2318{
2319 if (interval_bw->overhead[LS_OVERHEAD_TYPE])
2320 return LS_OVERHEAD;
2321 if (interval_bw->overhead[FS_OVERHEAD_TYPE])
2322 return FS_OVERHEAD;
2323 return HS_OVERHEAD;
2324}
2325
/* If we are changing a LS/FS device under a HS hub,
 * make sure (if we are activating a new TT) that the HS bus has enough
 * bandwidth for this new TT.
 */
2330static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2331 struct xhci_virt_device *virt_dev,
2332 int old_active_eps)
2333{
2334 struct xhci_interval_bw_table *bw_table;
2335 struct xhci_tt_bw_info *tt_info;
2336
2337
2338 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
2339 tt_info = virt_dev->tt_info;
2340
2341
2342
2343
2344 if (old_active_eps)
2345 return 0;
2346 if (old_active_eps == 0 && tt_info->active_eps != 0) {
2347 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
2348 return -ENOMEM;
2349 return 0;
2350 }
2351
2352
2353
2354
2355
2356
2357 return 0;
2358}
2359
2360static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2361 struct xhci_virt_device *virt_dev)
2362{
2363 unsigned int bw_reserved;
2364
2365 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
2366 if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
2367 return -ENOMEM;
2368
2369 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
2370 if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
2371 return -ENOMEM;
2372
2373 return 0;
2374}
2375
2416
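/*
 * Software bandwidth check used on hosts with the XHCI_SW_BW_CHECKING quirk.
 *
 * SuperSpeed devices are handled separately by xhci_check_ss_bw().  For
 * LS/FS/HS devices, the periodic bandwidth on the root port (or on the TT,
 * for LS/FS devices behind a high-speed hub) is estimated interval by
 * interval: interval 0 endpoints contribute their full ESIT payload every
 * frame, while for longer intervals the packets carried over from the
 * previous (shorter) interval are doubled, the packets scheduled at this
 * interval are added, and the portion that must be sent in every frame is
 * charged at the largest packet size and overhead seen so far.  A percentage
 * of the frame (bw_reserved) is kept back for non-periodic transfers; if the
 * running total plus that reservation exceeds the frame limit, -ENOMEM is
 * returned, otherwise the new total is stored in bw_table->bw_used.
 */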
2417static int xhci_check_bw_table(struct xhci_hcd *xhci,
2418 struct xhci_virt_device *virt_dev,
2419 int old_active_eps)
2420{
2421 unsigned int bw_reserved;
2422 unsigned int max_bandwidth;
2423 unsigned int bw_used;
2424 unsigned int block_size;
2425 struct xhci_interval_bw_table *bw_table;
2426 unsigned int packet_size = 0;
2427 unsigned int overhead = 0;
2428 unsigned int packets_transmitted = 0;
2429 unsigned int packets_remaining = 0;
2430 unsigned int i;
2431
2432 if (virt_dev->udev->speed >= USB_SPEED_SUPER)
2433 return xhci_check_ss_bw(xhci, virt_dev);
2434
2435 if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2436 max_bandwidth = HS_BW_LIMIT;
2437
2438 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
2439 } else {
2440 max_bandwidth = FS_BW_LIMIT;
2441 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2442 }
2443
2444 bw_table = virt_dev->bw_table;
2445
2446
2447
2448 block_size = xhci_get_block_size(virt_dev->udev);
2449
2450
2451
2452
2453 if (virt_dev->tt_info) {
2454 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2455 "Recalculating BW for rootport %u",
2456 virt_dev->real_port);
2457 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2458 xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2459 "newly activated TT.\n");
2460 return -ENOMEM;
2461 }
2462 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2463 "Recalculating BW for TT slot %u port %u",
2464 virt_dev->tt_info->slot_id,
2465 virt_dev->tt_info->ttport);
2466 } else {
2467 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2468 "Recalculating BW for rootport %u",
2469 virt_dev->real_port);
2470 }
2471
2472
2473
2474
2475 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2476 bw_table->interval_bw[0].num_packets *
2477 xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2478
2479 for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2480 unsigned int bw_added;
2481 unsigned int largest_mps;
2482 unsigned int interval_overhead;
2483
2484
2485
2486
2487
2488
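		/*
		 * Packets that did not fit in the previous (shorter) interval
		 * must be sent twice within this interval, so double the
		 * carry-over before adding the packets scheduled here.
		 */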
2489 packets_remaining = 2 * packets_remaining +
2490 bw_table->interval_bw[i].num_packets;
2491
2492
2493
2494
2495 if (list_empty(&bw_table->interval_bw[i].endpoints))
2496 largest_mps = 0;
2497 else {
2498 struct xhci_virt_ep *virt_ep;
2499 struct list_head *ep_entry;
2500
2501 ep_entry = bw_table->interval_bw[i].endpoints.next;
2502 virt_ep = list_entry(ep_entry,
2503 struct xhci_virt_ep, bw_endpoint_list);
2504
2505 largest_mps = DIV_ROUND_UP(
2506 virt_ep->bw_info.max_packet_size,
2507 block_size);
2508 }
2509 if (largest_mps > packet_size)
2510 packet_size = largest_mps;
2511
2512
2513 interval_overhead = xhci_get_largest_overhead(
2514 &bw_table->interval_bw[i]);
2515 if (interval_overhead > overhead)
2516 overhead = interval_overhead;
2517
2518
2519
2520
2521 packets_transmitted = packets_remaining >> (i + 1);
2522
2523
2524 bw_added = packets_transmitted * (overhead + packet_size);
2525
2526
2527 packets_remaining = packets_remaining % (1 << (i + 1));
2528
2529
2530
2531
2532
2533 if (packets_remaining == 0) {
2534 packet_size = 0;
2535 overhead = 0;
2536 } else if (packets_transmitted > 0) {
2537
2538
2539
2540
2541
2542 packet_size = largest_mps;
2543 overhead = interval_overhead;
2544 }
2545
2546
2547
2548 bw_used += bw_added;
2549 if (bw_used > max_bandwidth) {
			xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
					bw_used, max_bandwidth);
2553 return -ENOMEM;
2554 }
2555 }
2556
2557
2558
2559
2560
2561
2562 if (packets_remaining > 0)
2563 bw_used += overhead + packet_size;
2564
2565 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2566 unsigned int port_index = virt_dev->real_port - 1;
2567
2568
2569
2570
2571
2572 bw_used += TT_HS_OVERHEAD *
2573 xhci->rh_bw[port_index].num_active_tts;
2574 }
2575
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Final bandwidth: %u, Limit: %u, Reserved: %u, Available: %u percent",
			bw_used, max_bandwidth, bw_reserved,
			(max_bandwidth - bw_used - bw_reserved) * 100 /
			max_bandwidth);
2582
2583 bw_used += bw_reserved;
2584 if (bw_used > max_bandwidth) {
2585 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2586 bw_used, max_bandwidth);
2587 return -ENOMEM;
2588 }
2589
2590 bw_table->bw_used = bw_used;
2591 return 0;
2592}
2593
2594static bool xhci_is_async_ep(unsigned int ep_type)
2595{
2596 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2597 ep_type != ISOC_IN_EP &&
2598 ep_type != INT_IN_EP);
2599}
2600
2601static bool xhci_is_sync_in_ep(unsigned int ep_type)
2602{
2603 return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2604}
2605
2606static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2607{
2608 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2609
2610 if (ep_bw->ep_interval == 0)
2611 return SS_OVERHEAD_BURST +
2612 (ep_bw->mult * ep_bw->num_packets *
2613 (SS_OVERHEAD + mps));
2614 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2615 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2616 1 << ep_bw->ep_interval);
2617
2618}
2619
2620static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2621 struct xhci_bw_info *ep_bw,
2622 struct xhci_interval_bw_table *bw_table,
2623 struct usb_device *udev,
2624 struct xhci_virt_ep *virt_ep,
2625 struct xhci_tt_bw_info *tt_info)
2626{
2627 struct xhci_interval_bw *interval_bw;
2628 int normalized_interval;
2629
2630 if (xhci_is_async_ep(ep_bw->type))
2631 return;
2632
2633 if (udev->speed >= USB_SPEED_SUPER) {
2634 if (xhci_is_sync_in_ep(ep_bw->type))
2635 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2636 xhci_get_ss_bw_consumed(ep_bw);
2637 else
2638 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2639 xhci_get_ss_bw_consumed(ep_bw);
2640 return;
2641 }
2642
2643
2644
2645
2646 if (list_empty(&virt_ep->bw_endpoint_list))
2647 return;
2648
2649
2650
2651 if (udev->speed == USB_SPEED_HIGH)
2652 normalized_interval = ep_bw->ep_interval;
2653 else
2654 normalized_interval = ep_bw->ep_interval - 3;
2655
2656 if (normalized_interval == 0)
2657 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2658 interval_bw = &bw_table->interval_bw[normalized_interval];
2659 interval_bw->num_packets -= ep_bw->num_packets;
2660 switch (udev->speed) {
2661 case USB_SPEED_LOW:
2662 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2663 break;
2664 case USB_SPEED_FULL:
2665 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2666 break;
2667 case USB_SPEED_HIGH:
2668 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2669 break;
2670 case USB_SPEED_SUPER:
2671 case USB_SPEED_SUPER_PLUS:
2672 case USB_SPEED_UNKNOWN:
2673 case USB_SPEED_WIRELESS:
2674
2675
2676
2677 return;
2678 }
2679 if (tt_info)
2680 tt_info->active_eps -= 1;
2681 list_del_init(&virt_ep->bw_endpoint_list);
2682}
2683
2684static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2685 struct xhci_bw_info *ep_bw,
2686 struct xhci_interval_bw_table *bw_table,
2687 struct usb_device *udev,
2688 struct xhci_virt_ep *virt_ep,
2689 struct xhci_tt_bw_info *tt_info)
2690{
2691 struct xhci_interval_bw *interval_bw;
2692 struct xhci_virt_ep *smaller_ep;
2693 int normalized_interval;
2694
2695 if (xhci_is_async_ep(ep_bw->type))
2696 return;
2697
2698 if (udev->speed == USB_SPEED_SUPER) {
2699 if (xhci_is_sync_in_ep(ep_bw->type))
2700 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2701 xhci_get_ss_bw_consumed(ep_bw);
2702 else
2703 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2704 xhci_get_ss_bw_consumed(ep_bw);
2705 return;
2706 }
2707
2708
2709
2710
2711 if (udev->speed == USB_SPEED_HIGH)
2712 normalized_interval = ep_bw->ep_interval;
2713 else
2714 normalized_interval = ep_bw->ep_interval - 3;
2715
2716 if (normalized_interval == 0)
2717 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2718 interval_bw = &bw_table->interval_bw[normalized_interval];
2719 interval_bw->num_packets += ep_bw->num_packets;
2720 switch (udev->speed) {
2721 case USB_SPEED_LOW:
2722 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2723 break;
2724 case USB_SPEED_FULL:
2725 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2726 break;
2727 case USB_SPEED_HIGH:
2728 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2729 break;
2730 case USB_SPEED_SUPER:
2731 case USB_SPEED_SUPER_PLUS:
2732 case USB_SPEED_UNKNOWN:
2733 case USB_SPEED_WIRELESS:
2734
2735
2736
2737 return;
2738 }
2739
2740 if (tt_info)
2741 tt_info->active_eps += 1;
2742
2743 list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2744 bw_endpoint_list) {
2745 if (ep_bw->max_packet_size >=
2746 smaller_ep->bw_info.max_packet_size) {
2747
2748 list_add_tail(&virt_ep->bw_endpoint_list,
2749 &smaller_ep->bw_endpoint_list);
2750 return;
2751 }
2752 }
2753
2754 list_add_tail(&virt_ep->bw_endpoint_list,
2755 &interval_bw->endpoints);
2756}
2757
2758void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2759 struct xhci_virt_device *virt_dev,
2760 int old_active_eps)
2761{
2762 struct xhci_root_port_bw_info *rh_bw_info;
2763 if (!virt_dev->tt_info)
2764 return;
2765
2766 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2767 if (old_active_eps == 0 &&
2768 virt_dev->tt_info->active_eps != 0) {
2769 rh_bw_info->num_active_tts += 1;
2770 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2771 } else if (old_active_eps != 0 &&
2772 virt_dev->tt_info->active_eps == 0) {
2773 rh_bw_info->num_active_tts -= 1;
2774 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2775 }
2776}
2777
2778static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2779 struct xhci_virt_device *virt_dev,
2780 struct xhci_container_ctx *in_ctx)
2781{
2782 struct xhci_bw_info ep_bw_info[31];
2783 int i;
2784 struct xhci_input_control_ctx *ctrl_ctx;
2785 int old_active_eps = 0;
2786
2787 if (virt_dev->tt_info)
2788 old_active_eps = virt_dev->tt_info->active_eps;
2789
2790 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2791 if (!ctrl_ctx) {
2792 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2793 __func__);
2794 return -ENOMEM;
2795 }
2796
2797 for (i = 0; i < 31; i++) {
2798 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2799 continue;
2800
2801
2802 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2803 sizeof(ep_bw_info[i]));
2804
2805
2806
2807 if (EP_IS_DROPPED(ctrl_ctx, i))
2808 xhci_drop_ep_from_interval_table(xhci,
2809 &virt_dev->eps[i].bw_info,
2810 virt_dev->bw_table,
2811 virt_dev->udev,
2812 &virt_dev->eps[i],
2813 virt_dev->tt_info);
2814 }
2815
2816 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2817 for (i = 0; i < 31; i++) {
2818
2819 if (EP_IS_ADDED(ctrl_ctx, i))
2820 xhci_add_ep_to_interval_table(xhci,
2821 &virt_dev->eps[i].bw_info,
2822 virt_dev->bw_table,
2823 virt_dev->udev,
2824 &virt_dev->eps[i],
2825 virt_dev->tt_info);
2826 }
2827
2828 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2829
2830
2831
2832 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2833 return 0;
2834 }
2835
2836
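	/* Not enough bandwidth: roll back the interval tables and bw_info. */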
2837 for (i = 0; i < 31; i++) {
2838 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2839 continue;
2840
2841
2842
2843
2844 if (EP_IS_ADDED(ctrl_ctx, i)) {
2845 xhci_drop_ep_from_interval_table(xhci,
2846 &virt_dev->eps[i].bw_info,
2847 virt_dev->bw_table,
2848 virt_dev->udev,
2849 &virt_dev->eps[i],
2850 virt_dev->tt_info);
2851 }
2852
2853 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2854 sizeof(ep_bw_info[i]));
2855
2856 if (EP_IS_DROPPED(ctrl_ctx, i))
2857 xhci_add_ep_to_interval_table(xhci,
2858 &virt_dev->eps[i].bw_info,
2859 virt_dev->bw_table,
2860 virt_dev->udev,
2861 &virt_dev->eps[i],
2862 virt_dev->tt_info);
2863 }
2864 return -ENOMEM;
2865}
2866
2867
2868
2869
2870
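/*
 * Issue a Configure Endpoint command (or an Evaluate Context command when
 * ctx_change is set) and wait for it to complete.  On hosts that need it,
 * host endpoint contexts and software bandwidth are reserved up front and
 * released again if queuing or the command itself fails.
 */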
2871static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2872 struct usb_device *udev,
2873 struct xhci_command *command,
2874 bool ctx_change, bool must_succeed)
2875{
2876 int ret;
2877 unsigned long flags;
2878 struct xhci_input_control_ctx *ctrl_ctx;
2879 struct xhci_virt_device *virt_dev;
2880 struct xhci_slot_ctx *slot_ctx;
2881
2882 if (!command)
2883 return -EINVAL;
2884
2885 spin_lock_irqsave(&xhci->lock, flags);
2886
2887 if (xhci->xhc_state & XHCI_STATE_DYING) {
2888 spin_unlock_irqrestore(&xhci->lock, flags);
2889 return -ESHUTDOWN;
2890 }
2891
2892 virt_dev = xhci->devs[udev->slot_id];
2893
2894 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2895 if (!ctrl_ctx) {
2896 spin_unlock_irqrestore(&xhci->lock, flags);
2897 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2898 __func__);
2899 return -ENOMEM;
2900 }
2901
2902 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2903 xhci_reserve_host_resources(xhci, ctrl_ctx)) {
2904 spin_unlock_irqrestore(&xhci->lock, flags);
2905 xhci_warn(xhci, "Not enough host resources, "
2906 "active endpoint contexts = %u\n",
2907 xhci->num_active_eps);
2908 return -ENOMEM;
2909 }
2910 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2911 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2912 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2913 xhci_free_host_resources(xhci, ctrl_ctx);
2914 spin_unlock_irqrestore(&xhci->lock, flags);
2915 xhci_warn(xhci, "Not enough bandwidth\n");
2916 return -ENOMEM;
2917 }
2918
2919 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
2920
2921 trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx);
2922 trace_xhci_configure_endpoint(slot_ctx);
2923
2924 if (!ctx_change)
2925 ret = xhci_queue_configure_endpoint(xhci, command,
2926 command->in_ctx->dma,
2927 udev->slot_id, must_succeed);
2928 else
2929 ret = xhci_queue_evaluate_context(xhci, command,
2930 command->in_ctx->dma,
2931 udev->slot_id, must_succeed);
2932 if (ret < 0) {
2933 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2934 xhci_free_host_resources(xhci, ctrl_ctx);
2935 spin_unlock_irqrestore(&xhci->lock, flags);
2936 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2937 "FIXME allocate a new ring segment");
2938 return -ENOMEM;
2939 }
2940 xhci_ring_cmd_db(xhci);
2941 spin_unlock_irqrestore(&xhci->lock, flags);
2942
2943
2944 wait_for_completion(command->completion);
2945
2946 if (!ctx_change)
2947 ret = xhci_configure_endpoint_result(xhci, udev,
2948 &command->status);
2949 else
2950 ret = xhci_evaluate_context_result(xhci, udev,
2951 &command->status);
2952
2953 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2954 spin_lock_irqsave(&xhci->lock, flags);
2955
2956
2957
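		/*
		 * If the command failed, release the reserved endpoint
		 * contexts; otherwise account for the endpoints it dropped.
		 */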
2958 if (ret)
2959 xhci_free_host_resources(xhci, ctrl_ctx);
2960 else
2961 xhci_finish_resource_reservation(xhci, ctrl_ctx);
2962 spin_unlock_irqrestore(&xhci->lock, flags);
2963 }
2964 return ret;
2965}
2966
2967static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
2968 struct xhci_virt_device *vdev, int i)
2969{
2970 struct xhci_virt_ep *ep = &vdev->eps[i];
2971
2972 if (ep->ep_state & EP_HAS_STREAMS) {
2973 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
2974 xhci_get_endpoint_address(i));
2975 xhci_free_stream_info(xhci, ep->stream_info);
2976 ep->stream_info = NULL;
2977 ep->ep_state &= ~EP_HAS_STREAMS;
2978 }
2979}
2980
2990
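/*
 * Commit the endpoint changes queued by xhci_add_endpoint() and
 * xhci_drop_endpoint(): issue a Configure Endpoint command, free the rings of
 * dropped endpoints, and install the newly allocated rings for added ones.
 * If this fails, callers typically undo the pending changes with
 * xhci_reset_bandwidth().
 */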
2991int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2992{
2993 int i;
2994 int ret = 0;
2995 struct xhci_hcd *xhci;
2996 struct xhci_virt_device *virt_dev;
2997 struct xhci_input_control_ctx *ctrl_ctx;
2998 struct xhci_slot_ctx *slot_ctx;
2999 struct xhci_command *command;
3000
3001 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3002 if (ret <= 0)
3003 return ret;
3004 xhci = hcd_to_xhci(hcd);
3005 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
3006 (xhci->xhc_state & XHCI_STATE_REMOVING))
3007 return -ENODEV;
3008
3009 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
3010 virt_dev = xhci->devs[udev->slot_id];
3011
3012 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
3013 if (!command)
3014 return -ENOMEM;
3015
3016 command->in_ctx = virt_dev->in_ctx;
3017
3018
3019 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3020 if (!ctrl_ctx) {
3021 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3022 __func__);
3023 ret = -ENOMEM;
3024 goto command_cleanup;
3025 }
3026 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
3027 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
3028 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
3029
3030
3031 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
3032 ctrl_ctx->drop_flags == 0) {
3033 ret = 0;
3034 goto command_cleanup;
3035 }
3036
3037 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
3038 for (i = 31; i >= 1; i--) {
3039 __le32 le32 = cpu_to_le32(BIT(i));
3040
3041 if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
3042 || (ctrl_ctx->add_flags & le32) || i == 1) {
3043 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
3044 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
3045 break;
3046 }
3047 }
3048
3049 ret = xhci_configure_endpoint(xhci, udev, command,
3050 false, false);
3051 if (ret)
3052
3053 goto command_cleanup;
3054
3055
3056 for (i = 1; i < 31; i++) {
3057 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
3058 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
3059 xhci_free_endpoint_ring(xhci, virt_dev, i);
3060 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
3061 }
3062 }
3063 xhci_zero_in_ctx(xhci, virt_dev);
3064
3065
3066
3067
3068 for (i = 1; i < 31; i++) {
3069 if (!virt_dev->eps[i].new_ring)
3070 continue;
3071
3072
3073
3074 if (virt_dev->eps[i].ring) {
3075 xhci_free_endpoint_ring(xhci, virt_dev, i);
3076 }
3077 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
3078 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
3079 virt_dev->eps[i].new_ring = NULL;
3080 xhci_debugfs_create_endpoint(xhci, virt_dev, i);
3081 }
3082command_cleanup:
3083 kfree(command->completion);
3084 kfree(command);
3085
3086 return ret;
3087}
3088EXPORT_SYMBOL_GPL(xhci_check_bandwidth);
3089
3090void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
3091{
3092 struct xhci_hcd *xhci;
3093 struct xhci_virt_device *virt_dev;
3094 int i, ret;
3095
3096 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3097 if (ret <= 0)
3098 return;
3099 xhci = hcd_to_xhci(hcd);
3100
3101 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
3102 virt_dev = xhci->devs[udev->slot_id];
3103
3104 for (i = 0; i < 31; i++) {
3105 if (virt_dev->eps[i].new_ring) {
3106 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3107 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
3108 virt_dev->eps[i].new_ring = NULL;
3109 }
3110 }
3111 xhci_zero_in_ctx(xhci, virt_dev);
3112}
3113EXPORT_SYMBOL_GPL(xhci_reset_bandwidth);
3114
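/*
 * Fill in the add/drop flags of an input control context and copy the current
 * slot context into the input context so a Configure Endpoint command can be
 * issued against it.
 */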
3115static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
3116 struct xhci_container_ctx *in_ctx,
3117 struct xhci_container_ctx *out_ctx,
3118 struct xhci_input_control_ctx *ctrl_ctx,
3119 u32 add_flags, u32 drop_flags)
3120{
3121 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
3122 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
3123 xhci_slot_copy(xhci, in_ctx, out_ctx);
3124 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
3125}
3126
3127static void xhci_endpoint_disable(struct usb_hcd *hcd,
3128 struct usb_host_endpoint *host_ep)
3129{
3130 struct xhci_hcd *xhci;
3131 struct xhci_virt_device *vdev;
3132 struct xhci_virt_ep *ep;
3133 struct usb_device *udev;
3134 unsigned long flags;
3135 unsigned int ep_index;
3136
3137 xhci = hcd_to_xhci(hcd);
3138rescan:
3139 spin_lock_irqsave(&xhci->lock, flags);
3140
3141 udev = (struct usb_device *)host_ep->hcpriv;
3142 if (!udev || !udev->slot_id)
3143 goto done;
3144
3145 vdev = xhci->devs[udev->slot_id];
3146 if (!vdev)
3147 goto done;
3148
3149 ep_index = xhci_get_endpoint_index(&host_ep->desc);
3150 ep = &vdev->eps[ep_index];
3151 if (!ep)
3152 goto done;
3153
3154
3155 if (ep->ep_state & EP_CLEARING_TT) {
3156 spin_unlock_irqrestore(&xhci->lock, flags);
3157 schedule_timeout_uninterruptible(1);
3158 goto rescan;
3159 }
3160
3161 if (ep->ep_state)
3162 xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n",
3163 ep->ep_state);
3164done:
3165 host_ep->hcpriv = NULL;
3166 spin_unlock_irqrestore(&xhci->lock, flags);
3167}
3168
3180
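/*
 * "Soft" endpoint reset, used to clear the data toggle / sequence number for
 * a halt the host controller never saw.  The endpoint is stopped and then
 * dropped and re-added with a single Configure Endpoint command.  Control and
 * isochronous endpoints, endpoints undergoing a hard reset, and endpoints
 * that still have TDs queued are left alone.
 */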
3181static void xhci_endpoint_reset(struct usb_hcd *hcd,
3182 struct usb_host_endpoint *host_ep)
3183{
3184 struct xhci_hcd *xhci;
3185 struct usb_device *udev;
3186 struct xhci_virt_device *vdev;
3187 struct xhci_virt_ep *ep;
3188 struct xhci_input_control_ctx *ctrl_ctx;
3189 struct xhci_command *stop_cmd, *cfg_cmd;
3190 unsigned int ep_index;
3191 unsigned long flags;
3192 u32 ep_flag;
3193 int err;
3194
3195 xhci = hcd_to_xhci(hcd);
3196 if (!host_ep->hcpriv)
3197 return;
3198 udev = (struct usb_device *) host_ep->hcpriv;
3199 vdev = xhci->devs[udev->slot_id];
3200
3201
3202
3203
3204
3205
3206 if (!udev->slot_id || !vdev)
3207 return;
3208 ep_index = xhci_get_endpoint_index(&host_ep->desc);
3209 ep = &vdev->eps[ep_index];
3210 if (!ep)
3211 return;
3212
3213
3214 if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
3215 ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
3216 return;
3217 }
3218
3219 if (usb_endpoint_xfer_control(&host_ep->desc) ||
3220 usb_endpoint_xfer_isoc(&host_ep->desc))
3221 return;
3222
3223 ep_flag = xhci_get_endpoint_flag(&host_ep->desc);
3224
3225 if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
3226 return;
3227
3228 stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
3229 if (!stop_cmd)
3230 return;
3231
3232 cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
3233 if (!cfg_cmd)
3234 goto cleanup;
3235
3236 spin_lock_irqsave(&xhci->lock, flags);
3237
3238
3239 ep->ep_state |= EP_SOFT_CLEAR_TOGGLE;
3240
3241
3242
3243
3244
3245
3246
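	/*
	 * The toggle and sequence number can only be reset safely while the
	 * endpoint ring is empty; the caller must have cancelled all URBs.
	 */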
3247 if (!list_empty(&ep->ring->td_list)) {
3248 dev_err(&udev->dev, "EP not empty, refuse reset\n");
3249 spin_unlock_irqrestore(&xhci->lock, flags);
3250 xhci_free_command(xhci, cfg_cmd);
3251 goto cleanup;
3252 }
3253
3254 err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
3255 ep_index, 0);
3256 if (err < 0) {
3257 spin_unlock_irqrestore(&xhci->lock, flags);
3258 xhci_free_command(xhci, cfg_cmd);
		xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d\n",
				__func__, err);
3261 goto cleanup;
3262 }
3263
3264 xhci_ring_cmd_db(xhci);
3265 spin_unlock_irqrestore(&xhci->lock, flags);
3266
3267 wait_for_completion(stop_cmd->completion);
3268
3269 spin_lock_irqsave(&xhci->lock, flags);
3270
3271
3272 ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
3273 if (!ctrl_ctx) {
3274 spin_unlock_irqrestore(&xhci->lock, flags);
3275 xhci_free_command(xhci, cfg_cmd);
3276 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3277 __func__);
3278 goto cleanup;
3279 }
3280
3281 xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
3282 ctrl_ctx, ep_flag, ep_flag);
3283 xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
3284
3285 err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
3286 udev->slot_id, false);
3287 if (err < 0) {
3288 spin_unlock_irqrestore(&xhci->lock, flags);
3289 xhci_free_command(xhci, cfg_cmd);
		xhci_dbg(xhci, "%s: Failed to queue config ep command, %d\n",
				__func__, err);
3292 goto cleanup;
3293 }
3294
3295 xhci_ring_cmd_db(xhci);
3296 spin_unlock_irqrestore(&xhci->lock, flags);
3297
3298 wait_for_completion(cfg_cmd->completion);
3299
3300 xhci_free_command(xhci, cfg_cmd);
3301cleanup:
3302 xhci_free_command(xhci, stop_cmd);
3303 if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
3304 ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
3305}
3306
3307static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
3308 struct usb_device *udev, struct usb_host_endpoint *ep,
3309 unsigned int slot_id)
3310{
3311 int ret;
3312 unsigned int ep_index;
3313 unsigned int ep_state;
3314
3315 if (!ep)
3316 return -EINVAL;
3317 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
3318 if (ret <= 0)
3319 return -EINVAL;
3320 if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
3321 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
3322 " descriptor for ep 0x%x does not support streams\n",
3323 ep->desc.bEndpointAddress);
3324 return -EINVAL;
3325 }
3326
3327 ep_index = xhci_get_endpoint_index(&ep->desc);
3328 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3329 if (ep_state & EP_HAS_STREAMS ||
3330 ep_state & EP_GETTING_STREAMS) {
3331 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
3332 "already has streams set up.\n",
3333 ep->desc.bEndpointAddress);
3334 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
3335 "dynamic stream context array reallocation.\n");
3336 return -EINVAL;
3337 }
3338 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
3339 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
3340 "endpoint 0x%x; URBs are pending.\n",
3341 ep->desc.bEndpointAddress);
3342 return -EINVAL;
3343 }
3344 return 0;
3345}
3346
3347static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
3348 unsigned int *num_streams, unsigned int *num_stream_ctxs)
3349{
3350 unsigned int max_streams;
3351
3352
3353 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
3354
3355
3356
3357
3358
3359
3360 max_streams = HCC_MAX_PSA(xhci->hcc_params);
3361 if (*num_stream_ctxs > max_streams) {
3362 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
3363 max_streams);
3364 *num_stream_ctxs = max_streams;
3365 *num_streams = max_streams;
3366 }
3367}
3368
3369
3370
3371
3372
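/*
 * Check that streams can be enabled on every endpoint in the array, clamp
 * *num_streams to what the endpoints actually support, and build the bitmask
 * of endpoint context flags that will change.  This pass only gathers
 * information; no device state is modified.
 */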
3373static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
3374 struct usb_device *udev,
3375 struct usb_host_endpoint **eps, unsigned int num_eps,
3376 unsigned int *num_streams, u32 *changed_ep_bitmask)
3377{
3378 unsigned int max_streams;
3379 unsigned int endpoint_flag;
3380 int i;
3381 int ret;
3382
3383 for (i = 0; i < num_eps; i++) {
3384 ret = xhci_check_streams_endpoint(xhci, udev,
3385 eps[i], udev->slot_id);
3386 if (ret < 0)
3387 return ret;
3388
3389 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
3390 if (max_streams < (*num_streams - 1)) {
3391 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3392 eps[i]->desc.bEndpointAddress,
3393 max_streams);
3394 *num_streams = max_streams+1;
3395 }
3396
3397 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
3398 if (*changed_ep_bitmask & endpoint_flag)
3399 return -EINVAL;
3400 *changed_ep_bitmask |= endpoint_flag;
3401 }
3402 return 0;
3403}
3404
3405static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3406 struct usb_device *udev,
3407 struct usb_host_endpoint **eps, unsigned int num_eps)
3408{
3409 u32 changed_ep_bitmask = 0;
3410 unsigned int slot_id;
3411 unsigned int ep_index;
3412 unsigned int ep_state;
3413 int i;
3414
3415 slot_id = udev->slot_id;
3416 if (!xhci->devs[slot_id])
3417 return 0;
3418
3419 for (i = 0; i < num_eps; i++) {
3420 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3421 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3422
3423 if (ep_state & EP_GETTING_NO_STREAMS) {
3424 xhci_warn(xhci, "WARN Can't disable streams for "
3425 "endpoint 0x%x, "
3426 "streams are being disabled already\n",
3427 eps[i]->desc.bEndpointAddress);
3428 return 0;
3429 }
3430
3431 if (!(ep_state & EP_HAS_STREAMS) &&
3432 !(ep_state & EP_GETTING_STREAMS)) {
3433 xhci_warn(xhci, "WARN Can't disable streams for "
3434 "endpoint 0x%x, "
3435 "streams are already disabled!\n",
3436 eps[i]->desc.bEndpointAddress);
3437 xhci_warn(xhci, "WARN xhci_free_streams() called "
3438 "with non-streams endpoint\n");
3439 return 0;
3440 }
3441 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3442 }
3443 return changed_ep_bitmask;
3444}
3445
3461
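/*
 * Set up bulk streams on a set of endpoints: allocate stream contexts and
 * stream rings, program them into the endpoint contexts, and issue a single
 * Configure Endpoint command covering all of them.  Returns the number of
 * stream IDs usable by the driver (stream 0 is reserved) or a negative error
 * code, in which case everything is torn down again.
 */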
3462static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3463 struct usb_host_endpoint **eps, unsigned int num_eps,
3464 unsigned int num_streams, gfp_t mem_flags)
3465{
3466 int i, ret;
3467 struct xhci_hcd *xhci;
3468 struct xhci_virt_device *vdev;
3469 struct xhci_command *config_cmd;
3470 struct xhci_input_control_ctx *ctrl_ctx;
3471 unsigned int ep_index;
3472 unsigned int num_stream_ctxs;
3473 unsigned int max_packet;
3474 unsigned long flags;
3475 u32 changed_ep_bitmask = 0;
3476
3477 if (!eps)
3478 return -EINVAL;
3479
3480
3481
3482
3483 num_streams += 1;
3484 xhci = hcd_to_xhci(hcd);
3485 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3486 num_streams);
3487
3488
3489 if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
3490 HCC_MAX_PSA(xhci->hcc_params) < 4) {
3491 xhci_dbg(xhci, "xHCI controller does not support streams.\n");
3492 return -ENOSYS;
3493 }
3494
3495 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
3496 if (!config_cmd)
3497 return -ENOMEM;
3498
3499 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
3500 if (!ctrl_ctx) {
3501 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3502 __func__);
3503 xhci_free_command(xhci, config_cmd);
3504 return -ENOMEM;
3505 }
3506
3507
3508
3509
3510
3511 spin_lock_irqsave(&xhci->lock, flags);
3512 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3513 num_eps, &num_streams, &changed_ep_bitmask);
3514 if (ret < 0) {
3515 xhci_free_command(xhci, config_cmd);
3516 spin_unlock_irqrestore(&xhci->lock, flags);
3517 return ret;
3518 }
3519 if (num_streams <= 1) {
3520 xhci_warn(xhci, "WARN: endpoints can't handle "
3521 "more than one stream.\n");
3522 xhci_free_command(xhci, config_cmd);
3523 spin_unlock_irqrestore(&xhci->lock, flags);
3524 return -EINVAL;
3525 }
3526 vdev = xhci->devs[udev->slot_id];
3527
3528
3529
3530 for (i = 0; i < num_eps; i++) {
3531 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3532 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3533 }
3534 spin_unlock_irqrestore(&xhci->lock, flags);
3535
3536
3537
3538
3539
3540 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3541 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3542 num_stream_ctxs, num_streams);
3543
3544 for (i = 0; i < num_eps; i++) {
3545 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3546 max_packet = usb_endpoint_maxp(&eps[i]->desc);
3547 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3548 num_stream_ctxs,
3549 num_streams,
3550 max_packet, mem_flags);
3551 if (!vdev->eps[ep_index].stream_info)
3552 goto cleanup;
3553
3554
3555
3556 }
3557
3558
3559 for (i = 0; i < num_eps; i++) {
3560 struct xhci_ep_ctx *ep_ctx;
3561
3562 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3563 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3564
3565 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3566 vdev->out_ctx, ep_index);
3567 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3568 vdev->eps[ep_index].stream_info);
3569 }
3570
3571
3572
3573 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3574 vdev->out_ctx, ctrl_ctx,
3575 changed_ep_bitmask, changed_ep_bitmask);
3576
3577
3578 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3579 false, false);
3580
3581
3582
3583
3584
3585 if (ret < 0)
3586 goto cleanup;
3587
3588 spin_lock_irqsave(&xhci->lock, flags);
3589 for (i = 0; i < num_eps; i++) {
3590 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3591 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3592 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3593 udev->slot_id, ep_index);
3594 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3595 }
3596 xhci_free_command(xhci, config_cmd);
3597 spin_unlock_irqrestore(&xhci->lock, flags);
3598
3599 for (i = 0; i < num_eps; i++) {
3600 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3601 xhci_debugfs_create_stream_files(xhci, vdev, ep_index);
3602 }
3603
3604 return num_streams - 1;
3605
3606cleanup:
3607
3608 for (i = 0; i < num_eps; i++) {
3609 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3610 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3611 vdev->eps[ep_index].stream_info = NULL;
3612
3613
3614
3615 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3616 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3617 xhci_endpoint_zero(xhci, vdev, eps[i]);
3618 }
3619 xhci_free_command(xhci, config_cmd);
3620 return -ENOMEM;
3621}
3622
3623
3624
3625
3626
3627
3628
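/*
 * Transition a set of endpoints from using streams back to ordinary endpoints
 * with a single ring each: issue a Configure Endpoint command and, once it
 * succeeds, free the stream information for every endpoint.
 */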
3629static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3630 struct usb_host_endpoint **eps, unsigned int num_eps,
3631 gfp_t mem_flags)
3632{
3633 int i, ret;
3634 struct xhci_hcd *xhci;
3635 struct xhci_virt_device *vdev;
3636 struct xhci_command *command;
3637 struct xhci_input_control_ctx *ctrl_ctx;
3638 unsigned int ep_index;
3639 unsigned long flags;
3640 u32 changed_ep_bitmask;
3641
3642 xhci = hcd_to_xhci(hcd);
3643 vdev = xhci->devs[udev->slot_id];
3644
3645
3646 spin_lock_irqsave(&xhci->lock, flags);
3647 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3648 udev, eps, num_eps);
3649 if (changed_ep_bitmask == 0) {
3650 spin_unlock_irqrestore(&xhci->lock, flags);
3651 return -EINVAL;
3652 }
3653
3654
3655
3656
3657
3658 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3659 command = vdev->eps[ep_index].stream_info->free_streams_command;
3660 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3661 if (!ctrl_ctx) {
3662 spin_unlock_irqrestore(&xhci->lock, flags);
3663 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3664 __func__);
3665 return -EINVAL;
3666 }
3667
3668 for (i = 0; i < num_eps; i++) {
3669 struct xhci_ep_ctx *ep_ctx;
3670
3671 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3672 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3673 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3674 EP_GETTING_NO_STREAMS;
3675
3676 xhci_endpoint_copy(xhci, command->in_ctx,
3677 vdev->out_ctx, ep_index);
3678 xhci_setup_no_streams_ep_input_ctx(ep_ctx,
3679 &vdev->eps[ep_index]);
3680 }
3681 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3682 vdev->out_ctx, ctrl_ctx,
3683 changed_ep_bitmask, changed_ep_bitmask);
3684 spin_unlock_irqrestore(&xhci->lock, flags);
3685
3686
3687
3688
3689 ret = xhci_configure_endpoint(xhci, udev, command,
3690 false, true);
3691
3692
3693
3694
3695 if (ret < 0)
3696 return ret;
3697
3698 spin_lock_irqsave(&xhci->lock, flags);
3699 for (i = 0; i < num_eps; i++) {
3700 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3701 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3702 vdev->eps[ep_index].stream_info = NULL;
3703
3704
3705
3706 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3707 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3708 }
3709 spin_unlock_irqrestore(&xhci->lock, flags);
3710
3711 return 0;
3712}
3713
3714
3715
3716
3717
3718
3719
3720
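/*
 * Release the host endpoint contexts of every endpoint that still has a ring,
 * optionally including the default control endpoint.  Used after a Reset
 * Device command (which keeps ep0); callers that disable the slot pass
 * drop_control_ep = true.
 *
 * Must be called with xhci->lock held.
 */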
3721void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3722 struct xhci_virt_device *virt_dev, bool drop_control_ep)
3723{
3724 int i;
3725 unsigned int num_dropped_eps = 0;
3726 unsigned int drop_flags = 0;
3727
3728 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3729 if (virt_dev->eps[i].ring) {
3730 drop_flags |= 1 << i;
3731 num_dropped_eps++;
3732 }
3733 }
3734 xhci->num_active_eps -= num_dropped_eps;
3735 if (num_dropped_eps)
3736 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3737 "Dropped %u ep ctxs, flags = 0x%x, "
3738 "%u now active.",
3739 num_dropped_eps, drop_flags,
3740 xhci->num_active_eps);
3741}
3742
3760
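/*
 * Reset a device that the core wants to re-enumerate: issue a Reset Device
 * command to put the slot back into the Default state, then free all endpoint
 * rings, stream data and bandwidth bookkeeping for the slot.  If the xHC's
 * notion of the device is stale (no virt_dev, or it belongs to another udev),
 * the slot is simply re-allocated instead.
 */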
3761static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
3762 struct usb_device *udev)
3763{
3764 int ret, i;
3765 unsigned long flags;
3766 struct xhci_hcd *xhci;
3767 unsigned int slot_id;
3768 struct xhci_virt_device *virt_dev;
3769 struct xhci_command *reset_device_cmd;
3770 struct xhci_slot_ctx *slot_ctx;
3771 int old_active_eps = 0;
3772
3773 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3774 if (ret <= 0)
3775 return ret;
3776 xhci = hcd_to_xhci(hcd);
3777 slot_id = udev->slot_id;
3778 virt_dev = xhci->devs[slot_id];
3779 if (!virt_dev) {
3780 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3781 "not exist. Re-allocate the device\n", slot_id);
3782 ret = xhci_alloc_dev(hcd, udev);
3783 if (ret == 1)
3784 return 0;
3785 else
3786 return -EINVAL;
3787 }
3788
3789 if (virt_dev->tt_info)
3790 old_active_eps = virt_dev->tt_info->active_eps;
3791
3792 if (virt_dev->udev != udev) {
3793
3794
3795
3796
3797 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3798 "not match the udev. Re-allocate the device\n",
3799 slot_id);
3800 ret = xhci_alloc_dev(hcd, udev);
3801 if (ret == 1)
3802 return 0;
3803 else
3804 return -EINVAL;
3805 }
3806
3807
3808 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3809 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3810 SLOT_STATE_DISABLED)
3811 return 0;
3812
3813 trace_xhci_discover_or_reset_device(slot_ctx);
3814
3815 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3816
3817
3818
3819
3820
3821
3822 reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
3823 if (!reset_device_cmd) {
3824 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3825 return -ENOMEM;
3826 }
3827
3828
3829 spin_lock_irqsave(&xhci->lock, flags);
3830
3831 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
3832 if (ret) {
3833 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3834 spin_unlock_irqrestore(&xhci->lock, flags);
3835 goto command_cleanup;
3836 }
3837 xhci_ring_cmd_db(xhci);
3838 spin_unlock_irqrestore(&xhci->lock, flags);
3839
3840
3841 wait_for_completion(reset_device_cmd->completion);
3842
3843
3844
3845
3846
3847 ret = reset_device_cmd->status;
3848 switch (ret) {
3849 case COMP_COMMAND_ABORTED:
3850 case COMP_COMMAND_RING_STOPPED:
3851 xhci_warn(xhci, "Timeout waiting for reset device command\n");
3852 ret = -ETIME;
3853 goto command_cleanup;
3854 case COMP_SLOT_NOT_ENABLED_ERROR:
3855 case COMP_CONTEXT_STATE_ERROR:
3856 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
3857 slot_id,
3858 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3859 xhci_dbg(xhci, "Not freeing device rings.\n");
3860
3861 ret = 0;
3862 goto command_cleanup;
3863 case COMP_SUCCESS:
3864 xhci_dbg(xhci, "Successful reset device command.\n");
3865 break;
3866 default:
3867 if (xhci_is_vendor_info_code(xhci, ret))
3868 break;
3869 xhci_warn(xhci, "Unknown completion code %u for "
3870 "reset device command.\n", ret);
3871 ret = -EINVAL;
3872 goto command_cleanup;
3873 }
3874
3875
3876 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3877 spin_lock_irqsave(&xhci->lock, flags);
3878
3879 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3880 spin_unlock_irqrestore(&xhci->lock, flags);
3881 }
3882
3883
3884 for (i = 1; i < 31; i++) {
3885 struct xhci_virt_ep *ep = &virt_dev->eps[i];
3886
3887 if (ep->ep_state & EP_HAS_STREAMS) {
3888 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
3889 xhci_get_endpoint_address(i));
3890 xhci_free_stream_info(xhci, ep->stream_info);
3891 ep->stream_info = NULL;
3892 ep->ep_state &= ~EP_HAS_STREAMS;
3893 }
3894
3895 if (ep->ring) {
3896 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3897 xhci_free_endpoint_ring(xhci, virt_dev, i);
3898 }
3899 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3900 xhci_drop_ep_from_interval_table(xhci,
3901 &virt_dev->eps[i].bw_info,
3902 virt_dev->bw_table,
3903 udev,
3904 &virt_dev->eps[i],
3905 virt_dev->tt_info);
3906 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3907 }
3908
3909 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3910 virt_dev->flags = 0;
3911 ret = 0;
3912
3913command_cleanup:
3914 xhci_free_command(xhci, reset_device_cmd);
3915 return ret;
3916}
3917
3918
3919
3920
3921
3922
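/*
 * The usb_device is going away: stop any pending Stop Endpoint watchdog
 * timers, disable the slot, and free the xHCI data structures associated
 * with it.
 */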
3923static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3924{
3925 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3926 struct xhci_virt_device *virt_dev;
3927 struct xhci_slot_ctx *slot_ctx;
3928 int i, ret;
3929
3930#ifndef CONFIG_USB_DEFAULT_PERSIST
3931
3932
3933
3934
3935
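	/*
	 * Drop the runtime PM reference taken in xhci_alloc_dev() so the
	 * controller may runtime suspend again once no devices remain.
	 */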
3936 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3937 pm_runtime_put_noidle(hcd->self.controller);
3938#endif
3939
3940 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3941
3942
3943
3944 if (ret <= 0 && ret != -ENODEV)
3945 return;
3946
3947 virt_dev = xhci->devs[udev->slot_id];
3948 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3949 trace_xhci_free_dev(slot_ctx);
3950
3951
3952 for (i = 0; i < 31; i++) {
3953 virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
3954 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
3955 }
3956 virt_dev->udev = NULL;
3957 ret = xhci_disable_slot(xhci, udev->slot_id);
3958 if (ret)
3959 xhci_free_virt_device(xhci, udev->slot_id);
3960}
3961
3962int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
3963{
3964 struct xhci_command *command;
3965 unsigned long flags;
3966 u32 state;
3967 int ret = 0;
3968
3969 command = xhci_alloc_command(xhci, false, GFP_KERNEL);
3970 if (!command)
3971 return -ENOMEM;
3972
3973 xhci_debugfs_remove_slot(xhci, slot_id);
3974
3975 spin_lock_irqsave(&xhci->lock, flags);
3976
3977 state = readl(&xhci->op_regs->status);
3978 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3979 (xhci->xhc_state & XHCI_STATE_HALTED)) {
3980 spin_unlock_irqrestore(&xhci->lock, flags);
3981 kfree(command);
3982 return -ENODEV;
3983 }
3984
3985 ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
3986 slot_id);
3987 if (ret) {
3988 spin_unlock_irqrestore(&xhci->lock, flags);
3989 kfree(command);
3990 return ret;
3991 }
3992 xhci_ring_cmd_db(xhci);
3993 spin_unlock_irqrestore(&xhci->lock, flags);
3994 return ret;
3995}
3996
3997
3998
3999
4000
4001
4002
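/*
 * Check that there is room for the default control endpoint of a newly
 * enabled slot and reserve it.
 *
 * Must be called with xhci->lock held.
 */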
4003static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
4004{
4005 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
4006 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
4007 "Not enough ep ctxs: "
4008 "%u active, need to add 1, limit is %u.",
4009 xhci->num_active_eps, xhci->limit_active_eps);
4010 return -ENOMEM;
4011 }
4012 xhci->num_active_eps += 1;
4013 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
4014 "Adding 1 ep ctx, %u now active.",
4015 xhci->num_active_eps);
4016 return 0;
4017}
4018
4019
4020
4021
4022
4023
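/*
 * Ask the xHC for a new device slot and allocate the per-slot data
 * structures.  Returns 1 on success and 0 on any failure (no free slots,
 * command timeout, or out of memory).
 */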
4024int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
4025{
4026 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4027 struct xhci_virt_device *vdev;
4028 struct xhci_slot_ctx *slot_ctx;
4029 unsigned long flags;
4030 int ret, slot_id;
4031 struct xhci_command *command;
4032
4033 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4034 if (!command)
4035 return 0;
4036
4037 spin_lock_irqsave(&xhci->lock, flags);
4038 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
4039 if (ret) {
4040 spin_unlock_irqrestore(&xhci->lock, flags);
4041 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
4042 xhci_free_command(xhci, command);
4043 return 0;
4044 }
4045 xhci_ring_cmd_db(xhci);
4046 spin_unlock_irqrestore(&xhci->lock, flags);
4047
4048 wait_for_completion(command->completion);
4049 slot_id = command->slot_id;
4050
4051 if (!slot_id || command->status != COMP_SUCCESS) {
4052 xhci_err(xhci, "Error while assigning device slot ID\n");
4053 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
4054 HCS_MAX_SLOTS(
4055 readl(&xhci->cap_regs->hcs_params1)));
4056 xhci_free_command(xhci, command);
4057 return 0;
4058 }
4059
4060 xhci_free_command(xhci, command);
4061
4062 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
4063 spin_lock_irqsave(&xhci->lock, flags);
4064 ret = xhci_reserve_host_control_ep_resources(xhci);
4065 if (ret) {
4066 spin_unlock_irqrestore(&xhci->lock, flags);
4067 xhci_warn(xhci, "Not enough host resources, "
4068 "active endpoint contexts = %u\n",
4069 xhci->num_active_eps);
4070 goto disable_slot;
4071 }
4072 spin_unlock_irqrestore(&xhci->lock, flags);
4073 }
4074
4075
4076
4077
4078 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
4079 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
4080 goto disable_slot;
4081 }
4082 vdev = xhci->devs[slot_id];
4083 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
4084 trace_xhci_alloc_dev(slot_ctx);
4085
4086 udev->slot_id = slot_id;
4087
4088 xhci_debugfs_create_slot(xhci, slot_id);
4089
4090#ifndef CONFIG_USB_DEFAULT_PERSIST
4091
4092
4093
4094
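	/*
	 * On hosts that are reset on resume we cannot runtime suspend while a
	 * device is attached, so hold a runtime PM reference per device.
	 */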
4095 if (xhci->quirks & XHCI_RESET_ON_RESUME)
4096 pm_runtime_get_noresume(hcd->self.controller);
4097#endif
4098
4099
4100
4101 return 1;
4102
4103disable_slot:
4104 ret = xhci_disable_slot(xhci, udev->slot_id);
4105 if (ret)
4106 xhci_free_virt_device(xhci, udev->slot_id);
4107
4108 return 0;
4109}
4110
4111
4112
4113
4114
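/*
 * Issue an Address Device command for the slot.  SETUP_CONTEXT_ONLY only
 * moves the slot to the Default state without assigning a USB address;
 * SETUP_CONTEXT_ADDRESS performs the full address assignment.
 */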
4115static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
4116 enum xhci_setup_dev setup)
4117{
4118 const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
4119 unsigned long flags;
4120 struct xhci_virt_device *virt_dev;
4121 int ret = 0;
4122 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4123 struct xhci_slot_ctx *slot_ctx;
4124 struct xhci_input_control_ctx *ctrl_ctx;
4125 u64 temp_64;
4126 struct xhci_command *command = NULL;
4127
4128 mutex_lock(&xhci->mutex);
4129
4130 if (xhci->xhc_state) {
4131 ret = -ESHUTDOWN;
4132 goto out;
4133 }
4134
4135 if (!udev->slot_id) {
4136 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4137 "Bad Slot ID %d", udev->slot_id);
4138 ret = -EINVAL;
4139 goto out;
4140 }
4141
4142 virt_dev = xhci->devs[udev->slot_id];
4143
4144 if (WARN_ON(!virt_dev)) {
4145
4146
4147
4148
4149
4150 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
4151 udev->slot_id);
4152 ret = -EINVAL;
4153 goto out;
4154 }
4155 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4156 trace_xhci_setup_device_slot(slot_ctx);
4157
4158 if (setup == SETUP_CONTEXT_ONLY) {
4159 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
4160 SLOT_STATE_DEFAULT) {
4161 xhci_dbg(xhci, "Slot already in default state\n");
4162 goto out;
4163 }
4164 }
4165
4166 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4167 if (!command) {
4168 ret = -ENOMEM;
4169 goto out;
4170 }
4171
4172 command->in_ctx = virt_dev->in_ctx;
4173
4174 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
4175 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
4176 if (!ctrl_ctx) {
4177 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4178 __func__);
4179 ret = -EINVAL;
4180 goto out;
4181 }
4182
4183
4184
4185
4186
4187 if (!slot_ctx->dev_info)
4188 xhci_setup_addressable_virt_dev(xhci, udev);
4189
4190 else
4191 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
4192 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
4193 ctrl_ctx->drop_flags = 0;
4194
4195 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4196 le32_to_cpu(slot_ctx->dev_info) >> 27);
4197
4198 trace_xhci_address_ctrl_ctx(ctrl_ctx);
4199 spin_lock_irqsave(&xhci->lock, flags);
4200 trace_xhci_setup_device(virt_dev);
4201 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
4202 udev->slot_id, setup);
4203 if (ret) {
4204 spin_unlock_irqrestore(&xhci->lock, flags);
4205 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4206 "FIXME: allocate a command ring segment");
4207 goto out;
4208 }
4209 xhci_ring_cmd_db(xhci);
4210 spin_unlock_irqrestore(&xhci->lock, flags);
4211
4212
4213 wait_for_completion(command->completion);
4214
4215
4216
4217
4218
4219 switch (command->status) {
4220 case COMP_COMMAND_ABORTED:
4221 case COMP_COMMAND_RING_STOPPED:
4222 xhci_warn(xhci, "Timeout while waiting for setup device command\n");
4223 ret = -ETIME;
4224 break;
4225 case COMP_CONTEXT_STATE_ERROR:
4226 case COMP_SLOT_NOT_ENABLED_ERROR:
4227 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
4228 act, udev->slot_id);
4229 ret = -EINVAL;
4230 break;
4231 case COMP_USB_TRANSACTION_ERROR:
4232 dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
4233
4234 mutex_unlock(&xhci->mutex);
4235 ret = xhci_disable_slot(xhci, udev->slot_id);
4236 if (!ret)
4237 xhci_alloc_dev(hcd, udev);
4238 kfree(command->completion);
4239 kfree(command);
4240 return -EPROTO;
4241 case COMP_INCOMPATIBLE_DEVICE_ERROR:
4242 dev_warn(&udev->dev,
4243 "ERROR: Incompatible device for setup %s command\n", act);
4244 ret = -ENODEV;
4245 break;
4246 case COMP_SUCCESS:
4247 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4248 "Successful setup %s command", act);
4249 break;
4250 default:
4251 xhci_err(xhci,
4252 "ERROR: unexpected setup %s command completion code 0x%x.\n",
4253 act, command->status);
4254 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
4255 ret = -EINVAL;
4256 break;
4257 }
4258 if (ret)
4259 goto out;
4260 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
4261 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4262 "Op regs DCBAA ptr = %#016llx", temp_64);
4263 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4264 "Slot ID %d dcbaa entry @%p = %#016llx",
4265 udev->slot_id,
4266 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
4267 (unsigned long long)
4268 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
4269 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4270 "Output Context DMA address = %#08llx",
4271 (unsigned long long)virt_dev->out_ctx->dma);
4272 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4273 le32_to_cpu(slot_ctx->dev_info) >> 27);
4274
4275
4276
4277
4278 trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
4279 le32_to_cpu(slot_ctx->dev_info) >> 27);
4280
4281 ctrl_ctx->add_flags = 0;
4282 ctrl_ctx->drop_flags = 0;
4283 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4284 udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4285
4286 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4287 "Internal device address = %d",
4288 le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4289out:
4290 mutex_unlock(&xhci->mutex);
4291 if (command) {
4292 kfree(command->completion);
4293 kfree(command);
4294 }
4295 return ret;
4296}
4297
4298static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
4299{
4300 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
4301}
4302
4303static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
4304{
4305 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
4306}
4307
4308
4309
4310
4311
4312
4313
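/*
 * Map a 1-based port number of the USB2 or USB3 root hub to the raw 1-based
 * hardware port number used by the PORTSC register array.
 */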
4314int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
4315{
4316 struct xhci_hub *rhub;
4317
4318 rhub = xhci_get_rhub(hcd);
4319 return rhub->ports[port1 - 1]->hw_portnum + 1;
4320}
4321
4322
4323
4324
4325
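/*
 * Issue an Evaluate Context command to change the Max Exit Latency in the
 * slot context and, if it succeeds, remember the new value in the virt_dev.
 */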
4326static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4327 struct usb_device *udev, u16 max_exit_latency)
4328{
4329 struct xhci_virt_device *virt_dev;
4330 struct xhci_command *command;
4331 struct xhci_input_control_ctx *ctrl_ctx;
4332 struct xhci_slot_ctx *slot_ctx;
4333 unsigned long flags;
4334 int ret;
4335
4336 spin_lock_irqsave(&xhci->lock, flags);
4337
4338 virt_dev = xhci->devs[udev->slot_id];
4339
4340
4341
4342
4343
4344
4345
4346 if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
4347 spin_unlock_irqrestore(&xhci->lock, flags);
4348 return 0;
4349 }
4350
4351
4352 command = xhci->lpm_command;
4353 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
4354 if (!ctrl_ctx) {
4355 spin_unlock_irqrestore(&xhci->lock, flags);
4356 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4357 __func__);
4358 return -ENOMEM;
4359 }
4360
4361 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4362 spin_unlock_irqrestore(&xhci->lock, flags);
4363
4364 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4365 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4366 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4367 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4368 slot_ctx->dev_state = 0;
4369
4370 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
4371 "Set up evaluate context for LPM MEL change.");
4372
4373
4374 ret = xhci_configure_endpoint(xhci, udev, command,
4375 true, true);
4376
4377 if (!ret) {
4378 spin_lock_irqsave(&xhci->lock, flags);
4379 virt_dev->current_mel = max_exit_latency;
4380 spin_unlock_irqrestore(&xhci->lock, flags);
4381 }
4382 return ret;
4383}
4384
4385#ifdef CONFIG_PM
4386
4387
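/* BESL to HIRD encoding array for USB2 LPM (values in microseconds) */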
4388static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
4389 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
4390
4391
4392static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
4393 struct usb_device *udev)
4394{
4395 int u2del, besl, besl_host;
4396 int besl_device = 0;
4397 u32 field;
4398
4399 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
4400 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4401
4402 if (field & USB_BESL_SUPPORT) {
4403 for (besl_host = 0; besl_host < 16; besl_host++) {
4404 if (xhci_besl_encoding[besl_host] >= u2del)
4405 break;
4406 }
4407
4408 if (field & USB_BESL_BASELINE_VALID)
4409 besl_device = USB_GET_BESL_BASELINE(field);
4410 else if (field & USB_BESL_DEEP_VALID)
4411 besl_device = USB_GET_BESL_DEEP(field);
4412 } else {
4413 if (u2del <= 50)
4414 besl_host = 0;
4415 else
4416 besl_host = (u2del - 51) / 75 + 1;
4417 }
4418
4419 besl = besl_host + besl_device;
4420 if (besl > 15)
4421 besl = 15;
4422
4423 return besl;
4424}
4425
4426
4427static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
4428{
4429 u32 field;
4430 int l1;
4431 int besld = 0;
4432 int hirdm = 0;
4433
4434 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4435
4436
4437 l1 = udev->l1_params.timeout / 256;
4438
4439
4440 if (field & USB_BESL_DEEP_VALID) {
4441 besld = USB_GET_BESL_DEEP(field);
4442 hirdm = 1;
4443 }
4444
4445 return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
4446}
4447
4448static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4449 struct usb_device *udev, int enable)
4450{
4451 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4452 struct xhci_port **ports;
4453 __le32 __iomem *pm_addr, *hlpm_addr;
4454 u32 pm_val, hlpm_val, field;
4455 unsigned int port_num;
4456 unsigned long flags;
4457 int hird, exit_latency;
4458 int ret;
4459
4460 if (xhci->quirks & XHCI_HW_LPM_DISABLE)
4461 return -EPERM;
4462
4463 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
4464 !udev->lpm_capable)
4465 return -EPERM;
4466
4467 if (!udev->parent || udev->parent->parent ||
4468 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4469 return -EPERM;
4470
4471 if (udev->usb2_hw_lpm_capable != 1)
4472 return -EPERM;
4473
4474 spin_lock_irqsave(&xhci->lock, flags);
4475
4476 ports = xhci->usb2_rhub.ports;
4477 port_num = udev->portnum - 1;
4478 pm_addr = ports[port_num]->addr + PORTPMSC;
4479 pm_val = readl(pm_addr);
4480 hlpm_addr = ports[port_num]->addr + PORTHLPMC;
4481
4482 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
4483 enable ? "enable" : "disable", port_num + 1);
4484
4485 if (enable) {
4486
4487 if (udev->usb2_hw_lpm_besl_capable) {
4488
4489
4490
4491
4492 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4493 if ((field & USB_BESL_SUPPORT) &&
4494 (field & USB_BESL_BASELINE_VALID))
4495 hird = USB_GET_BESL_BASELINE(field);
4496 else
4497 hird = udev->l1_params.besl;
4498
4499 exit_latency = xhci_besl_encoding[hird];
4500 spin_unlock_irqrestore(&xhci->lock, flags);
4501
4502
4503
4504
4505
4506
4507
4508
4509 mutex_lock(hcd->bandwidth_mutex);
4510 ret = xhci_change_max_exit_latency(xhci, udev,
4511 exit_latency);
4512 mutex_unlock(hcd->bandwidth_mutex);
4513
4514 if (ret < 0)
4515 return ret;
4516 spin_lock_irqsave(&xhci->lock, flags);
4517
4518 hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
4519 writel(hlpm_val, hlpm_addr);
4520
4521 readl(hlpm_addr);
4522 } else {
4523 hird = xhci_calculate_hird_besl(xhci, udev);
4524 }
4525
4526 pm_val &= ~PORT_HIRD_MASK;
4527 pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
4528 writel(pm_val, pm_addr);
4529 pm_val = readl(pm_addr);
4530 pm_val |= PORT_HLE;
4531 writel(pm_val, pm_addr);
4532
4533 readl(pm_addr);
4534 } else {
4535 pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
4536 writel(pm_val, pm_addr);
4537
4538 readl(pm_addr);
4539 if (udev->usb2_hw_lpm_besl_capable) {
4540 spin_unlock_irqrestore(&xhci->lock, flags);
4541 mutex_lock(hcd->bandwidth_mutex);
4542 xhci_change_max_exit_latency(xhci, udev, 0);
4543 mutex_unlock(hcd->bandwidth_mutex);
4544 readl_poll_timeout(ports[port_num]->addr, pm_val,
4545 (pm_val & PORT_PLS_MASK) == XDEV_U0,
4546 100, 10000);
4547 return 0;
4548 }
4549 }
4550
4551 spin_unlock_irqrestore(&xhci->lock, flags);
4552 return 0;
4553}
4554
4555
4556
4557
4558
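/*
 * Check whether a USB2 root hub port falls within the port range of an
 * extended capability (e.g. XHCI_HLC or XHCI_BLC).  Returns 1 if it does.
 */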
4559static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
4560 unsigned capability)
4561{
4562 u32 port_offset, port_count;
4563 int i;
4564
4565 for (i = 0; i < xhci->num_ext_caps; i++) {
4566 if (xhci->ext_caps[i] & capability) {
4567
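 /* port offsets in the extended capability are 1-based */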
4568 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
4569 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
4570 if (port >= port_offset &&
4571 port < port_offset + port_count)
4572 return 1;
4573 }
4574 }
4575 return 0;
4576}
4577
4578static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4579{
4580 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4581 int portnum = udev->portnum - 1;
4582
4583 if (hcd->speed >= HCD_USB3 || !udev->lpm_capable)
4584 return 0;
4585
4586
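 /* Only non-hub devices attached directly to the root hub are supported */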
4587 if (!udev->parent || udev->parent->parent ||
4588 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4589 return 0;
4590
4591 if (xhci->hw_lpm_support == 1 &&
4592 xhci_check_usb2_port_capability(
4593 xhci, portnum, XHCI_HLC)) {
4594 udev->usb2_hw_lpm_capable = 1;
4595 udev->l1_params.timeout = XHCI_L1_TIMEOUT;
4596 udev->l1_params.besl = XHCI_DEFAULT_BESL;
4597 if (xhci_check_usb2_port_capability(xhci, portnum,
4598 XHCI_BLC))
4599 udev->usb2_hw_lpm_besl_capable = 1;
4600 }
4601
4602 return 0;
4603}
4604
4605
4606
4607
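/*
 * Endpoint service interval in nanoseconds: 2^(bInterval - 1) * 125 us.
 * For example, bInterval = 4 gives 2^3 * 125 us = 1 ms = 1000000 ns.
 */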
4608static unsigned long long xhci_service_interval_to_ns(
4609 struct usb_endpoint_descriptor *desc)
4610{
4611 return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
4612}
4613
4614static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
4615 enum usb3_link_state state)
4616{
4617 unsigned long long sel;
4618 unsigned long long pel;
4619 unsigned int max_sel_pel;
4620 char *state_name;
4621
4622 switch (state) {
4623 case USB3_LPM_U1:
4624
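 /* Convert SEL and PEL from nanoseconds to microseconds */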
4625 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
4626 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
4627 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
4628 state_name = "U1";
4629 break;
4630 case USB3_LPM_U2:
4631 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
4632 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
4633 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
4634 state_name = "U2";
4635 break;
4636 default:
4637 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
4638 __func__);
4639 return USB3_LPM_DISABLED;
4640 }
4641
4642 if (sel <= max_sel_pel && pel <= max_sel_pel)
4643 return USB3_LPM_DEVICE_INITIATED;
4644
4645 if (sel > max_sel_pel)
4646 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4647 "due to long SEL %llu ms\n",
4648 state_name, sel);
4649 else
4650 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4651 "due to long PEL %llu ms\n",
4652 state_name, pel);
4653 return USB3_LPM_DISABLED;
4654}
4655
4656
4657
4658
4659
4660
4661
4662
4663
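/*
 * U1 timeout policy for Intel hosts, per endpoint type:
 *  - control and notification interrupt endpoints: 3 * U1 SEL,
 *  - bulk endpoints: 5 * U1 SEL,
 *  - periodic interrupt and isochronous endpoints: the larger of 105% of the
 *    service interval and 2 * U1 SEL.
 */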
4664static unsigned long long xhci_calculate_intel_u1_timeout(
4665 struct usb_device *udev,
4666 struct usb_endpoint_descriptor *desc)
4667{
4668 unsigned long long timeout_ns;
4669 int ep_type;
4670 int intr_type;
4671
4672 ep_type = usb_endpoint_type(desc);
4673 switch (ep_type) {
4674 case USB_ENDPOINT_XFER_CONTROL:
4675 timeout_ns = udev->u1_params.sel * 3;
4676 break;
4677 case USB_ENDPOINT_XFER_BULK:
4678 timeout_ns = udev->u1_params.sel * 5;
4679 break;
4680 case USB_ENDPOINT_XFER_INT:
4681 intr_type = usb_endpoint_interrupt_type(desc);
4682 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
4683 timeout_ns = udev->u1_params.sel * 3;
4684 break;
4685 }
4686
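 /* Periodic interrupt endpoints follow the isochronous rule below */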
4687 fallthrough;
4688 case USB_ENDPOINT_XFER_ISOC:
4689 timeout_ns = xhci_service_interval_to_ns(desc);
4690 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
4691 if (timeout_ns < udev->u1_params.sel * 2)
4692 timeout_ns = udev->u1_params.sel * 2;
4693 break;
4694 default:
4695 return 0;
4696 }
4697
4698 return timeout_ns;
4699}
4700
4701
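/* Returns the hub-encoded U1 timeout for this endpoint, in 1 us units. */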
4702static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
4703 struct usb_device *udev,
4704 struct usb_endpoint_descriptor *desc)
4705{
4706 unsigned long long timeout_ns;
4707
4708 if (xhci->quirks & XHCI_INTEL_HOST)
4709 timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
4710 else
4711 timeout_ns = udev->u1_params.sel;
4712
4713
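 /* Prevent U1 when the service interval is shorter than the U1 exit latency */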
4714 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
4715 if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
4716 dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
4717 return USB3_LPM_DISABLED;
4718 }
4719 }
4720
4721
4722
4723
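 /*
  * The U1 timeout is encoded in 1 us units. Never return zero, since a
  * zero timeout means USB3_LPM_DISABLED.
  */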
4724 if (timeout_ns == USB3_LPM_DISABLED)
4725 timeout_ns = 1;
4726 else
4727 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
4728
4729
4730
4731
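 /*
  * If the timeout is larger than the hub's U1 timeout field can encode,
  * hub-initiated U1 has to be disabled.
  */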
4732 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
4733 return timeout_ns;
4734 dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
4735 "due to long timeout %llu ms\n", timeout_ns);
4736 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
4737}
4738
4739
4740
4741
4742
4743
4744
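/*
 * U2 timeout policy for Intel hosts: take the largest of 10 ms, the service
 * interval of a periodic endpoint, and the device's U2 exit latency.
 */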
4745static unsigned long long xhci_calculate_intel_u2_timeout(
4746 struct usb_device *udev,
4747 struct usb_endpoint_descriptor *desc)
4748{
4749 unsigned long long timeout_ns;
4750 unsigned long long u2_del_ns;
4751
4752 timeout_ns = 10 * 1000 * 1000;
4753
4754 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4755 (xhci_service_interval_to_ns(desc) > timeout_ns))
4756 timeout_ns = xhci_service_interval_to_ns(desc);
4757
4758 u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
4759 if (u2_del_ns > timeout_ns)
4760 timeout_ns = u2_del_ns;
4761
4762 return timeout_ns;
4763}
4764
4765
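/* Returns the hub-encoded U2 timeout for this endpoint, in 256 us units. */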
4766static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
4767 struct usb_device *udev,
4768 struct usb_endpoint_descriptor *desc)
4769{
4770 unsigned long long timeout_ns;
4771
4772 if (xhci->quirks & XHCI_INTEL_HOST)
4773 timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
4774 else
4775 timeout_ns = udev->u2_params.sel;
4776
4777
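 /* Prevent U2 when the service interval is shorter than the U2 exit latency */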
4778 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
4779 if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
4780 dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
4781 return USB3_LPM_DISABLED;
4782 }
4783 }
4784
4785
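 /* The U2 timeout is encoded in 256 us units */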
4786 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
4787
4788
4789
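 /*
  * If the timeout is larger than the hub's U2 timeout field can encode,
  * hub-initiated U2 has to be disabled.
  */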
4790 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
4791 return timeout_ns;
4792 dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
4793 "due to long timeout %llu ms\n", timeout_ns);
4794 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
4795}
4796
4797static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4798 struct usb_device *udev,
4799 struct usb_endpoint_descriptor *desc,
4800 enum usb3_link_state state,
4801 u16 *timeout)
4802{
4803 if (state == USB3_LPM_U1)
4804 return xhci_calculate_u1_timeout(xhci, udev, desc);
4805 else if (state == USB3_LPM_U2)
4806 return xhci_calculate_u2_timeout(xhci, udev, desc);
4807
4808 return USB3_LPM_DISABLED;
4809}
4810
4811static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4812 struct usb_device *udev,
4813 struct usb_endpoint_descriptor *desc,
4814 enum usb3_link_state state,
4815 u16 *timeout)
4816{
4817 u16 alt_timeout;
4818
4819 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4820 desc, state, timeout);
4821
4822
4823
4824
4825
4826
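 /*
  * If hub-initiated LPM can't be enabled and the exit latency is also too
  * long for device-initiated LPM, disable LPM for the whole device and
  * stop looking at further endpoints.
  */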
4827 if (alt_timeout == USB3_LPM_DISABLED) {
4828 *timeout = alt_timeout;
4829 return -E2BIG;
4830 }
4831 if (alt_timeout > *timeout)
4832 *timeout = alt_timeout;
4833 return 0;
4834}
4835
4836static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
4837 struct usb_device *udev,
4838 struct usb_host_interface *alt,
4839 enum usb3_link_state state,
4840 u16 *timeout)
4841{
4842 int j;
4843
4844 for (j = 0; j < alt->desc.bNumEndpoints; j++) {
4845 if (xhci_update_timeout_for_endpoint(xhci, udev,
4846 &alt->endpoint[j].desc, state, timeout))
4847 return -E2BIG;
4848 }
4849 return 0;
4850}
4851
4852static int xhci_check_intel_tier_policy(struct usb_device *udev,
4853 enum usb3_link_state state)
4854{
4855 struct usb_device *parent;
4856 unsigned int num_hubs;
4857
4858 if (state == USB3_LPM_U2)
4859 return 0;
4860
4861
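 /* Count the external hubs between this device and the root port */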
4862 for (parent = udev->parent, num_hubs = 0; parent->parent;
4863 parent = parent->parent)
4864 num_hubs++;
4865
4866 if (num_hubs < 2)
4867 return 0;
4868
4869 dev_dbg(&udev->dev, "Disabling U1 link state for device"
4870 " below second-tier hub.\n");
4871 dev_dbg(&udev->dev, "Plug device into first-tier hub "
4872 "to decrease power consumption.\n");
4873 return -E2BIG;
4874}
4875
4876static int xhci_check_tier_policy(struct xhci_hcd *xhci,
4877 struct usb_device *udev,
4878 enum usb3_link_state state)
4879{
4880 if (xhci->quirks & XHCI_INTEL_HOST)
4881 return xhci_check_intel_tier_policy(udev, state);
4882 else
4883 return 0;
4884}
4885
4886
4887
4888
4889
4890
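/*
 * Returns the U1 or U2 timeout that should be enabled for this device.
 * A non-zero return from the tier check or any per-endpoint update means the
 * timeout value is final and no more endpoints need to be examined.
 */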
4891static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
4892 struct usb_device *udev, enum usb3_link_state state)
4893{
4894 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4895 struct usb_host_config *config;
4896 char *state_name;
4897 int i;
4898 u16 timeout = USB3_LPM_DISABLED;
4899
4900 if (state == USB3_LPM_U1)
4901 state_name = "U1";
4902 else if (state == USB3_LPM_U2)
4903 state_name = "U2";
4904 else {
4905 dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
4906 state);
4907 return timeout;
4908 }
4909
4910 if (xhci_check_tier_policy(xhci, udev, state) < 0)
4911 return timeout;
4912
4913
4914
4915
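 /* Start with the default control endpoint, then walk the active config */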
4916 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
4917 state, &timeout))
4918 return timeout;
4919
4920 config = udev->actconfig;
4921 if (!config)
4922 return timeout;
4923
4924 for (i = 0; i < config->desc.bNumInterfaces; i++) {
4925 struct usb_driver *driver;
4926 struct usb_interface *intf = config->interface[i];
4927
4928 if (!intf)
4929 continue;
4930
4931
4932
4933
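 /* Check if the bound driver wants hub-initiated LPM disabled */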
4934 if (intf->dev.driver) {
4935 driver = to_usb_driver(intf->dev.driver);
4936 if (driver && driver->disable_hub_initiated_lpm) {
4937 dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
4938 state_name, driver->name);
4939 timeout = xhci_get_timeout_no_hub_lpm(udev,
4940 state);
4941 if (timeout == USB3_LPM_DISABLED)
4942 return timeout;
4943 }
4944 }
4945
4946
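 /* Skip interfaces without a current altsetting */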
4947 if (!intf->cur_altsetting)
4948 continue;
4949
4950 if (xhci_update_timeout_for_interface(xhci, udev,
4951 intf->cur_altsetting,
4952 state, &timeout))
4953 return timeout;
4954 }
4955 return timeout;
4956}
4957
4958static int calculate_max_exit_latency(struct usb_device *udev,
4959 enum usb3_link_state state_changed,
4960 u16 hub_encoded_timeout)
4961{
4962 unsigned long long u1_mel_us = 0;
4963 unsigned long long u2_mel_us = 0;
4964 unsigned long long mel_us = 0;
4965 bool disabling_u1;
4966 bool disabling_u2;
4967 bool enabling_u1;
4968 bool enabling_u2;
4969
4970 disabling_u1 = (state_changed == USB3_LPM_U1 &&
4971 hub_encoded_timeout == USB3_LPM_DISABLED);
4972 disabling_u2 = (state_changed == USB3_LPM_U2 &&
4973 hub_encoded_timeout == USB3_LPM_DISABLED);
4974
4975 enabling_u1 = (state_changed == USB3_LPM_U1 &&
4976 hub_encoded_timeout != USB3_LPM_DISABLED);
4977 enabling_u2 = (state_changed == USB3_LPM_U2 &&
4978 hub_encoded_timeout != USB3_LPM_DISABLED);
4979
4980
4981
4982
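 /*
  * Account for a state's max exit latency when it is already enabled and
  * not being disabled here, or when it is about to be enabled.
  */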
4983 if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
4984 enabling_u1)
4985 u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
4986 if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
4987 enabling_u2)
4988 u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
4989
4990 if (u1_mel_us > u2_mel_us)
4991 mel_us = u1_mel_us;
4992 else
4993 mel_us = u2_mel_us;
4994
4995 if (mel_us > MAX_EXIT) {
4996 dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
4997 "is too big.\n", mel_us);
4998 return -E2BIG;
4999 }
5000 return mel_us;
5001}
5002
5003
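/*
 * Returns the hub-encoded U1 or U2 timeout to program into the hub, or
 * USB3_LPM_DISABLED if the state can't be enabled for this device.
 */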
5004static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
5005 struct usb_device *udev, enum usb3_link_state state)
5006{
5007 struct xhci_hcd *xhci;
5008 u16 hub_encoded_timeout;
5009 int mel;
5010 int ret;
5011
5012 xhci = hcd_to_xhci(hcd);
5013
5014
5015
5016
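 /*
  * LPM timeouts are host controller specific, so only enable hub-initiated
  * timeouts when the vendor has declared support via XHCI_LPM_SUPPORT.
  */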
5017 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
5018 !xhci->devs[udev->slot_id])
5019 return USB3_LPM_DISABLED;
5020
5021 hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
5022 mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
5023 if (mel < 0) {
5024
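 /* Max exit latency is too big, disable this link state instead */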
5025 hub_encoded_timeout = USB3_LPM_DISABLED;
5026 mel = 0;
5027 }
5028
5029 ret = xhci_change_max_exit_latency(xhci, udev, mel);
5030 if (ret)
5031 return ret;
5032 return hub_encoded_timeout;
5033}
5034
5035static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
5036 struct usb_device *udev, enum usb3_link_state state)
5037{
5038 struct xhci_hcd *xhci;
5039 u16 mel;
5040
5041 xhci = hcd_to_xhci(hcd);
5042 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
5043 !xhci->devs[udev->slot_id])
5044 return 0;
5045
5046 mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
5047 return xhci_change_max_exit_latency(xhci, udev, mel);
5048}
#else /* CONFIG_PM */
5050
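/* Stubs used when the link power management support above is compiled out */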
5051static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
5052 struct usb_device *udev, int enable)
5053{
5054 return 0;
5055}
5056
5057static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
5058{
5059 return 0;
5060}
5061
5062static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
5063 struct usb_device *udev, enum usb3_link_state state)
5064{
5065 return USB3_LPM_DISABLED;
5066}
5067
5068static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
5069 struct usb_device *udev, enum usb3_link_state state)
5070{
5071 return 0;
5072}
#endif /* CONFIG_PM */
5074
5075
5076
5077
5078
5079
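/*
 * Once a hub descriptor has been fetched for a device, update the xHC's view
 * of it: mark the slot as a hub and set the port count and TT information.
 */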
5080static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
5081 struct usb_tt *tt, gfp_t mem_flags)
5082{
5083 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5084 struct xhci_virt_device *vdev;
5085 struct xhci_command *config_cmd;
5086 struct xhci_input_control_ctx *ctrl_ctx;
5087 struct xhci_slot_ctx *slot_ctx;
5088 unsigned long flags;
5089 unsigned think_time;
5090 int ret;
5091
5092
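 /* Root hubs have no slot context to update */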
5093 if (!hdev->parent)
5094 return 0;
5095
5096 vdev = xhci->devs[hdev->slot_id];
5097 if (!vdev) {
5098 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
5099 return -EINVAL;
5100 }
5101
5102 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
5103 if (!config_cmd)
5104 return -ENOMEM;
5105
5106 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
5107 if (!ctrl_ctx) {
5108 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
5109 __func__);
5110 xhci_free_command(xhci, config_cmd);
5111 return -ENOMEM;
5112 }
5113
5114 spin_lock_irqsave(&xhci->lock, flags);
5115 if (hdev->speed == USB_SPEED_HIGH &&
5116 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
5117 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
5118 xhci_free_command(xhci, config_cmd);
5119 spin_unlock_irqrestore(&xhci->lock, flags);
5120 return -ENOMEM;
5121 }
5122
5123 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
5124 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
5125 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
5126 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
5127
5128
5129
5130
5131
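 /*
  * Set MTT when the hub has multiple TTs; xHCI requires MTT to be clear
  * for full-speed hubs, and it may already be set in the slot context,
  * so clear it explicitly in that case.
  */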
5132 if (tt->multi)
5133 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
5134 else if (hdev->speed == USB_SPEED_FULL)
5135 slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
5136
5137 if (xhci->hci_version > 0x95) {
5138 xhci_dbg(xhci, "xHCI version %x needs hub "
5139 "TT think time and number of ports\n",
5140 (unsigned int) xhci->hci_version);
5141 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
5142
5143
5144
5145
5146
5147
5148
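 /*
  * TT think time is encoded in full-speed bit times: 0 = 8, 1 = 16,
  * 2 = 24, 3 = 32 bit times (one step per 666 ns), so e.g. a think time
  * of 666 ns encodes to 0. xHCI 1.0 requires this field to be zero
  * unless the hub is high speed.
  */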
5149 think_time = tt->think_time;
5150 if (think_time != 0)
5151 think_time = (think_time / 666) - 1;
5152 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
5153 slot_ctx->tt_info |=
5154 cpu_to_le32(TT_THINK_TIME(think_time));
5155 } else {
5156 xhci_dbg(xhci, "xHCI version %x doesn't need hub "
5157 "TT think time or number of ports\n",
5158 (unsigned int) xhci->hci_version);
5159 }
5160 slot_ctx->dev_state = 0;
5161 spin_unlock_irqrestore(&xhci->lock, flags);
5162
5163 xhci_dbg(xhci, "Set up %s for hub device.\n",
5164 (xhci->hci_version > 0x95) ?
5165 "configure endpoint" : "evaluate context");
5166
5167
5168
5169
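 /* Issue and wait for the configure endpoint or evaluate context command */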
5170 if (xhci->hci_version > 0x95)
5171 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5172 false, false);
5173 else
5174 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5175 true, false);
5176
5177 xhci_free_command(xhci, config_cmd);
5178 return ret;
5179}
5180
5181static int xhci_get_frame(struct usb_hcd *hcd)
5182{
5183 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5184
5185 return readl(&xhci->run_regs->microframe_index) >> 3;
5186}
5187
5188int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
5189{
5190 struct xhci_hcd *xhci;
5191
5192
5193
5194
5195 struct device *dev = hcd->self.sysdev;
5196 unsigned int minor_rev;
5197 int retval;
5198
5199
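 /* Accept arbitrarily long scatter-gather lists */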
5200 hcd->self.sg_tablesize = ~0;
5201
5202
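 /* xHC can assemble packets from discontiguous buffers */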
5203 hcd->self.no_sg_constraint = 1;
5204
5205
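 /* xHCs don't stop the endpoint queue on short packets */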
5206 hcd->self.no_stop_on_short = 1;
5207
5208 xhci = hcd_to_xhci(hcd);
5209
5210 if (usb_hcd_is_primary_hcd(hcd)) {
5211 xhci->main_hcd = hcd;
5212 xhci->usb2_rhub.hcd = hcd;
5213
5214
5215
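 /*
  * The primary HCD is registered as the USB 2.0 roothub; the USB 3.0
  * roothub is registered later as the shared HCD.
  */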
5216 hcd->speed = HCD_USB2;
5217 hcd->self.root_hub->speed = USB_SPEED_HIGH;
5218
5219
5220
5221
5222
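 /*
  * The USB 2.0 roothub under xHCI has an integrated TT (rate matching
  * hub) rather than an OHCI/UHCI companion controller.
  */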
5223 hcd->has_tt = 1;
5224 } else {
5225
5226
5227
5228
5229
5230
5231
5232
5233
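 /*
  * Some early hosts report a USB 3.1 minor revision of 0x1 instead of
  * the BCD value 0x10, so treat 0x1 as minor revision 1 and otherwise
  * take the high nibble of the cached minor revision.
  */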
5234 if (xhci->usb3_rhub.min_rev == 0x1)
5235 minor_rev = 1;
5236 else
5237 minor_rev = xhci->usb3_rhub.min_rev / 0x10;
5238
5239 switch (minor_rev) {
5240 case 2:
5241 hcd->speed = HCD_USB32;
5242 hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
5243 hcd->self.root_hub->rx_lanes = 2;
5244 hcd->self.root_hub->tx_lanes = 2;
5245 hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x2;
5246 break;
5247 case 1:
5248 hcd->speed = HCD_USB31;
5249 hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
5250 hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x1;
5251 break;
5252 }
5253 xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
5254 minor_rev,
5255 minor_rev ? "Enhanced " : "");
5256
5257 xhci->usb3_rhub.hcd = hcd;
5258
5259
5260
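 /* Register mapping and quirk setup were already done for the primary HCD */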
5261 return 0;
5262 }
5263
5264 mutex_init(&xhci->mutex);
5265 xhci->cap_regs = hcd->regs;
5266 xhci->op_regs = hcd->regs +
5267 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
5268 xhci->run_regs = hcd->regs +
5269 (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
5270
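 /* Cache the read-only capability registers */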
5271 xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
5272 xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
5273 xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
5274 xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase);
5275 xhci->hci_version = HC_VERSION(xhci->hcc_params);
5276 xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
5277 if (xhci->hci_version > 0x100)
5278 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
5279
5280 xhci->quirks |= quirks;
5281
5282 get_quirks(dev, xhci);
5283
5284
5285
5286
5287
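 /*
  * Controllers following xHCI 1.0 may generate a spurious success
  * completion event after a short transfer; ignore it.
  */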
5288 if (xhci->hci_version > 0x96)
5289 xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
5290
5291
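 /* Make sure the host controller is halted before resetting it */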
5292 retval = xhci_halt(xhci);
5293 if (retval)
5294 return retval;
5295
5296 xhci_zero_64b_regs(xhci);
5297
5298 xhci_dbg(xhci, "Resetting HCD\n");
5299
5300 retval = xhci_reset(xhci);
5301 if (retval)
5302 return retval;
5303 xhci_dbg(xhci, "Reset complete\n");
5304
5305
5306
5307
5308
5309
5310
5311
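 /*
  * Some controllers advertise 64-bit addressing (AC64) without actually
  * supporting it; clear the bit so the 32-bit DMA mask is used below.
  */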
5312 if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
5313 xhci->hcc_params &= ~BIT(0);
5314
5315
5316
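 /* Use 64-bit DMA masks if the controller supports 64-bit addressing */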
5317 if (HCC_64BIT_ADDR(xhci->hcc_params) &&
5318 !dma_set_mask(dev, DMA_BIT_MASK(64))) {
5319 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
5320 dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
5321 } else {
5322
5323
5324
5325
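 /*
  * Fall back to 32-bit DMA, e.g. for a 32-bit controller in a 64-bit
  * capable system.
  */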
5326 retval = dma_set_mask(dev, DMA_BIT_MASK(32));
5327 if (retval)
5328 return retval;
5329 xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
5330 dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
5331 }
5332
5333 xhci_dbg(xhci, "Calling HCD init\n");
5334
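 /* Initialize the HCD and host controller data structures */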
5335 retval = xhci_init(hcd);
5336 if (retval)
5337 return retval;
5338 xhci_dbg(xhci, "Called HCD init\n");
5339
5340 xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
5341 xhci->hcc_params, xhci->hci_version, xhci->quirks);
5342
5343 return 0;
5344}
5345EXPORT_SYMBOL_GPL(xhci_gen_setup);
5346
5347static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
5348 struct usb_host_endpoint *ep)
5349{
5350 struct xhci_hcd *xhci;
5351 struct usb_device *udev;
5352 unsigned int slot_id;
5353 unsigned int ep_index;
5354 unsigned long flags;
5355
5356 xhci = hcd_to_xhci(hcd);
5357
5358 spin_lock_irqsave(&xhci->lock, flags);
5359 udev = (struct usb_device *)ep->hcpriv;
5360 slot_id = udev->slot_id;
5361 ep_index = xhci_get_endpoint_index(&ep->desc);
5362
5363 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
5364 xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
5365 spin_unlock_irqrestore(&xhci->lock, flags);
5366}
5367
5368static const struct hc_driver xhci_hc_driver = {
5369 .description = "xhci-hcd",
5370 .product_desc = "xHCI Host Controller",
5371 .hcd_priv_size = sizeof(struct xhci_hcd),
5372
5373
5374
5375
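 /*
  * generic hardware linkage
  */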
5376 .irq = xhci_irq,
5377 .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED |
5378 HCD_BH,
5379
5380
5381
5382
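 /*
  * basic lifecycle operations
  */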
 .reset = NULL, /* set from the overrides in xhci_init_driver() */
5384 .start = xhci_run,
5385 .stop = xhci_stop,
5386 .shutdown = xhci_shutdown,
5387
5388
5389
5390
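 /*
  * managing i/o requests and associated device resources
  */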
5391 .map_urb_for_dma = xhci_map_urb_for_dma,
5392 .unmap_urb_for_dma = xhci_unmap_urb_for_dma,
5393 .urb_enqueue = xhci_urb_enqueue,
5394 .urb_dequeue = xhci_urb_dequeue,
5395 .alloc_dev = xhci_alloc_dev,
5396 .free_dev = xhci_free_dev,
5397 .alloc_streams = xhci_alloc_streams,
5398 .free_streams = xhci_free_streams,
5399 .add_endpoint = xhci_add_endpoint,
5400 .drop_endpoint = xhci_drop_endpoint,
5401 .endpoint_disable = xhci_endpoint_disable,
5402 .endpoint_reset = xhci_endpoint_reset,
5403 .check_bandwidth = xhci_check_bandwidth,
5404 .reset_bandwidth = xhci_reset_bandwidth,
5405 .address_device = xhci_address_device,
5406 .enable_device = xhci_enable_device,
5407 .update_hub_device = xhci_update_hub_device,
5408 .reset_device = xhci_discover_or_reset_device,
5409
5410
5411
5412
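 /*
  * scheduling support
  */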
5413 .get_frame_number = xhci_get_frame,
5414
5415
5416
5417
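 /*
  * root hub support
  */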
5418 .hub_control = xhci_hub_control,
5419 .hub_status_data = xhci_hub_status_data,
5420 .bus_suspend = xhci_bus_suspend,
5421 .bus_resume = xhci_bus_resume,
5422 .get_resuming_ports = xhci_get_resuming_ports,
5423
5424
5425
5426
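 /*
  * device and link power management support
  */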
5427 .update_device = xhci_update_device,
5428 .set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm,
5429 .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout,
5430 .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout,
5431 .find_raw_port_number = xhci_find_raw_port_number,
5432 .clear_tt_buffer_complete = xhci_clear_tt_buffer_complete,
5433};
5434
5435void xhci_init_driver(struct hc_driver *drv,
5436 const struct xhci_driver_overrides *over)
5437{
5438 BUG_ON(!over);
5439
5440
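 /* Start from the generic xHCI driver, then apply the caller's overrides */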
5441 *drv = xhci_hc_driver;
5442
5443 if (over) {
5444 drv->hcd_priv_size += over->extra_priv_size;
5445 if (over->reset)
5446 drv->reset = over->reset;
5447 if (over->start)
5448 drv->start = over->start;
5449 if (over->add_endpoint)
5450 drv->add_endpoint = over->add_endpoint;
5451 if (over->drop_endpoint)
5452 drv->drop_endpoint = over->drop_endpoint;
5453 if (over->check_bandwidth)
5454 drv->check_bandwidth = over->check_bandwidth;
5455 if (over->reset_bandwidth)
5456 drv->reset_bandwidth = over->reset_bandwidth;
5457 }
5458}
5459EXPORT_SYMBOL_GPL(xhci_init_driver);
5460
5461MODULE_DESCRIPTION(DRIVER_DESC);
5462MODULE_AUTHOR(DRIVER_AUTHOR);
5463MODULE_LICENSE("GPL");
5464
5465static int __init xhci_hcd_init(void)
5466{
5467
5468
5469
5470
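 /*
  * Check the compiler-generated sizes of structures that must match the
  * layouts the hardware expects.
  */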
5471 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
5472 BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
5473 BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
5474
5475
5476
5477 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
5478 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
5479 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
5480 BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
5481 BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
5482
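 /* xhci_run_regs has eight 32-bit fields and embeds 128 xhci_intr_regs */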
5483 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
5484
5485 if (usb_disabled())
5486 return -ENODEV;
5487
5488 xhci_debugfs_create_root();
5489
5490 return 0;
5491}
5492
5493
5494
5495
5496
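/*
 * Providing a module exit handler keeps the module unloadable; it tears down
 * the debugfs root created in xhci_hcd_init().
 */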
5497static void __exit xhci_hcd_fini(void)
5498{
5499 xhci_debugfs_remove_root();
5500}
5501
5502module_init(xhci_hcd_init);
5503module_exit(xhci_hcd_fini);
5504