/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

/* Some 0.95 xHCI hardware can't handle the chain bit on a link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure cases: "usec" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
int xhci_handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;
	xhci_dbg(xhci, "// Halt the HC\n");
	xhci_quiesce(xhci);

	ret = xhci_handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (!ret) {
		xhci->xhc_state |= XHCI_STATE_HALTED;
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	} else
		xhci_warn(xhci, "Host not halted after %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = xhci_handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state &= ~XHCI_STATE_HALTED;
	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret, i;

	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);

	ret = xhci_handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 10 * 1000 * 1000);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = xhci_handshake(xhci, &xhci->op_regs->status,
			STS_CNR, 0, 10 * 1000 * 1000);

	for (i = 0; i < 2; ++i) {
		xhci->bus_state[i].port_c_suspend = 0;
		xhci->bus_state[i].suspended_ports = 0;
		xhci->bus_state[i].resuming_ports = 0;
	}

	return ret;
}

#ifdef CONFIG_PCI
static int xhci_free_msi(struct xhci_hcd *xhci)
{
	int i;

	if (!xhci->msix_entries)
		return -EINVAL;

	for (i = 0; i < xhci->msix_count; i++)
		if (xhci->msix_entries[i].vector)
			free_irq(xhci->msix_entries[i].vector,
					xhci_to_hcd(xhci));
	return 0;
}

/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_dbg(xhci, "failed to allocate MSI entry\n");
		return ret;
	}

	ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_dbg(xhci, "disable MSI interrupt\n");
		pci_disable_msi(pdev);
	}

	return ret;
}

/*
 * Free the IRQs requested when MSI or MSI-X was set up.  If the driver fell
 * back to a legacy PCI interrupt, nothing is freed here.
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq > 0)
		return;

	ret = xhci_free_msi(xhci);
	if (!ret)
		return;
	if (pdev->irq > 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));

	return;
}

/*
 * Set up MSI-X
 */
256static int xhci_setup_msix(struct xhci_hcd *xhci)
257{
258 int i, ret = 0;
259 struct usb_hcd *hcd = xhci_to_hcd(xhci);
260 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors to request:
	 * - HCS_MAX_INTRS: the maximum number of interrupters the host
	 *   controller supports, from HCSPARAMS1.
	 * - num_online_cpus() + 1: one vector per CPU plus one spare, so an
	 *   interrupt vector is always available.
	 */
269 xhci->msix_count = min(num_online_cpus() + 1,
270 HCS_MAX_INTRS(xhci->hcs_params1));
271
272 xhci->msix_entries =
273 kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
274 GFP_KERNEL);
275 if (!xhci->msix_entries) {
276 xhci_err(xhci, "Failed to allocate MSI-X entries\n");
277 return -ENOMEM;
278 }
279
280 for (i = 0; i < xhci->msix_count; i++) {
281 xhci->msix_entries[i].entry = i;
282 xhci->msix_entries[i].vector = 0;
283 }
284
285 ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
286 if (ret) {
287 xhci_dbg(xhci, "Failed to enable MSI-X\n");
288 goto free_entries;
289 }
290
291 for (i = 0; i < xhci->msix_count; i++) {
292 ret = request_irq(xhci->msix_entries[i].vector,
293 (irq_handler_t)xhci_msi_irq,
294 0, "xhci_hcd", xhci_to_hcd(xhci));
295 if (ret)
296 goto disable_msix;
297 }
298
299 hcd->msix_enabled = 1;
300 return ret;
301
302disable_msix:
303 xhci_dbg(xhci, "disable MSI-X interrupt\n");
304 xhci_free_irq(xhci);
305 pci_disable_msix(pdev);
306free_entries:
307 kfree(xhci->msix_entries);
308 xhci->msix_entries = NULL;
309 return ret;
310}
311
312
313static void xhci_cleanup_msix(struct xhci_hcd *xhci)
314{
315 struct usb_hcd *hcd = xhci_to_hcd(xhci);
316 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
317
318 xhci_free_irq(xhci);
319
320 if (xhci->msix_entries) {
321 pci_disable_msix(pdev);
322 kfree(xhci->msix_entries);
323 xhci->msix_entries = NULL;
324 } else {
325 pci_disable_msi(pdev);
326 }
327
328 hcd->msix_enabled = 0;
329 return;
330}
331
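/*
 * Wait for any in-flight MSI-X interrupt handlers to finish; used before
 * suspending the controller so no handler runs against halted hardware.
 */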
332static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
333{
334 int i;
335
336 if (xhci->msix_entries) {
337 for (i = 0; i < xhci->msix_count; i++)
338 synchronize_irq(xhci->msix_entries[i].vector);
339 }
340}
341
342static int xhci_try_enable_msi(struct usb_hcd *hcd)
343{
344 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
345 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
346 int ret;

	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI.
	 */
352 if (xhci->quirks & XHCI_BROKEN_MSI)
353 return 0;
354
355
356 if (hcd->irq)
357 free_irq(hcd->irq, hcd);
358 hcd->irq = 0;
359
360 ret = xhci_setup_msix(xhci);
361 if (ret)
362
363 ret = xhci_setup_msi(xhci);
364
365 if (!ret)
366
367 return 0;
368
369 if (!pdev->irq) {
370 xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
371 return -EINVAL;
372 }
373
374
375 ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
376 hcd->irq_descr, hcd);
377 if (ret) {
378 xhci_err(xhci, "request interrupt %d failed\n",
379 pdev->irq);
380 return ret;
381 }
382 hcd->irq = pdev->irq;
383 return 0;
384}
385
386#else
387
388static int xhci_try_enable_msi(struct usb_hcd *hcd)
389{
390 return 0;
391}
392
393static void xhci_cleanup_msix(struct xhci_hcd *xhci)
394{
395}
396
397static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
398{
399}
400
401#endif
402
403static void compliance_mode_recovery(unsigned long arg)
404{
405 struct xhci_hcd *xhci;
406 struct usb_hcd *hcd;
407 u32 temp;
408 int i;
409
410 xhci = (struct xhci_hcd *)arg;
411
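	/*
	 * Check every USB3 port; a port whose link is stuck in compliance
	 * mode is handed to the USB core for a warm reset via the root hub
	 * status poll below.
	 */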
412 for (i = 0; i < xhci->num_usb3_ports; i++) {
413 temp = xhci_readl(xhci, xhci->usb3_ports[i]);
414 if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
415
416
417
418
419 xhci_dbg(xhci, "Compliance Mode Detected->Port %d!\n",
420 i + 1);
421 xhci_dbg(xhci, "Attempting Recovery routine!\n");
422 hcd = xhci->shared_hcd;
423
424 if (hcd->state == HC_STATE_SUSPENDED)
425 usb_hcd_resume_root_hub(hcd);
426
427 usb_hcd_poll_rh_status(hcd);
428 }
429 }
430
431 if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
432 mod_timer(&xhci->comp_mode_recovery_timer,
433 jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
434}

/*
 * Quirk to work around an issue with the SN65LVPE502CP USB3.0 re-driver found
 * in some HP systems: ports behind it can get stuck in compliance mode.  Set
 * up a timer that periodically runs compliance_mode_recovery() to check the
 * USB3 port link states, handing any port stuck in compliance mode to the
 * USB core for a warm reset.
 *
 * The timer is deleted once all USB3 ports have been seen in U0, or when the
 * driver is stopped or suspended (and re-armed on resume).
 */
446static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
447{
448 xhci->port_status_u0 = 0;
449 init_timer(&xhci->comp_mode_recovery_timer);
450
451 xhci->comp_mode_recovery_timer.data = (unsigned long) xhci;
452 xhci->comp_mode_recovery_timer.function = compliance_mode_recovery;
453 xhci->comp_mode_recovery_timer.expires = jiffies +
454 msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
455
456 set_timer_slack(&xhci->comp_mode_recovery_timer,
457 msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
458 add_timer(&xhci->comp_mode_recovery_timer);
459 xhci_dbg(xhci, "Compliance Mode Recovery Timer Initialized.\n");
460}

/*
 * This quirk is only needed on certain Hewlett-Packard Z-series workstations
 * (Z420, Z620, Z820 and the Z1) that use the affected re-driver hardware;
 * check the DMI vendor and product name to decide whether to enable it.
 */
468static bool compliance_mode_recovery_timer_quirk_check(void)
469{
470 const char *dmi_product_name, *dmi_sys_vendor;
471
472 dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
473 dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
474 if (!dmi_product_name || !dmi_sys_vendor)
475 return false;
476
477 if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
478 return false;
479
480 if (strstr(dmi_product_name, "Z420") ||
481 strstr(dmi_product_name, "Z620") ||
482 strstr(dmi_product_name, "Z820") ||
483 strstr(dmi_product_name, "Z1 Workstation"))
484 return true;
485
486 return false;
487}
488
489static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
490{
491 return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
492}

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device contexts array,
 * allocate the command ring and event ring, and set up the scratchpad
 * buffers the host controller asks for.
 */
502int xhci_init(struct usb_hcd *hcd)
503{
504 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
505 int retval = 0;
506
507 xhci_dbg(xhci, "xhci_init\n");
508 spin_lock_init(&xhci->lock);
509 if (xhci->hci_version == 0x95 && link_quirk) {
510 xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
511 xhci->quirks |= XHCI_LINK_TRB_QUIRK;
512 } else {
513 xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
514 }
515 retval = xhci_mem_init(xhci, GFP_KERNEL);
516 xhci_dbg(xhci, "Finished xhci_init\n");
517
518
519 if (compliance_mode_recovery_timer_quirk_check()) {
520 xhci->quirks |= XHCI_COMP_MODE_QUIRK;
521 compliance_mode_recovery_timer_init(xhci);
522 }
523
524 return retval;
525}
526
527
528
529
530#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
531static void xhci_event_ring_work(unsigned long arg)
532{
533 unsigned long flags;
534 int temp;
535 u64 temp_64;
536 struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
537 int i, j;
538
539 xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);
540
541 spin_lock_irqsave(&xhci->lock, flags);
542 temp = xhci_readl(xhci, &xhci->op_regs->status);
543 xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
544 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
545 (xhci->xhc_state & XHCI_STATE_HALTED)) {
546 xhci_dbg(xhci, "HW died, polling stopped.\n");
547 spin_unlock_irqrestore(&xhci->lock, flags);
548 return;
549 }
550
551 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
552 xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
553 xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
554 xhci->error_bitmask = 0;
555 xhci_dbg(xhci, "Event ring:\n");
556 xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
557 xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
558 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
559 temp_64 &= ~ERST_PTR_MASK;
560 xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
561 xhci_dbg(xhci, "Command ring:\n");
562 xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
563 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
564 xhci_dbg_cmd_ptrs(xhci);
565 for (i = 0; i < MAX_HC_SLOTS; ++i) {
566 if (!xhci->devs[i])
567 continue;
568 for (j = 0; j < 31; ++j) {
569 xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
570 }
571 }
572 spin_unlock_irqrestore(&xhci->lock, flags);
573
574 if (!xhci->zombie)
575 mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
576 else
577 xhci_dbg(xhci, "Quit polling the event ring.\n");
578}
579#endif
580
581static int xhci_run_finished(struct xhci_hcd *xhci)
582{
583 if (xhci_start(xhci)) {
584 xhci_halt(xhci);
585 return -ENODEV;
586 }
587 xhci->shared_hcd->state = HC_STATE_RUNNING;
588 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
589
590 if (xhci->quirks & XHCI_NEC_HOST)
591 xhci_ring_cmd_db(xhci);
592
593 xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
594 return 0;
595}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
609int xhci_run(struct usb_hcd *hcd)
610{
611 u32 temp;
612 u64 temp_64;
613 int ret;
614 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
615
616
617
618
619
620 hcd->uses_new_polling = 1;
621 if (!usb_hcd_is_primary_hcd(hcd))
622 return xhci_run_finished(xhci);
623
624 xhci_dbg(xhci, "xhci_run\n");
625
626 ret = xhci_try_enable_msi(hcd);
627 if (ret)
628 return ret;
629
630#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
631 init_timer(&xhci->event_ring_timer);
632 xhci->event_ring_timer.data = (unsigned long) xhci;
633 xhci->event_ring_timer.function = xhci_event_ring_work;
634
635 xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
636 xhci->zombie = 0;
637 xhci_dbg(xhci, "Setting event ring polling timer\n");
638 add_timer(&xhci->event_ring_timer);
639#endif
640
641 xhci_dbg(xhci, "Command ring memory map follows:\n");
642 xhci_debug_ring(xhci, xhci->cmd_ring);
643 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
644 xhci_dbg_cmd_ptrs(xhci);
645
646 xhci_dbg(xhci, "ERST memory map follows:\n");
647 xhci_dbg_erst(xhci, &xhci->erst);
648 xhci_dbg(xhci, "Event ring:\n");
649 xhci_debug_ring(xhci, xhci->event_ring);
650 xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
651 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
652 temp_64 &= ~ERST_PTR_MASK;
653 xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
654
655 xhci_dbg(xhci, "// Set the interrupt modulation register\n");
656 temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
657 temp &= ~ER_IRQ_INTERVAL_MASK;
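	/* The interval is programmed in 250ns units: 160 * 250ns = 40us. */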
658 temp |= (u32) 160;
659 xhci_writel(xhci, temp, &xhci->ir_set->irq_control);
660
661
662 temp = xhci_readl(xhci, &xhci->op_regs->command);
663 temp |= (CMD_EIE);
664 xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
665 temp);
666 xhci_writel(xhci, temp, &xhci->op_regs->command);
667
668 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
669 xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
670 xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
671 xhci_writel(xhci, ER_IRQ_ENABLE(temp),
672 &xhci->ir_set->irq_pending);
673 xhci_print_ir_set(xhci, 0);
674
675 if (xhci->quirks & XHCI_NEC_HOST)
676 xhci_queue_vendor_command(xhci, 0, 0, 0,
677 TRB_TYPE(TRB_NEC_GET_FW));
678
679 xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
680 return 0;
681}
682
683static void xhci_only_stop_hcd(struct usb_hcd *hcd)
684{
685 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
686
687 spin_lock_irq(&xhci->lock);
688 xhci_halt(xhci);
689
690
691
692
693
694 xhci->shared_hcd = NULL;
695 spin_unlock_irq(&xhci->lock);
696}

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
707void xhci_stop(struct usb_hcd *hcd)
708{
709 u32 temp;
710 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
711
712 if (!usb_hcd_is_primary_hcd(hcd)) {
713 xhci_only_stop_hcd(xhci->shared_hcd);
714 return;
715 }
716
717 spin_lock_irq(&xhci->lock);
718
719
720
721 xhci_halt(xhci);
722 xhci_reset(xhci);
723 spin_unlock_irq(&xhci->lock);
724
725 xhci_cleanup_msix(xhci);
726
727#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
728
729 xhci->zombie = 1;
730 del_timer_sync(&xhci->event_ring_timer);
731#endif
732
733
734 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
735 (!(xhci_all_ports_seen_u0(xhci))))
736 del_timer_sync(&xhci->comp_mode_recovery_timer);
737
738 if (xhci->quirks & XHCI_AMD_PLL_FIX)
739 usb_amd_dev_put();
740
741 xhci_dbg(xhci, "// Disabling event ring interrupts\n");
742 temp = xhci_readl(xhci, &xhci->op_regs->status);
743 xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
744 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
745 xhci_writel(xhci, ER_IRQ_DISABLE(temp),
746 &xhci->ir_set->irq_pending);
747 xhci_print_ir_set(xhci, 0);
748
749 xhci_dbg(xhci, "cleaning up memory\n");
750 xhci_mem_cleanup(xhci);
751 xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
752 xhci_readl(xhci, &xhci->op_regs->status));
753}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
764void xhci_shutdown(struct usb_hcd *hcd)
765{
766 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
767
768 if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
769 usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));
770
771 spin_lock_irq(&xhci->lock);
772 xhci_halt(xhci);
773 spin_unlock_irq(&xhci->lock);
774
775 xhci_cleanup_msix(xhci);
776
777 xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
778 xhci_readl(xhci, &xhci->op_regs->status));
779}
780
781#ifdef CONFIG_PM
782static void xhci_save_registers(struct xhci_hcd *xhci)
783{
784 xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
785 xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
786 xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
787 xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
788 xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
789 xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
790 xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
791 xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
792 xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
793}
794
795static void xhci_restore_registers(struct xhci_hcd *xhci)
796{
797 xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
798 xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
799 xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
800 xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
801 xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
802 xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
803 xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
804 xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
805 xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
806}
807
808static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
809{
810 u64 val_64;
811
812
813 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
814 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
815 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
816 xhci->cmd_ring->dequeue) &
817 (u64) ~CMD_RING_RSVD_BITS) |
818 xhci->cmd_ring->cycle_state;
819 xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
820 (long unsigned long) val_64);
821 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
822}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, so we can't just restore the old dequeue pointer.  Instead, we zero
 * the command ring and "re-initialize" it, putting the enqueue, dequeue, link
 * TRB and cycle state back to their power-on defaults.
 */
833static void xhci_clear_command_ring(struct xhci_hcd *xhci)
834{
835 struct xhci_ring *ring;
836 struct xhci_segment *seg;
837
838 ring = xhci->cmd_ring;
839 seg = ring->deq_seg;
840 do {
841 memset(seg->trbs, 0,
842 sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
843 seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
844 cpu_to_le32(~TRB_CYCLE);
845 seg = seg->next;
846 } while (seg != ring->deq_seg);
847
848
849 ring->deq_seg = ring->first_seg;
850 ring->dequeue = ring->first_seg->trbs;
851 ring->enq_seg = ring->deq_seg;
852 ring->enqueue = ring->dequeue;
853
854 ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
855
856
857
858
859 ring->cycle_state = 1;
860
861
862
863
864
865
866
867
868 xhci_set_cmd_ring_deq(xhci);
869}

/*
 * Stop HC (not bus-specific).
 *
 * This is called when the machine transitions into S3/S4 mode: halt the
 * controller, save its registers, and ask it to save its internal state.
 */
877int xhci_suspend(struct xhci_hcd *xhci)
878{
879 int rc = 0;
880 struct usb_hcd *hcd = xhci_to_hcd(xhci);
881 u32 command;
882
883 if (hcd->state != HC_STATE_SUSPENDED ||
884 xhci->shared_hcd->state != HC_STATE_SUSPENDED)
885 return -EINVAL;
886
887
888 xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
889 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
890 del_timer_sync(&hcd->rh_timer);
891
892 spin_lock_irq(&xhci->lock);
893 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
894 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
895
896
897
898
899 command = xhci_readl(xhci, &xhci->op_regs->command);
900 command &= ~CMD_RUN;
901 xhci_writel(xhci, command, &xhci->op_regs->command);
902 if (xhci_handshake(xhci, &xhci->op_regs->status,
903 STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC)) {
904 xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
905 spin_unlock_irq(&xhci->lock);
906 return -ETIMEDOUT;
907 }
908 xhci_clear_command_ring(xhci);
909
910
911 xhci_save_registers(xhci);
912
913
914 command = xhci_readl(xhci, &xhci->op_regs->command);
915 command |= CMD_CSS;
916 xhci_writel(xhci, command, &xhci->op_regs->command);
917 if (xhci_handshake(xhci, &xhci->op_regs->status,
918 STS_SAVE, 0, 10 * 1000)) {
919 xhci_warn(xhci, "WARN: xHC save state timeout\n");
920 spin_unlock_irq(&xhci->lock);
921 return -ETIMEDOUT;
922 }
923 spin_unlock_irq(&xhci->lock);
924
925
926
927
928
929 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
930 (!(xhci_all_ports_seen_u0(xhci)))) {
931 del_timer_sync(&xhci->comp_mode_recovery_timer);
932 xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted!\n");
933 }
934
935
936
937 xhci_msix_sync_irqs(xhci);
938
939 return rc;
940}

/*
 * Start the xHC after suspend (not bus-specific).
 *
 * This is called when the machine transitions from S3/S4 mode: restore the
 * saved registers and restart the controller, or re-initialize it from
 * scratch if the restore fails or we are resuming from hibernation.
 */
948int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
949{
950 u32 command, temp = 0;
951 struct usb_hcd *hcd = xhci_to_hcd(xhci);
952 struct usb_hcd *secondary_hcd;
953 int retval = 0;
954
955
956
957
958 if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
959 time_before(jiffies,
960 xhci->bus_state[1].next_statechange))
961 msleep(100);
962
963 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
964 set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
965
966 spin_lock_irq(&xhci->lock);
967 if (xhci->quirks & XHCI_RESET_ON_RESUME)
968 hibernated = true;
969
970 if (!hibernated) {
971
972 xhci_restore_registers(xhci);
973
974 xhci_set_cmd_ring_deq(xhci);
975
976
977 command = xhci_readl(xhci, &xhci->op_regs->command);
978 command |= CMD_CRS;
979 xhci_writel(xhci, command, &xhci->op_regs->command);
980 if (xhci_handshake(xhci, &xhci->op_regs->status,
981 STS_RESTORE, 0, 10 * 1000)) {
982 xhci_warn(xhci, "WARN: xHC restore state timeout\n");
983 spin_unlock_irq(&xhci->lock);
984 return -ETIMEDOUT;
985 }
986 temp = xhci_readl(xhci, &xhci->op_regs->status);
987 }
988
989
990 if ((temp & STS_SRE) || hibernated) {
991
992 usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
993 usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
994
995 xhci_dbg(xhci, "Stop HCD\n");
996 xhci_halt(xhci);
997 xhci_reset(xhci);
998 spin_unlock_irq(&xhci->lock);
999 xhci_cleanup_msix(xhci);
1000
1001#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
1002
1003 xhci->zombie = 1;
1004 del_timer_sync(&xhci->event_ring_timer);
1005#endif
1006
1007 xhci_dbg(xhci, "// Disabling event ring interrupts\n");
1008 temp = xhci_readl(xhci, &xhci->op_regs->status);
1009 xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
1010 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
1011 xhci_writel(xhci, ER_IRQ_DISABLE(temp),
1012 &xhci->ir_set->irq_pending);
1013 xhci_print_ir_set(xhci, 0);
1014
1015 xhci_dbg(xhci, "cleaning up memory\n");
1016 xhci_mem_cleanup(xhci);
1017 xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
1018 xhci_readl(xhci, &xhci->op_regs->status));
1019
1020
1021
1022
1023
1024 if (!usb_hcd_is_primary_hcd(hcd))
1025 secondary_hcd = hcd;
1026 else
1027 secondary_hcd = xhci->shared_hcd;
1028
1029 xhci_dbg(xhci, "Initialize the xhci_hcd\n");
1030 retval = xhci_init(hcd->primary_hcd);
1031 if (retval)
1032 return retval;
1033 xhci_dbg(xhci, "Start the primary HCD\n");
1034 retval = xhci_run(hcd->primary_hcd);
1035 if (!retval) {
1036 xhci_dbg(xhci, "Start the secondary HCD\n");
1037 retval = xhci_run(secondary_hcd);
1038 }
1039 hcd->state = HC_STATE_SUSPENDED;
1040 xhci->shared_hcd->state = HC_STATE_SUSPENDED;
1041 goto done;
1042 }
1043
1044
1045 command = xhci_readl(xhci, &xhci->op_regs->command);
1046 command |= CMD_RUN;
1047 xhci_writel(xhci, command, &xhci->op_regs->command);
1048 xhci_handshake(xhci, &xhci->op_regs->status, STS_HALT,
1049 0, 250 * 1000);
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060 spin_unlock_irq(&xhci->lock);
1061
1062 done:
1063 if (retval == 0) {
1064 usb_hcd_resume_root_hub(hcd);
1065 usb_hcd_resume_root_hub(xhci->shared_hcd);
1066 }
1067
1068
1069
1070
1071
1072
1073
1074 if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
1075 compliance_mode_recovery_timer_init(xhci);
1076
1077
1078 xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1079 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1080 usb_hcd_poll_rh_status(hcd);
1081
1082 return retval;
1083}
1084#endif

/**
 * xhci_get_endpoint_index - Find the index for an endpoint given its
 * descriptor.  Use the return value to right shift 1 for the bitmask.
 *
 * Index  = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 *
 * For control endpoints, the IN/OUT direction is ignored and the index is
 * always epnum * 2.  The Device Context Index (DCI) is this index plus one.
 */
1098unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
1099{
1100 unsigned int index;
1101 if (usb_endpoint_xfer_control(desc))
1102 index = (unsigned int) (usb_endpoint_num(desc)*2);
1103 else
1104 index = (unsigned int) (usb_endpoint_num(desc)*2) +
1105 (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
1106 return index;
1107}
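/*
 * Example: endpoint 1 OUT maps to index 1 and endpoint 1 IN to index 2, so
 * the Device Context Index (DCI) is always index + 1 and the add/drop context
 * bitmask flag for the endpoint is 1 << (index + 1), as computed below.
 */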
1108
1109
1110
1111
1112
1113unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
1114{
1115 return 1 << (xhci_get_endpoint_index(desc) + 1);
1116}
1117
1118
1119
1120
1121
1122unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
1123{
1124 return 1 << (ep_index + 1);
1125}
1126
1127
1128
1129
1130
1131
1132
1133unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
1134{
1135 return fls(added_ctxs) - 1;
1136}

/* Returns 1 if the arguments are OK, 0 if the call is for a root hub, and a
 * negative error code if the host is halted or the arguments are invalid.
 */
1141static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
1142 struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
1143 const char *func) {
1144 struct xhci_hcd *xhci;
1145 struct xhci_virt_device *virt_dev;
1146
1147 if (!hcd || (check_ep && !ep) || !udev) {
1148 printk(KERN_DEBUG "xHCI %s called with invalid args\n",
1149 func);
1150 return -EINVAL;
1151 }
1152 if (!udev->parent) {
1153 printk(KERN_DEBUG "xHCI %s called for root hub\n",
1154 func);
1155 return 0;
1156 }
1157
1158 xhci = hcd_to_xhci(hcd);
1159 if (xhci->xhc_state & XHCI_STATE_HALTED)
1160 return -ENODEV;
1161
1162 if (check_virt_dev) {
1163 if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
1164 printk(KERN_DEBUG "xHCI %s called with unaddressed "
1165 "device\n", func);
1166 return -EINVAL;
1167 }
1168
1169 virt_dev = xhci->devs[udev->slot_id];
1170 if (virt_dev->udev != udev) {
1171 printk(KERN_DEBUG "xHCI %s called with udev and "
1172 "virt_dev does not match\n", func);
1173 return -EINVAL;
1174 }
1175 }
1176
1177 return 1;
1178}
1179
1180static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1181 struct usb_device *udev, struct xhci_command *command,
1182 bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
1190static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
1191 unsigned int ep_index, struct urb *urb)
1192{
1193 struct xhci_container_ctx *in_ctx;
1194 struct xhci_container_ctx *out_ctx;
1195 struct xhci_input_control_ctx *ctrl_ctx;
1196 struct xhci_ep_ctx *ep_ctx;
1197 int max_packet_size;
1198 int hw_max_packet_size;
1199 int ret = 0;
1200
1201 out_ctx = xhci->devs[slot_id]->out_ctx;
1202 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1203 hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
1204 max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
1205 if (hw_max_packet_size != max_packet_size) {
1206 xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
1207 xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
1208 max_packet_size);
1209 xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
1210 hw_max_packet_size);
1211 xhci_dbg(xhci, "Issuing evaluate context command.\n");
1212
1213
1214 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
1215 xhci->devs[slot_id]->out_ctx, ep_index);
1216 in_ctx = xhci->devs[slot_id]->in_ctx;
1217 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
1218 ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
1219 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
1220
1221
1222
1223
1224
1225 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1226 ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
1227 ctrl_ctx->drop_flags = 0;
1228
1229 xhci_dbg(xhci, "Slot %d input context\n", slot_id);
1230 xhci_dbg_ctx(xhci, in_ctx, ep_index);
1231 xhci_dbg(xhci, "Slot %d output context\n", slot_id);
1232 xhci_dbg_ctx(xhci, out_ctx, ep_index);
1233
1234 ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
1235 true, false);
1236
1237
1238
1239
1240 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
1241 }
1242 return ret;
1243}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
1249int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1250{
1251 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1252 struct xhci_td *buffer;
1253 unsigned long flags;
1254 int ret = 0;
1255 unsigned int slot_id, ep_index;
1256 struct urb_priv *urb_priv;
1257 int size, i;
1258
1259 if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
1260 true, true, __func__) <= 0)
1261 return -EINVAL;
1262
1263 slot_id = urb->dev->slot_id;
1264 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1265
1266 if (!HCD_HW_ACCESSIBLE(hcd)) {
1267 if (!in_interrupt())
1268 xhci_dbg(xhci, "urb submitted during PCI suspend\n");
1269 ret = -ESHUTDOWN;
1270 goto exit;
1271 }
1272
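	/*
	 * Isochronous URBs need one TD per packet; everything else fits in a
	 * single TD.
	 */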
1273 if (usb_endpoint_xfer_isoc(&urb->ep->desc))
1274 size = urb->number_of_packets;
1275 else
1276 size = 1;
1277
1278 urb_priv = kzalloc(sizeof(struct urb_priv) +
1279 size * sizeof(struct xhci_td *), mem_flags);
1280 if (!urb_priv)
1281 return -ENOMEM;
1282
1283 buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
1284 if (!buffer) {
1285 kfree(urb_priv);
1286 return -ENOMEM;
1287 }
1288
1289 for (i = 0; i < size; i++) {
1290 urb_priv->td[i] = buffer;
1291 buffer++;
1292 }
1293
1294 urb_priv->length = size;
1295 urb_priv->td_cnt = 0;
1296 urb->hcpriv = urb_priv;
1297
1298 if (usb_endpoint_xfer_control(&urb->ep->desc)) {
1299
1300
1301
1302 if (urb->dev->speed == USB_SPEED_FULL) {
1303 ret = xhci_check_maxpacket(xhci, slot_id,
1304 ep_index, urb);
1305 if (ret < 0) {
1306 xhci_urb_free_priv(xhci, urb_priv);
1307 urb->hcpriv = NULL;
1308 return ret;
1309 }
1310 }
1311
1312
1313
1314
1315 spin_lock_irqsave(&xhci->lock, flags);
1316 if (xhci->xhc_state & XHCI_STATE_DYING)
1317 goto dying;
1318 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
1319 slot_id, ep_index);
1320 if (ret)
1321 goto free_priv;
1322 spin_unlock_irqrestore(&xhci->lock, flags);
1323 } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
1324 spin_lock_irqsave(&xhci->lock, flags);
1325 if (xhci->xhc_state & XHCI_STATE_DYING)
1326 goto dying;
1327 if (xhci->devs[slot_id]->eps[ep_index].ep_state &
1328 EP_GETTING_STREAMS) {
1329 xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
1330 "is transitioning to using streams.\n");
1331 ret = -EINVAL;
1332 } else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
1333 EP_GETTING_NO_STREAMS) {
1334 xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
1335 "is transitioning to "
1336 "not having streams.\n");
1337 ret = -EINVAL;
1338 } else {
1339 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
1340 slot_id, ep_index);
1341 }
1342 if (ret)
1343 goto free_priv;
1344 spin_unlock_irqrestore(&xhci->lock, flags);
1345 } else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
1346 spin_lock_irqsave(&xhci->lock, flags);
1347 if (xhci->xhc_state & XHCI_STATE_DYING)
1348 goto dying;
1349 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
1350 slot_id, ep_index);
1351 if (ret)
1352 goto free_priv;
1353 spin_unlock_irqrestore(&xhci->lock, flags);
1354 } else {
1355 spin_lock_irqsave(&xhci->lock, flags);
1356 if (xhci->xhc_state & XHCI_STATE_DYING)
1357 goto dying;
1358 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
1359 slot_id, ep_index);
1360 if (ret)
1361 goto free_priv;
1362 spin_unlock_irqrestore(&xhci->lock, flags);
1363 }
1364exit:
1365 return ret;
1366dying:
1367 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
1368 "non-responsive xHCI host.\n",
1369 urb->ep->desc.bEndpointAddress, urb);
1370 ret = -ESHUTDOWN;
1371free_priv:
1372 xhci_urb_free_priv(xhci, urb_priv);
1373 urb->hcpriv = NULL;
1374 spin_unlock_irqrestore(&xhci->lock, flags);
1375 return ret;
1376}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
1382static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
1383 struct urb *urb)
1384{
1385 unsigned int slot_id;
1386 unsigned int ep_index;
1387 unsigned int stream_id;
1388 struct xhci_virt_ep *ep;
1389
1390 slot_id = urb->dev->slot_id;
1391 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1392 stream_id = urb->stream_id;
1393 ep = &xhci->devs[slot_id]->eps[ep_index];
1394
1395 if (!(ep->ep_state & EP_HAS_STREAMS))
1396 return ep->ring;
1397
1398 if (stream_id == 0) {
1399 xhci_warn(xhci,
1400 "WARN: Slot ID %u, ep index %u has streams, "
1401 "but URB has no stream ID.\n",
1402 slot_id, ep_index);
1403 return NULL;
1404 }
1405
1406 if (stream_id < ep->stream_info->num_streams)
1407 return ep->stream_info->stream_rings[stream_id];
1408
1409 xhci_warn(xhci,
1410 "WARN: Slot ID %u, ep index %u has "
1411 "stream IDs 1 to %u allocated, "
1412 "but stream ID %u is requested.\n",
1413 slot_id, ep_index,
1414 ep->stream_info->num_streams - 1,
1415 stream_id);
1416 return NULL;
1417}

/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer command is issued.
 *
 * The TRBs that make up the buffers for the canceled URB can't be physically
 * removed from the ring, so the TDs are put on the endpoint's cancelled list
 * and a Stop Endpoint command is queued.  The real cancellation work (moving
 * the dequeue pointer past the cancelled TRBs and giving the URBs back) is
 * done when the Stop Endpoint command completes, in the command completion
 * handling in xhci-ring.c.
 */
1450int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1451{
1452 unsigned long flags;
1453 int ret, i;
1454 u32 temp;
1455 struct xhci_hcd *xhci;
1456 struct urb_priv *urb_priv;
1457 struct xhci_td *td;
1458 unsigned int ep_index;
1459 struct xhci_ring *ep_ring;
1460 struct xhci_virt_ep *ep;
1461
1462 xhci = hcd_to_xhci(hcd);
1463 spin_lock_irqsave(&xhci->lock, flags);
1464
1465 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1466 if (ret || !urb->hcpriv)
1467 goto done;
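	/*
	 * If the controller died (register reads return all ones) or is
	 * halted, there is nothing to cancel in hardware; just unlink the
	 * TDs and give the URB back immediately.
	 */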
1468 temp = xhci_readl(xhci, &xhci->op_regs->status);
1469 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
1470 xhci_dbg(xhci, "HW died, freeing TD.\n");
1471 urb_priv = urb->hcpriv;
1472 for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
1473 td = urb_priv->td[i];
1474 if (!list_empty(&td->td_list))
1475 list_del_init(&td->td_list);
1476 if (!list_empty(&td->cancelled_td_list))
1477 list_del_init(&td->cancelled_td_list);
1478 }
1479
1480 usb_hcd_unlink_urb_from_ep(hcd, urb);
1481 spin_unlock_irqrestore(&xhci->lock, flags);
1482 usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1483 xhci_urb_free_priv(xhci, urb_priv);
1484 return ret;
1485 }
1486 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
1487 (xhci->xhc_state & XHCI_STATE_HALTED)) {
1488 xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
1489 "non-responsive xHCI host.\n",
1490 urb->ep->desc.bEndpointAddress, urb);
1491
1492
1493
1494
1495
1496 goto done;
1497 }
1498
1499 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1500 ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
1501 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1502 if (!ep_ring) {
1503 ret = -EINVAL;
1504 goto done;
1505 }
1506
1507 urb_priv = urb->hcpriv;
1508 i = urb_priv->td_cnt;
1509 if (i < urb_priv->length)
1510 xhci_dbg(xhci, "Cancel URB %p, dev %s, ep 0x%x, "
1511 "starting at offset 0x%llx\n",
1512 urb, urb->dev->devpath,
1513 urb->ep->desc.bEndpointAddress,
1514 (unsigned long long) xhci_trb_virt_to_dma(
1515 urb_priv->td[i]->start_seg,
1516 urb_priv->td[i]->first_trb));
1517
1518 for (; i < urb_priv->length; i++) {
1519 td = urb_priv->td[i];
1520 list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
1521 }
1522
1523
1524
1525
1526 if (!(ep->ep_state & EP_HALT_PENDING)) {
1527 ep->ep_state |= EP_HALT_PENDING;
1528 ep->stop_cmds_pending++;
1529 ep->stop_cmd_timer.expires = jiffies +
1530 XHCI_STOP_EP_CMD_TIMEOUT * HZ;
1531 add_timer(&ep->stop_cmd_timer);
1532 xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
1533 xhci_ring_cmd_db(xhci);
1534 }
1535done:
1536 spin_unlock_irqrestore(&xhci->lock, flags);
1537 return ret;
1538}

/*
 * Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint()
 * will add the endpoint to the schedule with possibly new parameters denoted
 * by a different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 */
1553int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1554 struct usb_host_endpoint *ep)
1555{
1556 struct xhci_hcd *xhci;
1557 struct xhci_container_ctx *in_ctx, *out_ctx;
1558 struct xhci_input_control_ctx *ctrl_ctx;
1559 struct xhci_slot_ctx *slot_ctx;
1560 unsigned int last_ctx;
1561 unsigned int ep_index;
1562 struct xhci_ep_ctx *ep_ctx;
1563 u32 drop_flag;
1564 u32 new_add_flags, new_drop_flags, new_slot_info;
1565 int ret;
1566
1567 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1568 if (ret <= 0)
1569 return ret;
1570 xhci = hcd_to_xhci(hcd);
1571 if (xhci->xhc_state & XHCI_STATE_DYING)
1572 return -ENODEV;
1573
1574 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1575 drop_flag = xhci_get_endpoint_flag(&ep->desc);
1576 if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
1577 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1578 __func__, drop_flag);
1579 return 0;
1580 }
1581
1582 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1583 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1584 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1585 ep_index = xhci_get_endpoint_index(&ep->desc);
1586 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1587
1588
1589
1590 if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
1591 cpu_to_le32(EP_STATE_DISABLED)) ||
1592 le32_to_cpu(ctrl_ctx->drop_flags) &
1593 xhci_get_endpoint_flag(&ep->desc)) {
1594 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1595 __func__, ep);
1596 return 0;
1597 }
1598
1599 ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
1600 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1601
1602 ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
1603 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1604
1605 last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
1606 slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1607
1608 if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
1609 LAST_CTX(last_ctx)) {
1610 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1611 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
1612 }
1613 new_slot_info = le32_to_cpu(slot_ctx->dev_info);
1614
1615 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1616
1617 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
1618 (unsigned int) ep->desc.bEndpointAddress,
1619 udev->slot_id,
1620 (unsigned int) new_drop_flags,
1621 (unsigned int) new_add_flags,
1622 (unsigned int) new_slot_info);
1623 return 0;
1624}

/*
 * Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint()
 * will add the endpoint to the schedule with possibly new parameters denoted
 * by a different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 */
1639int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1640 struct usb_host_endpoint *ep)
1641{
1642 struct xhci_hcd *xhci;
1643 struct xhci_container_ctx *in_ctx, *out_ctx;
1644 unsigned int ep_index;
1645 struct xhci_slot_ctx *slot_ctx;
1646 struct xhci_input_control_ctx *ctrl_ctx;
1647 u32 added_ctxs;
1648 unsigned int last_ctx;
1649 u32 new_add_flags, new_drop_flags, new_slot_info;
1650 struct xhci_virt_device *virt_dev;
1651 int ret = 0;
1652
1653 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1654 if (ret <= 0) {
1655
1656 ep->hcpriv = NULL;
1657 return ret;
1658 }
1659 xhci = hcd_to_xhci(hcd);
1660 if (xhci->xhc_state & XHCI_STATE_DYING)
1661 return -ENODEV;
1662
1663 added_ctxs = xhci_get_endpoint_flag(&ep->desc);
1664 last_ctx = xhci_last_valid_endpoint(added_ctxs);
1665 if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
1666
1667
1668
1669
1670 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1671 __func__, added_ctxs);
1672 return 0;
1673 }
1674
1675 virt_dev = xhci->devs[udev->slot_id];
1676 in_ctx = virt_dev->in_ctx;
1677 out_ctx = virt_dev->out_ctx;
1678 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1679 ep_index = xhci_get_endpoint_index(&ep->desc);
1680
1681
1682
1683
1684 if (virt_dev->eps[ep_index].ring &&
1685 !(le32_to_cpu(ctrl_ctx->drop_flags) &
1686 xhci_get_endpoint_flag(&ep->desc))) {
1687 xhci_warn(xhci, "Trying to add endpoint 0x%x "
1688 "without dropping it.\n",
1689 (unsigned int) ep->desc.bEndpointAddress);
1690 return -EINVAL;
1691 }
1692
1693
1694
1695
1696 if (le32_to_cpu(ctrl_ctx->add_flags) &
1697 xhci_get_endpoint_flag(&ep->desc)) {
1698 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1699 __func__, ep);
1700 return 0;
1701 }
1702
1703
1704
1705
1706
1707
1708 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
1709 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1710 __func__, ep->desc.bEndpointAddress);
1711 return -ENOMEM;
1712 }
1713
1714 ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
1715 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1716
1717
1718
1719
1720
1721
1722
1723 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1724
1725 slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1726
1727 if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
1728 LAST_CTX(last_ctx)) {
1729 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1730 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
1731 }
1732 new_slot_info = le32_to_cpu(slot_ctx->dev_info);
1733
1734
1735 ep->hcpriv = udev;
1736
1737 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
1738 (unsigned int) ep->desc.bEndpointAddress,
1739 udev->slot_id,
1740 (unsigned int) new_drop_flags,
1741 (unsigned int) new_add_flags,
1742 (unsigned int) new_slot_info);
1743 return 0;
1744}
1745
1746static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
1747{
1748 struct xhci_input_control_ctx *ctrl_ctx;
1749 struct xhci_ep_ctx *ep_ctx;
1750 struct xhci_slot_ctx *slot_ctx;
1751 int i;
1752
1753
1754
1755
1756
1757
1758 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
1759 ctrl_ctx->drop_flags = 0;
1760 ctrl_ctx->add_flags = 0;
1761 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1762 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1763
1764 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
1765 for (i = 1; i < 31; ++i) {
1766 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
1767 ep_ctx->ep_info = 0;
1768 ep_ctx->ep_info2 = 0;
1769 ep_ctx->deq = 0;
1770 ep_ctx->tx_info = 0;
1771 }
1772}
1773
1774static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1775 struct usb_device *udev, u32 *cmd_status)
1776{
1777 int ret;
1778
1779 switch (*cmd_status) {
1780 case COMP_ENOMEM:
1781 dev_warn(&udev->dev, "Not enough host controller resources "
1782 "for new device state.\n");
1783 ret = -ENOMEM;
1784
1785 break;
1786 case COMP_BW_ERR:
1787 case COMP_2ND_BW_ERR:
1788 dev_warn(&udev->dev, "Not enough bandwidth "
1789 "for new device state.\n");
1790 ret = -ENOSPC;
1791
1792 break;
1793 case COMP_TRB_ERR:
1794
1795 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
1796 "add flag = 1, "
1797 "and endpoint is not disabled.\n");
1798 ret = -EINVAL;
1799 break;
1800 case COMP_DEV_ERR:
1801 dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
1802 "configure command.\n");
1803 ret = -ENODEV;
1804 break;
1805 case COMP_SUCCESS:
1806 dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
1807 ret = 0;
1808 break;
1809 default:
1810 xhci_err(xhci, "ERROR: unexpected command completion "
1811 "code 0x%x.\n", *cmd_status);
1812 ret = -EINVAL;
1813 break;
1814 }
1815 return ret;
1816}
1817
1818static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
1819 struct usb_device *udev, u32 *cmd_status)
1820{
1821 int ret;
1822 struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
1823
1824 switch (*cmd_status) {
1825 case COMP_EINVAL:
1826 dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
1827 "context command.\n");
1828 ret = -EINVAL;
1829 break;
1830 case COMP_EBADSLT:
		dev_warn(&udev->dev, "WARN: slot not enabled for "
				"evaluate context command.\n");
1833 ret = -EINVAL;
1834 break;
1835 case COMP_CTX_STATE:
1836 dev_warn(&udev->dev, "WARN: invalid context state for "
1837 "evaluate context command.\n");
1838 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
1839 ret = -EINVAL;
1840 break;
1841 case COMP_DEV_ERR:
1842 dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
1843 "context command.\n");
1844 ret = -ENODEV;
1845 break;
1846 case COMP_MEL_ERR:
1847
1848 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
1849 ret = -EINVAL;
1850 break;
1851 case COMP_SUCCESS:
1852 dev_dbg(&udev->dev, "Successful evaluate context command\n");
1853 ret = 0;
1854 break;
1855 default:
1856 xhci_err(xhci, "ERROR: unexpected command completion "
1857 "code 0x%x.\n", *cmd_status);
1858 ret = -EINVAL;
1859 break;
1860 }
1861 return ret;
1862}
1863
1864static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
1865 struct xhci_container_ctx *in_ctx)
1866{
1867 struct xhci_input_control_ctx *ctrl_ctx;
1868 u32 valid_add_flags;
1869 u32 valid_drop_flags;
1870
1871 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	/* Ignore the slot flag (bit 0) and the default control endpoint flag
	 * (bit 1).  The default control endpoint is added during the Address
	 * Device command and is never removed until the slot is disabled.
	 */
1876 valid_add_flags = ctrl_ctx->add_flags >> 2;
1877 valid_drop_flags = ctrl_ctx->drop_flags >> 2;
1878
1879
1880
1881
1882
1883 return hweight32(valid_add_flags) -
1884 hweight32(valid_add_flags & valid_drop_flags);
1885}
1886
1887static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
1888 struct xhci_container_ctx *in_ctx)
1889{
1890 struct xhci_input_control_ctx *ctrl_ctx;
1891 u32 valid_add_flags;
1892 u32 valid_drop_flags;
1893
1894 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1895 valid_add_flags = ctrl_ctx->add_flags >> 2;
1896 valid_drop_flags = ctrl_ctx->drop_flags >> 2;
1897
1898 return hweight32(valid_drop_flags) -
1899 hweight32(valid_add_flags & valid_drop_flags);
1900}

/*
 * We need to reserve the new number of endpoints before the configure endpoint
 * command completes.  We can't subtract the dropped endpoints from the number
 * of active endpoints until the command completes, because we can oversubscribe
 * the host in this case:
 *
 *  - the first configure endpoint command drops more endpoints than it adds
 *  - a second configure endpoint command that adds more endpoints is queued
 *  - the first configure endpoint command fails, so the config is unchanged
 *  - the second command may succeed, even though there isn't enough resources
 *
 * Must be called with xhci->lock held.
 */
1915static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
1916 struct xhci_container_ctx *in_ctx)
1917{
1918 u32 added_eps;
1919
1920 added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
1921 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
1922 xhci_dbg(xhci, "Not enough ep ctxs: "
1923 "%u active, need to add %u, limit is %u.\n",
1924 xhci->num_active_eps, added_eps,
1925 xhci->limit_active_eps);
1926 return -ENOMEM;
1927 }
1928 xhci->num_active_eps += added_eps;
1929 xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
1930 xhci->num_active_eps);
1931 return 0;
1932}
1933
1934
1935
1936
1937
1938
1939
1940static void xhci_free_host_resources(struct xhci_hcd *xhci,
1941 struct xhci_container_ctx *in_ctx)
1942{
1943 u32 num_failed_eps;
1944
1945 num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
1946 xhci->num_active_eps -= num_failed_eps;
1947 xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
1948 num_failed_eps,
1949 xhci->num_active_eps);
1950}
1951
1952
1953
1954
1955
1956
1957
1958static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
1959 struct xhci_container_ctx *in_ctx)
1960{
1961 u32 num_dropped_eps;
1962
1963 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
1964 xhci->num_active_eps -= num_dropped_eps;
1965 if (num_dropped_eps)
1966 xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
1967 num_dropped_eps,
1968 xhci->num_active_eps);
1969}
1970
1971static unsigned int xhci_get_block_size(struct usb_device *udev)
1972{
1973 switch (udev->speed) {
1974 case USB_SPEED_LOW:
1975 case USB_SPEED_FULL:
1976 return FS_BLOCK;
1977 case USB_SPEED_HIGH:
1978 return HS_BLOCK;
1979 case USB_SPEED_SUPER:
1980 return SS_BLOCK;
1981 case USB_SPEED_UNKNOWN:
1982 case USB_SPEED_WIRELESS:
1983 default:
1984
1985 return 1;
1986 }
1987}
1988
1989static unsigned int
1990xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
1991{
1992 if (interval_bw->overhead[LS_OVERHEAD_TYPE])
1993 return LS_OVERHEAD;
1994 if (interval_bw->overhead[FS_OVERHEAD_TYPE])
1995 return FS_OVERHEAD;
1996 return HS_OVERHEAD;
1997}
1998
1999
2000
2001
2002
2003static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2004 struct xhci_virt_device *virt_dev,
2005 int old_active_eps)
2006{
2007 struct xhci_interval_bw_table *bw_table;
2008 struct xhci_tt_bw_info *tt_info;
2009
2010
2011 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
2012 tt_info = virt_dev->tt_info;
2013
2014
2015
2016
2017 if (old_active_eps)
2018 return 0;
2019 if (old_active_eps == 0 && tt_info->active_eps != 0) {
2020 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
2021 return -ENOMEM;
2022 return 0;
2023 }
2024
2025
2026
2027
2028
2029
2030 return 0;
2031}
2032
2033static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2034 struct xhci_virt_device *virt_dev)
2035{
2036 unsigned int bw_reserved;
2037
2038 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
2039 if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
2040 return -ENOMEM;
2041
2042 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
2043 if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
2044 return -ENOMEM;
2045
2046 return 0;
2047}

/*
 * This is a very conservative estimate of the worst-case bandwidth used by
 * the periodic endpoints on one root port or TT.  The hardware schedules
 * packets dynamically, so we can't know which microframe will be the limiting
 * factor; instead we compute an estimate that is never less than the worst
 * case, but may over-estimate.
 *
 * Starting with the shortest interval, packets that could not be fit into an
 * earlier interval are carried forward (doubled, since the next interval has
 * twice as many microframes) and added to the packets newly scheduled in this
 * interval.  Each packet assumed to go out in this interval is charged with
 * the largest max packet size and the largest per-packet overhead seen so
 * far.  If the running total ever exceeds the bus bandwidth limit minus the
 * percentage reserved for non-periodic transfers, the proposed configuration
 * is rejected with -ENOMEM; otherwise the new total is stored in the
 * bandwidth table.
 */
2090static int xhci_check_bw_table(struct xhci_hcd *xhci,
2091 struct xhci_virt_device *virt_dev,
2092 int old_active_eps)
2093{
2094 unsigned int bw_reserved;
2095 unsigned int max_bandwidth;
2096 unsigned int bw_used;
2097 unsigned int block_size;
2098 struct xhci_interval_bw_table *bw_table;
2099 unsigned int packet_size = 0;
2100 unsigned int overhead = 0;
2101 unsigned int packets_transmitted = 0;
2102 unsigned int packets_remaining = 0;
2103 unsigned int i;
2104
2105 if (virt_dev->udev->speed == USB_SPEED_SUPER)
2106 return xhci_check_ss_bw(xhci, virt_dev);
2107
2108 if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2109 max_bandwidth = HS_BW_LIMIT;
2110
2111 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
2112 } else {
2113 max_bandwidth = FS_BW_LIMIT;
2114 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2115 }
2116
2117 bw_table = virt_dev->bw_table;
2118
2119
2120
2121 block_size = xhci_get_block_size(virt_dev->udev);
2122
2123
2124
2125
2126 if (virt_dev->tt_info) {
2127 xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
2128 virt_dev->real_port);
2129 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2130 xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2131 "newly activated TT.\n");
2132 return -ENOMEM;
2133 }
2134 xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n",
2135 virt_dev->tt_info->slot_id,
2136 virt_dev->tt_info->ttport);
2137 } else {
2138 xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
2139 virt_dev->real_port);
2140 }
2141
2142
2143
2144
2145 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2146 bw_table->interval_bw[0].num_packets *
2147 xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2148
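	/*
	 * Walk the remaining intervals from shortest to longest.  Packets that
	 * could not be scheduled in a shorter interval roll over (and double,
	 * since the next interval is twice as long) and are charged with the
	 * largest max packet size and overhead seen so far.
	 */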
2149 for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2150 unsigned int bw_added;
2151 unsigned int largest_mps;
2152 unsigned int interval_overhead;
2153
2154
2155
2156
2157
2158
2159 packets_remaining = 2 * packets_remaining +
2160 bw_table->interval_bw[i].num_packets;
2161
2162
2163
2164
2165 if (list_empty(&bw_table->interval_bw[i].endpoints))
2166 largest_mps = 0;
2167 else {
2168 struct xhci_virt_ep *virt_ep;
2169 struct list_head *ep_entry;
2170
2171 ep_entry = bw_table->interval_bw[i].endpoints.next;
2172 virt_ep = list_entry(ep_entry,
2173 struct xhci_virt_ep, bw_endpoint_list);
2174
2175 largest_mps = DIV_ROUND_UP(
2176 virt_ep->bw_info.max_packet_size,
2177 block_size);
2178 }
2179 if (largest_mps > packet_size)
2180 packet_size = largest_mps;
2181
2182
2183 interval_overhead = xhci_get_largest_overhead(
2184 &bw_table->interval_bw[i]);
2185 if (interval_overhead > overhead)
2186 overhead = interval_overhead;
2187
2188
2189
2190
2191 packets_transmitted = packets_remaining >> (i + 1);
2192
2193
2194 bw_added = packets_transmitted * (overhead + packet_size);
2195
2196
2197 packets_remaining = packets_remaining % (1 << (i + 1));
2198
2199
2200
2201
2202
2203 if (packets_remaining == 0) {
2204 packet_size = 0;
2205 overhead = 0;
2206 } else if (packets_transmitted > 0) {
2207
2208
2209
2210
2211
2212 packet_size = largest_mps;
2213 overhead = interval_overhead;
2214 }
2215
2216
2217
2218 bw_used += bw_added;
2219 if (bw_used > max_bandwidth) {
2220 xhci_warn(xhci, "Not enough bandwidth. "
2221 "Proposed: %u, Max: %u\n",
2222 bw_used, max_bandwidth);
2223 return -ENOMEM;
2224 }
2225 }
2226
2227
2228
2229
2230
2231
2232 if (packets_remaining > 0)
2233 bw_used += overhead + packet_size;
2234
2235 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2236 unsigned int port_index = virt_dev->real_port - 1;
2237
2238
2239
2240
2241
2242 bw_used += TT_HS_OVERHEAD *
2243 xhci->rh_bw[port_index].num_active_tts;
2244 }
2245
2246 xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, "
2247 "Available: %u " "percent\n",
2248 bw_used, max_bandwidth, bw_reserved,
2249 (max_bandwidth - bw_used - bw_reserved) * 100 /
2250 max_bandwidth);
2251
2252 bw_used += bw_reserved;
2253 if (bw_used > max_bandwidth) {
2254 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2255 bw_used, max_bandwidth);
2256 return -ENOMEM;
2257 }
2258
2259 bw_table->bw_used = bw_used;
2260 return 0;
2261}
2262
2263static bool xhci_is_async_ep(unsigned int ep_type)
2264{
2265 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2266 ep_type != ISOC_IN_EP &&
2267 ep_type != INT_IN_EP);
2268}
2269
2270static bool xhci_is_sync_in_ep(unsigned int ep_type)
2271{
2272 return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2273}
2274
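/*
 * Estimate the SuperSpeed bus bandwidth an endpoint consumes: per-packet
 * overhead plus the max packet size (rounded up to bandwidth blocks) for
 * every packet sent per service interval, plus burst overhead, averaged
 * over the length of the interval.
 */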
2275static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2276{
2277 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2278
2279 if (ep_bw->ep_interval == 0)
2280 return SS_OVERHEAD_BURST +
2281 (ep_bw->mult * ep_bw->num_packets *
2282 (SS_OVERHEAD + mps));
2283 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2284 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2285 1 << ep_bw->ep_interval);
2286
2287}
2288
2289void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2290 struct xhci_bw_info *ep_bw,
2291 struct xhci_interval_bw_table *bw_table,
2292 struct usb_device *udev,
2293 struct xhci_virt_ep *virt_ep,
2294 struct xhci_tt_bw_info *tt_info)
2295{
2296 struct xhci_interval_bw *interval_bw;
2297 int normalized_interval;
2298
2299 if (xhci_is_async_ep(ep_bw->type))
2300 return;
2301
2302 if (udev->speed == USB_SPEED_SUPER) {
2303 if (xhci_is_sync_in_ep(ep_bw->type))
2304 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2305 xhci_get_ss_bw_consumed(ep_bw);
2306 else
2307 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2308 xhci_get_ss_bw_consumed(ep_bw);
2309 return;
2310 }
2311
2312
2313
2314
2315 if (list_empty(&virt_ep->bw_endpoint_list))
2316 return;
2317
2318
2319
2320 if (udev->speed == USB_SPEED_HIGH)
2321 normalized_interval = ep_bw->ep_interval;
2322 else
2323 normalized_interval = ep_bw->ep_interval - 3;
2324
2325 if (normalized_interval == 0)
2326 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2327 interval_bw = &bw_table->interval_bw[normalized_interval];
2328 interval_bw->num_packets -= ep_bw->num_packets;
2329 switch (udev->speed) {
2330 case USB_SPEED_LOW:
2331 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2332 break;
2333 case USB_SPEED_FULL:
2334 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2335 break;
2336 case USB_SPEED_HIGH:
2337 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2338 break;
2339 case USB_SPEED_SUPER:
2340 case USB_SPEED_UNKNOWN:
2341 case USB_SPEED_WIRELESS:
2342
2343
2344
2345 return;
2346 }
2347 if (tt_info)
2348 tt_info->active_eps -= 1;
2349 list_del_init(&virt_ep->bw_endpoint_list);
2350}
2351
2352static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2353 struct xhci_bw_info *ep_bw,
2354 struct xhci_interval_bw_table *bw_table,
2355 struct usb_device *udev,
2356 struct xhci_virt_ep *virt_ep,
2357 struct xhci_tt_bw_info *tt_info)
2358{
2359 struct xhci_interval_bw *interval_bw;
2360 struct xhci_virt_ep *smaller_ep;
2361 int normalized_interval;
2362
2363 if (xhci_is_async_ep(ep_bw->type))
2364 return;
2365
2366 if (udev->speed == USB_SPEED_SUPER) {
2367 if (xhci_is_sync_in_ep(ep_bw->type))
2368 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2369 xhci_get_ss_bw_consumed(ep_bw);
2370 else
2371 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2372 xhci_get_ss_bw_consumed(ep_bw);
2373 return;
2374 }
2375
2376
2377
2378
2379 if (udev->speed == USB_SPEED_HIGH)
2380 normalized_interval = ep_bw->ep_interval;
2381 else
2382 normalized_interval = ep_bw->ep_interval - 3;
2383
2384 if (normalized_interval == 0)
2385 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2386 interval_bw = &bw_table->interval_bw[normalized_interval];
2387 interval_bw->num_packets += ep_bw->num_packets;
2388 switch (udev->speed) {
2389 case USB_SPEED_LOW:
2390 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2391 break;
2392 case USB_SPEED_FULL:
2393 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2394 break;
2395 case USB_SPEED_HIGH:
2396 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2397 break;
2398 case USB_SPEED_SUPER:
2399 case USB_SPEED_UNKNOWN:
2400 case USB_SPEED_WIRELESS:
2401
2402
2403
2404 return;
2405 }
2406
2407 if (tt_info)
2408 tt_info->active_eps += 1;
2409
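/* The per-interval endpoint list is kept sorted by descending max packet
 * size; insert this endpoint before the first smaller one.
 */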
2410 list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2411 bw_endpoint_list) {
2412 if (ep_bw->max_packet_size >=
2413 smaller_ep->bw_info.max_packet_size) {
2414
2415 list_add_tail(&virt_ep->bw_endpoint_list,
2416 &smaller_ep->bw_endpoint_list);
2417 return;
2418 }
2419 }
2420
2421 list_add_tail(&virt_ep->bw_endpoint_list,
2422 &interval_bw->endpoints);
2423}
2424
2425void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2426 struct xhci_virt_device *virt_dev,
2427 int old_active_eps)
2428{
2429 struct xhci_root_port_bw_info *rh_bw_info;
2430 if (!virt_dev->tt_info)
2431 return;
2432
2433 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2434 if (old_active_eps == 0 &&
2435 virt_dev->tt_info->active_eps != 0) {
2436 rh_bw_info->num_active_tts += 1;
2437 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2438 } else if (old_active_eps != 0 &&
2439 virt_dev->tt_info->active_eps == 0) {
2440 rh_bw_info->num_active_tts -= 1;
2441 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2442 }
2443}
2444
2445static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2446 struct xhci_virt_device *virt_dev,
2447 struct xhci_container_ctx *in_ctx)
2448{
2449 struct xhci_bw_info ep_bw_info[31];
2450 int i;
2451 struct xhci_input_control_ctx *ctrl_ctx;
2452 int old_active_eps = 0;
2453
2454 if (virt_dev->tt_info)
2455 old_active_eps = virt_dev->tt_info->active_eps;
2456
2457 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
2458
2459 for (i = 0; i < 31; i++) {
2460 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2461 continue;
2462
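/* Save a copy of the endpoint's bandwidth info so it can be restored if the
 * new configuration does not fit.
 */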
2463
2464 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2465 sizeof(ep_bw_info[i]));
2466
2467
2468
2469 if (EP_IS_DROPPED(ctrl_ctx, i))
2470 xhci_drop_ep_from_interval_table(xhci,
2471 &virt_dev->eps[i].bw_info,
2472 virt_dev->bw_table,
2473 virt_dev->udev,
2474 &virt_dev->eps[i],
2475 virt_dev->tt_info);
2476 }
2477
2478 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2479 for (i = 0; i < 31; i++) {
2480
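/* Add each newly added (or changed) endpoint back into the interval table
 * with its updated bandwidth info.
 */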
2481 if (EP_IS_ADDED(ctrl_ctx, i))
2482 xhci_add_ep_to_interval_table(xhci,
2483 &virt_dev->eps[i].bw_info,
2484 virt_dev->bw_table,
2485 virt_dev->udev,
2486 &virt_dev->eps[i],
2487 virt_dev->tt_info);
2488 }
2489
2490 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2491
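/* The new configuration fits, so commit the TT accounting and keep the
 * updated interval table.
 */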
2492
2493
2494 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2495 return 0;
2496 }
2497
2498
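/* Not enough bandwidth: undo the interval table changes and restore each
 * endpoint's saved bandwidth info.
 */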
2499 for (i = 0; i < 31; i++) {
2500 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2501 continue;
2502
2503
2504
2505
2506 if (EP_IS_ADDED(ctrl_ctx, i)) {
2507 xhci_drop_ep_from_interval_table(xhci,
2508 &virt_dev->eps[i].bw_info,
2509 virt_dev->bw_table,
2510 virt_dev->udev,
2511 &virt_dev->eps[i],
2512 virt_dev->tt_info);
2513 }
2514
2515 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2516 sizeof(ep_bw_info[i]));
2517
2518 if (EP_IS_DROPPED(ctrl_ctx, i))
2519 xhci_add_ep_to_interval_table(xhci,
2520 &virt_dev->eps[i].bw_info,
2521 virt_dev->bw_table,
2522 virt_dev->udev,
2523 &virt_dev->eps[i],
2524 virt_dev->tt_info);
2525 }
2526 return -ENOMEM;
2527}
2528
2529
2530
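/* Issue a Configure Endpoint or Evaluate Context command and wait for it to
 * complete.  Must be called from a context that may sleep.
 */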
2531
2532
2533static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2534 struct usb_device *udev,
2535 struct xhci_command *command,
2536 bool ctx_change, bool must_succeed)
2537{
2538 int ret;
2539 int timeleft;
2540 unsigned long flags;
2541 struct xhci_container_ctx *in_ctx;
2542 struct completion *cmd_completion;
2543 u32 *cmd_status;
2544 struct xhci_virt_device *virt_dev;
2545 union xhci_trb *cmd_trb;
2546
2547 spin_lock_irqsave(&xhci->lock, flags);
2548 virt_dev = xhci->devs[udev->slot_id];
2549
2550 if (command)
2551 in_ctx = command->in_ctx;
2552 else
2553 in_ctx = virt_dev->in_ctx;
2554
2555 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2556 xhci_reserve_host_resources(xhci, in_ctx)) {
2557 spin_unlock_irqrestore(&xhci->lock, flags);
2558 xhci_warn(xhci, "Not enough host resources, "
2559 "active endpoint contexts = %u\n",
2560 xhci->num_active_eps);
2561 return -ENOMEM;
2562 }
2563 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2564 xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) {
2565 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2566 xhci_free_host_resources(xhci, in_ctx);
2567 spin_unlock_irqrestore(&xhci->lock, flags);
2568 xhci_warn(xhci, "Not enough bandwidth\n");
2569 return -ENOMEM;
2570 }
2571
2572 if (command) {
2573 cmd_completion = command->completion;
2574 cmd_status = &command->status;
2575 command->command_trb = xhci->cmd_ring->enqueue;
2576
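/* The enqueue pointer may be left pointing at the link TRB; in that case the
 * command really lands on the first TRB of the next segment.
 */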
2577
2578
2579
2580 if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
2581 command->command_trb =
2582 xhci->cmd_ring->enq_seg->next->trbs;
2583
2584 list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
2585 } else {
2586 cmd_completion = &virt_dev->cmd_completion;
2587 cmd_status = &virt_dev->cmd_status;
2588 }
2589 init_completion(cmd_completion);
2590
2591 cmd_trb = xhci->cmd_ring->dequeue;
2592 if (!ctx_change)
2593 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
2594 udev->slot_id, must_succeed);
2595 else
2596 ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
2597 udev->slot_id, must_succeed);
2598 if (ret < 0) {
2599 if (command)
2600 list_del(&command->cmd_list);
2601 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2602 xhci_free_host_resources(xhci, in_ctx);
2603 spin_unlock_irqrestore(&xhci->lock, flags);
2604 xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
2605 return -ENOMEM;
2606 }
2607 xhci_ring_cmd_db(xhci);
2608 spin_unlock_irqrestore(&xhci->lock, flags);
2609
2610
2611 timeleft = wait_for_completion_interruptible_timeout(
2612 cmd_completion,
2613 XHCI_CMD_DEFAULT_TIMEOUT);
2614 if (timeleft <= 0) {
2615 xhci_warn(xhci, "%s while waiting for %s command\n",
2616 timeleft == 0 ? "Timeout" : "Signal",
2617 ctx_change == 0 ?
2618 "configure endpoint" :
2619 "evaluate context");
2620
2621 ret = xhci_cancel_cmd(xhci, command, cmd_trb);
2622 if (ret < 0)
2623 return ret;
2624 return -ETIME;
2625 }
2626
2627 if (!ctx_change)
2628 ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
2629 else
2630 ret = xhci_evaluate_context_result(xhci, udev, cmd_status);
2631
2632 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2633 spin_lock_irqsave(&xhci->lock, flags);
2634
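/* If the command failed, release the reserved endpoint resources; otherwise
 * fold the reservation into the active count.
 */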
2635
2636
2637 if (ret)
2638 xhci_free_host_resources(xhci, in_ctx);
2639 else
2640 xhci_finish_resource_reservation(xhci, in_ctx);
2641 spin_unlock_irqrestore(&xhci->lock, flags);
2642 }
2643 return ret;
2644}
2645
2646
2647
2648
2649
2650
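/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint().  Issues a Configure Endpoint command to apply the
 * accumulated changes, then frees dropped rings and installs new ones.
 */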
2651
2652
2653
2654
2655
2656int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2657{
2658 int i;
2659 int ret = 0;
2660 struct xhci_hcd *xhci;
2661 struct xhci_virt_device *virt_dev;
2662 struct xhci_input_control_ctx *ctrl_ctx;
2663 struct xhci_slot_ctx *slot_ctx;
2664
2665 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2666 if (ret <= 0)
2667 return ret;
2668 xhci = hcd_to_xhci(hcd);
2669 if (xhci->xhc_state & XHCI_STATE_DYING)
2670 return -ENODEV;
2671
2672 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2673 virt_dev = xhci->devs[udev->slot_id];
2674
2675
2676 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
2677 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2678 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2679 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
2680
2681
2682 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2683 ctrl_ctx->drop_flags == 0)
2684 return 0;
2685
2686 xhci_dbg(xhci, "New Input Control Context:\n");
2687 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2688 xhci_dbg_ctx(xhci, virt_dev->in_ctx,
2689 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2690
2691 ret = xhci_configure_endpoint(xhci, udev, NULL,
2692 false, false);
2693 if (ret) {
2694
2695 return ret;
2696 }
2697
2698 xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
2699 xhci_dbg_ctx(xhci, virt_dev->out_ctx,
2700 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2701
2702
2703 for (i = 1; i < 31; ++i) {
2704 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2705 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
2706 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2707 }
2708 xhci_zero_in_ctx(xhci, virt_dev);
2709
2710
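/* Install rings for newly added endpoints, freeing any old ring that is
 * being replaced.
 */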
2711
2712
2713 for (i = 1; i < 31; ++i) {
2714 if (!virt_dev->eps[i].new_ring)
2715 continue;
2716
2717
2718
2719 if (virt_dev->eps[i].ring) {
2720 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2721 }
2722 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2723 virt_dev->eps[i].new_ring = NULL;
2724 }
2725
2726 return ret;
2727}
2728
2729void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2730{
2731 struct xhci_hcd *xhci;
2732 struct xhci_virt_device *virt_dev;
2733 int i, ret;
2734
2735 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2736 if (ret <= 0)
2737 return;
2738 xhci = hcd_to_xhci(hcd);
2739
2740 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2741 virt_dev = xhci->devs[udev->slot_id];
2742
2743 for (i = 0; i < 31; ++i) {
2744 if (virt_dev->eps[i].new_ring) {
2745 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
2746 virt_dev->eps[i].new_ring = NULL;
2747 }
2748 }
2749 xhci_zero_in_ctx(xhci, virt_dev);
2750}
2751
2752static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
2753 struct xhci_container_ctx *in_ctx,
2754 struct xhci_container_ctx *out_ctx,
2755 u32 add_flags, u32 drop_flags)
2756{
2757 struct xhci_input_control_ctx *ctrl_ctx;
2758 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
2759 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
2760 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
2761 xhci_slot_copy(xhci, in_ctx, out_ctx);
2762 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2763
2764 xhci_dbg(xhci, "Input Context:\n");
2765 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
2766}
2767
2768static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
2769 unsigned int slot_id, unsigned int ep_index,
2770 struct xhci_dequeue_state *deq_state)
2771{
2772 struct xhci_container_ctx *in_ctx;
2773 struct xhci_ep_ctx *ep_ctx;
2774 u32 added_ctxs;
2775 dma_addr_t addr;
2776
2777 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
2778 xhci->devs[slot_id]->out_ctx, ep_index);
2779 in_ctx = xhci->devs[slot_id]->in_ctx;
2780 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
2781 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
2782 deq_state->new_deq_ptr);
2783 if (addr == 0) {
2784 xhci_warn(xhci, "WARN Cannot submit config ep after "
2785 "reset ep command\n");
2786 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
2787 deq_state->new_deq_seg,
2788 deq_state->new_deq_ptr);
2789 return;
2790 }
2791 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
2792
2793 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
2794 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
2795 xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
2796}
2797
2798void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2799 struct usb_device *udev, unsigned int ep_index)
2800{
2801 struct xhci_dequeue_state deq_state;
2802 struct xhci_virt_ep *ep;
2803
2804 xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
2805 ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2806
2807
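/* Move the hardware dequeue pointer past the stalled TD so it is not retried
 * on the next doorbell ring.
 */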
2808
2809 xhci_find_new_dequeue_state(xhci, udev->slot_id,
2810 ep_index, ep->stopped_stream, ep->stopped_td,
2811 &deq_state);
2812
2813
2814
2815
2816 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
2817 xhci_dbg(xhci, "Queueing new dequeue state\n");
2818 xhci_queue_new_dequeue_state(xhci, udev->slot_id,
2819 ep_index, ep->stopped_stream, &deq_state);
2820 } else {
2821
2822
2823
2824
2825
2826 xhci_dbg(xhci, "Setting up input context for "
2827 "configure endpoint command\n");
2828 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
2829 ep_index, &deq_state);
2830 }
2831}
2832
2833
2834
2835
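/* Deal with stalled endpoints.  The USB core clears the halt on the device;
 * here the host controller side is reset and the ring dequeue pointer is
 * moved past the stalled transfer.
 */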
2836
2837
2838
2839void xhci_endpoint_reset(struct usb_hcd *hcd,
2840 struct usb_host_endpoint *ep)
2841{
2842 struct xhci_hcd *xhci;
2843 struct usb_device *udev;
2844 unsigned int ep_index;
2845 unsigned long flags;
2846 int ret;
2847 struct xhci_virt_ep *virt_ep;
2848
2849 xhci = hcd_to_xhci(hcd);
2850 udev = (struct usb_device *) ep->hcpriv;
2851
2852
2853
2854 if (!ep->hcpriv)
2855 return;
2856 ep_index = xhci_get_endpoint_index(&ep->desc);
2857 virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2858 if (!virt_ep->stopped_td) {
2859 xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
2860 ep->desc.bEndpointAddress);
2861 return;
2862 }
2863 if (usb_endpoint_xfer_control(&ep->desc)) {
2864 xhci_dbg(xhci, "Control endpoint stall already handled.\n");
2865 return;
2866 }
2867
2868 xhci_dbg(xhci, "Queueing reset endpoint command\n");
2869 spin_lock_irqsave(&xhci->lock, flags);
2870 ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
2871
2872
2873
2874
2875
2876 if (!ret) {
2877 xhci_cleanup_stalled_ring(xhci, udev, ep_index);
2878 kfree(virt_ep->stopped_td);
2879 xhci_ring_cmd_db(xhci);
2880 }
2881 virt_ep->stopped_td = NULL;
2882 virt_ep->stopped_trb = NULL;
2883 virt_ep->stopped_stream = 0;
2884 spin_unlock_irqrestore(&xhci->lock, flags);
2885
2886 if (ret)
2887 xhci_warn(xhci, "FIXME allocate a new ring segment\n");
2888}
2889
2890static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
2891 struct usb_device *udev, struct usb_host_endpoint *ep,
2892 unsigned int slot_id)
2893{
2894 int ret;
2895 unsigned int ep_index;
2896 unsigned int ep_state;
2897
2898 if (!ep)
2899 return -EINVAL;
2900 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
2901 if (ret <= 0)
2902 return -EINVAL;
2903 if (ep->ss_ep_comp.bmAttributes == 0) {
2904 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
2905 " descriptor for ep 0x%x does not support streams\n",
2906 ep->desc.bEndpointAddress);
2907 return -EINVAL;
2908 }
2909
2910 ep_index = xhci_get_endpoint_index(&ep->desc);
2911 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
2912 if (ep_state & EP_HAS_STREAMS ||
2913 ep_state & EP_GETTING_STREAMS) {
2914 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
2915 "already has streams set up.\n",
2916 ep->desc.bEndpointAddress);
2917 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
2918 "dynamic stream context array reallocation.\n");
2919 return -EINVAL;
2920 }
2921 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
2922 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
2923 "endpoint 0x%x; URBs are pending.\n",
2924 ep->desc.bEndpointAddress);
2925 return -EINVAL;
2926 }
2927 return 0;
2928}
2929
2930static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
2931 unsigned int *num_streams, unsigned int *num_stream_ctxs)
2932{
2933 unsigned int max_streams;
2934
2935
2936 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
2937
2938
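/* HCC_MAX_PSA gives the largest primary stream array size the host
 * controller supports; clamp both the context count and the number of
 * streams to it.
 */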
2939
2940
2941
2942
2943 max_streams = HCC_MAX_PSA(xhci->hcc_params);
2944 if (*num_stream_ctxs > max_streams) {
2945 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
2946 max_streams);
2947 *num_stream_ctxs = max_streams;
2948 *num_streams = max_streams;
2949 }
2950}
2951
2952
2953
2954
2955
2956static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
2957 struct usb_device *udev,
2958 struct usb_host_endpoint **eps, unsigned int num_eps,
2959 unsigned int *num_streams, u32 *changed_ep_bitmask)
2960{
2961 unsigned int max_streams;
2962 unsigned int endpoint_flag;
2963 int i;
2964 int ret;
2965
2966 for (i = 0; i < num_eps; i++) {
2967 ret = xhci_check_streams_endpoint(xhci, udev,
2968 eps[i], udev->slot_id);
2969 if (ret < 0)
2970 return ret;
2971
2972 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
2973 if (max_streams < (*num_streams - 1)) {
2974 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
2975 eps[i]->desc.bEndpointAddress,
2976 max_streams);
2977 *num_streams = max_streams+1;
2978 }
2979
2980 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
2981 if (*changed_ep_bitmask & endpoint_flag)
2982 return -EINVAL;
2983 *changed_ep_bitmask |= endpoint_flag;
2984 }
2985 return 0;
2986}
2987
2988static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
2989 struct usb_device *udev,
2990 struct usb_host_endpoint **eps, unsigned int num_eps)
2991{
2992 u32 changed_ep_bitmask = 0;
2993 unsigned int slot_id;
2994 unsigned int ep_index;
2995 unsigned int ep_state;
2996 int i;
2997
2998 slot_id = udev->slot_id;
2999 if (!xhci->devs[slot_id])
3000 return 0;
3001
3002 for (i = 0; i < num_eps; i++) {
3003 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3004 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3005
3006 if (ep_state & EP_GETTING_NO_STREAMS) {
3007 xhci_warn(xhci, "WARN Can't disable streams for "
3008 "endpoint 0x%x, "
3009 "streams are being disabled already.\n",
3010 eps[i]->desc.bEndpointAddress);
3011 return 0;
3012 }
3013
3014 if (!(ep_state & EP_HAS_STREAMS) &&
3015 !(ep_state & EP_GETTING_STREAMS)) {
3016 xhci_warn(xhci, "WARN Can't disable streams for "
3017 "endpoint 0x%x, "
3018 "streams are already disabled!\n",
3019 eps[i]->desc.bEndpointAddress);
3020 xhci_warn(xhci, "WARN xhci_free_streams() called "
3021 "with non-streams endpoint\n");
3022 return 0;
3023 }
3024 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3025 }
3026 return changed_ep_bitmask;
3027}
3028
3029
3030
3031
3032
3033
3034
3035
3036
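/* Set up streams for a group of bulk endpoints: allocate the stream contexts
 * and per-stream rings, then issue a Configure Endpoint command to install
 * them.  Returns the number of stream IDs the driver may use (stream 0 is
 * reserved) or a negative error code.
 */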
3037
3038
3039
3040
3041
3042
3043
3044
3045int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3046 struct usb_host_endpoint **eps, unsigned int num_eps,
3047 unsigned int num_streams, gfp_t mem_flags)
3048{
3049 int i, ret;
3050 struct xhci_hcd *xhci;
3051 struct xhci_virt_device *vdev;
3052 struct xhci_command *config_cmd;
3053 unsigned int ep_index;
3054 unsigned int num_stream_ctxs;
3055 unsigned long flags;
3056 u32 changed_ep_bitmask = 0;
3057
3058 if (!eps)
3059 return -EINVAL;
3060
3061
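/* Stream 0 is reserved, so ask for one more stream than the caller
 * requested.
 */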
3062
3063
3064 num_streams += 1;
3065 xhci = hcd_to_xhci(hcd);
3066 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3067 num_streams);
3068
3069 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
3070 if (!config_cmd) {
3071 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
3072 return -ENOMEM;
3073 }
3074
3075
3076
3077
3078
3079 spin_lock_irqsave(&xhci->lock, flags);
3080 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3081 num_eps, &num_streams, &changed_ep_bitmask);
3082 if (ret < 0) {
3083 xhci_free_command(xhci, config_cmd);
3084 spin_unlock_irqrestore(&xhci->lock, flags);
3085 return ret;
3086 }
3087 if (num_streams <= 1) {
3088 xhci_warn(xhci, "WARN: endpoints can't handle "
3089 "more than one stream.\n");
3090 xhci_free_command(xhci, config_cmd);
3091 spin_unlock_irqrestore(&xhci->lock, flags);
3092 return -EINVAL;
3093 }
3094 vdev = xhci->devs[udev->slot_id];
3095
3096
3097
3098 for (i = 0; i < num_eps; i++) {
3099 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3100 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3101 }
3102 spin_unlock_irqrestore(&xhci->lock, flags);
3103
3104
3105
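/* Allocate the driver and hardware stream data structures; nothing is
 * written into the input context until every allocation has succeeded.
 */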
3106
3107
3108 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3109 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3110 num_stream_ctxs, num_streams);
3111
3112 for (i = 0; i < num_eps; i++) {
3113 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3114 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3115 num_stream_ctxs,
3116 num_streams, mem_flags);
3117 if (!vdev->eps[ep_index].stream_info)
3118 goto cleanup;
3119
3120
3121
3122 }
3123
3124
3125 for (i = 0; i < num_eps; i++) {
3126 struct xhci_ep_ctx *ep_ctx;
3127
3128 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3129 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3130
3131 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3132 vdev->out_ctx, ep_index);
3133 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3134 vdev->eps[ep_index].stream_info);
3135 }
3136
3137
3138
3139 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3140 vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
3141
3142
3143 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3144 false, false);
3145
3146
3147
3148
3149
3150 if (ret < 0)
3151 goto cleanup;
3152
3153 spin_lock_irqsave(&xhci->lock, flags);
3154 for (i = 0; i < num_eps; i++) {
3155 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3156 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3157 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3158 udev->slot_id, ep_index);
3159 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3160 }
3161 xhci_free_command(xhci, config_cmd);
3162 spin_unlock_irqrestore(&xhci->lock, flags);
3163
3164
3165 return num_streams - 1;
3166
3167cleanup:
3168
3169 for (i = 0; i < num_eps; i++) {
3170 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3171 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3172 vdev->eps[ep_index].stream_info = NULL;
3173
3174
3175
3176 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3177 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3178 xhci_endpoint_zero(xhci, vdev, eps[i]);
3179 }
3180 xhci_free_command(xhci, config_cmd);
3181 return -ENOMEM;
3182}
3183
3184
3185
3186
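/* Transition the endpoints back from streams to ordinary rings.  This issues
 * a Configure Endpoint command that must succeed, then frees the stream data
 * structures.
 */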
3187
3188
3189
3190int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3191 struct usb_host_endpoint **eps, unsigned int num_eps,
3192 gfp_t mem_flags)
3193{
3194 int i, ret;
3195 struct xhci_hcd *xhci;
3196 struct xhci_virt_device *vdev;
3197 struct xhci_command *command;
3198 unsigned int ep_index;
3199 unsigned long flags;
3200 u32 changed_ep_bitmask;
3201
3202 xhci = hcd_to_xhci(hcd);
3203 vdev = xhci->devs[udev->slot_id];
3204
3205
3206 spin_lock_irqsave(&xhci->lock, flags);
3207 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3208 udev, eps, num_eps);
3209 if (changed_ep_bitmask == 0) {
3210 spin_unlock_irqrestore(&xhci->lock, flags);
3211 return -EINVAL;
3212 }
3213
3214
3215
3216
3217
3218 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3219 command = vdev->eps[ep_index].stream_info->free_streams_command;
3220 for (i = 0; i < num_eps; i++) {
3221 struct xhci_ep_ctx *ep_ctx;
3222
3223 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3224 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3225 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3226 EP_GETTING_NO_STREAMS;
3227
3228 xhci_endpoint_copy(xhci, command->in_ctx,
3229 vdev->out_ctx, ep_index);
3230 xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
3231 &vdev->eps[ep_index]);
3232 }
3233 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3234 vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
3235 spin_unlock_irqrestore(&xhci->lock, flags);
3236
3237
3238
3239
3240 ret = xhci_configure_endpoint(xhci, udev, command,
3241 false, true);
3242
3243
3244
3245
3246 if (ret < 0)
3247 return ret;
3248
3249 spin_lock_irqsave(&xhci->lock, flags);
3250 for (i = 0; i < num_eps; i++) {
3251 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3252 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3253 vdev->eps[ep_index].stream_info = NULL;
3254
3255
3256
3257 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3258 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3259 }
3260 spin_unlock_irqrestore(&xhci->lock, flags);
3261
3262 return 0;
3263}
3264
3265
3266
3267
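/* Release the host controller endpoint resources for every endpoint on this
 * device that still has a ring.  Must be called with xhci->lock held.
 */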
3268
3269
3270
3271
3272void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3273 struct xhci_virt_device *virt_dev, bool drop_control_ep)
3274{
3275 int i;
3276 unsigned int num_dropped_eps = 0;
3277 unsigned int drop_flags = 0;
3278
3279 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3280 if (virt_dev->eps[i].ring) {
3281 drop_flags |= 1 << i;
3282 num_dropped_eps++;
3283 }
3284 }
3285 xhci->num_active_eps -= num_dropped_eps;
3286 if (num_dropped_eps)
3287 xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
3288 "%u now active.\n",
3289 num_dropped_eps, drop_flags,
3290 xhci->num_active_eps);
3291}
3292
3293
3294
3295
3296
3297
3298
3299
3300
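/* Issue a Reset Device command to return the slot to the Default state.  If
 * the slot no longer matches the udev it is re-allocated instead; on success
 * every endpoint ring except endpoint 0 is freed.
 */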
3301
3302
3303
3304
3305
3306
3307
3308
3309
3310
3311int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
3312{
3313 int ret, i;
3314 unsigned long flags;
3315 struct xhci_hcd *xhci;
3316 unsigned int slot_id;
3317 struct xhci_virt_device *virt_dev;
3318 struct xhci_command *reset_device_cmd;
3319 int timeleft;
3320 int last_freed_endpoint;
3321 struct xhci_slot_ctx *slot_ctx;
3322 int old_active_eps = 0;
3323
3324 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3325 if (ret <= 0)
3326 return ret;
3327 xhci = hcd_to_xhci(hcd);
3328 slot_id = udev->slot_id;
3329 virt_dev = xhci->devs[slot_id];
3330 if (!virt_dev) {
3331 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3332 "not exist. Re-allocate the device\n", slot_id);
3333 ret = xhci_alloc_dev(hcd, udev);
3334 if (ret == 1)
3335 return 0;
3336 else
3337 return -EINVAL;
3338 }
3339
3340 if (virt_dev->udev != udev) {
3341
3342
3343
3344
3345 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3346 "not match the udev. Re-allocate the device\n",
3347 slot_id);
3348 ret = xhci_alloc_dev(hcd, udev);
3349 if (ret == 1)
3350 return 0;
3351 else
3352 return -EINVAL;
3353 }
3354
3355
3356 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3357 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3358 SLOT_STATE_DISABLED)
3359 return 0;
3360
3361 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3362
3363
3364
3365
3366
3367
3368 reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
3369 if (!reset_device_cmd) {
3370 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3371 return -ENOMEM;
3372 }
3373
3374
3375 spin_lock_irqsave(&xhci->lock, flags);
3376 reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
3377
3378
3379
3380
3381 if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
3382 reset_device_cmd->command_trb =
3383 xhci->cmd_ring->enq_seg->next->trbs;
3384
3385 list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
3386 ret = xhci_queue_reset_device(xhci, slot_id);
3387 if (ret) {
3388 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3389 list_del(&reset_device_cmd->cmd_list);
3390 spin_unlock_irqrestore(&xhci->lock, flags);
3391 goto command_cleanup;
3392 }
3393 xhci_ring_cmd_db(xhci);
3394 spin_unlock_irqrestore(&xhci->lock, flags);
3395
3396
3397 timeleft = wait_for_completion_interruptible_timeout(
3398 reset_device_cmd->completion,
3399 USB_CTRL_SET_TIMEOUT);
3400 if (timeleft <= 0) {
3401 xhci_warn(xhci, "%s while waiting for reset device command\n",
3402 timeleft == 0 ? "Timeout" : "Signal");
3403 spin_lock_irqsave(&xhci->lock, flags);
3404
3405
3406
3407 if (reset_device_cmd->cmd_list.next != LIST_POISON1)
3408 list_del(&reset_device_cmd->cmd_list);
3409 spin_unlock_irqrestore(&xhci->lock, flags);
3410 ret = -ETIME;
3411 goto command_cleanup;
3412 }
3413
3414
3415
3416
3417
3418 ret = reset_device_cmd->status;
3419 switch (ret) {
3420 case COMP_EBADSLT:
3421 case COMP_CTX_STATE:
3422 xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
3423 slot_id,
3424 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3425 xhci_info(xhci, "Not freeing device rings.\n");
3426
3427 ret = 0;
3428 goto command_cleanup;
3429 case COMP_SUCCESS:
3430 xhci_dbg(xhci, "Successful reset device command.\n");
3431 break;
3432 default:
3433 if (xhci_is_vendor_info_code(xhci, ret))
3434 break;
3435 xhci_warn(xhci, "Unknown completion code %u for "
3436 "reset device command.\n", ret);
3437 ret = -EINVAL;
3438 goto command_cleanup;
3439 }
3440
3441
3442 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3443 spin_lock_irqsave(&xhci->lock, flags);
3444
3445 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3446 spin_unlock_irqrestore(&xhci->lock, flags);
3447 }
3448
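/* Everything but endpoint 0 is disabled after the reset, so free or cache
 * the remaining rings and drop their bandwidth accounting.
 */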
3449
3450 last_freed_endpoint = 1;
3451 for (i = 1; i < 31; ++i) {
3452 struct xhci_virt_ep *ep = &virt_dev->eps[i];
3453
3454 if (ep->ep_state & EP_HAS_STREAMS) {
3455 xhci_free_stream_info(xhci, ep->stream_info);
3456 ep->stream_info = NULL;
3457 ep->ep_state &= ~EP_HAS_STREAMS;
3458 }
3459
3460 if (ep->ring) {
3461 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
3462 last_freed_endpoint = i;
3463 }
3464 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3465 xhci_drop_ep_from_interval_table(xhci,
3466 &virt_dev->eps[i].bw_info,
3467 virt_dev->bw_table,
3468 udev,
3469 &virt_dev->eps[i],
3470 virt_dev->tt_info);
3471 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3472 }
3473
3474 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3475
3476 xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
3477 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
3478 ret = 0;
3479
3480command_cleanup:
3481 xhci_free_command(xhci, reset_device_cmd);
3482 return ret;
3483}
3484
3485
3486
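/* The USB device is going away: stop pending endpoint timers, disable the
 * slot and free the host controller data structures for it.
 */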
3487
3488
3489
3490void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3491{
3492 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3493 struct xhci_virt_device *virt_dev;
3494 unsigned long flags;
3495 u32 state;
3496 int i, ret;
3497
3498 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3499
3500
3501
3502 if (ret <= 0 && ret != -ENODEV)
3503 return;
3504
3505 virt_dev = xhci->devs[udev->slot_id];
3506
3507
3508 for (i = 0; i < 31; ++i) {
3509 virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
3510 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
3511 }
3512
3513 if (udev->usb2_hw_lpm_enabled) {
3514 xhci_set_usb2_hardware_lpm(hcd, udev, 0);
3515 udev->usb2_hw_lpm_enabled = 0;
3516 }
3517
3518 spin_lock_irqsave(&xhci->lock, flags);
3519
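/* If the host controller is dead or halted, just free the virt device
 * without queueing a Disable Slot command.
 */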
3520 state = xhci_readl(xhci, &xhci->op_regs->status);
3521 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3522 (xhci->xhc_state & XHCI_STATE_HALTED)) {
3523 xhci_free_virt_device(xhci, udev->slot_id);
3524 spin_unlock_irqrestore(&xhci->lock, flags);
3525 return;
3526 }
3527
3528 if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
3529 spin_unlock_irqrestore(&xhci->lock, flags);
3530 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3531 return;
3532 }
3533 xhci_ring_cmd_db(xhci);
3534 spin_unlock_irqrestore(&xhci->lock, flags);
3535
3536
3537
3538
3539}
3540
3541
3542
3543
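/* Reserve one endpoint context for the new slot's default control endpoint.
 * Must be called with xhci->lock held.
 */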
3544
3545
3546
3547static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3548{
3549 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
3550 xhci_dbg(xhci, "Not enough ep ctxs: "
3551 "%u active, need to add 1, limit is %u.\n",
3552 xhci->num_active_eps, xhci->limit_active_eps);
3553 return -ENOMEM;
3554 }
3555 xhci->num_active_eps += 1;
3556 xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
3557 xhci->num_active_eps);
3558 return 0;
3559}
3560
3561
3562
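/* Issue an Enable Slot command and allocate a virt device for the new USB
 * device.  Returns 1 on success, 0 if no slot or memory was available.
 */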
3563
3564
3565
3566int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3567{
3568 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3569 unsigned long flags;
3570 int timeleft;
3571 int ret;
3572 union xhci_trb *cmd_trb;
3573
3574 spin_lock_irqsave(&xhci->lock, flags);
3575 cmd_trb = xhci->cmd_ring->dequeue;
3576 ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
3577 if (ret) {
3578 spin_unlock_irqrestore(&xhci->lock, flags);
3579 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3580 return 0;
3581 }
3582 xhci_ring_cmd_db(xhci);
3583 spin_unlock_irqrestore(&xhci->lock, flags);
3584
3585
3586 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
3587 XHCI_CMD_DEFAULT_TIMEOUT);
3588 if (timeleft <= 0) {
3589 xhci_warn(xhci, "%s while waiting for a slot\n",
3590 timeleft == 0 ? "Timeout" : "Signal");
3591
3592 return xhci_cancel_cmd(xhci, NULL, cmd_trb);
3593 }
3594
3595 if (!xhci->slot_id) {
3596 xhci_err(xhci, "Error while assigning device slot ID\n");
3597 return 0;
3598 }
3599
3600 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3601 spin_lock_irqsave(&xhci->lock, flags);
3602 ret = xhci_reserve_host_control_ep_resources(xhci);
3603 if (ret) {
3604 spin_unlock_irqrestore(&xhci->lock, flags);
3605 xhci_warn(xhci, "Not enough host resources, "
3606 "active endpoint contexts = %u\n",
3607 xhci->num_active_eps);
3608 goto disable_slot;
3609 }
3610 spin_unlock_irqrestore(&xhci->lock, flags);
3611 }
3612
3613
3614
3615
3616 if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
3617 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
3618 goto disable_slot;
3619 }
3620 udev->slot_id = xhci->slot_id;
3621
3622
3623 return 1;
3624
3625disable_slot:
3626
3627 spin_lock_irqsave(&xhci->lock, flags);
3628 if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
3629 xhci_ring_cmd_db(xhci);
3630 spin_unlock_irqrestore(&xhci->lock, flags);
3631 return 0;
3632}
3633
3634
3635
3636
3637
3638
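/* Issue an Address Device command, which makes the hardware send a USB
 * SET_ADDRESS request to the device, and wait for it to complete.
 */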
3639
3640
3641
3642
3643int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3644{
3645 unsigned long flags;
3646 int timeleft;
3647 struct xhci_virt_device *virt_dev;
3648 int ret = 0;
3649 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3650 struct xhci_slot_ctx *slot_ctx;
3651 struct xhci_input_control_ctx *ctrl_ctx;
3652 u64 temp_64;
3653 union xhci_trb *cmd_trb;
3654
3655 if (!udev->slot_id) {
3656 xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
3657 return -EINVAL;
3658 }
3659
3660 virt_dev = xhci->devs[udev->slot_id];
3661
3662 if (WARN_ON(!virt_dev)) {
3663
3664
3665
3666
3667
3668 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
3669 udev->slot_id);
3670 return -EINVAL;
3671 }
3672
3673 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
3674
3675
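/* If the slot context has never been filled in, this is the first Address
 * Device command for the slot; otherwise only the ep0 ring dequeue pointer
 * needs to be refreshed in the input context.
 */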
3676
3677
3678
3679 if (!slot_ctx->dev_info)
3680 xhci_setup_addressable_virt_dev(xhci, udev);
3681
3682 else
3683 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
3684 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
3685 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
3686 ctrl_ctx->drop_flags = 0;
3687
3688 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3689 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3690
3691 spin_lock_irqsave(&xhci->lock, flags);
3692 cmd_trb = xhci->cmd_ring->dequeue;
3693 ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
3694 udev->slot_id);
3695 if (ret) {
3696 spin_unlock_irqrestore(&xhci->lock, flags);
3697 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3698 return ret;
3699 }
3700 xhci_ring_cmd_db(xhci);
3701 spin_unlock_irqrestore(&xhci->lock, flags);
3702
3703
3704 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
3705 XHCI_CMD_DEFAULT_TIMEOUT);
3706
3707
3708
3709
3710 if (timeleft <= 0) {
3711 xhci_warn(xhci, "%s while waiting for address device command\n",
3712 timeleft == 0 ? "Timeout" : "Signal");
3713
3714 ret = xhci_cancel_cmd(xhci, NULL, cmd_trb);
3715 if (ret < 0)
3716 return ret;
3717 return -ETIME;
3718 }
3719
3720 switch (virt_dev->cmd_status) {
3721 case COMP_CTX_STATE:
3722 case COMP_EBADSLT:
3723 xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
3724 udev->slot_id);
3725 ret = -EINVAL;
3726 break;
3727 case COMP_TX_ERR:
3728 dev_warn(&udev->dev, "Device not responding to set address.\n");
3729 ret = -EPROTO;
3730 break;
3731 case COMP_DEV_ERR:
3732 dev_warn(&udev->dev, "ERROR: Incompatible device for address "
3733 "device command.\n");
3734 ret = -ENODEV;
3735 break;
3736 case COMP_SUCCESS:
3737 xhci_dbg(xhci, "Successful Address Device command\n");
3738 break;
3739 default:
3740 xhci_err(xhci, "ERROR: unexpected command completion "
3741 "code 0x%x.\n", virt_dev->cmd_status);
3742 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3743 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
3744 ret = -EINVAL;
3745 break;
3746 }
3747 if (ret) {
3748 return ret;
3749 }
3750 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
3751 xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
3752 xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
3753 udev->slot_id,
3754 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
3755 (unsigned long long)
3756 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
3757 xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
3758 (unsigned long long)virt_dev->out_ctx->dma);
3759 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3760 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3761 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3762 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
3763
3764
3765
3766
3767 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3768
3769
3770 virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
3771 + 1;
3772
3773 ctrl_ctx->add_flags = 0;
3774 ctrl_ctx->drop_flags = 0;
3775
3776 xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);
3777
3778 return 0;
3779}
3780
3781#ifdef CONFIG_USB_SUSPEND
3782
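/* BESL encodings expressed in microseconds, used to pick the smallest host
 * BESL value that covers the controller's U2 exit latency.
 */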
3783
3784static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
3785 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
3786
3787
3788static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
3789 struct usb_device *udev)
3790{
3791 int u2del, besl, besl_host;
3792 int besl_device = 0;
3793 u32 field;
3794
3795 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
3796 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
3797
3798 if (field & USB_BESL_SUPPORT) {
3799 for (besl_host = 0; besl_host < 16; besl_host++) {
3800 if (xhci_besl_encoding[besl_host] >= u2del)
3801 break;
3802 }
3803
3804 if (field & USB_BESL_BASELINE_VALID)
3805 besl_device = USB_GET_BESL_BASELINE(field);
3806 else if (field & USB_BESL_DEEP_VALID)
3807 besl_device = USB_GET_BESL_DEEP(field);
3808 } else {
3809 if (u2del <= 50)
3810 besl_host = 0;
3811 else
3812 besl_host = (u2del - 51) / 75 + 1;
3813 }
3814
3815 besl = besl_host + besl_device;
3816 if (besl > 15)
3817 besl = 15;
3818
3819 return besl;
3820}
3821
3822static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
3823 struct usb_device *udev)
3824{
3825 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3826 struct dev_info *dev_info;
3827 __le32 __iomem **port_array;
3828 __le32 __iomem *addr, *pm_addr;
3829 u32 temp, dev_id;
3830 unsigned int port_num;
3831 unsigned long flags;
3832 int hird;
3833 int ret;
3834
3835 if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
3836 !udev->lpm_capable)
3837 return -EINVAL;
3838
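/* Software LPM is only attempted for non-hub devices attached directly to a
 * root port.
 */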
3839
3840 if (!udev->parent || udev->parent->parent ||
3841 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
3842 return -EINVAL;
3843
3844 spin_lock_irqsave(&xhci->lock, flags);
3845
3846
3847 dev_id = le16_to_cpu(udev->descriptor.idVendor) << 16 |
3848 le16_to_cpu(udev->descriptor.idProduct);
3849 list_for_each_entry(dev_info, &xhci->lpm_failed_devs, list) {
3850 if (dev_info->dev_id == dev_id) {
3851 ret = -EINVAL;
3852 goto finish;
3853 }
3854 }
3855
3856 port_array = xhci->usb2_ports;
3857 port_num = udev->portnum - 1;
3858
3859 if (port_num > HCS_MAX_PORTS(xhci->hcs_params1)) {
3860 xhci_dbg(xhci, "invalid port number %d\n", udev->portnum);
3861 ret = -EINVAL;
3862 goto finish;
3863 }
3864
3865
3866
3867
3868
3869
3870
3871 xhci_dbg(xhci, "test port %d software LPM\n", port_num);
3872
3873
3874
3875
3876
3877 pm_addr = port_array[port_num] + 1;
3878 hird = xhci_calculate_hird_besl(xhci, udev);
3879 temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird);
3880 xhci_writel(xhci, temp, pm_addr);
3881
3882
3883 addr = port_array[port_num];
3884 xhci_set_link_state(xhci, port_array, port_num, XDEV_U2);
3885
3886
3887 spin_unlock_irqrestore(&xhci->lock, flags);
3888 msleep(10);
3889 spin_lock_irqsave(&xhci->lock, flags);
3890
3891
3892 ret = xhci_handshake(xhci, pm_addr,
3893 PORT_L1S_MASK, PORT_L1S_SUCCESS, 125);
3894 if (ret != -ETIMEDOUT) {
3895
3896 temp = xhci_readl(xhci, addr);
3897 xhci_dbg(xhci, "port %d entered L1 state, port status 0x%x\n",
3898 port_num, temp);
3899 ret = 0;
3900 } else {
3901 temp = xhci_readl(xhci, pm_addr);
3902 xhci_dbg(xhci, "port %d software lpm failed, L1 status %d\n",
3903 port_num, temp & PORT_L1S_MASK);
3904 ret = -EINVAL;
3905 }
3906
3907
3908 xhci_set_link_state(xhci, port_array, port_num, XDEV_U0);
3909
3910 spin_unlock_irqrestore(&xhci->lock, flags);
3911 msleep(10);
3912 spin_lock_irqsave(&xhci->lock, flags);
3913
3914
3915 xhci_test_and_clear_bit(xhci, port_array, port_num, PORT_PLC);
3916
3917
3918 if (!ret) {
3919 temp = xhci_readl(xhci, addr);
3920 xhci_dbg(xhci, "resumed port %d status 0x%x\n", port_num, temp);
3921 if (!(temp & PORT_CONNECT) || !(temp & PORT_PE) ||
3922 (temp & PORT_PLS_MASK) != XDEV_U0) {
3923 xhci_dbg(xhci, "port L1 resume fail\n");
3924 ret = -EINVAL;
3925 }
3926 }
3927
3928 if (ret) {
3929
3930 xhci_warn(xhci, "device LPM test failed, may disconnect and "
3931 "re-enumerate\n");
3932 dev_info = kzalloc(sizeof(struct dev_info), GFP_ATOMIC);
3933 if (!dev_info) {
3934 ret = -ENOMEM;
3935 goto finish;
3936 }
3937 dev_info->dev_id = dev_id;
3938 INIT_LIST_HEAD(&dev_info->list);
3939 list_add(&dev_info->list, &xhci->lpm_failed_devs);
3940 } else {
3941 xhci_ring_device(xhci, udev->slot_id);
3942 }
3943
3944finish:
3945 spin_unlock_irqrestore(&xhci->lock, flags);
3946 return ret;
3947}
3948
3949int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
3950 struct usb_device *udev, int enable)
3951{
3952 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3953 __le32 __iomem **port_array;
3954 __le32 __iomem *pm_addr;
3955 u32 temp;
3956 unsigned int port_num;
3957 unsigned long flags;
3958 int hird;
3959
3960 if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support ||
3961 !udev->lpm_capable)
3962 return -EPERM;
3963
3964 if (!udev->parent || udev->parent->parent ||
3965 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
3966 return -EPERM;
3967
3968 if (udev->usb2_hw_lpm_capable != 1)
3969 return -EPERM;
3970
3971 spin_lock_irqsave(&xhci->lock, flags);
3972
3973 port_array = xhci->usb2_ports;
3974 port_num = udev->portnum - 1;
3975 pm_addr = port_array[port_num] + 1;
3976 temp = xhci_readl(xhci, pm_addr);
3977
3978 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
3979 enable ? "enable" : "disable", port_num);
3980
3981 hird = xhci_calculate_hird_besl(xhci, udev);
3982
3983 if (enable) {
3984 temp &= ~PORT_HIRD_MASK;
3985 temp |= PORT_HIRD(hird) | PORT_RWE;
3986 xhci_writel(xhci, temp, pm_addr);
3987 temp = xhci_readl(xhci, pm_addr);
3988 temp |= PORT_HLE;
3989 xhci_writel(xhci, temp, pm_addr);
3990 } else {
3991 temp &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK);
3992 xhci_writel(xhci, temp, pm_addr);
3993 }
3994
3995 spin_unlock_irqrestore(&xhci->lock, flags);
3996 return 0;
3997}
3998
3999int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4000{
4001 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4002 int ret;
4003
4004 ret = xhci_usb2_software_lpm_test(hcd, udev);
4005 if (!ret) {
4006 xhci_dbg(xhci, "software LPM test succeed\n");
4007 if (xhci->hw_lpm_support == 1) {
4008 udev->usb2_hw_lpm_capable = 1;
4009 ret = xhci_set_usb2_hardware_lpm(hcd, udev, 1);
4010 if (!ret)
4011 udev->usb2_hw_lpm_enabled = 1;
4012 }
4013 }
4014
4015 return 0;
4016}
4017
4018#else
4019
4020int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4021 struct usb_device *udev, int enable)
4022{
4023 return 0;
4024}
4025
4026int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4027{
4028 return 0;
4029}
4030
4031#endif
4032
4033
4034
4035#ifdef CONFIG_PM
4036
4037static unsigned long long xhci_service_interval_to_ns(
4038 struct usb_endpoint_descriptor *desc)
4039{
4040 return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
4041}
4042
4043static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
4044 enum usb3_link_state state)
4045{
4046 unsigned long long sel;
4047 unsigned long long pel;
4048 unsigned int max_sel_pel;
4049 char *state_name;
4050
4051 switch (state) {
4052 case USB3_LPM_U1:
4053
4054 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
4055 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
4056 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
4057 state_name = "U1";
4058 break;
4059 case USB3_LPM_U2:
4060 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
4061 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
4062 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
4063 state_name = "U2";
4064 break;
4065 default:
4066 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
4067 __func__);
4068 return USB3_LPM_DISABLED;
4069 }
4070
4071 if (sel <= max_sel_pel && pel <= max_sel_pel)
4072 return USB3_LPM_DEVICE_INITIATED;
4073
4074 if (sel > max_sel_pel)
4075 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4076 "due to long SEL %llu ms\n",
4077 state_name, sel);
4078 else
4079 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4080 "due to long PEL %llu ms\n",
4081 state_name, pel);
4082 return USB3_LPM_DISABLED;
4083}
4084
4085
4086
4087
4088
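/* Returns the hub-encoded U1 timeout for an endpoint on an Intel host.  The
 * timeout scales with the U1 system exit latency (SEL) and, for periodic
 * endpoints, with the service interval.
 */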
4089
4090
4091
4092
4093
4094static u16 xhci_calculate_intel_u1_timeout(struct usb_device *udev,
4095 struct usb_endpoint_descriptor *desc)
4096{
4097 unsigned long long timeout_ns;
4098 int ep_type;
4099 int intr_type;
4100
4101 ep_type = usb_endpoint_type(desc);
4102 switch (ep_type) {
4103 case USB_ENDPOINT_XFER_CONTROL:
4104 timeout_ns = udev->u1_params.sel * 3;
4105 break;
4106 case USB_ENDPOINT_XFER_BULK:
4107 timeout_ns = udev->u1_params.sel * 5;
4108 break;
4109 case USB_ENDPOINT_XFER_INT:
4110 intr_type = usb_endpoint_interrupt_type(desc);
4111 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
4112 timeout_ns = udev->u1_params.sel * 3;
4113 break;
4114 }
4115
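/* Non-notification interrupt endpoints fall through and use the same
 * calculation as isochronous endpoints.
 */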
4116 case USB_ENDPOINT_XFER_ISOC:
4117 timeout_ns = xhci_service_interval_to_ns(desc);
4118 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
4119 if (timeout_ns < udev->u1_params.sel * 2)
4120 timeout_ns = udev->u1_params.sel * 2;
4121 break;
4122 default:
4123 return 0;
4124 }
4125
4126
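/* The U1 timeout field is encoded in microseconds; avoid returning 0, which
 * is the "LPM disabled" encoding.
 */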
4127 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
4128
4129 if (timeout_ns == USB3_LPM_DISABLED)
4130 timeout_ns++;
4131
4132
4133
4134
4135 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
4136 return timeout_ns;
4137 dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
4138 "due to long timeout %llu ms\n", timeout_ns);
4139 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
4140}
4141
4142
4143
4144
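/* Returns the hub-encoded U2 timeout for an endpoint on an Intel host: at
 * least 10 ms, and no shorter than the service interval or the device's U2
 * exit latency.
 */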
4145
4146
4147
4148
4149static u16 xhci_calculate_intel_u2_timeout(struct usb_device *udev,
4150 struct usb_endpoint_descriptor *desc)
4151{
4152 unsigned long long timeout_ns;
4153 unsigned long long u2_del_ns;
4154
4155 timeout_ns = 10 * 1000 * 1000;
4156
4157 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4158 (xhci_service_interval_to_ns(desc) > timeout_ns))
4159 timeout_ns = xhci_service_interval_to_ns(desc);
4160
4161 u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
4162 if (u2_del_ns > timeout_ns)
4163 timeout_ns = u2_del_ns;
4164
4165
4166 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
4167
4168
4169
4170 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
4171 return timeout_ns;
4172 dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
4173 "due to long timeout %llu ms\n", timeout_ns);
4174 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
4175}
4176
4177static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4178 struct usb_device *udev,
4179 struct usb_endpoint_descriptor *desc,
4180 enum usb3_link_state state,
4181 u16 *timeout)
4182{
4183 if (state == USB3_LPM_U1) {
4184 if (xhci->quirks & XHCI_INTEL_HOST)
4185 return xhci_calculate_intel_u1_timeout(udev, desc);
4186 } else {
4187 if (xhci->quirks & XHCI_INTEL_HOST)
4188 return xhci_calculate_intel_u2_timeout(udev, desc);
4189 }
4190
4191 return USB3_LPM_DISABLED;
4192}
4193
4194static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4195 struct usb_device *udev,
4196 struct usb_endpoint_descriptor *desc,
4197 enum usb3_link_state state,
4198 u16 *timeout)
4199{
4200 u16 alt_timeout;
4201
4202 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4203 desc, state, timeout);
4204
4205
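/* A "disabled" or "device initiated" result is final: report it and stop
 * scanning any further endpoints.
 */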
4206
4207
4208
4209 if (alt_timeout == USB3_LPM_DISABLED ||
4210 alt_timeout == USB3_LPM_DEVICE_INITIATED) {
4211 *timeout = alt_timeout;
4212 return -E2BIG;
4213 }
4214 if (alt_timeout > *timeout)
4215 *timeout = alt_timeout;
4216 return 0;
4217}
4218
4219static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
4220 struct usb_device *udev,
4221 struct usb_host_interface *alt,
4222 enum usb3_link_state state,
4223 u16 *timeout)
4224{
4225 int j;
4226
4227 for (j = 0; j < alt->desc.bNumEndpoints; j++) {
4228 if (xhci_update_timeout_for_endpoint(xhci, udev,
4229 &alt->endpoint[j].desc, state, timeout))
4230 return -E2BIG;
4232 }
4233 return 0;
4234}
4235
4236static int xhci_check_intel_tier_policy(struct usb_device *udev,
4237 enum usb3_link_state state)
4238{
4239 struct usb_device *parent;
4240 unsigned int num_hubs;
4241
4242 if (state == USB3_LPM_U2)
4243 return 0;
4244
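/* Count the external hubs between the device and the root port; U1 is only
 * enabled for devices on the first tier.
 */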
4245
4246 for (parent = udev->parent, num_hubs = 0; parent->parent;
4247 parent = parent->parent)
4248 num_hubs++;
4249
4250 if (num_hubs < 2)
4251 return 0;
4252
4253 dev_dbg(&udev->dev, "Disabling U1 link state for device"
4254 " below second-tier hub.\n");
4255 dev_dbg(&udev->dev, "Plug device into first-tier hub "
4256 "to decrease power consumption.\n");
4257 return -E2BIG;
4258}
4259
4260static int xhci_check_tier_policy(struct xhci_hcd *xhci,
4261 struct usb_device *udev,
4262 enum usb3_link_state state)
4263{
4264 if (xhci->quirks & XHCI_INTEL_HOST)
4265 return xhci_check_intel_tier_policy(udev, state);
4266 return -EINVAL;
4267}
4268
4269
4270
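/* Walk every endpoint of the active configuration and return the U1 or U2
 * timeout that can safely be enabled, or USB3_LPM_DISABLED.
 */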
4271
4272
4273
4274static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
4275 struct usb_device *udev, enum usb3_link_state state)
4276{
4277 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4278 struct usb_host_config *config;
4279 char *state_name;
4280 int i;
4281 u16 timeout = USB3_LPM_DISABLED;
4282
4283 if (state == USB3_LPM_U1)
4284 state_name = "U1";
4285 else if (state == USB3_LPM_U2)
4286 state_name = "U2";
4287 else {
4288 dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
4289 state);
4290 return timeout;
4291 }
4292
4293 if (xhci_check_tier_policy(xhci, udev, state) < 0)
4294 return timeout;
4295
4296
4297
4298
4299 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
4300 state, &timeout))
4301 return timeout;
4302
4303 config = udev->actconfig;
4304 if (!config)
4305 return timeout;
4306
4307 for (i = 0; i < USB_MAXINTERFACES; i++) {
4308 struct usb_driver *driver;
4309 struct usb_interface *intf = config->interface[i];
4310
4311 if (!intf)
4312 continue;
4313
4314
4315
4316
4317 if (intf->dev.driver) {
4318 driver = to_usb_driver(intf->dev.driver);
4319 if (driver && driver->disable_hub_initiated_lpm) {
4320 dev_dbg(&udev->dev, "Hub-initiated %s disabled "
4321 "at request of driver %s\n",
4322 state_name, driver->name);
4323 return xhci_get_timeout_no_hub_lpm(udev, state);
4324 }
4325 }
4326
4327
4328 if (!intf->cur_altsetting)
4329 continue;
4330
4331 if (xhci_update_timeout_for_interface(xhci, udev,
4332 intf->cur_altsetting,
4333 state, &timeout))
4334 return timeout;
4335 }
4336 return timeout;
4337}
4338
4339
4340
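/* Issue an Evaluate Context command to update the Max Exit Latency field in
 * the slot context.
 */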
4341
4342
4343static int xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4344 struct usb_device *udev, u16 max_exit_latency)
4345{
4346 struct xhci_virt_device *virt_dev;
4347 struct xhci_command *command;
4348 struct xhci_input_control_ctx *ctrl_ctx;
4349 struct xhci_slot_ctx *slot_ctx;
4350 unsigned long flags;
4351 int ret;
4352
4353 spin_lock_irqsave(&xhci->lock, flags);
4354 if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) {
4355 spin_unlock_irqrestore(&xhci->lock, flags);
4356 return 0;
4357 }
4358
4359
4360 virt_dev = xhci->devs[udev->slot_id];
4361 command = xhci->lpm_command;
4362 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4363 spin_unlock_irqrestore(&xhci->lock, flags);
4364
4365 ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
4366 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4367 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4368 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4369 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4370
4371 xhci_dbg(xhci, "Set up evaluate context for LPM MEL change.\n");
4372 xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
4373 xhci_dbg_ctx(xhci, command->in_ctx, 0);
4374
4375
4376 ret = xhci_configure_endpoint(xhci, udev, command,
4377 true, true);
4378 xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id);
4379 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0);
4380
4381 if (!ret) {
4382 spin_lock_irqsave(&xhci->lock, flags);
4383 virt_dev->current_mel = max_exit_latency;
4384 spin_unlock_irqrestore(&xhci->lock, flags);
4385 }
4386 return ret;
4387}
4388
4389static int calculate_max_exit_latency(struct usb_device *udev,
4390 enum usb3_link_state state_changed,
4391 u16 hub_encoded_timeout)
4392{
4393 unsigned long long u1_mel_us = 0;
4394 unsigned long long u2_mel_us = 0;
4395 unsigned long long mel_us = 0;
4396 bool disabling_u1;
4397 bool disabling_u2;
4398 bool enabling_u1;
4399 bool enabling_u2;
4400
4401 disabling_u1 = (state_changed == USB3_LPM_U1 &&
4402 hub_encoded_timeout == USB3_LPM_DISABLED);
4403 disabling_u2 = (state_changed == USB3_LPM_U2 &&
4404 hub_encoded_timeout == USB3_LPM_DISABLED);
4405
4406 enabling_u1 = (state_changed == USB3_LPM_U1 &&
4407 hub_encoded_timeout != USB3_LPM_DISABLED);
4408 enabling_u2 = (state_changed == USB3_LPM_U2 &&
4409 hub_encoded_timeout != USB3_LPM_DISABLED);
4410
4411
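/* Account for a state's exit latency if it is currently enabled and not
 * being disabled, or if it is being enabled now.
 */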
4412
4413
4414 if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
4415 enabling_u1)
4416 u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
4417 if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
4418 enabling_u2)
4419 u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
4420
4421 if (u1_mel_us > u2_mel_us)
4422 mel_us = u1_mel_us;
4423 else
4424 mel_us = u2_mel_us;
4425
4426 if (mel_us > MAX_EXIT) {
4427 dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
4428 "is too big.\n", mel_us);
4429 return -E2BIG;
4430 }
4431 return mel_us;
4432}
4433
4434
4435int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4436 struct usb_device *udev, enum usb3_link_state state)
4437{
4438 struct xhci_hcd *xhci;
4439 u16 hub_encoded_timeout;
4440 int mel;
4441 int ret;
4442
4443 xhci = hcd_to_xhci(hcd);
4444
4445
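/* Skip hosts that don't advertise LPM support and devices whose slot hasn't
 * been set up yet.
 */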
4446
4447
4448 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4449 !xhci->devs[udev->slot_id])
4450 return USB3_LPM_DISABLED;
4451
4452 hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
4453 mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
4454 if (mel < 0) {
4455
4456 hub_encoded_timeout = USB3_LPM_DISABLED;
4457 mel = 0;
4458 }
4459
4460 ret = xhci_change_max_exit_latency(xhci, udev, mel);
4461 if (ret)
4462 return ret;
4463 return hub_encoded_timeout;
4464}
4465
4466int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4467 struct usb_device *udev, enum usb3_link_state state)
4468{
4469 struct xhci_hcd *xhci;
4470 u16 mel;
4471 int ret;
4472
4473 xhci = hcd_to_xhci(hcd);
4474 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4475 !xhci->devs[udev->slot_id])
4476 return 0;
4477
4478 mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
4479 ret = xhci_change_max_exit_latency(xhci, udev, mel);
4480 if (ret)
4481 return ret;
4482 return 0;
4483}
4484#else
4485
4486int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4487 struct usb_device *udev, enum usb3_link_state state)
4488{
4489 return USB3_LPM_DISABLED;
4490}
4491
4492int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4493 struct usb_device *udev, enum usb3_link_state state)
4494{
4495 return 0;
4496}
4497#endif
4498
4499
4500
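/* Once the USB core has fetched a hub descriptor, update the slot context
 * with the hub-specific fields (TT info, number of ports, think time).
 */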
4501
4502
4503
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
			struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

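	/* Hubs with no parent are root hubs; they have no slot context to update. */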
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}
	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (hdev->speed == USB_SPEED_HIGH &&
			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENOMEM;
	}

	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
	if (tt->multi)
		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub "
				"TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
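		/*
		 * Convert the TT think time from nanoseconds to the encoding
		 * the slot context expects: 0 = 8 FS bit times (~666 ns),
		 * 1 = 16, 2 = 24, 3 = 32 FS bit times.  xHCI 1.0 requires
		 * this field to be zero unless the device is a high-speed hub.
		 */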
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
			slot_ctx->tt_info |=
				cpu_to_le32(TT_THINK_TIME(think_time));
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
				"TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");
	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);

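	/*
	 * Issue and wait for the command: controllers newer than 0.95 take a
	 * configure endpoint command, older ones only evaluate the context.
	 */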
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);

	xhci_free_command(xhci, config_cmd);
	return ret;
}

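/* The current frame number is the microframe index divided by 8. */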
int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
}

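/*
 * Common setup shared by the PCI and platform glue.  Called once for the
 * primary (USB 2.0) roothub HCD and once for the shared (USB 3.0) one.
 */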
int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
	struct xhci_hcd *xhci;
	struct device *dev = hcd->self.controller;
	int retval;
	u32 temp;

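	/*
	 * Accept arbitrarily long scatter-gather lists; note that xHCI
	 * controllers don't stop the endpoint queue on short packets.
	 */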
	hcd->self.sg_tablesize = ~0;
	hcd->self.no_stop_on_short = 1;

	if (usb_hcd_is_primary_hcd(hcd)) {
		xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL);
		if (!xhci)
			return -ENOMEM;
		*((struct xhci_hcd **) hcd->hcd_priv) = xhci;
		xhci->main_hcd = hcd;

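		/*
		 * Mark the first roothub as USB 2.0; a separate USB 3.0
		 * roothub is registered for the shared HCD.
		 */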
		hcd->speed = HCD_USB2;
		hcd->self.root_hub->speed = USB_SPEED_HIGH;

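		/*
		 * The USB 2.0 roothub of an xHC has an integrated (rate
		 * matching) transaction translator rather than an OHCI/UHCI
		 * companion controller, so advertise a TT.
		 */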
		hcd->has_tt = 1;
	} else {
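		/*
		 * This is the shared (USB 3.0) HCD; the xhci structure was
		 * already allocated when the primary HCD was set up.
		 */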
		xhci = hcd_to_xhci(hcd);
		temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
		if (HCC_64BIT_ADDR(temp)) {
			xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
			dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
		} else {
			dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
		}
		return 0;
	}

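	/* Map the capability, operational, and runtime register sets. */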
	xhci->cap_regs = hcd->regs;
	xhci->op_regs = hcd->regs +
		HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
	xhci->run_regs = hcd->regs +
		(xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);

	xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
	xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
	xhci->hci_version = HC_VERSION(xhci->hcc_params);
	xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
	xhci_print_registers(xhci);

	get_quirks(dev, xhci);

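	/* Make sure the controller is halted before resetting it. */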
	retval = xhci_halt(xhci);
	if (retval)
		goto error;

	xhci_dbg(xhci, "Resetting HCD\n");

	retval = xhci_reset(xhci);
	if (retval)
		goto error;
	xhci_dbg(xhci, "Reset complete\n");

	temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
	if (HCC_64BIT_ADDR(temp)) {
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
		dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
	} else {
		dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
	}

	xhci_dbg(xhci, "Calling HCD init\n");

	retval = xhci_init(hcd);
	if (retval)
		goto error;
	xhci_dbg(xhci, "Called HCD init\n");
	return 0;
error:
	kfree(xhci);
	return retval;
}

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
	int retval;

	retval = xhci_register_pci();
	if (retval < 0) {
		printk(KERN_DEBUG "Problem registering PCI driver.\n");
		return retval;
	}
	retval = xhci_register_plat();
	if (retval < 0) {
		printk(KERN_DEBUG "Problem registering platform driver.\n");
		goto unreg_pci;
	}
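	/*
	 * Sanity-check the compiler-generated sizes of structures that must
	 * match the register and context layouts the hardware expects.
	 */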
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);

	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);

	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
	return 0;
unreg_pci:
	xhci_unregister_pci();
	return retval;
}
module_init(xhci_hcd_init);

static void __exit xhci_hcd_cleanup(void)
{
	xhci_unregister_pci();
	xhci_unregister_plat();
}
module_exit(xhci_hcd_cleanup);