/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

/* Some 0.95 hardware can't handle the chain bit on a link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" have passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 */
static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		      u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;
	xhci_dbg(xhci, "// Halt the HC\n");
	xhci_quiesce(xhci);

	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state |= XHCI_STATE_HALTED;
	else
		xhci_warn(xhci, "Host not halted after %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state &= ~XHCI_STATE_HALTED;
	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret, i;

	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);

	ret = handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 250 * 1000);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);

	for (i = 0; i < 2; ++i) {
		xhci->bus_state[i].port_c_suspend = 0;
		xhci->bus_state[i].suspended_ports = 0;
		xhci->bus_state[i].resuming_ports = 0;
	}

	return ret;
}

#ifdef CONFIG_PCI
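/* Free the IRQs requested on the MSI-X vectors; returns -EINVAL if MSI-X
 * was never set up.
 */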
static int xhci_free_msi(struct xhci_hcd *xhci)
{
	int i;

	if (!xhci->msix_entries)
		return -EINVAL;

	for (i = 0; i < xhci->msix_count; i++)
		if (xhci->msix_entries[i].vector)
			free_irq(xhci->msix_entries[i].vector,
					xhci_to_hcd(xhci));
	return 0;
}

/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_dbg(xhci, "failed to allocate MSI entry\n");
		return ret;
	}

	ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_dbg(xhci, "disable MSI interrupt\n");
		pci_disable_msi(pdev);
	}

	return ret;
}

/*
 * Free IRQs
 * free all IRQs request
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq > 0)
		return;

	ret = xhci_free_msi(xhci);
	if (!ret)
		return;
	if (pdev->irq > 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));

	return;
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * calculate number of msi-x vectors supported.
	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
	 *   with max number of interrupters based on the xhci HCSPARAMS1.
	 * - num_online_cpus: maximum msi-x vectors per CPUs core.
	 *   Add additional 1 vector to ensure when roothub and hcd sharing
	 *   interrupt.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
			GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_dbg(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				(irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_dbg(xhci, "disable MSI-X interrupt\n");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	xhci_free_irq(xhci);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	hcd->msix_enabled = 0;
	return;
}
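
/* Wait for any in-flight MSI-X interrupt handlers to finish. */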
static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
	int i;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
	}
}

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		return 0;

	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = 0;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to msi */
		ret = xhci_setup_msi(xhci);

	if (!ret)
		/* hcd->irq is 0, we have MSI */
		return 0;

	if (!pdev->irq) {
		xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
		return -EINVAL;
	}

	/* fall back to legacy interrupt */
	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);
	if (ret) {
		xhci_err(xhci, "request interrupt %d failed\n",
				pdev->irq);
		return ret;
	}
	hcd->irq = pdev->irq;
	return 0;
}

#else

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	return 0;
}

static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts, set up a command ring segment and the event ring.
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	return retval;
}

/*-------------------------------------------------------------------------*/

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
	u64 temp_64;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, polling stopped.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; ++j) {
			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif
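
/* Start the USB3 host controller running; called only for the non-primary
 * (USB3 roothub) HCD, once the USB2 half has been set up.
 */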
static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable HC interrupt after an event ring setup, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Start the xHCI host controller running only after the USB 2.0 roothub
	 * is setup.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg(xhci, "xhci_run\n");

	ret = xhci_try_enable_msi(hcd);
	if (ret)
		return ret;

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_queue_vendor_command(xhci, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));

	xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
	return 0;
}

static void xhci_only_stop_hcd(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/* The shared_hcd is going to be deallocated shortly (the USB core only
	 * calls this function when allocation fails in usb_add_hcd(), or
	 * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
	 */
	xhci->shared_hcd = NULL;
	spin_unlock_irq(&xhci->lock);
}

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (!usb_hcd_is_primary_hcd(hcd)) {
		xhci_only_stop_hcd(xhci->shared_hcd);
		return;
	}

	spin_lock_irq(&xhci->lock);
	/* Make sure the xHC is halted for a USB3 roothub
	 * (xhci_stop() could be called as part of failed init).
	 */
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

#ifdef CONFIG_PM
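/* Save the operational and interrupter registers that the host may not
 * preserve across suspend.
 */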
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				      xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		 xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(long unsigned long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unless we scribble over the command ring
 * with zeroes, the host could try to execute stale commands left over from
 * before the suspend when it starts running again.
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0,
			sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't use bogus command ring
	 * pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 *
 */
int xhci_suspend(struct xhci_hcd *xhci)
{
	int rc = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped assuming that port suspend has done */

	/* step 2: clear Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command &= ~CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status,
		      STS_HALT, STS_HALT, 100*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_CSS;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10 * 1000)) {
		xhci_warn(xhci, "WARN: xHC save state timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	xhci_msix_sync_irqs(xhci);

	return rc;
}

/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions from S3/S4 mode.
 *
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval = 0;

	/* Wait a bit if either of the roothubs need to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		hibernated = true;

	if (!hibernated) {
		/* step 1: restore register */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state */
		/* step 3: set CRS flag */
		command = xhci_readl(xhci, &xhci->op_regs->command);
		command |= CMD_CRS;
		xhci_writel(xhci, command, &xhci->op_regs->command);
		if (handshake(xhci, &xhci->op_regs->status,
			      STS_RESTORE, 0, 10 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = xhci_readl(xhci, &xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {
		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
		/* Tell the event ring poll function not to reschedule */
		xhci->zombie = 1;
		del_timer_sync(&xhci->event_ring_timer);
#endif

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = xhci_readl(xhci, &xhci->op_regs->status);
		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
				&xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
				xhci_readl(xhci, &xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (!retval) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(secondary_hcd);
		}
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	handshake(xhci, &xhci->op_regs->status, STS_HALT,
		  0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints.  Ring the doorbell for each endpoint.
	 */
	/* this is done in bus_resume */

	spin_unlock_irq(&xhci->lock);

 done:
	if (retval == 0) {
		usb_hcd_resume_root_hub(hcd);
		usb_hcd_resume_root_hub(xhci->shared_hcd);
	}
	return retval;
}
#endif	/* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Find the index for an endpoint given its
 * descriptor.  Use the return value to right shift 1 for the bitmask.
 *
 * Index  = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;
	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0
 * is bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0
 * is bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 1) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
				func);
		return -EINVAL;
	}
	if (!udev->parent) {
		printk(KERN_DEBUG "xHCI %s called for root hub\n",
				func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			printk(KERN_DEBUG "xHCI %s called with unaddressed "
					"device\n", func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			printk(KERN_DEBUG "xHCI %s called with udev that "
					"does not match virt_dev\n", func);
			return -EINVAL;
		}
	}

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
				max_packet_size);
		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
				hw_max_packet_size);
		xhci_dbg(xhci, "Issuing evaluate context command.\n");

		/* Set up the modified control endpoint in the input context */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
	}
	return ret;
}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_td *buffer;
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv *urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else
		size = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
				  size * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
	if (!buffer) {
		kfree(urb_priv);
		return -ENOMEM;
	}

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = buffer;
		buffer++;
	}

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(xhci, urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
exit:
	return ret;
dying:
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
			"non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	ret = -ESHUTDOWN;
free_priv:
	xhci_urb_free_priv(xhci, urb_priv);
	urb->hcpriv = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	stream_id = urb->stream_id;
	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed"
 * from the ring.  Since the ring is a contiguous structure, they can't be
 * physically removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     if the HC is currently processing a bulk request.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same URB.
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, freeing TD.\n");
		urb_priv = urb->hcpriv;
		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
			td = urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
		xhci_urb_free_priv(xhci, urb_priv);
		return ret;
	}
	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
				"non-responsive xHCI host.\n",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists.  We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */
		goto done;
	}

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	urb_priv = urb->hcpriv;
	i = urb_priv->td_cnt;
	if (i < urb_priv->length)
		xhci_dbg(xhci, "Cancel URB %p, dev %s, ep 0x%x, "
				"starting at offset 0x%llx\n",
				urb, urb->dev->devpath,
				urb->ep->desc.bEndpointAddress,
				(unsigned long long) xhci_trb_virt_to_dma(
					urb_priv->td[i]->start_seg,
					urb_priv->td[i]->first_trb));

	for (; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
	     cpu_to_le32(EP_STATE_DISABLED)) ||
	    le32_to_cpu(ctrl_ctx->drop_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
	    LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}

/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	struct xhci_virt_device *virt_dev;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	last_ctx = xhci_last_valid_endpoint(added_ctxs);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	virt_dev = xhci->devs[udev->slot_id];
	in_ctx = virt_dev->in_ctx;
	out_ctx = virt_dev->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);

	/* If this endpoint is already in use, and the upper layers are trying
	 * to add it again without dropping it, reject the addition.
	 */
	if (virt_dev->eps[ep_index].ring &&
			!(le32_to_cpu(ctrl_ctx->drop_flags) &
				xhci_get_endpoint_flag(&ep->desc))) {
		xhci_warn(xhci, "Trying to add endpoint 0x%x "
				"without dropping it.\n",
				(unsigned int) ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (le32_to_cpu(ctrl_ctx->add_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	/* Adding an endpoint doesn't change the drop flags; they are noted
	 * here only for the debug message below.
	 */
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we just added one past */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
	    LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}
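
/* Reset the input context so that a later configure endpoint command is a
 * no-op unless new add/drop flags are set.
 */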
static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
	for (i = 1; i < 31; ++i) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}
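
/* Translate a Configure Endpoint command completion code into an errno. */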
static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;

	switch (*cmd_status) {
	case COMP_ENOMEM:
		dev_warn(&udev->dev, "Not enough host controller resources "
				"for new device state.\n");
		ret = -ENOMEM;
		/* FIXME: can we allocate more resources for the HC? */
		break;
	case COMP_BW_ERR:
	case COMP_2ND_BW_ERR:
		dev_warn(&udev->dev, "Not enough bandwidth "
				"for new device state.\n");
		ret = -ENOSPC;
		/* FIXME: can we go back to the old state? */
		break;
	case COMP_TRB_ERR:
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
				"add flag = 1, "
				"and endpoint is not disabled.\n");
		ret = -EINVAL;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
				"configure command.\n");
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}
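
/* Translate an Evaluate Context command completion code into an errno. */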
static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;
	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

	switch (*cmd_status) {
	case COMP_EINVAL:
		dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
				"context command.\n");
		ret = -EINVAL;
		break;
	case COMP_EBADSLT:
		dev_warn(&udev->dev, "WARN: slot not enabled for "
				"evaluate context command.\n");
		ret = -EINVAL;
		break;
	case COMP_CTX_STATE:
		dev_warn(&udev->dev, "WARN: invalid context state for "
				"evaluate context command.\n");
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
		ret = -EINVAL;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
				"context command.\n");
		ret = -ENODEV;
		break;
	case COMP_MEL_ERR:
		/* Max Exit Latency too large error */
		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful evaluate context command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 valid_add_flags;
	u32 valid_drop_flags;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	/* Ignore the slot flag (bit 0), and the default control endpoint flag
	 * (bit 1).  The default control endpoint is added during the Address
	 * Device command and is never removed until the slot is disabled.
	 */
	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	/* Use hweight32 to count the number of ones in the add flags, or
	 * number of endpoints added.  Don't count endpoints that are changed
	 * (both added and dropped).
	 */
	return hweight32(valid_add_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}

static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 valid_add_flags;
	u32 valid_drop_flags;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	return hweight32(valid_drop_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}

/*
 * We need to reserve the new number of endpoints before the configure endpoint
 * command completes.  We can't subtract the dropped endpoints from the number
 * of active endpoints until the command completes because we can oversubscribe
 * the host in this case:
 *
 *  - the first configure endpoint command drops more endpoints than it adds
 *  - a second configure endpoint command that adds more endpoints is queued
 *  - the first configure endpoint command fails, so the config is unchanged
 *  - the second command may succeed, even though there isn't enough resources
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	u32 added_eps;

	added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
		xhci_dbg(xhci, "Not enough ep ctxs: "
				"%u active, need to add %u, limit is %u.\n",
				xhci->num_active_eps, added_eps,
				xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += added_eps;
	xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
			xhci->num_active_eps);
	return 0;
}

/*
 * The configure endpoint was failed by the xHC for some other reason, so we
 * need to revert the resources that failed configuration would have used.
 *
 * Must be called with xhci->lock held.
 */
static void xhci_free_host_resources(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	u32 num_failed_eps;

	num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
	xhci->num_active_eps -= num_failed_eps;
	xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
			num_failed_eps,
			xhci->num_active_eps);
}

/*
 * Now that the command has completed, clean up the active endpoint count by
 * subtracting out the endpoints that were dropped (but not changed).
 *
 * Must be called with xhci->lock held.
 */
static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	u32 num_dropped_eps;

	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
				num_dropped_eps,
				xhci->num_active_eps);
}
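
/* Bandwidth is tracked in "blocks"; the block size depends on device speed. */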
unsigned int xhci_get_block_size(struct usb_device *udev)
{
	switch (udev->speed) {
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
		return FS_BLOCK;
	case USB_SPEED_HIGH:
		return HS_BLOCK;
	case USB_SPEED_SUPER:
		return SS_BLOCK;
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
	default:
		/* Should never happen */
		return 1;
	}
}
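
/* Return the largest per-packet overhead of any endpoint scheduled in this
 * interval: low-speed overhead if any LS endpoint is present, then full-speed,
 * then high-speed.
 */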
unsigned int xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
{
	if (interval_bw->overhead[LS_OVERHEAD_TYPE])
		return LS_OVERHEAD;
	if (interval_bw->overhead[FS_OVERHEAD_TYPE])
		return FS_OVERHEAD;
	return HS_OVERHEAD;
}

/* If we are changing a LS/FS device under a HS hub,
 * make sure (if we are activating a new TT) that the HS bus has enough
 * bandwidth for this new TT.
 */
static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	struct xhci_interval_bw_table *bw_table;
	struct xhci_tt_bw_info *tt_info;

	/* Find the bandwidth table for the root port this TT is attached to. */
	bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
	tt_info = virt_dev->tt_info;
	/* If this TT already had active endpoints, the bandwidth for this TT
	 * has already been added.  Removing all periodic endpoints (and thus
	 * making the TT inactive) will only decrease the bandwidth used.
	 */
	if (old_active_eps)
		return 0;
	if (old_active_eps == 0 && tt_info->active_eps != 0) {
		if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
			return -ENOMEM;
		return 0;
	}
	/* Not sure why we would have no new active endpoints...
	 *
	 * Maybe because of an Evaluate Context change for a hub update or a
	 * control endpoint 0 max packet size change?
	 * FIXME: skip the bandwidth calculation in that case.
	 */
	return 0;
}

static int xhci_check_ss_bw(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev)
{
	unsigned int bw_reserved;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
	if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
		return -ENOMEM;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
	if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
		return -ENOMEM;

	return 0;
}

/*
 * This algorithm is a very conservative estimate of the worst-case scheduling
 * scenario for any one interval.  The hardware dynamically schedules the
 * packets, so we can't tell which microframe could be the limiting factor in
 * the schedule.
 *
 * The bandwidth tables sort endpoints by interval.  Starting with interval 0
 * (every microframe for HS, every frame for FS/LS), we walk each interval and
 * assume the worst case: any packets that could not be evenly scheduled in a
 * smaller interval must be transmitted twice as many times in this one.  For
 * each interval we charge the largest max packet size and the largest
 * per-packet overhead seen so far, and fail if the running total ever exceeds
 * the bus limit.  Any packets still left over after the largest interval are
 * charged once more, and a percentage of the bus bandwidth (HS_BW_RESERVED or
 * FS_BW_RESERVED) is reserved before the final limit check.
 */
static int xhci_check_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	unsigned int bw_reserved;
	unsigned int max_bandwidth;
	unsigned int bw_used;
	unsigned int block_size;
	struct xhci_interval_bw_table *bw_table;
	unsigned int packet_size = 0;
	unsigned int overhead = 0;
	unsigned int packets_transmitted = 0;
	unsigned int packets_remaining = 0;
	unsigned int i;

	if (virt_dev->udev->speed == USB_SPEED_SUPER)
		return xhci_check_ss_bw(xhci, virt_dev);

	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
		max_bandwidth = HS_BW_LIMIT;
		/* Convert percent of bus BW reserved to blocks reserved */
		bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
	} else {
		max_bandwidth = FS_BW_LIMIT;
		bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
	}

	bw_table = virt_dev->bw_table;
	/* We need to translate the max packet size and max ESIT payloads into
	 * the units the hardware uses.
	 */
	block_size = xhci_get_block_size(virt_dev->udev);

	/* If we are manipulating a LS/FS device under a HS hub, double check
	 * that the HS bus has enough bandwidth if we are activating a new TT.
	 */
	if (virt_dev->tt_info) {
		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
				virt_dev->real_port);
		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
			xhci_warn(xhci, "Not enough bandwidth on HS bus for "
					"newly activated TT.\n");
			return -ENOMEM;
		}
		xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n",
				virt_dev->tt_info->slot_id,
				virt_dev->tt_info->ttport);
	} else {
		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
				virt_dev->real_port);
	}

	/* Add in how much bandwidth will be used for interval zero, or the
	 * rounded max ESIT payload + number of packets * largest overhead.
	 */
	bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
		bw_table->interval_bw[0].num_packets *
		xhci_get_largest_overhead(&bw_table->interval_bw[0]);

	for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
		unsigned int bw_added;
		unsigned int largest_mps;
		unsigned int interval_overhead;

		/*
		 * How many packets could we transmit in this interval?
		 * If packets didn't fit in the previous interval, we will need
		 * to transmit that many packets twice within this interval.
		 */
		packets_remaining = 2 * packets_remaining +
			bw_table->interval_bw[i].num_packets;

		/* Find the largest max packet size of this or the previous
		 * interval.
		 */
		if (list_empty(&bw_table->interval_bw[i].endpoints))
			largest_mps = 0;
		else {
			struct xhci_virt_ep *virt_ep;
			struct list_head *ep_entry;

			ep_entry = bw_table->interval_bw[i].endpoints.next;
			virt_ep = list_entry(ep_entry,
					struct xhci_virt_ep, bw_endpoint_list);
			/* Convert to blocks, rounding up */
			largest_mps = DIV_ROUND_UP(
					virt_ep->bw_info.max_packet_size,
					block_size);
		}
		if (largest_mps > packet_size)
			packet_size = largest_mps;

		/* Use the larger overhead of this or the previous interval. */
		interval_overhead = xhci_get_largest_overhead(
				&bw_table->interval_bw[i]);
		if (interval_overhead > overhead)
			overhead = interval_overhead;

		/* How many packets can we evenly distribute across
		 * (1 << (i + 1)) possible scheduling opportunities?
		 */
		packets_transmitted = packets_remaining >> (i + 1);

		/* Add in the bandwidth used for those scheduled packets */
		bw_added = packets_transmitted * (overhead + packet_size);

		/* How many packets do we have remaining to transmit? */
		packets_remaining = packets_remaining % (1 << (i + 1));

		/* If nothing is left over, the next interval starts fresh;
		 * otherwise the leftover packets come from this interval's
		 * schedule, so carry this interval's worst-case packet size
		 * and overhead forward.
		 */
		if (packets_remaining == 0) {
			packet_size = 0;
			overhead = 0;
		} else if (packets_transmitted > 0) {
			packet_size = largest_mps;
			overhead = interval_overhead;
		}

		/* Add this interval's worst-case bandwidth to the running
		 * total, and bail out if we are already over the limit.
		 */
		bw_used += bw_added;
		if (bw_used > max_bandwidth) {
			xhci_warn(xhci, "Not enough bandwidth. "
					"Proposed: %u, Max: %u\n",
					bw_used, max_bandwidth);
			return -ENOMEM;
		}
	}

	/* Any packets left over after the largest interval are transmitted
	 * once more, at the worst-case packet size and overhead.
	 */
	if (packets_remaining > 0)
		bw_used += overhead + packet_size;

	if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
		unsigned int port_index = virt_dev->real_port - 1;

		/* This is a HS device directly attached to the root port;
		 * each active TT below this root port also eats into the HS
		 * bus bandwidth.
		 */
		bw_used += TT_HS_OVERHEAD *
			xhci->rh_bw[port_index].num_active_tts;
	}

	xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, "
		"Available: %u " "percent\n",
		bw_used, max_bandwidth, bw_reserved,
		(max_bandwidth - bw_used - bw_reserved) * 100 /
		max_bandwidth);

	bw_used += bw_reserved;
	if (bw_used > max_bandwidth) {
		xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
				bw_used, max_bandwidth);
		return -ENOMEM;
	}

	bw_table->bw_used = bw_used;
	return 0;
}
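
/* Async endpoints are bulk or control; they never appear in the periodic
 * interval tables.
 */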
static bool xhci_is_async_ep(unsigned int ep_type)
{
	return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
					ep_type != ISOC_IN_EP &&
					ep_type != INT_IN_EP);
}

static bool xhci_is_sync_in_ep(unsigned int ep_type)
{
	return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
}
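
/* Estimate the SuperSpeed bus bandwidth (in blocks) consumed by this
 * endpoint per service interval.
 */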
static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
{
	unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);

	if (ep_bw->ep_interval == 0)
		return SS_OVERHEAD_BURST +
			(ep_bw->mult * ep_bw->num_packets *
					(SS_OVERHEAD + mps));
	return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
				(SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
			1 << ep_bw->ep_interval);
}
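
/* Remove this endpoint's bandwidth from the interval table bookkeeping. */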
void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
		struct xhci_bw_info *ep_bw,
		struct xhci_interval_bw_table *bw_table,
		struct usb_device *udev,
		struct xhci_virt_ep *virt_ep,
		struct xhci_tt_bw_info *tt_info)
{
	struct xhci_interval_bw *interval_bw;
	int normalized_interval;

	if (xhci_is_async_ep(ep_bw->type))
		return;

	if (udev->speed == USB_SPEED_SUPER) {
		if (xhci_is_sync_in_ep(ep_bw->type))
			xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
				xhci_get_ss_bw_consumed(ep_bw);
		else
			xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
				xhci_get_ss_bw_consumed(ep_bw);
		return;
	}

	/* SuperSpeed endpoints never get added to intervals in the table, so
	 * this check is only valid for HS/FS/LS devices.
	 */
	if (list_empty(&virt_ep->bw_endpoint_list))
		return;
	/* For LS/FS devices, we need to translate the interval expressed in
	 * microframes to frames.
	 */
	if (udev->speed == USB_SPEED_HIGH)
		normalized_interval = ep_bw->ep_interval;
	else
		normalized_interval = ep_bw->ep_interval - 3;

	if (normalized_interval == 0)
		bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
	interval_bw = &bw_table->interval_bw[normalized_interval];
	interval_bw->num_packets -= ep_bw->num_packets;
	switch (udev->speed) {
	case USB_SPEED_LOW:
		interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_FULL:
		interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_HIGH:
		interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_SUPER:
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the endpoint list.
		 */
		return;
	}
	if (tt_info)
		tt_info->active_eps -= 1;
	list_del_init(&virt_ep->bw_endpoint_list);
}
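
/* Account for this endpoint's bandwidth in the interval table, keeping each
 * interval's endpoint list sorted by descending max packet size.
 */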
static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
		struct xhci_bw_info *ep_bw,
		struct xhci_interval_bw_table *bw_table,
		struct usb_device *udev,
		struct xhci_virt_ep *virt_ep,
		struct xhci_tt_bw_info *tt_info)
{
	struct xhci_interval_bw *interval_bw;
	struct xhci_virt_ep *smaller_ep;
	int normalized_interval;

	if (xhci_is_async_ep(ep_bw->type))
		return;

	if (udev->speed == USB_SPEED_SUPER) {
		if (xhci_is_sync_in_ep(ep_bw->type))
			xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
				xhci_get_ss_bw_consumed(ep_bw);
		else
			xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
				xhci_get_ss_bw_consumed(ep_bw);
		return;
	}

	/* For LS/FS devices, we need to translate the interval expressed in
	 * microframes to frames.
	 */
	if (udev->speed == USB_SPEED_HIGH)
		normalized_interval = ep_bw->ep_interval;
	else
		normalized_interval = ep_bw->ep_interval - 3;

	if (normalized_interval == 0)
		bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
	interval_bw = &bw_table->interval_bw[normalized_interval];
	interval_bw->num_packets += ep_bw->num_packets;
	switch (udev->speed) {
	case USB_SPEED_LOW:
		interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_FULL:
		interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_HIGH:
		interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_SUPER:
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the endpoint list.
		 */
		return;
	}

	if (tt_info)
		tt_info->active_eps += 1;
	/* Insert the endpoint into the list, largest max packet size first. */
	list_for_each_entry(smaller_ep, &interval_bw->endpoints,
			bw_endpoint_list) {
		if (ep_bw->max_packet_size >=
				smaller_ep->bw_info.max_packet_size) {
			/* Add the new ep before the smaller endpoint */
			list_add_tail(&virt_ep->bw_endpoint_list,
					&smaller_ep->bw_endpoint_list);
			return;
		}
	}
	/* Add the new endpoint at the end of the list. */
	list_add_tail(&virt_ep->bw_endpoint_list,
			&interval_bw->endpoints);
}
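
/* A TT gaining its first active endpoint charges TT_HS_OVERHEAD to its root
 * port's bandwidth table; a TT losing its last active endpoint gives it back.
 */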
void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	struct xhci_root_port_bw_info *rh_bw_info;
	if (!virt_dev->tt_info)
		return;

	rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
	if (old_active_eps == 0 &&
			virt_dev->tt_info->active_eps != 0) {
		rh_bw_info->num_active_tts += 1;
		rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
	} else if (old_active_eps != 0 &&
			virt_dev->tt_info->active_eps == 0) {
		rh_bw_info->num_active_tts -= 1;
		rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
	}
}
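
/* Tentatively apply the input context's add/drop flags to the interval
 * tables, run the bandwidth check, and roll every change back if the new
 * schedule doesn't fit.
 */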
static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_container_ctx *in_ctx)
{
	struct xhci_bw_info ep_bw_info[31];
	int i;
	struct xhci_input_control_ctx *ctrl_ctx;
	int old_active_eps = 0;

	if (virt_dev->tt_info)
		old_active_eps = virt_dev->tt_info->active_eps;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);

	for (i = 0; i < 31; i++) {
		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
			continue;

		/* Make a copy of the BW info in case we need to revert this */
		memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
				sizeof(ep_bw_info[i]));
		/* Drop the endpoint from the interval table if the endpoint is
		 * being dropped or changed.
		 */
		if (EP_IS_DROPPED(ctrl_ctx, i))
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}
	/* Overwrite the information stored in the endpoints' bw_info */
	xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
	for (i = 0; i < 31; i++) {
		/* Add any changed or added endpoints to the interval table */
		if (EP_IS_ADDED(ctrl_ctx, i))
			xhci_add_ep_to_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}

	if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
		/* The new configuration fits, so commit the TT active
		 * endpoint bookkeeping and succeed.
		 */
		xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
		return 0;
	}

	/* We don't have enough bandwidth for this, revert the stored info. */
	for (i = 0; i < 31; i++) {
		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
			continue;

		/* Drop the new copies of any added or changed endpoints from
		 * the interval table.
		 */
		if (EP_IS_ADDED(ctrl_ctx, i)) {
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
		}
		/* Revert the endpoint back to its old information */
		memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
				sizeof(ep_bw_info[i]));
		/* Add any changed or dropped endpoints back into the table */
		if (EP_IS_DROPPED(ctrl_ctx, i))
			xhci_add_ep_to_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}
	return -ENOMEM;
}
2382
2383
2384
2385
2386
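/* Issue a configure endpoint command or evaluate context command
 * and wait for it to finish.
 */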
2387static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2388 struct usb_device *udev,
2389 struct xhci_command *command,
2390 bool ctx_change, bool must_succeed)
2391{
2392 int ret;
2393 int timeleft;
2394 unsigned long flags;
2395 struct xhci_container_ctx *in_ctx;
2396 struct completion *cmd_completion;
2397 u32 *cmd_status;
2398 struct xhci_virt_device *virt_dev;
2399
2400 spin_lock_irqsave(&xhci->lock, flags);
2401 virt_dev = xhci->devs[udev->slot_id];
2402
2403 if (command)
2404 in_ctx = command->in_ctx;
2405 else
2406 in_ctx = virt_dev->in_ctx;
2407
2408 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2409 xhci_reserve_host_resources(xhci, in_ctx)) {
2410 spin_unlock_irqrestore(&xhci->lock, flags);
2411 xhci_warn(xhci, "Not enough host resources, "
2412 "active endpoint contexts = %u\n",
2413 xhci->num_active_eps);
2414 return -ENOMEM;
2415 }
2416 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2417 xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) {
2418 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2419 xhci_free_host_resources(xhci, in_ctx);
2420 spin_unlock_irqrestore(&xhci->lock, flags);
2421 xhci_warn(xhci, "Not enough bandwidth\n");
2422 return -ENOMEM;
2423 }
2424
2425 if (command) {
2426 cmd_completion = command->completion;
2427 cmd_status = &command->status;
2428 command->command_trb = xhci->cmd_ring->enqueue;
2429
2430
2431
2432
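		/* Enqueue pointer can be left pointing to the link TRB,
		 * we must handle that.
		 */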
2433 if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
2434 command->command_trb =
2435 xhci->cmd_ring->enq_seg->next->trbs;
2436
2437 list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
2438 } else {
2439 cmd_completion = &virt_dev->cmd_completion;
2440 cmd_status = &virt_dev->cmd_status;
2441 }
2442 init_completion(cmd_completion);
2443
2444 if (!ctx_change)
2445 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
2446 udev->slot_id, must_succeed);
2447 else
2448 ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
2449 udev->slot_id, must_succeed);
2450 if (ret < 0) {
2451 if (command)
2452 list_del(&command->cmd_list);
2453 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2454 xhci_free_host_resources(xhci, in_ctx);
2455 spin_unlock_irqrestore(&xhci->lock, flags);
2456 xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
2457 return -ENOMEM;
2458 }
2459 xhci_ring_cmd_db(xhci);
2460 spin_unlock_irqrestore(&xhci->lock, flags);
2461
2462
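	/* Wait for the configure endpoint command to complete */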
2463 timeleft = wait_for_completion_interruptible_timeout(
2464 cmd_completion,
2465 USB_CTRL_SET_TIMEOUT);
2466 if (timeleft <= 0) {
2467 xhci_warn(xhci, "%s while waiting for %s command\n",
2468 timeleft == 0 ? "Timeout" : "Signal",
2469 ctx_change == 0 ?
2470 "configure endpoint" :
2471 "evaluate context");
2472
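		/* FIXME: cancel the configure endpoint command */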
2473 return -ETIME;
2474 }
2475
2476 if (!ctx_change)
2477 ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
2478 else
2479 ret = xhci_evaluate_context_result(xhci, udev, cmd_status);
2480
2481 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2482 spin_lock_irqsave(&xhci->lock, flags);
2483
2484
2485
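		/* If the command failed, remove the reserved resources.
		 * Otherwise, clean up the estimate to include dropped eps.
		 */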
2486 if (ret)
2487 xhci_free_host_resources(xhci, in_ctx);
2488 else
2489 xhci_finish_resource_reservation(xhci, in_ctx);
2490 spin_unlock_irqrestore(&xhci->lock, flags);
2491 }
2492 return ret;
2493}
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
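/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint().  If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 */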
2505int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2506{
2507 int i;
2508 int ret = 0;
2509 struct xhci_hcd *xhci;
2510 struct xhci_virt_device *virt_dev;
2511 struct xhci_input_control_ctx *ctrl_ctx;
2512 struct xhci_slot_ctx *slot_ctx;
2513
2514 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2515 if (ret <= 0)
2516 return ret;
2517 xhci = hcd_to_xhci(hcd);
2518 if (xhci->xhc_state & XHCI_STATE_DYING)
2519 return -ENODEV;
2520
2521 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2522 virt_dev = xhci->devs[udev->slot_id];
2523
2524
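	/* See xHCI section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */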
2525 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
2526 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2527 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2528 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
2529
2530
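	/* Don't issue the command if there's no endpoints to update. */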
2531 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2532 ctrl_ctx->drop_flags == 0)
2533 return 0;
2534
2535 xhci_dbg(xhci, "New Input Control Context:\n");
2536 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2537 xhci_dbg_ctx(xhci, virt_dev->in_ctx,
2538 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2539
2540 ret = xhci_configure_endpoint(xhci, udev, NULL,
2541 false, false);
2542 if (ret) {
2543
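		/* Callee should call reset_bandwidth() */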
2544 return ret;
2545 }
2546
2547 xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
2548 xhci_dbg_ctx(xhci, virt_dev->out_ctx,
2549 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2550
2551
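	/* Free any rings that were dropped, but not changed. */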
2552 for (i = 1; i < 31; ++i) {
2553 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2554 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
2555 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2556 }
2557 xhci_zero_in_ctx(xhci, virt_dev);
2558
2559
2560
2561
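	/* Install new rings and free or cache any old rings */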
2562 for (i = 1; i < 31; ++i) {
2563 if (!virt_dev->eps[i].new_ring)
2564 continue;
2565
2566
2567
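		/* Only cache or free the old ring if it exists.
		 * It may not if this is the first add of an endpoint.
		 */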
2568 if (virt_dev->eps[i].ring) {
2569 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2570 }
2571 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2572 virt_dev->eps[i].new_ring = NULL;
2573 }
2574
2575 return ret;
2576}
2577
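/* Undo the endpoint additions staged by xhci_check_bandwidth(): free any
 * newly allocated rings and clear the input context.
 */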
2578void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2579{
2580 struct xhci_hcd *xhci;
2581 struct xhci_virt_device *virt_dev;
2582 int i, ret;
2583
2584 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2585 if (ret <= 0)
2586 return;
2587 xhci = hcd_to_xhci(hcd);
2588
2589 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2590 virt_dev = xhci->devs[udev->slot_id];
2591
2592 for (i = 0; i < 31; ++i) {
2593 if (virt_dev->eps[i].new_ring) {
2594 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
2595 virt_dev->eps[i].new_ring = NULL;
2596 }
2597 }
2598 xhci_zero_in_ctx(xhci, virt_dev);
2599}
2600
2601static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
2602 struct xhci_container_ctx *in_ctx,
2603 struct xhci_container_ctx *out_ctx,
2604 u32 add_flags, u32 drop_flags)
2605{
2606 struct xhci_input_control_ctx *ctrl_ctx;
2607 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
2608 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
2609 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
2610 xhci_slot_copy(xhci, in_ctx, out_ctx);
2611 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2612
2613 xhci_dbg(xhci, "Input Context:\n");
2614 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
2615}
2616
2617static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
2618 unsigned int slot_id, unsigned int ep_index,
2619 struct xhci_dequeue_state *deq_state)
2620{
2621 struct xhci_container_ctx *in_ctx;
2622 struct xhci_ep_ctx *ep_ctx;
2623 u32 added_ctxs;
2624 dma_addr_t addr;
2625
2626 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
2627 xhci->devs[slot_id]->out_ctx, ep_index);
2628 in_ctx = xhci->devs[slot_id]->in_ctx;
2629 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
2630 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
2631 deq_state->new_deq_ptr);
2632 if (addr == 0) {
2633 xhci_warn(xhci, "WARN Cannot submit config ep after "
2634 "reset ep command\n");
2635 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
2636 deq_state->new_deq_seg,
2637 deq_state->new_deq_ptr);
2638 return;
2639 }
2640 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
2641
2642 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
2643 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
2644 xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
2645}
2646
2647void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2648 struct usb_device *udev, unsigned int ep_index)
2649{
2650 struct xhci_dequeue_state deq_state;
2651 struct xhci_virt_ep *ep;
2652
2653 xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
2654 ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2655
2656
2657
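	/* We need to move the HW's dequeue pointer past this TD,
	 * or it will attempt to resend it on the next doorbell ring.
	 */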
2658 xhci_find_new_dequeue_state(xhci, udev->slot_id,
2659 ep_index, ep->stopped_stream, ep->stopped_td,
2660 &deq_state);
2661
2662
2663
2664
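	/* HW with the reset endpoint quirk will use the saved dequeue state to
	 * issue a configure endpoint command later.
	 */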
2665 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
2666 xhci_dbg(xhci, "Queueing new dequeue state\n");
2667 xhci_queue_new_dequeue_state(xhci, udev->slot_id,
2668 ep_index, ep->stopped_stream, &deq_state);
2669 } else {
2670
2671
2672
2673
2674
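		/* Better hope no one uses the input context between now and
		 * the reset endpoint completion!
		 */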
2675 xhci_dbg(xhci, "Setting up input context for "
2676 "configure endpoint command\n");
2677 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
2678 ep_index, &deq_state);
2679 }
2680}
2681
2682
2683
2684
2685
2686
2687
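/* Deal with stalled endpoints.  The core should have sent the control message
 * to clear the halt condition.  However, we need to make the xHCI hardware
 * reset its internal endpoint state machine and move its dequeue pointer past
 * the stalled transfer.
 */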
2688void xhci_endpoint_reset(struct usb_hcd *hcd,
2689 struct usb_host_endpoint *ep)
2690{
2691 struct xhci_hcd *xhci;
2692 struct usb_device *udev;
2693 unsigned int ep_index;
2694 unsigned long flags;
2695 int ret;
2696 struct xhci_virt_ep *virt_ep;
2697
2698 xhci = hcd_to_xhci(hcd);
2699 udev = (struct usb_device *) ep->hcpriv;
2700
2701
2702
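	/* Bail if this is a root hub endpoint or one that was never
	 * configured by this driver.
	 */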
2703 if (!ep->hcpriv)
2704 return;
2705 ep_index = xhci_get_endpoint_index(&ep->desc);
2706 virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2707 if (!virt_ep->stopped_td) {
2708 xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
2709 ep->desc.bEndpointAddress);
2710 return;
2711 }
2712 if (usb_endpoint_xfer_control(&ep->desc)) {
2713 xhci_dbg(xhci, "Control endpoint stall already handled.\n");
2714 return;
2715 }
2716
2717 xhci_dbg(xhci, "Queueing reset endpoint command\n");
2718 spin_lock_irqsave(&xhci->lock, flags);
2719 ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
2720
2721
2722
2723
2724
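	/* Can't change the ring dequeue pointer until it's transitioned to the
	 * stopped state, which is only upon a successful reset endpoint
	 * command.  Better hope that last TRB works!
	 */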
2725 if (!ret) {
2726 xhci_cleanup_stalled_ring(xhci, udev, ep_index);
2727 kfree(virt_ep->stopped_td);
2728 xhci_ring_cmd_db(xhci);
2729 }
2730 virt_ep->stopped_td = NULL;
2731 virt_ep->stopped_trb = NULL;
2732 virt_ep->stopped_stream = 0;
2733 spin_unlock_irqrestore(&xhci->lock, flags);
2734
2735 if (ret)
2736 xhci_warn(xhci, "FIXME allocate a new ring segment\n");
2737}
2738
2739static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
2740 struct usb_device *udev, struct usb_host_endpoint *ep,
2741 unsigned int slot_id)
2742{
2743 int ret;
2744 unsigned int ep_index;
2745 unsigned int ep_state;
2746
2747 if (!ep)
2748 return -EINVAL;
2749 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
2750 if (ret <= 0)
2751 return -EINVAL;
2752 if (ep->ss_ep_comp.bmAttributes == 0) {
2753 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
2754 " descriptor for ep 0x%x does not support streams\n",
2755 ep->desc.bEndpointAddress);
2756 return -EINVAL;
2757 }
2758
2759 ep_index = xhci_get_endpoint_index(&ep->desc);
2760 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
2761 if (ep_state & EP_HAS_STREAMS ||
2762 ep_state & EP_GETTING_STREAMS) {
2763 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
2764 "already has streams set up.\n",
2765 ep->desc.bEndpointAddress);
2766 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
2767 "dynamic stream context array reallocation.\n");
2768 return -EINVAL;
2769 }
2770 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
2771 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
2772 "endpoint 0x%x; URBs are pending.\n",
2773 ep->desc.bEndpointAddress);
2774 return -EINVAL;
2775 }
2776 return 0;
2777}
2778
2779static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
2780 unsigned int *num_streams, unsigned int *num_stream_ctxs)
2781{
2782 unsigned int max_streams;
2783
2784
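	/* The stream context array size must be a power of two */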
2785 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
2786
2787
2788
2789
2790
2791
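	/* The xHCI hardware caps the size of the stream context array it
	 * supports; clamp the request to MaxPSASize if necessary.
	 */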
2792 max_streams = HCC_MAX_PSA(xhci->hcc_params);
2793 if (*num_stream_ctxs > max_streams) {
2794 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
2795 max_streams);
2796 *num_stream_ctxs = max_streams;
2797 *num_streams = max_streams;
2798 }
2799}
2800
2801
2802
2803
2804
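/* Returns an error code if one of the endpoints already has streams.
 * This does not change any data structures, it just checks and gathers
 * information.
 */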
2805static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
2806 struct usb_device *udev,
2807 struct usb_host_endpoint **eps, unsigned int num_eps,
2808 unsigned int *num_streams, u32 *changed_ep_bitmask)
2809{
2810 unsigned int max_streams;
2811 unsigned int endpoint_flag;
2812 int i;
2813 int ret;
2814
2815 for (i = 0; i < num_eps; i++) {
2816 ret = xhci_check_streams_endpoint(xhci, udev,
2817 eps[i], udev->slot_id);
2818 if (ret < 0)
2819 return ret;
2820
2821 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
2822 if (max_streams < (*num_streams - 1)) {
2823 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
2824 eps[i]->desc.bEndpointAddress,
2825 max_streams);
2826 *num_streams = max_streams+1;
2827 }
2828
2829 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
2830 if (*changed_ep_bitmask & endpoint_flag)
2831 return -EINVAL;
2832 *changed_ep_bitmask |= endpoint_flag;
2833 }
2834 return 0;
2835}
2836
2837static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
2838 struct usb_device *udev,
2839 struct usb_host_endpoint **eps, unsigned int num_eps)
2840{
2841 u32 changed_ep_bitmask = 0;
2842 unsigned int slot_id;
2843 unsigned int ep_index;
2844 unsigned int ep_state;
2845 int i;
2846
2847 slot_id = udev->slot_id;
2848 if (!xhci->devs[slot_id])
2849 return 0;
2850
2851 for (i = 0; i < num_eps; i++) {
2852 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2853 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
2854
2855 if (ep_state & EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are being disabled already\n",
					eps[i]->desc.bEndpointAddress);
2860 return 0;
2861 }
2862
2863 if (!(ep_state & EP_HAS_STREAMS) &&
2864 !(ep_state & EP_GETTING_STREAMS)) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are already disabled!\n",
					eps[i]->desc.bEndpointAddress);
2869 xhci_warn(xhci, "WARN xhci_free_streams() called "
2870 "with non-streams endpoint\n");
2871 return 0;
2872 }
2873 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
2874 }
2875 return changed_ep_bitmask;
2876}
2877
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891
2892
2893
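/* The USB device drivers use this function (through the HCD interface in USB
 * core) to prepare a set of bulk endpoints to use streams.  Streams are used
 * to coordinate mass storage command queueing across multiple endpoints
 * (basically a stream ID == a task ID).
 *
 * Setting up streams involves allocating the same size stream context array
 * for each endpoint and issuing a configure endpoint command for all
 * endpoints.
 *
 * Drivers may get less stream IDs than they asked for, if the host controller
 * hardware or endpoints claim they can't support the number of requested
 * stream IDs.
 */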
2894int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
2895 struct usb_host_endpoint **eps, unsigned int num_eps,
2896 unsigned int num_streams, gfp_t mem_flags)
2897{
2898 int i, ret;
2899 struct xhci_hcd *xhci;
2900 struct xhci_virt_device *vdev;
2901 struct xhci_command *config_cmd;
2902 unsigned int ep_index;
2903 unsigned int num_stream_ctxs;
2904 unsigned long flags;
2905 u32 changed_ep_bitmask = 0;
2906
2907 if (!eps)
2908 return -EINVAL;
2909
2910
2911
2912
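	/* Add one to the number of streams requested to account for
	 * stream 0 that is reserved for xHCI usage.
	 */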
2913 num_streams += 1;
2914 xhci = hcd_to_xhci(hcd);
2915 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
2916 num_streams);
2917
2918 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
2919 if (!config_cmd) {
2920 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
2921 return -ENOMEM;
2922 }
2923
2924
2925
2926
2927
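	/* Check to make sure all endpoints are not already configured for
	 * streams.  While we're at it, find the maximum number of streams that
	 * all the endpoints will support and check for duplicate endpoints.
	 */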
2928 spin_lock_irqsave(&xhci->lock, flags);
2929 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
2930 num_eps, &num_streams, &changed_ep_bitmask);
2931 if (ret < 0) {
2932 xhci_free_command(xhci, config_cmd);
2933 spin_unlock_irqrestore(&xhci->lock, flags);
2934 return ret;
2935 }
2936 if (num_streams <= 1) {
2937 xhci_warn(xhci, "WARN: endpoints can't handle "
2938 "more than one stream.\n");
2939 xhci_free_command(xhci, config_cmd);
2940 spin_unlock_irqrestore(&xhci->lock, flags);
2941 return -EINVAL;
2942 }
2943 vdev = xhci->devs[udev->slot_id];
2944
2945
2946
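	/* We are now at a point where we can definitely get streams.
	 * Mark each endpoint as being in transition, so URB submissions are
	 * rejected until the setup completes.
	 */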
2947 for (i = 0; i < num_eps; i++) {
2948 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2949 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
2950 }
2951 spin_unlock_irqrestore(&xhci->lock, flags);
2952
2953
2954
2955
2956
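	/* Set up the internal data structures and allocate HW data structures
	 * for streams (but don't install the HW structures in the input
	 * context until we're sure everything can be allocated).
	 */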
2957 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
2958 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
2959 num_stream_ctxs, num_streams);
2960
2961 for (i = 0; i < num_eps; i++) {
2962 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2963 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
2964 num_stream_ctxs,
2965 num_streams, mem_flags);
2966 if (!vdev->eps[ep_index].stream_info)
2967 goto cleanup;
2968
2969
2970
2971 }
2972
2973
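	/* Set up the input context for a configure endpoint command. */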
2974 for (i = 0; i < num_eps; i++) {
2975 struct xhci_ep_ctx *ep_ctx;
2976
2977 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2978 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
2979
2980 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
2981 vdev->out_ctx, ep_index);
2982 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
2983 vdev->eps[ep_index].stream_info);
2984 }
2985
2986
2987
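	/* Tell the HW to drop its old copy of the endpoint context info
	 * and replace it with the updated copy.
	 */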
2988 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
2989 vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
2990
2991
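	/* Issue and wait for the configure endpoint command */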
2992 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
2993 false, false);
2994
2995
2996
2997
2998
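	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the old ring intact and free our internal streams data
	 * structure.
	 */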
2999 if (ret < 0)
3000 goto cleanup;
3001
3002 spin_lock_irqsave(&xhci->lock, flags);
3003 for (i = 0; i < num_eps; i++) {
3004 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3005 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3006 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3007 udev->slot_id, ep_index);
3008 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3009 }
3010 xhci_free_command(xhci, config_cmd);
3011 spin_unlock_irqrestore(&xhci->lock, flags);
3012
3013
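	/* Subtract 1 for stream 0, which drivers can't use */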
3014 return num_streams - 1;
3015
3016cleanup:
3017
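	/* If it didn't work, free the streams! */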
3018 for (i = 0; i < num_eps; i++) {
3019 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3020 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3021 vdev->eps[ep_index].stream_info = NULL;
3022
3023
3024
3025 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3026 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3027 xhci_endpoint_zero(xhci, vdev, eps[i]);
3028 }
3029 xhci_free_command(xhci, config_cmd);
3030 return -ENOMEM;
3031}
3032
3033
3034
3035
3036
3037
3038
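/* Transition the endpoint from using streams to being a "normal" endpoint
 * without streams.
 *
 * Modify the endpoint context state, submit a configure endpoint command,
 * and free all endpoint rings for streams if that completes successfully.
 */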
3039int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3040 struct usb_host_endpoint **eps, unsigned int num_eps,
3041 gfp_t mem_flags)
3042{
3043 int i, ret;
3044 struct xhci_hcd *xhci;
3045 struct xhci_virt_device *vdev;
3046 struct xhci_command *command;
3047 unsigned int ep_index;
3048 unsigned long flags;
3049 u32 changed_ep_bitmask;
3050
3051 xhci = hcd_to_xhci(hcd);
3052 vdev = xhci->devs[udev->slot_id];
3053
3054
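	/* Set up a configure endpoint command to remove the streams rings */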
3055 spin_lock_irqsave(&xhci->lock, flags);
3056 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3057 udev, eps, num_eps);
3058 if (changed_ep_bitmask == 0) {
3059 spin_unlock_irqrestore(&xhci->lock, flags);
3060 return -EINVAL;
3061 }
3062
3063
3064
3065
3066
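	/* Use the xhci_command structure from the first endpoint.  We may have
	 * allocated too many, but the driver may call xhci_free_streams() for
	 * each endpoint it grouped into one call to xhci_alloc_streams().
	 */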
3067 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3068 command = vdev->eps[ep_index].stream_info->free_streams_command;
3069 for (i = 0; i < num_eps; i++) {
3070 struct xhci_ep_ctx *ep_ctx;
3071
3072 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3073 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3074 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3075 EP_GETTING_NO_STREAMS;
3076
3077 xhci_endpoint_copy(xhci, command->in_ctx,
3078 vdev->out_ctx, ep_index);
3079 xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
3080 &vdev->eps[ep_index]);
3081 }
3082 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3083 vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
3084 spin_unlock_irqrestore(&xhci->lock, flags);
3085
3086
3087
3088
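	/* Issue and wait for the configure endpoint command,
	 * which must succeed.
	 */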
3089 ret = xhci_configure_endpoint(xhci, udev, command,
3090 false, true);
3091
3092
3093
3094
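	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the streams rings intact.
	 */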
3095 if (ret < 0)
3096 return ret;
3097
3098 spin_lock_irqsave(&xhci->lock, flags);
3099 for (i = 0; i < num_eps; i++) {
3100 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3101 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3102 vdev->eps[ep_index].stream_info = NULL;
3103
3104
3105
3106 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3107 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3108 }
3109 spin_unlock_irqrestore(&xhci->lock, flags);
3110
3111 return 0;
3112}
3113
3114
3115
3116
3117
3118
3119
3120
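/* Deletes endpoint resources for endpoints that were active before a Reset
 * Device command, or a Disable Slot command.  The Reset Device command leaves
 * the control endpoint intact, whereas the Disable Slot command deletes it.
 *
 * Must be called with xhci->lock held.
 */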
3121void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3122 struct xhci_virt_device *virt_dev, bool drop_control_ep)
3123{
3124 int i;
3125 unsigned int num_dropped_eps = 0;
3126 unsigned int drop_flags = 0;
3127
3128 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3129 if (virt_dev->eps[i].ring) {
3130 drop_flags |= 1 << i;
3131 num_dropped_eps++;
3132 }
3133 }
3134 xhci->num_active_eps -= num_dropped_eps;
3135 if (num_dropped_eps)
3136 xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
3137 "%u now active.\n",
3138 num_dropped_eps, drop_flags,
3139 xhci->num_active_eps);
3140}
3141
3142
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
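/* This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the
 * default control endpoint.  The USB core should come back and call
 * xhci_address_device(), and then re-set up the configuration.
 *
 * Wait for the Reset Device command to finish.  Remove all structures
 * associated with the endpoints that were disabled.
 *
 * If the virt_dev to be reset does not exist or does not match the udev,
 * it means the device is lost, possibly due to the xHC restore error and
 * re-initialization during S3/S4.  In this case, call xhci_alloc_dev() to
 * re-allocate the device.
 */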
3160int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
3161{
3162 int ret, i;
3163 unsigned long flags;
3164 struct xhci_hcd *xhci;
3165 unsigned int slot_id;
3166 struct xhci_virt_device *virt_dev;
3167 struct xhci_command *reset_device_cmd;
3168 int timeleft;
3169 int last_freed_endpoint;
3170 struct xhci_slot_ctx *slot_ctx;
3171 int old_active_eps = 0;
3172
3173 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3174 if (ret <= 0)
3175 return ret;
3176 xhci = hcd_to_xhci(hcd);
3177 slot_id = udev->slot_id;
3178 virt_dev = xhci->devs[slot_id];
3179 if (!virt_dev) {
3180 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3181 "not exist. Re-allocate the device\n", slot_id);
3182 ret = xhci_alloc_dev(hcd, udev);
3183 if (ret == 1)
3184 return 0;
3185 else
3186 return -EINVAL;
3187 }
3188
3189 if (virt_dev->udev != udev) {
3190
3191
3192
3193
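		/* If the virt_dev and the udev do not match, this virt_dev may
		 * belong to another udev.  Re-allocate the device.
		 */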
3194 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3195 "not match the udev. Re-allocate the device\n",
3196 slot_id);
3197 ret = xhci_alloc_dev(hcd, udev);
3198 if (ret == 1)
3199 return 0;
3200 else
3201 return -EINVAL;
3202 }
3203
3204
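	/* If the device is not set up, there is nothing to reset */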
3205 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3206 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3207 SLOT_STATE_DISABLED)
3208 return 0;
3209
3210 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3211
3212
3213
3214
3215
3216
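	/* Allocate the command structure that holds the struct completion.
	 * Assume we're in process context, since the normal device reset
	 * process has to wait for the device anyway.  Storage devices are
	 * reset as part of error handling, so use GFP_NOIO instead of
	 * GFP_KERNEL.
	 */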
3217 reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
3218 if (!reset_device_cmd) {
3219 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3220 return -ENOMEM;
3221 }
3222
3223
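	/* Attempt to submit the Reset Device command to the command ring */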
3224 spin_lock_irqsave(&xhci->lock, flags);
3225 reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
3226
3227
3228
3229
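	/* Enqueue pointer can be left pointing to the link TRB,
	 * we must handle that.
	 */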
3230 if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
3231 reset_device_cmd->command_trb =
3232 xhci->cmd_ring->enq_seg->next->trbs;
3233
3234 list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
3235 ret = xhci_queue_reset_device(xhci, slot_id);
3236 if (ret) {
3237 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3238 list_del(&reset_device_cmd->cmd_list);
3239 spin_unlock_irqrestore(&xhci->lock, flags);
3240 goto command_cleanup;
3241 }
3242 xhci_ring_cmd_db(xhci);
3243 spin_unlock_irqrestore(&xhci->lock, flags);
3244
3245
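	/* Wait for the Reset Device command to finish */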
3246 timeleft = wait_for_completion_interruptible_timeout(
3247 reset_device_cmd->completion,
3248 USB_CTRL_SET_TIMEOUT);
3249 if (timeleft <= 0) {
3250 xhci_warn(xhci, "%s while waiting for reset device command\n",
3251 timeleft == 0 ? "Timeout" : "Signal");
3252 spin_lock_irqsave(&xhci->lock, flags);
3253
3254
3255
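		/* The timeout might have raced with the event ring handler, so
		 * only delete from the list if the item isn't poisoned.
		 */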
3256 if (reset_device_cmd->cmd_list.next != LIST_POISON1)
3257 list_del(&reset_device_cmd->cmd_list);
3258 spin_unlock_irqrestore(&xhci->lock, flags);
3259 ret = -ETIME;
3260 goto command_cleanup;
3261 }
3262
3263
3264
3265
3266
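	/* The Reset Device command can't fail, according to the 0.95/0.96
	 * spec, unless we tried to reset a slot ID that wasn't enabled, or the
	 * device wasn't in the addressed or configured state.
	 */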
3267 ret = reset_device_cmd->status;
3268 switch (ret) {
3269 case COMP_EBADSLT:
3270 case COMP_CTX_STATE:
3271 xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
3272 slot_id,
3273 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3274 xhci_info(xhci, "Not freeing device rings.\n");
3275
3276 ret = 0;
3277 goto command_cleanup;
3278 case COMP_SUCCESS:
3279 xhci_dbg(xhci, "Successful reset device command.\n");
3280 break;
3281 default:
3282 if (xhci_is_vendor_info_code(xhci, ret))
3283 break;
3284 xhci_warn(xhci, "Unknown completion code %u for "
3285 "reset device command.\n", ret);
3286 ret = -EINVAL;
3287 goto command_cleanup;
3288 }
3289
3290
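	/* Free up host controller endpoint resources */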
3291 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3292 spin_lock_irqsave(&xhci->lock, flags);
3293
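		/* Don't delete the default control endpoint resources */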
3294 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3295 spin_unlock_irqrestore(&xhci->lock, flags);
3296 }
3297
3298
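	/* Everything but endpoint 0 is disabled, so free or cache the rings */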
3299 last_freed_endpoint = 1;
3300 for (i = 1; i < 31; ++i) {
3301 struct xhci_virt_ep *ep = &virt_dev->eps[i];
3302
3303 if (ep->ep_state & EP_HAS_STREAMS) {
3304 xhci_free_stream_info(xhci, ep->stream_info);
3305 ep->stream_info = NULL;
3306 ep->ep_state &= ~EP_HAS_STREAMS;
3307 }
3308
3309 if (ep->ring) {
3310 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
3311 last_freed_endpoint = i;
3312 }
3313 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3314 xhci_drop_ep_from_interval_table(xhci,
3315 &virt_dev->eps[i].bw_info,
3316 virt_dev->bw_table,
3317 udev,
3318 &virt_dev->eps[i],
3319 virt_dev->tt_info);
3320 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3321 }
3322
3323 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3324
3325 xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
3326 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
3327 ret = 0;
3328
3329command_cleanup:
3330 xhci_free_command(xhci, reset_device_cmd);
3331 return ret;
3332}
3333
3334
3335
3336
3337
3338
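/* At this point, the struct usb_device is about to go away, the device has
 * disconnected, and all traffic has been stopped and the endpoints have been
 * disabled.  Free any HC data structures associated with that device.
 */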
3339void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3340{
3341 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3342 struct xhci_virt_device *virt_dev;
3343 unsigned long flags;
3344 u32 state;
3345 int i, ret;
3346
3347 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3348
3349
3350
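	/* If the host is halted due to driver unload, we still need to free
	 * the device.
	 */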
3351 if (ret <= 0 && ret != -ENODEV)
3352 return;
3353
3354 virt_dev = xhci->devs[udev->slot_id];
3355
3356
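	/* Stop any wayward timer functions (which may grab the lock) */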
3357 for (i = 0; i < 31; ++i) {
3358 virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
3359 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
3360 }
3361
3362 if (udev->usb2_hw_lpm_enabled) {
3363 xhci_set_usb2_hardware_lpm(hcd, udev, 0);
3364 udev->usb2_hw_lpm_enabled = 0;
3365 }
3366
3367 spin_lock_irqsave(&xhci->lock, flags);
3368
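	/* Don't disable the slot if the host controller is dead. */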
3369 state = xhci_readl(xhci, &xhci->op_regs->status);
3370 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3371 (xhci->xhc_state & XHCI_STATE_HALTED)) {
3372 xhci_free_virt_device(xhci, udev->slot_id);
3373 spin_unlock_irqrestore(&xhci->lock, flags);
3374 return;
3375 }
3376
3377 if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
3378 spin_unlock_irqrestore(&xhci->lock, flags);
3379 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3380 return;
3381 }
3382 xhci_ring_cmd_db(xhci);
3383 spin_unlock_irqrestore(&xhci->lock, flags);
3384
3385
3386
3387
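	/*
	 * Event command completion handler will free any data structures
	 * associated with the slot.
	 */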
3388}
3389
3390
3391
3392
3393
3394
3395
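/* Checks if we have enough host controller resources for the default control
 * endpoint.
 *
 * Must be called with xhci->lock held.
 */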
3396static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3397{
3398 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
3399 xhci_dbg(xhci, "Not enough ep ctxs: "
3400 "%u active, need to add 1, limit is %u.\n",
3401 xhci->num_active_eps, xhci->limit_active_eps);
3402 return -ENOMEM;
3403 }
3404 xhci->num_active_eps += 1;
3405 xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
3406 xhci->num_active_eps);
3407 return 0;
3408}
3409
3410
3411
3412
3413
3414
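/* Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed.  Returns 1 on success.
 */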
3415int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3416{
3417 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3418 unsigned long flags;
3419 int timeleft;
3420 int ret;
3421
3422 spin_lock_irqsave(&xhci->lock, flags);
3423 ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
3424 if (ret) {
3425 spin_unlock_irqrestore(&xhci->lock, flags);
3426 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3427 return 0;
3428 }
3429 xhci_ring_cmd_db(xhci);
3430 spin_unlock_irqrestore(&xhci->lock, flags);
3431
3432
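	/* XXX: how much time for xHC slot assignment? */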
3433 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
3434 USB_CTRL_SET_TIMEOUT);
3435 if (timeleft <= 0) {
3436 xhci_warn(xhci, "%s while waiting for a slot\n",
3437 timeleft == 0 ? "Timeout" : "Signal");
3438
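		/* FIXME: cancel the enable slot request */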
3439 return 0;
3440 }
3441
3442 if (!xhci->slot_id) {
3443 xhci_err(xhci, "Error while assigning device slot ID\n");
3444 return 0;
3445 }
3446
3447 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3448 spin_lock_irqsave(&xhci->lock, flags);
3449 ret = xhci_reserve_host_control_ep_resources(xhci);
3450 if (ret) {
3451 spin_unlock_irqrestore(&xhci->lock, flags);
3452 xhci_warn(xhci, "Not enough host resources, "
3453 "active endpoint contexts = %u\n",
3454 xhci->num_active_eps);
3455 goto disable_slot;
3456 }
3457 spin_unlock_irqrestore(&xhci->lock, flags);
3458 }
3459
3460
3461
3462
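	/* Use GFP_NOIO, since this function can be called from
	 * xhci_discover_or_reset_device(), which may be called as part of
	 * mass storage driver error handling.
	 */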
3463 if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
3464 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
3465 goto disable_slot;
3466 }
3467 udev->slot_id = xhci->slot_id;
3468
3469
3470 return 1;
3471
3472disable_slot:
3473
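	/* Disable slot, if we can do it without mem alloc */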
3474 spin_lock_irqsave(&xhci->lock, flags);
3475 if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
3476 xhci_ring_cmd_db(xhci);
3477 spin_unlock_irqrestore(&xhci->lock, flags);
3478 return 0;
3479}
3480
3481
3482
3483
3484
3485
3486
3487
3488
3489
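/* Issue an Address Device command (which will issue a SetAddress request to
 * the device) and wait for it to complete.  On success, the hardware-assigned
 * address (plus one) is cached in the xhci_virt_device structure.
 */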
3490int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3491{
3492 unsigned long flags;
3493 int timeleft;
3494 struct xhci_virt_device *virt_dev;
3495 int ret = 0;
3496 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3497 struct xhci_slot_ctx *slot_ctx;
3498 struct xhci_input_control_ctx *ctrl_ctx;
3499 u64 temp_64;
3500
3501 if (!udev->slot_id) {
3502 xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
3503 return -EINVAL;
3504 }
3505
3506 virt_dev = xhci->devs[udev->slot_id];
3507
3508 if (WARN_ON(!virt_dev)) {
3509
3510
3511
3512
3513
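		/* The virt_dev can disappear under us during plug/unplug
		 * stress; print useful debug info rather than dereference
		 * a NULL pointer.
		 */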
3514 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
3515 udev->slot_id);
3516 return -EINVAL;
3517 }
3518
3519 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
3520
3521
3522
3523
3524
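	/* If this is the first Set Address since device plug-in or
	 * virt_device reallocation after a resume with an xHCI power loss,
	 * then set up the slot context.
	 */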
3525 if (!slot_ctx->dev_info)
3526 xhci_setup_addressable_virt_dev(xhci, udev);
3527
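	/* Otherwise, update the control endpoint ring enqueue pointer. */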
3528 else
3529 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
3530 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
3531 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
3532 ctrl_ctx->drop_flags = 0;
3533
3534 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3535 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3536
3537 spin_lock_irqsave(&xhci->lock, flags);
3538 ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
3539 udev->slot_id);
3540 if (ret) {
3541 spin_unlock_irqrestore(&xhci->lock, flags);
3542 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3543 return ret;
3544 }
3545 xhci_ring_cmd_db(xhci);
3546 spin_unlock_irqrestore(&xhci->lock, flags);
3547
3548
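	/* A control transfer can take up to 5 seconds;
	 * XXX: is more time needed for the xHC?
	 */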
3549 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
3550 USB_CTRL_SET_TIMEOUT);
3551
3552
3553
3554
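	/* FIXME: software is responsible for timing the SetAddress()
	 * "recovery interval" required by USB and aborting the command on a
	 * timeout (see xHCI section 4.3.4).
	 */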
3555 if (timeleft <= 0) {
3556 xhci_warn(xhci, "%s while waiting for address device command\n",
3557 timeleft == 0 ? "Timeout" : "Signal");
3558
3559 return -ETIME;
3560 }
3561
3562 switch (virt_dev->cmd_status) {
3563 case COMP_CTX_STATE:
3564 case COMP_EBADSLT:
3565 xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
3566 udev->slot_id);
3567 ret = -EINVAL;
3568 break;
3569 case COMP_TX_ERR:
3570 dev_warn(&udev->dev, "Device not responding to set address.\n");
3571 ret = -EPROTO;
3572 break;
3573 case COMP_DEV_ERR:
3574 dev_warn(&udev->dev, "ERROR: Incompatible device for address "
3575 "device command.\n");
3576 ret = -ENODEV;
3577 break;
3578 case COMP_SUCCESS:
3579 xhci_dbg(xhci, "Successful Address Device command\n");
3580 break;
3581 default:
3582 xhci_err(xhci, "ERROR: unexpected command completion "
3583 "code 0x%x.\n", virt_dev->cmd_status);
3584 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3585 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
3586 ret = -EINVAL;
3587 break;
3588 }
	if (ret)
		return ret;
3592 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
3593 xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
3594 xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
3595 udev->slot_id,
3596 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
3597 (unsigned long long)
3598 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
3599 xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
3600 (unsigned long long)virt_dev->out_ctx->dma);
3601 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3602 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3603 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3604 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
3605
3606
3607
3608
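	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */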
3609 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3610
3611
3612 virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
3613 + 1;
3614
3615 ctrl_ctx->add_flags = 0;
3616 ctrl_ctx->drop_flags = 0;
3617
3618 xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);
3619
3620 return 0;
3621}
3622
3623#ifdef CONFIG_USB_SUSPEND
3624
3625
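/* BESL to HIRD Encoding array for USB2 LPM */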
3626static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
3627 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
3628
3629
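/* Calculate HIRD/BESL for USB2 PORTPMSC */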
3630static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
3631 struct usb_device *udev)
3632{
3633 int u2del, besl, besl_host;
3634 int besl_device = 0;
3635 u32 field;
3636
3637 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
3638 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
3639
3640 if (field & USB_BESL_SUPPORT) {
3641 for (besl_host = 0; besl_host < 16; besl_host++) {
3642 if (xhci_besl_encoding[besl_host] >= u2del)
3643 break;
3644 }
3645
3646 if (field & USB_BESL_BASELINE_VALID)
3647 besl_device = USB_GET_BESL_BASELINE(field);
3648 else if (field & USB_BESL_DEEP_VALID)
3649 besl_device = USB_GET_BESL_DEEP(field);
3650 } else {
3651 if (u2del <= 50)
3652 besl_host = 0;
3653 else
3654 besl_host = (u2del - 51) / 75 + 1;
3655 }
3656
3657 besl = besl_host + besl_device;
3658 if (besl > 15)
3659 besl = 15;
3660
3661 return besl;
3662}
3663
3664static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
3665 struct usb_device *udev)
3666{
3667 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3668 struct dev_info *dev_info;
3669 __le32 __iomem **port_array;
3670 __le32 __iomem *addr, *pm_addr;
3671 u32 temp, dev_id;
3672 unsigned int port_num;
3673 unsigned long flags;
3674 int hird;
3675 int ret;
3676
3677 if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
3678 !udev->lpm_capable)
3679 return -EINVAL;
3680
3681
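	/* We only support LPM for non-hub devices connected directly to the
	 * root hub so far.
	 */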
3682 if (!udev->parent || udev->parent->parent ||
3683 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
3684 return -EINVAL;
3685
3686 spin_lock_irqsave(&xhci->lock, flags);
3687
3688
3689 dev_id = le16_to_cpu(udev->descriptor.idVendor) << 16 |
3690 le16_to_cpu(udev->descriptor.idProduct);
3691 list_for_each_entry(dev_info, &xhci->lpm_failed_devs, list) {
3692 if (dev_info->dev_id == dev_id) {
3693 ret = -EINVAL;
3694 goto finish;
3695 }
3696 }
3697
3698 port_array = xhci->usb2_ports;
3699 port_num = udev->portnum - 1;
3700
3701 if (port_num > HCS_MAX_PORTS(xhci->hcs_params1)) {
3702 xhci_dbg(xhci, "invalid port number %d\n", udev->portnum);
3703 ret = -EINVAL;
3704 goto finish;
3705 }
3706
3707
3708
3709
3710
3711
3712
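	/* Test USB 2.0 software LPM.
	 * FIXME: some xHCI 1.0 hosts may implement a new register to set up
	 * hardware-controlled USB 2.0 LPM instead.
	 */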
3713 xhci_dbg(xhci, "test port %d software LPM\n", port_num);
3714
3715
3716
3717
3718
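	/* Set L1 Device Slot and HIRD/BESL.
	 * Check the device's USB 2.0 extension descriptor to determine
	 * whether HIRD or BESL should be used.
	 */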
3719 pm_addr = port_array[port_num] + 1;
3720 hird = xhci_calculate_hird_besl(xhci, udev);
3721 temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird);
3722 xhci_writel(xhci, temp, pm_addr);
3723
3724
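	/* Set the port link state to U2(L1) */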
3725 addr = port_array[port_num];
3726 xhci_set_link_state(xhci, port_array, port_num, XDEV_U2);
3727
3728
3729 spin_unlock_irqrestore(&xhci->lock, flags);
3730 msleep(10);
3731 spin_lock_irqsave(&xhci->lock, flags);
3732
3733
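	/* Check the L1 Status */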
3734 ret = handshake(xhci, pm_addr, PORT_L1S_MASK, PORT_L1S_SUCCESS, 125);
3735 if (ret != -ETIMEDOUT) {
3736
3737 temp = xhci_readl(xhci, addr);
3738 xhci_dbg(xhci, "port %d entered L1 state, port status 0x%x\n",
3739 port_num, temp);
3740 ret = 0;
3741 } else {
3742 temp = xhci_readl(xhci, pm_addr);
3743 xhci_dbg(xhci, "port %d software lpm failed, L1 status %d\n",
3744 port_num, temp & PORT_L1S_MASK);
3745 ret = -EINVAL;
3746 }
3747
3748
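	/* Resume the port back to U0 */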
3749 xhci_set_link_state(xhci, port_array, port_num, XDEV_U0);
3750
3751 spin_unlock_irqrestore(&xhci->lock, flags);
3752 msleep(10);
3753 spin_lock_irqsave(&xhci->lock, flags);
3754
3755
3756 xhci_test_and_clear_bit(xhci, port_array, port_num, PORT_PLC);
3757
3758
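	/* Check PORTSC to make sure the device is in the right state */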
3759 if (!ret) {
3760 temp = xhci_readl(xhci, addr);
3761 xhci_dbg(xhci, "resumed port %d status 0x%x\n", port_num, temp);
3762 if (!(temp & PORT_CONNECT) || !(temp & PORT_PE) ||
3763 (temp & PORT_PLS_MASK) != XDEV_U0) {
3764 xhci_dbg(xhci, "port L1 resume fail\n");
3765 ret = -EINVAL;
3766 }
3767 }
3768
3769 if (ret) {
3770
3771 xhci_warn(xhci, "device LPM test failed, may disconnect and "
3772 "re-enumerate\n");
3773 dev_info = kzalloc(sizeof(struct dev_info), GFP_ATOMIC);
3774 if (!dev_info) {
3775 ret = -ENOMEM;
3776 goto finish;
3777 }
3778 dev_info->dev_id = dev_id;
3779 INIT_LIST_HEAD(&dev_info->list);
3780 list_add(&dev_info->list, &xhci->lpm_failed_devs);
3781 } else {
3782 xhci_ring_device(xhci, udev->slot_id);
3783 }
3784
3785finish:
3786 spin_unlock_irqrestore(&xhci->lock, flags);
3787 return ret;
3788}
3789
3790int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
3791 struct usb_device *udev, int enable)
3792{
3793 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3794 __le32 __iomem **port_array;
3795 __le32 __iomem *pm_addr;
3796 u32 temp;
3797 unsigned int port_num;
3798 unsigned long flags;
3799 int hird;
3800
3801 if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support ||
3802 !udev->lpm_capable)
3803 return -EPERM;
3804
3805 if (!udev->parent || udev->parent->parent ||
3806 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
3807 return -EPERM;
3808
3809 if (udev->usb2_hw_lpm_capable != 1)
3810 return -EPERM;
3811
3812 spin_lock_irqsave(&xhci->lock, flags);
3813
3814 port_array = xhci->usb2_ports;
3815 port_num = udev->portnum - 1;
3816 pm_addr = port_array[port_num] + 1;
3817 temp = xhci_readl(xhci, pm_addr);
3818
3819 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
3820 enable ? "enable" : "disable", port_num);
3821
3822 hird = xhci_calculate_hird_besl(xhci, udev);
3823
3824 if (enable) {
3825 temp &= ~PORT_HIRD_MASK;
3826 temp |= PORT_HIRD(hird) | PORT_RWE;
3827 xhci_writel(xhci, temp, pm_addr);
3828 temp = xhci_readl(xhci, pm_addr);
3829 temp |= PORT_HLE;
3830 xhci_writel(xhci, temp, pm_addr);
3831 } else {
3832 temp &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK);
3833 xhci_writel(xhci, temp, pm_addr);
3834 }
3835
3836 spin_unlock_irqrestore(&xhci->lock, flags);
3837 return 0;
3838}
3839
3840int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
3841{
3842 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3843 int ret;
3844
3845 ret = xhci_usb2_software_lpm_test(hcd, udev);
3846 if (!ret) {
		xhci_dbg(xhci, "software LPM test succeeded\n");
3848 if (xhci->hw_lpm_support == 1) {
3849 udev->usb2_hw_lpm_capable = 1;
3850 ret = xhci_set_usb2_hardware_lpm(hcd, udev, 1);
3851 if (!ret)
3852 udev->usb2_hw_lpm_enabled = 1;
3853 }
3854 }
3855
3856 return 0;
3857}
3858
3859#else
3860
3861int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
3862 struct usb_device *udev, int enable)
3863{
3864 return 0;
3865}
3866
3867int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
3868{
3869 return 0;
3870}
3871
3872#endif
3873
3874
3875
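/*---------------------- USB 3.0 Link PM functions ------------------------*/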
3876#ifdef CONFIG_PM
3877
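/* Service interval in ns = 2^(bInterval - 1) * 125us * 1000ns/us */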
3878static unsigned long long xhci_service_interval_to_ns(
3879 struct usb_endpoint_descriptor *desc)
3880{
3881 return (1 << (desc->bInterval - 1)) * 125 * 1000;
3882}
3883
3884static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
3885 enum usb3_link_state state)
3886{
3887 unsigned long long sel;
3888 unsigned long long pel;
3889 unsigned int max_sel_pel;
3890 char *state_name;
3891
3892 switch (state) {
3893 case USB3_LPM_U1:
3894
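		/* Convert SEL and PEL stored in nanoseconds to microseconds */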
3895 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
3896 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
3897 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
3898 state_name = "U1";
3899 break;
3900 case USB3_LPM_U2:
3901 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
3902 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
3903 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
3904 state_name = "U2";
3905 break;
3906 default:
3907 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
3908 __func__);
3909 return USB3_LPM_DISABLED;
3910 }
3911
3912 if (sel <= max_sel_pel && pel <= max_sel_pel)
3913 return USB3_LPM_DEVICE_INITIATED;
3914
	if (sel > max_sel_pel)
		dev_dbg(&udev->dev, "Device-initiated %s disabled "
				"due to long SEL %llu us\n",
				state_name, sel);
	else
		dev_dbg(&udev->dev, "Device-initiated %s disabled "
				"due to long PEL %llu us\n",
				state_name, pel);
3923 return USB3_LPM_DISABLED;
3924}
3925
3926
3927
3928
3929
3930
3931
3932
3933
3934
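/* Returns the hub-encoded U1 timeout value.
 * The U1 timeout should be the maximum of the following values:
 *  - For control endpoints, the U1 system exit latency (SEL) * 3
 *  - For bulk endpoints, the U1 SEL * 5
 *  - For interrupt endpoints:
 *    - Notification EPs, the U1 SEL * 3
 *    - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
 *  - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
 */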
3935static u16 xhci_calculate_intel_u1_timeout(struct usb_device *udev,
3936 struct usb_endpoint_descriptor *desc)
3937{
3938 unsigned long long timeout_ns;
3939 int ep_type;
3940 int intr_type;
3941
3942 ep_type = usb_endpoint_type(desc);
3943 switch (ep_type) {
3944 case USB_ENDPOINT_XFER_CONTROL:
3945 timeout_ns = udev->u1_params.sel * 3;
3946 break;
3947 case USB_ENDPOINT_XFER_BULK:
3948 timeout_ns = udev->u1_params.sel * 5;
3949 break;
3950 case USB_ENDPOINT_XFER_INT:
3951 intr_type = usb_endpoint_interrupt_type(desc);
3952 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
3953 timeout_ns = udev->u1_params.sel * 3;
3954 break;
3955 }
3956
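		/* Otherwise the calculation is the same as for isoc eps */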
3957 case USB_ENDPOINT_XFER_ISOC:
3958 timeout_ns = xhci_service_interval_to_ns(desc);
3959 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
3960 if (timeout_ns < udev->u1_params.sel * 2)
3961 timeout_ns = udev->u1_params.sel * 2;
3962 break;
3963 default:
3964 return 0;
3965 }
3966
3967
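	/* The U1 timeout is encoded in 1us intervals.
	 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
	 */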
3968 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
3969
3970 if (timeout_ns == USB3_LPM_DISABLED)
3971 timeout_ns++;
3972
3973
3974
3975
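	/* If the necessary timeout value is bigger than what we can set in the
	 * USB 3.0 hub, we have to disable hub-initiated U1.
	 */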
3976 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
3977 return timeout_ns;
	dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
			"due to long timeout %llu us\n", timeout_ns);
3980 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
3981}
3982
3983
3984
3985
3986
3987
3988
3989
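/* Returns the hub-encoded U2 timeout value.
 * The U2 timeout should be the maximum of:
 *  - 10 ms (to avoid the bandwidth impact on the scheduler)
 *  - largest bInterval of any periodic endpoint (to avoid going into lower
 *    power link states between intervals)
 *  - the U2 exit latency of the device
 */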
3990static u16 xhci_calculate_intel_u2_timeout(struct usb_device *udev,
3991 struct usb_endpoint_descriptor *desc)
3992{
3993 unsigned long long timeout_ns;
3994 unsigned long long u2_del_ns;
3995
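	/* Start with a 10 ms timeout, in nanoseconds */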
3996 timeout_ns = 10 * 1000 * 1000;
3997
3998 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
3999 (xhci_service_interval_to_ns(desc) > timeout_ns))
4000 timeout_ns = xhci_service_interval_to_ns(desc);
4001
4002 u2_del_ns = udev->bos->ss_cap->bU2DevExitLat * 1000;
4003 if (u2_del_ns > timeout_ns)
4004 timeout_ns = u2_del_ns;
4005
4006
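	/* The U2 timeout is encoded in 256us intervals */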
4007 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
4008
4009
4010
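	/* If the necessary timeout value is bigger than what we can set in the
	 * USB 3.0 hub, we have to disable hub-initiated U2.
	 */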
4011 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
4012 return timeout_ns;
	dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
			"due to long timeout %llu us\n", timeout_ns * 256);
4015 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
4016}
4017
4018static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4019 struct usb_device *udev,
4020 struct usb_endpoint_descriptor *desc,
4021 enum usb3_link_state state,
4022 u16 *timeout)
4023{
4024 if (state == USB3_LPM_U1) {
4025 if (xhci->quirks & XHCI_INTEL_HOST)
4026 return xhci_calculate_intel_u1_timeout(udev, desc);
4027 } else {
4028 if (xhci->quirks & XHCI_INTEL_HOST)
4029 return xhci_calculate_intel_u2_timeout(udev, desc);
4030 }
4031
4032 return USB3_LPM_DISABLED;
4033}
4034
4035static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4036 struct usb_device *udev,
4037 struct usb_endpoint_descriptor *desc,
4038 enum usb3_link_state state,
4039 u16 *timeout)
4040{
4041 u16 alt_timeout;
4042
4043 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4044 desc, state, timeout);
4045
4046
4047
4048
4049
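	/* If we found we can't enable hub-initiated LPM, or the timeout was
	 * finalized as device-initiated only, stop searching; the caller gets
	 * the final value through *timeout.
	 */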
4050 if (alt_timeout == USB3_LPM_DISABLED ||
4051 alt_timeout == USB3_LPM_DEVICE_INITIATED) {
4052 *timeout = alt_timeout;
4053 return -E2BIG;
4054 }
4055 if (alt_timeout > *timeout)
4056 *timeout = alt_timeout;
4057 return 0;
4058}
4059
4060static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
4061 struct usb_device *udev,
4062 struct usb_host_interface *alt,
4063 enum usb3_link_state state,
4064 u16 *timeout)
4065{
4066 int j;
4067
4068 for (j = 0; j < alt->desc.bNumEndpoints; j++) {
4069 if (xhci_update_timeout_for_endpoint(xhci, udev,
4070 &alt->endpoint[j].desc, state, timeout))
4071 return -E2BIG;
4073 }
4074 return 0;
4075}
4076
4077static int xhci_check_intel_tier_policy(struct usb_device *udev,
4078 enum usb3_link_state state)
4079{
4080 struct usb_device *parent;
4081 unsigned int num_hubs;
4082
4083 if (state == USB3_LPM_U2)
4084 return 0;
4085
4086
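	/* Don't enable U1 if the device is on a 2nd tier hub or lower. */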
4087 for (parent = udev->parent, num_hubs = 0; parent->parent;
4088 parent = parent->parent)
4089 num_hubs++;
4090
4091 if (num_hubs < 2)
4092 return 0;
4093
4094 dev_dbg(&udev->dev, "Disabling U1 link state for device"
4095 " below second-tier hub.\n");
4096 dev_dbg(&udev->dev, "Plug device into first-tier hub "
4097 "to decrease power consumption.\n");
4098 return -E2BIG;
4099}
4100
4101static int xhci_check_tier_policy(struct xhci_hcd *xhci,
4102 struct usb_device *udev,
4103 enum usb3_link_state state)
4104{
4105 if (xhci->quirks & XHCI_INTEL_HOST)
4106 return xhci_check_intel_tier_policy(udev, state);
4107 return -EINVAL;
4108}
4109
4110
4111
4112
4113
4114
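/* Returns the U1 or U2 timeout that should be enabled.
 * If the tier check or timeout setting functions return with a non-zero exit
 * code, that means the timeout value has been finalized and we shouldn't look
 * at any more endpoints.
 */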
4115static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
4116 struct usb_device *udev, enum usb3_link_state state)
4117{
4118 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4119 struct usb_host_config *config;
4120 char *state_name;
4121 int i;
4122 u16 timeout = USB3_LPM_DISABLED;
4123
4124 if (state == USB3_LPM_U1)
4125 state_name = "U1";
4126 else if (state == USB3_LPM_U2)
4127 state_name = "U2";
4128 else {
4129 dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
4130 state);
4131 return timeout;
4132 }
4133
4134 if (xhci_check_tier_policy(xhci, udev, state) < 0)
4135 return timeout;
4136
4137
4138
4139
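	/* Start with the timeout for the default control endpoint, then fold
	 * in the requirements of every endpoint in the active configuration.
	 */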
4140 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
4141 state, &timeout))
4142 return timeout;
4143
4144 config = udev->actconfig;
4145 if (!config)
4146 return timeout;
4147
4148 for (i = 0; i < USB_MAXINTERFACES; i++) {
4149 struct usb_driver *driver;
4150 struct usb_interface *intf = config->interface[i];
4151
4152 if (!intf)
4153 continue;
4154
4155
4156
4157
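		/* Check if any currently bound drivers want hub-initiated LPM
		 * disabled.
		 */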
4158 if (intf->dev.driver) {
4159 driver = to_usb_driver(intf->dev.driver);
4160 if (driver && driver->disable_hub_initiated_lpm) {
4161 dev_dbg(&udev->dev, "Hub-initiated %s disabled "
4162 "at request of driver %s\n",
4163 state_name, driver->name);
4164 return xhci_get_timeout_no_hub_lpm(udev, state);
4165 }
4166 }
4167
4168
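		/* Not sure how this could happen... */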
4169 if (!intf->cur_altsetting)
4170 continue;
4171
4172 if (xhci_update_timeout_for_interface(xhci, udev,
4173 intf->cur_altsetting,
4174 state, &timeout))
4175 return timeout;
4176 }
4177 return timeout;
4178}
4179
4180
4181
4182
4183
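/* Issue an Evaluate Context command to change the Maximum Exit Latency in the
 * slot context.  If that succeeds, store the new MEL value in the
 * xhci_virt_device structure.
 */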
4184static int xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4185 struct usb_device *udev, u16 max_exit_latency)
4186{
4187 struct xhci_virt_device *virt_dev;
4188 struct xhci_command *command;
4189 struct xhci_input_control_ctx *ctrl_ctx;
4190 struct xhci_slot_ctx *slot_ctx;
4191 unsigned long flags;
4192 int ret;
4193
4194 spin_lock_irqsave(&xhci->lock, flags);
4195 if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) {
4196 spin_unlock_irqrestore(&xhci->lock, flags);
4197 return 0;
4198 }
4199
4200
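	/* Attempt to issue an Evaluate Context command to change the MEL. */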
4201 virt_dev = xhci->devs[udev->slot_id];
4202 command = xhci->lpm_command;
4203 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4204 spin_unlock_irqrestore(&xhci->lock, flags);
4205
4206 ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
4207 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4208 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4209 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4210 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4211
4212 xhci_dbg(xhci, "Set up evaluate context for LPM MEL change.\n");
4213 xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
4214 xhci_dbg_ctx(xhci, command->in_ctx, 0);
4215
4216
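	/* Issue and wait for the evaluate context command. */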
4217 ret = xhci_configure_endpoint(xhci, udev, command,
4218 true, true);
4219 xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id);
4220 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0);
4221
4222 if (!ret) {
4223 spin_lock_irqsave(&xhci->lock, flags);
4224 virt_dev->current_mel = max_exit_latency;
4225 spin_unlock_irqrestore(&xhci->lock, flags);
4226 }
4227 return ret;
4228}
4229
4230static int calculate_max_exit_latency(struct usb_device *udev,
4231 enum usb3_link_state state_changed,
4232 u16 hub_encoded_timeout)
4233{
4234 unsigned long long u1_mel_us = 0;
4235 unsigned long long u2_mel_us = 0;
4236 unsigned long long mel_us = 0;
4237 bool disabling_u1;
4238 bool disabling_u2;
4239 bool enabling_u1;
4240 bool enabling_u2;
4241
4242 disabling_u1 = (state_changed == USB3_LPM_U1 &&
4243 hub_encoded_timeout == USB3_LPM_DISABLED);
4244 disabling_u2 = (state_changed == USB3_LPM_U2 &&
4245 hub_encoded_timeout == USB3_LPM_DISABLED);
4246
4247 enabling_u1 = (state_changed == USB3_LPM_U1 &&
4248 hub_encoded_timeout != USB3_LPM_DISABLED);
4249 enabling_u2 = (state_changed == USB3_LPM_U2 &&
4250 hub_encoded_timeout != USB3_LPM_DISABLED);
4251
4252
4253
4254
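	/* If U1 was already enabled and we're not disabling it,
	 * or we're going to enable U1, account for the U1 max exit latency.
	 */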
4255 if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
4256 enabling_u1)
4257 u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
4258 if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
4259 enabling_u2)
4260 u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
4261
4262 if (u1_mel_us > u2_mel_us)
4263 mel_us = u1_mel_us;
4264 else
4265 mel_us = u2_mel_us;
4266
4267 if (mel_us > MAX_EXIT) {
4268 dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
4269 "is too big.\n", mel_us);
4270 return -E2BIG;
4271 }
4272 return mel_us;
4273}
4274
4275
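/* Returns the hub-encoded timeout on success, or a negative error code if
 * changing the max exit latency failed.
 */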
4276int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4277 struct usb_device *udev, enum usb3_link_state state)
4278{
4279 struct xhci_hcd *xhci;
4280 u16 hub_encoded_timeout;
4281 int mel;
4282 int ret;
4283
4284 xhci = hcd_to_xhci(hcd);
4285
4286
4287
4288
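	/* The LPM timeout values are pretty host-controller specific, so don't
	 * enable hub-initiated timeouts unless the vendor has provided
	 * information about their timeout algorithm.
	 */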
4289 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4290 !xhci->devs[udev->slot_id])
4291 return USB3_LPM_DISABLED;
4292
4293 hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
4294 mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
4295 if (mel < 0) {
4296
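		/* Max Exit Latency is too big, disable LPM. */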
4297 hub_encoded_timeout = USB3_LPM_DISABLED;
4298 mel = 0;
4299 }
4300
4301 ret = xhci_change_max_exit_latency(xhci, udev, mel);
4302 if (ret)
4303 return ret;
4304 return hub_encoded_timeout;
4305}
4306
4307int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4308 struct usb_device *udev, enum usb3_link_state state)
4309{
4310 struct xhci_hcd *xhci;
4311 u16 mel;
4312 int ret;
4313
4314 xhci = hcd_to_xhci(hcd);
4315 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4316 !xhci->devs[udev->slot_id])
4317 return 0;
4318
4319 mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
4320 ret = xhci_change_max_exit_latency(xhci, udev, mel);
4321 if (ret)
4322 return ret;
4323 return 0;
4324}
4325#else
4326
4327int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4328 struct usb_device *udev, enum usb3_link_state state)
4329{
4330 return USB3_LPM_DISABLED;
4331}
4332
4333int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4334 struct usb_device *udev, enum usb3_link_state state)
4335{
4336 return 0;
4337}
4338#endif
4339
4340
4341
4342
4343
4344
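/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */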
4345int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
4346 struct usb_tt *tt, gfp_t mem_flags)
4347{
4348 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4349 struct xhci_virt_device *vdev;
4350 struct xhci_command *config_cmd;
4351 struct xhci_input_control_ctx *ctrl_ctx;
4352 struct xhci_slot_ctx *slot_ctx;
4353 unsigned long flags;
4354 unsigned think_time;
4355 int ret;
4356
4357
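	/* Ignore root hubs */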
4358 if (!hdev->parent)
4359 return 0;
4360
4361 vdev = xhci->devs[hdev->slot_id];
4362 if (!vdev) {
4363 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
4364 return -EINVAL;
4365 }
4366 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
4367 if (!config_cmd) {
4368 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
4369 return -ENOMEM;
4370 }
4371
4372 spin_lock_irqsave(&xhci->lock, flags);
4373 if (hdev->speed == USB_SPEED_HIGH &&
4374 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
4375 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
4376 xhci_free_command(xhci, config_cmd);
4377 spin_unlock_irqrestore(&xhci->lock, flags);
4378 return -ENOMEM;
4379 }
4380
4381 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
4382 ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
4383 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4384 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
4385 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
4386 if (tt->multi)
4387 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
4388 if (xhci->hci_version > 0x95) {
4389 xhci_dbg(xhci, "xHCI version %x needs hub "
4390 "TT think time and number of ports\n",
4391 (unsigned int) xhci->hci_version);
4392 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
4393
4394
4395
4396
4397
4398
4399
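		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 *
		 * xHCI 1.0: this field shall be 0 if the device is not a
		 * high-speed hub.
		 */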
4400 think_time = tt->think_time;
4401 if (think_time != 0)
4402 think_time = (think_time / 666) - 1;
4403 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
4404 slot_ctx->tt_info |=
4405 cpu_to_le32(TT_THINK_TIME(think_time));
4406 } else {
4407 xhci_dbg(xhci, "xHCI version %x doesn't need hub "
4408 "TT think time or number of ports\n",
4409 (unsigned int) xhci->hci_version);
4410 }
4411 slot_ctx->dev_state = 0;
4412 spin_unlock_irqrestore(&xhci->lock, flags);
4413
4414 xhci_dbg(xhci, "Set up %s for hub device.\n",
4415 (xhci->hci_version > 0x95) ?
4416 "configure endpoint" : "evaluate context");
4417 xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
4418 xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);
4419
4420
4421
4422
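	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */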
4423 if (xhci->hci_version > 0x95)
4424 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
4425 false, false);
4426 else
4427 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
4428 true, false);
4429
4430 xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
4431 xhci_dbg_ctx(xhci, vdev->out_ctx, 0);
4432
4433 xhci_free_command(xhci, config_cmd);
4434 return ret;
4435}
4436
4437int xhci_get_frame(struct usb_hcd *hcd)
4438{
4439 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4440
4441 return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
4442}
4443
4444int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4445{
4446 struct xhci_hcd *xhci;
4447 struct device *dev = hcd->self.controller;
4448 int retval;
4449 u32 temp;
4450
4451
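	/* Accept arbitrarily long scatter-gather lists */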
4452 hcd->self.sg_tablesize = ~0;
4453
4454 if (usb_hcd_is_primary_hcd(hcd)) {
4455 xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL);
4456 if (!xhci)
4457 return -ENOMEM;
4458 *((struct xhci_hcd **) hcd->hcd_priv) = xhci;
4459 xhci->main_hcd = hcd;
4460
4461
4462
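		/* Mark the first roothub as being USB 2.0.
		 * The xHCI driver will register the USB 3.0 roothub.
		 */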
4463 hcd->speed = HCD_USB2;
4464 hcd->self.root_hub->speed = USB_SPEED_HIGH;
4465
4466
4467
4468
4469
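		/*
		 * USB 2.0 roothub under xHCI has an integrated TT,
		 * (rate matching hub) as opposed to having an OHCI/UHCI
		 * companion controller.
		 */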
4470 hcd->has_tt = 1;
4471 } else {
4472
4473
4474
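		/* xHCI private pointer was set in xhci_pci_probe for the
		 * second registered roothub.
		 */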
4475 xhci = hcd_to_xhci(hcd);
4476 temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
4477 if (HCC_64BIT_ADDR(temp)) {
4478 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
4479 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
4480 } else {
4481 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
4482 }
4483 return 0;
4484 }
4485
4486 xhci->cap_regs = hcd->regs;
4487 xhci->op_regs = hcd->regs +
4488 HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
4489 xhci->run_regs = hcd->regs +
4490 (xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
4491
4492 xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
4493 xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
4494 xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
4495 xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
4496 xhci->hci_version = HC_VERSION(xhci->hcc_params);
4497 xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
4498 xhci_print_registers(xhci);
4499
4500 get_quirks(dev, xhci);
4501
4502
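	/* Make sure the HC is halted. */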
4503 retval = xhci_halt(xhci);
4504 if (retval)
4505 goto error;
4506
4507 xhci_dbg(xhci, "Resetting HCD\n");
4508
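	/* Reset the internal HC memory state and registers. */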
4509 retval = xhci_reset(xhci);
4510 if (retval)
4511 goto error;
4512 xhci_dbg(xhci, "Reset complete\n");
4513
4514 temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
4515 if (HCC_64BIT_ADDR(temp)) {
4516 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
4517 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
4518 } else {
4519 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
4520 }
4521
4522 xhci_dbg(xhci, "Calling HCD init\n");
4523
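	/* Initialize HCD and host controller data structures. */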
4524 retval = xhci_init(hcd);
4525 if (retval)
4526 goto error;
4527 xhci_dbg(xhci, "Called HCD init\n");
4528 return 0;
4529error:
4530 kfree(xhci);
4531 return retval;
4532}
4533
4534MODULE_DESCRIPTION(DRIVER_DESC);
4535MODULE_AUTHOR(DRIVER_AUTHOR);
4536MODULE_LICENSE("GPL");
4537
4538static int __init xhci_hcd_init(void)
4539{
4540 int retval;
4541
4542 retval = xhci_register_pci();
4543 if (retval < 0) {
		printk(KERN_DEBUG "Problem registering PCI driver.\n");
4545 return retval;
4546 }
4547 retval = xhci_register_plat();
4548 if (retval < 0) {
		printk(KERN_DEBUG "Problem registering platform driver.\n");
4550 goto unreg_pci;
4551 }
4552
4553
4554
4555
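	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */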
4556 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
4557 BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
4558 BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
4559
4560
4561
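	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */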
4562 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
4563 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
4564 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
4565 BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
4566 BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
4567
4568 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
4569 return 0;
4570unreg_pci:
4571 xhci_unregister_pci();
4572 return retval;
4573}
4574module_init(xhci_hcd_init);
4575
4576static void __exit xhci_hcd_cleanup(void)
4577{
4578 xhci_unregister_pci();
4579 xhci_unregister_plat();
4580}
4581module_exit(xhci_hcd_cleanup);
4582