/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
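
/*
 * handshake - spin reading an xHC register until a condition is met
 * @ptr: address of the xHC register to be read
 * @mask: bits to look at in the result of the read
 * @done: value of those bits when the handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success.
 */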
static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
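
/*
 * Disable interrupts and begin the xHCI halting process.
 */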
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);
}
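
/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * The xHC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read the HC Halted bit in the status register to see when the HC is done.
 */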
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;

	xhci_dbg(xhci, "// Halt the HC\n");
	xhci_quiesce(xhci);

	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state |= XHCI_STATE_HALTED;
	return ret;
}
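
/*
 * Set the run bit and wait for the host to be running.
 */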
static int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state &= ~XHCI_STATE_HALTED;
	return ret;
}
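
/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */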
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret;

	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);

	ret = handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 250 * 1000);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
}
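
/*
 * Free the IRQs that were requested for MSI-X vectors or for a plain
 * MSI/legacy interrupt line.
 */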
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq >= 0)
		return;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			if (xhci->msix_entries[i].vector)
				free_irq(xhci->msix_entries[i].vector,
						xhci_to_hcd(xhci));
	} else if (pdev->irq >= 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));

	return;
}
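
/*
 * Set up MSI
 */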
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_err(xhci, "failed to allocate MSI entry\n");
		return ret;
	}

	ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_err(xhci, "disable MSI interrupt\n");
		pci_disable_msi(pdev);
	}

	return ret;
}
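
/*
 * Set up MSI-X
 */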
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors to request:
	 *   - HCS_MAX_INTRS: the maximum number of interrupters the host
	 *     supports, from HCSPARAMS1.
	 *   - num_online_cpus: one vector per CPU core, plus one extra so an
	 *     interrupt vector is always available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
			GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_err(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				(irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_err(xhci, "disable MSI-X interrupt\n");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}
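
/* Free any IRQs and disable MSI-X */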
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	xhci_free_irq(xhci);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	hcd->msix_enabled = 0;
	return;
}
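
/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device contexts array, set up
 * the command ring, and create the event ring.
 */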
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	if (link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	return retval;
}

/*-------------------------------------------------------------------------*/

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
	u64 temp_64;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci_dbg(xhci, "HW died, polling stopped.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0llx\n", (unsigned long long) temp_64);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; ++j) {
			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
	return 0;
}
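
/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */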
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	/* Start the xHCI host controller running only after the USB 2.0
	 * roothub is setup; the one-time setup below is done for the
	 * primary HCD only.
	 */
	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg(xhci, "xhci_run\n");
	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = -1;

	/* Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI on those.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		goto legacy_irq;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

	if (ret) {
legacy_irq:
		/* fall back to legacy (line-based) interrupt */
		ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
					hcd->irq_descr, hcd);
		if (ret) {
			xhci_err(xhci, "request interrupt %d failed\n",
					pdev->irq);
			return ret;
		}
		hcd->irq = pdev->irq;
	}

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0llx\n", (unsigned long long) temp_64);

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_queue_vendor_command(xhci, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));

	xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
	return 0;
}

static void xhci_only_stop_hcd(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/* The shared_hcd is going to be deallocated shortly (the USB core only
	 * calls this function when allocation fails in usb_add_hcd(), or
	 * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
	 */
	xhci->shared_hcd = NULL;
	spin_unlock_irq(&xhci->lock);
}
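
/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */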
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (!usb_hcd_is_primary_hcd(hcd)) {
		xhci_only_stop_hcd(xhci->shared_hcd);
		return;
	}

	spin_lock_irq(&xhci->lock);
	/* Halt and reset the host so no more event ring or command ring
	 * processing happens while we tear everything down.
	 */
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}
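
/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */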
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				      xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}
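
/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, so we can't just reuse where we left off.  The ring should be
 * empty at suspend time anyway, so we clear it and start over.
 */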
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		/* Zero all but the link TRB in each segment, so the segments
		 * stay chained together; just clear the link TRB's cycle bit.
		 */
		memset(seg->trbs, 0,
			sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}
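
/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */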
int xhci_suspend(struct xhci_hcd *xhci)
{
	int rc = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;
	int i;

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoints */
	/* skipped, assuming that port suspend has already done it */

	/* step 2: clear Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command &= ~CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status,
		      STS_HALT, STS_HALT, 100*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_CSS;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
	}

	return rc;
}
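
/*
 * Start xHC (not bus-specific)
 *
 * This is called when the machine transitions from S3/S4 mode.
 */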
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval;

	/* Wait a bit if either of the roothubs need to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	spin_lock_irq(&xhci->lock);
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		hibernated = true;

	if (!hibernated) {
		/* step 1: restore registers */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state by setting the Controller Restore
		 * State (CRS) flag and waiting for the restore to complete
		 */
		command = xhci_readl(xhci, &xhci->op_regs->command);
		command |= CMD_CRS;
		xhci_writel(xhci, command, &xhci->op_regs->command);
		if (handshake(xhci, &xhci->op_regs->status,
			      STS_RESTORE, 0, 10*100)) {
			xhci_dbg(xhci, "WARN: xHC CMD_CRS timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = xhci_readl(xhci, &xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {
		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
		/* Tell the event ring poll function not to reschedule */
		xhci->zombie = 1;
		del_timer_sync(&xhci->event_ring_timer);
#endif

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = xhci_readl(xhci, &xhci->op_regs->status);
		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
				&xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			    xhci_readl(xhci, &xhci->op_regs->status));

		/* The USB core calls the PCI reinit and start functions twice:
		 * once with the primary HCD, and then with the 3.0 HCD, so we
		 * have to re-initialize the xHC and start both HCDs here.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (retval)
			goto failed_restart;

		xhci_dbg(xhci, "Start the secondary HCD\n");
		retval = xhci_run(secondary_hcd);
		if (!retval) {
			set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
			set_bit(HCD_FLAG_HW_ACCESSIBLE,
					&xhci->shared_hcd->flags);
		}
failed_restart:
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		return retval;
	}

	/* step 4: set Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	handshake(xhci, &xhci->op_regs->status, STS_HALT,
		  0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_unlock_irq(&xhci->lock);
	return 0;
}
#endif	/* CONFIG_PM */
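
/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Find the index for an endpoint given its
 * descriptor.  Use the return value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */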
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
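
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0
 * is bit 1, etc.
 */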
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}
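
/* Find the flag for this endpoint index (for use in the control context).
 * The slot context is bit 0, endpoint 0 is bit 1, etc.
 */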
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}
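
/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * (For example, we'd want the last valid endpoint context index to be 1 if
 * bit 2 were set.)
 */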
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}
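
/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */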
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
				func);
		return -EINVAL;
	}
	if (!udev->parent) {
		printk(KERN_DEBUG "xHCI %s called for root hub\n",
				func);
		return 0;
	}

	if (check_virt_dev) {
		xhci = hcd_to_xhci(hcd);
		if (!udev->slot_id || !xhci->devs
			|| !xhci->devs[udev->slot_id]) {
			printk(KERN_DEBUG "xHCI %s called with unaddressed "
					"device\n", func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			printk(KERN_DEBUG "xHCI %s called with a udev that "
					"does not match the virt_dev\n", func);
			return -EINVAL;
		}
	}

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);
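
/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */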
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = le16_to_cpu(urb->dev->ep0.desc.wMaxPacketSize);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
				max_packet_size);
		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
				hw_max_packet_size);
		xhci_dbg(xhci, "Issuing evaluate context command.\n");

		/* Copy the default control endpoint context into the input
		 * context and update its max packet size field.
		 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
	}
	return ret;
}
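
/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */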
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv	*urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else
		size = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
				  size * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = kzalloc(sizeof(struct xhci_td), mem_flags);
		if (!urb_priv->td[i]) {
			urb_priv->length = i;
			xhci_urb_free_priv(xhci, urb_priv);
			return -ENOMEM;
		}
	}

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(xhci, urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
exit:
	return ret;
dying:
	xhci_urb_free_priv(xhci, urb_priv);
	urb->hcpriv = NULL;
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
			"non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return -ESHUTDOWN;
}
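
/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */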
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	stream_id = urb->stream_id;
	ep = &xhci->devs[slot_id]->eps[ep_index];

	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}
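
/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed"
 * from the ring.  Since the ring is a contiguous structure, they can't be
 * physically removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  The HC will need to invalidate any
 *     TRBs it has cached after the stop endpoint command.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same URB.
 */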
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv	*urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, freeing TD.\n");
		urb_priv = urb->hcpriv;

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
		xhci_urb_free_priv(xhci, urb_priv);
		return ret;
	}
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
				"non-responsive xHCI host.\n",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists.  We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */
		goto done;
	}

	xhci_dbg(xhci, "Cancel URB %p\n", urb);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	xhci_dbg(xhci, "Endpoint ring:\n");
	xhci_debug_ring(xhci, ep_ring);

	urb_priv = urb->hcpriv;

	for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
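
/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint()
 * will add the endpoint to the schedule with possibly new parameters denoted
 * by a different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */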
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
	    EP_STATE_DISABLED ||
	    le32_to_cpu(ctrl_ctx->drop_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
	    LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}
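
/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint()
 * will add the endpoint to the schedule with possibly new parameters denoted
 * by a different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */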
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	struct xhci_virt_device *virt_dev;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	last_ctx = xhci_last_valid_endpoint(added_ctxs);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate context command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	virt_dev = xhci->devs[udev->slot_id];
	in_ctx = virt_dev->in_ctx;
	out_ctx = virt_dev->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);

	/* If this endpoint is already in use, and the upper layers are trying
	 * to add it again without dropping it, reject the addition.
	 */
	if (virt_dev->eps[ep_index].ring &&
			!(le32_to_cpu(ctrl_ctx->drop_flags) &
				xhci_get_endpoint_flag(&ep->desc))) {
		xhci_warn(xhci, "Trying to add endpoint 0x%x "
				"without dropping it.\n",
				(unsigned int) ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (le32_to_cpu(ctrl_ctx->add_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors.  We have to drop and re-add this endpoint, so we leave
	 * the drop flags alone.
	 */
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we just added one past */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
	    LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}

static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
	for (i = 1; i < 31; ++i) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}

static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;

	switch (*cmd_status) {
	case COMP_ENOMEM:
		dev_warn(&udev->dev, "Not enough host controller resources "
				"for new device state.\n");
		ret = -ENOMEM;
		/* FIXME: can we allocate more resources for the HC? */
		break;
	case COMP_BW_ERR:
		dev_warn(&udev->dev, "Not enough bandwidth "
				"for new device state.\n");
		ret = -ENOSPC;
		/* FIXME: can we go back to the old state? */
		break;
	case COMP_TRB_ERR:
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
				"add flag = 1, "
				"and endpoint is not disabled.\n");
		ret = -EINVAL;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
				"configure command.\n");
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;
	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

	switch (*cmd_status) {
	case COMP_EINVAL:
		dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
				"context command.\n");
		ret = -EINVAL;
		break;
	case COMP_EBADSLT:
		dev_warn(&udev->dev, "WARN: slot not enabled for "
				"evaluate context command.\n");
		ret = -EINVAL;
		break;
	case COMP_CTX_STATE:
		dev_warn(&udev->dev, "WARN: invalid context state for "
				"evaluate context command.\n");
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
		ret = -EINVAL;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
				"context command.\n");
		ret = -ENODEV;
		break;
	case COMP_MEL_ERR:
		/* Max Exit Latency too large error */
		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful evaluate context command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 valid_add_flags;
	u32 valid_drop_flags;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	/* Ignore the slot flag (bit 0), and the default control endpoint flag
	 * (bit 1).  The default control endpoint is added during the Address
	 * Device command and is never removed until the slot is disabled.
	 */
	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	/* Use hweight32 to count the number of ones in the add flags, or
	 * number of endpoints added.  Don't count endpoints that are changed
	 * (both added and dropped).
	 */
	return hweight32(valid_add_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}

static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 valid_add_flags;
	u32 valid_drop_flags;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	return hweight32(valid_drop_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}
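
/*
 * We need to reserve the new number of endpoints before the configure endpoint
 * command completes.  We can't subtract the dropped endpoints from the number
 * of active endpoints until the command completes because we can oversubscribe
 * the host in this case:
 *
 *  - the first configure endpoint command drops more endpoints than it adds
 *  - a second configure endpoint command that adds more endpoints is queued
 *  - the first configure endpoint command fails, so the config is unchanged
 *  - the second command may succeed, even though there isn't enough resources
 *
 * Must be called with xhci->lock held.
 */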
static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	u32 added_eps;

	added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
		xhci_dbg(xhci, "Not enough ep ctxs: "
				"%u active, need to add %u, limit is %u.\n",
				xhci->num_active_eps, added_eps,
				xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += added_eps;
	xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
			xhci->num_active_eps);
	return 0;
}
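
/*
 * The configure endpoint was failed by the xHC for some other reason, so we
 * need to revert the resources that failed configuration would have used.
 *
 * Must be called with xhci->lock held.
 */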
static void xhci_free_host_resources(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	u32 num_failed_eps;

	num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
	xhci->num_active_eps -= num_failed_eps;
	xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
			num_failed_eps,
			xhci->num_active_eps);
}
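
/*
 * Now that the command has completed, clean up the active endpoint count by
 * subtracting out the endpoints that were dropped (but not changed).
 *
 * Must be called with xhci->lock held.
 */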
static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	u32 num_dropped_eps;

	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
				num_dropped_eps,
				xhci->num_active_eps);
}
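
/* Issue a configure endpoint command or evaluate context command
 * and wait for it to finish.
 */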
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct xhci_command *command,
		bool ctx_change, bool must_succeed)
{
	int ret;
	int timeleft;
	unsigned long flags;
	struct xhci_container_ctx *in_ctx;
	struct completion *cmd_completion;
	u32 *cmd_status;
	struct xhci_virt_device *virt_dev;

	spin_lock_irqsave(&xhci->lock, flags);
	virt_dev = xhci->devs[udev->slot_id];
	if (command) {
		in_ctx = command->in_ctx;
		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
				xhci_reserve_host_resources(xhci, in_ctx)) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			xhci_warn(xhci, "Not enough host resources, "
					"active endpoint contexts = %u\n",
					xhci->num_active_eps);
			return -ENOMEM;
		}

		cmd_completion = command->completion;
		cmd_status = &command->status;
		command->command_trb = xhci->cmd_ring->enqueue;

		/* Enqueue pointer can be left pointing to the link TRB,
		 * we must handle that
		 */
		if ((le32_to_cpu(command->command_trb->link.control)
		     & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
			command->command_trb =
				xhci->cmd_ring->enq_seg->next->trbs;

		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
	} else {
		in_ctx = virt_dev->in_ctx;
		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
				xhci_reserve_host_resources(xhci, in_ctx)) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			xhci_warn(xhci, "Not enough host resources, "
					"active endpoint contexts = %u\n",
					xhci->num_active_eps);
			return -ENOMEM;
		}
		cmd_completion = &virt_dev->cmd_completion;
		cmd_status = &virt_dev->cmd_status;
	}
	init_completion(cmd_completion);

	if (!ctx_change)
		ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
				udev->slot_id, must_succeed);
	else
		ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
				udev->slot_id);
	if (ret < 0) {
		if (command)
			list_del(&command->cmd_list);
		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
			xhci_free_host_resources(xhci, in_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
		return -ENOMEM;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the configure endpoint command to complete */
	timeleft = wait_for_completion_interruptible_timeout(
			cmd_completion,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for %s command\n",
				timeleft == 0 ? "Timeout" : "Signal",
				ctx_change == 0 ?
					"configure endpoint" :
					"evaluate context");
		/* FIXME cancel the configure endpoint command */
		return -ETIME;
	}

	if (!ctx_change)
		ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
	else
		ret = xhci_evaluate_context_result(xhci, udev, cmd_status);

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		/* If the command failed, remove the reserved resources.
		 * Otherwise, clean up the estimate to include dropped eps.
		 */
		if (ret)
			xhci_free_host_resources(xhci, in_ctx);
		else
			xhci_finish_resource_reservation(xhci, in_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	return ret;
}
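
/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint().  If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 *
 * Since we are in the middle of changing either configuration or
 * installing a new alt setting, the USB core won't allow URBs to be
 * enqueued for any endpoint on the old config or interface.  Nothing
 * else should be touching the xhci->devs[slot_id] structure, so we
 * don't need to take the xhci->lock for manipulating that.
 */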
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	int i;
	int ret = 0;
	struct xhci_hcd *xhci;
	struct xhci_virt_device	*virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];

	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
	xhci_dbg(xhci, "New Input Control Context:\n");
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
		     LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));

	ret = xhci_configure_endpoint(xhci, udev, NULL,
			false, false);
	if (ret) {
		/* Callee should call reset_bandwidth() */
		return ret;
	}

	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
		     LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));

	/* Free any rings that were dropped, but not changed. */
	for (i = 1; i < 31; ++i) {
		if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
		    !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
	}
	xhci_zero_in_ctx(xhci, virt_dev);
	/*
	 * Install any rings for completely new endpoints or changed endpoints,
	 * and free or cache any old rings from changed endpoints.
	 */
	for (i = 1; i < 31; ++i) {
		if (!virt_dev->eps[i].new_ring)
			continue;
		/* Only cache or free the old ring if it exists.
		 * It may not if this is the first add of an endpoint.
		 */
		if (virt_dev->eps[i].ring) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
		}
		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
		virt_dev->eps[i].new_ring = NULL;
	}

	return ret;
}

void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device	*virt_dev;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return;
	xhci = hcd_to_xhci(hcd);

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];
	/* Free any rings allocated for added endpoints */
	for (i = 0; i < 31; ++i) {
		if (virt_dev->eps[i].new_ring) {
			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
			virt_dev->eps[i].new_ring = NULL;
		}
	}
	xhci_zero_in_ctx(xhci, virt_dev);
}

static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		u32 add_flags, u32 drop_flags)
{
	struct xhci_input_control_ctx *ctrl_ctx;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ctrl_ctx->add_flags = cpu_to_le32(add_flags);
	ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
	xhci_slot_copy(xhci, in_ctx, out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);

	xhci_dbg(xhci, "Input Context:\n");
	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}

static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_ep_ctx *ep_ctx;
	u32 added_ctxs;
	dma_addr_t addr;

	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, ep_index);
	in_ctx = xhci->devs[slot_id]->in_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
			deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit config ep after "
				"reset ep command\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
				deq_state->new_deq_seg,
				deq_state->new_deq_ptr);
		return;
	}
	ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);

	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
}

void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
		struct usb_device *udev, unsigned int ep_index)
{
	struct xhci_dequeue_state deq_state;
	struct xhci_virt_ep *ep;

	xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
	ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	/* We need to move the HW's dequeue pointer past this TD,
	 * or it will attempt to resend it on the next doorbell ring.
	 */
	xhci_find_new_dequeue_state(xhci, udev->slot_id,
			ep_index, ep->stopped_stream, ep->stopped_td,
			&deq_state);

	/* HW with the reset endpoint quirk will use the saved dequeue state to
	 * issue a configure endpoint command later.
	 */
	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
		xhci_dbg(xhci, "Queueing new dequeue state\n");
		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
				ep_index, ep->stopped_stream, &deq_state);
	} else {
		/* Better hope no one uses the input context between now and
		 * the reset endpoint completion!
		 * XXX: Is it safe to use the input context here?
		 */
		xhci_dbg(xhci, "Setting up input context for "
				"configure endpoint command\n");
		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
				ep_index, &deq_state);
	}
}
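
/* Deal with stalled endpoints.  The core should have sent the control message
 * to clear the halt condition.  However, we need to make the xHCI hardware
 * reset its sequence number, since a device will expect a sequence number of
 * zero after the halt condition is cleared.
 * Context: in_interrupt
 */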
void xhci_endpoint_reset(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	unsigned int ep_index;
	unsigned long flags;
	int ret;
	struct xhci_virt_ep *virt_ep;

	xhci = hcd_to_xhci(hcd);
	udev = (struct usb_device *) ep->hcpriv;
	/* Called with a root hub endpoint (or an endpoint that wasn't added
	 * with xhci_add_endpoint()), so ep->hcpriv was never set.
	 */
	if (!ep->hcpriv)
		return;
	ep_index = xhci_get_endpoint_index(&ep->desc);
	virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	if (!virt_ep->stopped_td) {
		xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
				ep->desc.bEndpointAddress);
		return;
	}
	if (usb_endpoint_xfer_control(&ep->desc)) {
		xhci_dbg(xhci, "Control endpoint stall already handled.\n");
		return;
	}

	xhci_dbg(xhci, "Queueing reset endpoint command\n");
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
	/*
	 * Can't change the ring dequeue pointer until it's transitioned to the
	 * stopped state, which is only upon a successful reset endpoint
	 * command.  Better hope that last command worked!
	 */
	if (!ret) {
		xhci_cleanup_stalled_ring(xhci, udev, ep_index);
		kfree(virt_ep->stopped_td);
		xhci_ring_cmd_db(xhci);
	}
	virt_ep->stopped_td = NULL;
	virt_ep->stopped_trb = NULL;
	virt_ep->stopped_stream = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (ret)
		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
}

static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct usb_host_endpoint *ep,
		unsigned int slot_id)
{
	int ret;
	unsigned int ep_index;
	unsigned int ep_state;

	if (!ep)
		return -EINVAL;
	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
	if (ret <= 0)
		return -EINVAL;
	if (ep->ss_ep_comp.bmAttributes == 0) {
		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
				" descriptor for ep 0x%x does not support streams\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
	if (ep_state & EP_HAS_STREAMS ||
			ep_state & EP_GETTING_STREAMS) {
		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
				"already has streams set up.\n",
				ep->desc.bEndpointAddress);
		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
				"dynamic stream context array reallocation.\n");
		return -EINVAL;
	}
	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
				"endpoint 0x%x; URBs are pending.\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}
	return 0;
}

2110
2111static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
2112 unsigned int *num_streams, unsigned int *num_stream_ctxs)
2113{
2114 unsigned int max_streams;
2115
2116
2117 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
2118
2119
2120
2121
2122
2123
2124 max_streams = HCC_MAX_PSA(xhci->hcc_params);
2125 if (*num_stream_ctxs > max_streams) {
2126 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
2127 max_streams);
2128 *num_stream_ctxs = max_streams;
2129 *num_streams = max_streams;
2130 }
2131}
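
/* Returns an error code if one of the endpoints already has streams.
 * This does not change any of the endpoint or stream contexts.
 */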
static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int *num_streams, u32 *changed_ep_bitmask)
{
	unsigned int max_streams;
	unsigned int endpoint_flag;
	int i;
	int ret;

	for (i = 0; i < num_eps; i++) {
		ret = xhci_check_streams_endpoint(xhci, udev,
				eps[i], udev->slot_id);
		if (ret < 0)
			return ret;

		max_streams = USB_SS_MAX_STREAMS(
				eps[i]->ss_ep_comp.bmAttributes);
		if (max_streams < (*num_streams - 1)) {
			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
					eps[i]->desc.bEndpointAddress,
					max_streams);
			*num_streams = max_streams+1;
		}

		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
		if (*changed_ep_bitmask & endpoint_flag)
			return -EINVAL;
		*changed_ep_bitmask |= endpoint_flag;
	}
	return 0;
}

static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps)
{
	u32 changed_ep_bitmask = 0;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int ep_state;
	int i;

	slot_id = udev->slot_id;
	if (!xhci->devs[slot_id])
		return 0;

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
		/* Are streams already being freed for the endpoint? */
		if (ep_state & EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't disable streams for "
					"endpoint 0x%x, "
					"streams are being disabled already.\n",
					eps[i]->desc.bEndpointAddress);
			return 0;
		}
		/* Are there actually any streams to free? */
		if (!(ep_state & EP_HAS_STREAMS) &&
				!(ep_state & EP_GETTING_STREAMS)) {
			xhci_warn(xhci, "WARN: Can't disable streams for "
					"endpoint 0x%x, "
					"streams are already disabled!\n",
					eps[i]->desc.bEndpointAddress);
			xhci_warn(xhci, "WARN: xhci_free_streams() called "
					"with non-streams endpoint\n");
			return 0;
		}
		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
	}
	return changed_ep_bitmask;
}
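
/*
 * The USB device drivers use this function (through the HCD interface in USB
 * core) to prepare a set of bulk endpoints to use streams.  Streams are used
 * to coordinate mass storage command queueing across multiple endpoints
 * (basically a stream ID == a task ID).
 *
 * Setting up streams involves allocating the same size stream context array
 * for each endpoint and issuing a configure endpoint command for all
 * endpoints.
 *
 * Don't allow the call to succeed if one endpoint only supports one stream
 * (which means it doesn't support streams at all).
 *
 * Drivers may get less stream IDs than they asked for, if the host controller
 * hardware or endpoints claim they can't support the number of requested
 * stream IDs.
 */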
int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int num_streams, gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	unsigned int ep_index;
	unsigned int num_stream_ctxs;
	unsigned long flags;
	u32 changed_ep_bitmask = 0;

	if (!eps)
		return -EINVAL;

	/* Add one to the number of streams requested to account for
	 * stream 0 that is reserved for xHCI usage.
	 */
	num_streams += 1;
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
			num_streams);

	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}

	/* Check to make sure all endpoints are not already configured for
	 * streams.  While we're at it, find the maximum number of streams that
	 * all the endpoints will support and check for duplicate endpoints.
	 */
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
			num_eps, &num_streams, &changed_ep_bitmask);
	if (ret < 0) {
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return ret;
	}
	if (num_streams <= 1) {
		xhci_warn(xhci, "WARN: endpoints can't handle "
				"more than one stream.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}
	vdev = xhci->devs[udev->slot_id];
	/* Mark each endpoint as being in transition to streams, so that
	 * xhci_urb_enqueue() will reject all URBs in the meantime.
	 */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Setup internal data structures and allocate HW data structures for
	 * streams (but don't install the HW structures in the input context
	 * until we're sure all memory allocation succeeded).
	 */
	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
			num_stream_ctxs, num_streams);

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
				num_stream_ctxs,
				num_streams, mem_flags);
		if (!vdev->eps[ep_index].stream_info)
			goto cleanup;
	}

	/* Set up the input context for a configure endpoint command. */
	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);

		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
				vdev->eps[ep_index].stream_info);
	}
	/* Tell the HW to drop its old copy of the endpoint context info
	 * and add the updated copy from the input context.
	 */
	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);

	/* Issue and wait for the configure endpoint command */
	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
			false, false);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the old ring intact and free our internal streams data
	 * structure.
	 */
	if (ret < 0)
		goto cleanup;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
				udev->slot_id, ep_index);
		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
	}
	xhci_free_command(xhci, config_cmd);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Subtract 1 for stream 0, which drivers can't use */
	return num_streams - 1;

cleanup:
	/* If it didn't work, free the streams! */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal stream ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
		xhci_endpoint_zero(xhci, vdev, eps[i]);
	}
	xhci_free_command(xhci, config_cmd);
	return -ENOMEM;
}
2365
2366
2367
2368
2369
2370
2371
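/* Transition the endpoints from using streams back to being "normal"
 * endpoints without streams.
 *
 * Modify the endpoint context state, submit a configure endpoint command,
 * and free the streams data structures if that command completes successfully.
 */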
int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *command;
	unsigned int ep_index;
	unsigned long flags;
	u32 changed_ep_bitmask;

	xhci = hcd_to_xhci(hcd);
	vdev = xhci->devs[udev->slot_id];

	/* Set up a configure endpoint command to remove the streams rings */
	spin_lock_irqsave(&xhci->lock, flags);
	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
			udev, eps, num_eps);
	if (changed_ep_bitmask == 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}

	/* Use the xhci_command structure from the first endpoint.  We may have
	 * allocated too many, but the driver may call xhci_free_streams() for
	 * each endpoint it grouped into one call to xhci_alloc_streams().
	 */
	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
	command = vdev->eps[ep_index].stream_info->free_streams_command;
	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
			EP_GETTING_NO_STREAMS;

		xhci_endpoint_copy(xhci, command->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
				&vdev->eps[ep_index]);
	}
	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Issue and wait for the configure endpoint command,
	 * which must succeed.
	 */
	ret = xhci_configure_endpoint(xhci, udev, command,
			false, true);

	/* The xHC rejected the configure endpoint command for some reason, so
	 * we leave the streams rings intact.
	 */
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}

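/*
 * Deletes endpoint resources for endpoints that were active before a Reset
 * Device command, or a Disable Slot command.  The Reset Device command leaves
 * the control endpoint intact, whereas the Disable Slot command deletes it.
 *
 * Must be called with xhci->lock held.
 */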
void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev, bool drop_control_ep)
{
	int i;
	unsigned int num_dropped_eps = 0;
	unsigned int drop_flags = 0;

	for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
		if (virt_dev->eps[i].ring) {
			drop_flags |= 1 << i;
			num_dropped_eps++;
		}
	}
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
				"%u now active.\n",
				num_dropped_eps, drop_flags,
				xhci->num_active_eps);
}

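/*
 * This submits a Reset Device command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the
 * default control endpoint.  The USB core should come back and call
 * xhci_address_device(), and then re-set up the configuration.
 *
 * Wait for the Reset Device command to finish.  Remove all structures
 * associated with the endpoints that were disabled.
 *
 * If the virt_dev to be reset does not exist or does not match the udev,
 * it means the device is lost, possibly due to an xHC restore error and
 * re-initialization during S3/S4.  In this case, call xhci_alloc_dev() to
 * re-allocate the device.
 */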
int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	int ret, i;
	unsigned long flags;
	struct xhci_hcd *xhci;
	unsigned int slot_id;
	struct xhci_virt_device *virt_dev;
	struct xhci_command *reset_device_cmd;
	int timeleft;
	int last_freed_endpoint;
	struct xhci_slot_ctx *slot_ctx;

	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	slot_id = udev->slot_id;
	virt_dev = xhci->devs[slot_id];
	if (!virt_dev) {
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not exist. Re-allocate the device\n", slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	if (virt_dev->udev != udev) {
		/* If the virt_dev and the udev do not match, this virt_dev
		 * may belong to another udev.
		 * Re-allocate the device.
		 */
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not match the udev. Re-allocate the device\n",
				slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	/* If the device is not set up, there is nothing to reset. */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
			SLOT_STATE_DISABLED)
		return 0;

	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
	/* Allocate the command structure that holds the struct completion.
	 * Assume we're in process context, since the normal device reset
	 * process has to wait for the device anyway.  Storage devices are
	 * reset as part of error handling, so use GFP_NOIO instead of
	 * GFP_KERNEL.
	 */
	reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
	if (!reset_device_cmd) {
		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
		return -ENOMEM;
	}

	/* Attempt to submit the Reset Device command to the command ring */
	spin_lock_irqsave(&xhci->lock, flags);
	reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;

	/* The enqueue pointer can be left pointing to the link TRB,
	 * we must handle that.
	 */
	if ((le32_to_cpu(reset_device_cmd->command_trb->link.control)
			& TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
		reset_device_cmd->command_trb =
			xhci->cmd_ring->enq_seg->next->trbs;

	list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
	ret = xhci_queue_reset_device(xhci, slot_id);
	if (ret) {
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		list_del(&reset_device_cmd->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		goto command_cleanup;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the Reset Device command to finish */
	timeleft = wait_for_completion_interruptible_timeout(
			reset_device_cmd->completion,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for reset device command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		spin_lock_irqsave(&xhci->lock, flags);
		/* The timeout might have raced with the event ring handler, so
		 * only delete from the list if the item isn't poisoned.
		 */
		if (reset_device_cmd->cmd_list.next != LIST_POISON1)
			list_del(&reset_device_cmd->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		ret = -ETIME;
		goto command_cleanup;
	}

	/* The Reset Device command can't fail, according to the 0.95/0.96
	 * spec, unless we tried to reset a slot ID that wasn't enabled,
	 * or the device wasn't in the addressed or configured state.
	 */
	ret = reset_device_cmd->status;
	switch (ret) {
	/* The slot is disabled, or the device isn't in the right state. */
	case COMP_EBADSLT:
	case COMP_CTX_STATE:
		xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
				slot_id,
				xhci_get_slot_state(xhci, virt_dev->out_ctx));
		xhci_info(xhci, "Not freeing device rings.\n");
		/* Don't treat this as an error. */
		ret = 0;
		goto command_cleanup;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful reset device command.\n");
		break;
	default:
		if (xhci_is_vendor_info_code(xhci, ret))
			break;
		xhci_warn(xhci, "Unknown completion code %u for "
				"reset device command.\n", ret);
		ret = -EINVAL;
		goto command_cleanup;
	}

	/* Free up host controller endpoint resources */
	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		/* Don't delete the default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, false);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}

	/* Everything but endpoint 0 is disabled, so free or cache the rings */
	last_freed_endpoint = 1;
	for (i = 1; i < 31; ++i) {
		struct xhci_virt_ep *ep = &virt_dev->eps[i];

		if (ep->ep_state & EP_HAS_STREAMS) {
			xhci_free_stream_info(xhci, ep->stream_info);
			ep->stream_info = NULL;
			ep->ep_state &= ~EP_HAS_STREAMS;
		}

		if (ep->ring) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
			last_freed_endpoint = i;
		}
	}
	xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
	ret = 0;

command_cleanup:
	xhci_free_command(xhci, reset_device_cmd);
	return ret;
}

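/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, and all traffic has been stopped and the endpoints have been
 * disabled.  Free any HC data structures associated with that device.
 */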
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *virt_dev;
	unsigned long flags;
	u32 state;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return;

	virt_dev = xhci->devs[udev->slot_id];

	/* Stop any wayward timer functions (which may grab the lock) */
	for (i = 0; i < 31; ++i) {
		virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
	}

	spin_lock_irqsave(&xhci->lock, flags);
	/* Don't disable the slot if the host controller is dead. */
	state = xhci_readl(xhci, &xhci->op_regs->status);
	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci_free_virt_device(xhci, udev->slot_id);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	/*
	 * The command completion handler will free any data structures
	 * associated with the slot.
	 */
}

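/*
 * Checks if we have enough host controller resources for the default control
 * endpoint.
 *
 * Must be called with xhci->lock held.
 */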
static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
{
	if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
		xhci_dbg(xhci, "Not enough ep ctxs: "
				"%u active, need to add 1, limit is %u.\n",
				xhci->num_active_eps, xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += 1;
	xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
			xhci->num_active_eps);
	return 0;
}

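/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed.  Returns 1 on success.
 */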
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int timeleft;
	int ret;

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return 0;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* XXX: how much time for xHC slot assignment? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the enable slot request */
		return 0;
	}

	if (!xhci->slot_id) {
		xhci_err(xhci, "Error while assigning device slot ID\n");
		return 0;
	}

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		ret = xhci_reserve_host_control_ep_resources(xhci);
		if (ret) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			xhci_warn(xhci, "Not enough host resources, "
					"active endpoint contexts = %u\n",
					xhci->num_active_eps);
			goto disable_slot;
		}
		spin_unlock_irqrestore(&xhci->lock, flags);
	}

	/* Use GFP_NOIO, since this function can be called from
	 * xhci_discover_or_reset_device(), which may be called as part of
	 * mass storage driver error handling.
	 */
	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
		goto disable_slot;
	}
	udev->slot_id = xhci->slot_id;

	return 1;

disable_slot:
	/* Disable slot, if we can do it without mem alloc */
	spin_lock_irqsave(&xhci->lock, flags);
	if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
		xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return 0;
}

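/*
 * Issue an Address Device command (which will issue a SetAddress request to
 * the device) and wait for it to complete.
 *
 * We add one to the device address issued by the hardware because the USB core
 * uses address 1 for the root hubs (even though they're not really devices).
 */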
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	unsigned long flags;
	int timeleft;
	struct xhci_virt_device *virt_dev;
	int ret = 0;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u64 temp_64;

	if (!udev->slot_id) {
		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
		return -EINVAL;
	}

	virt_dev = xhci->devs[udev->slot_id];

	if (WARN_ON(!virt_dev)) {
		/*
		 * The virt_dev should have been allocated when the slot was
		 * enabled; if it is gone, the device state is inconsistent,
		 * so warn and bail out instead of dereferencing NULL.
		 */
		xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
				udev->slot_id);
		return -EINVAL;
	}

	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	/*
	 * If this is the first Set Address since device plug-in or
	 * virt_device reallocation after a resume with an xHCI power loss,
	 * then set up the slot context.
	 */
	if (!slot_ctx->dev_info)
		xhci_setup_addressable_virt_dev(xhci, udev);
	/* Otherwise, update the control endpoint ring enqueue pointer. */
	else
		xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
			udev->slot_id);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return ret;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	/* FIXME: From xHCI section 4.3.4, software is responsible for timing
	 * the SetAddress recovery interval and aborting the command on a
	 * timeout.
	 */
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for address device command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the address device command */
		return -ETIME;
	}

	switch (virt_dev->cmd_status) {
	case COMP_CTX_STATE:
	case COMP_EBADSLT:
		xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
				udev->slot_id);
		ret = -EINVAL;
		break;
	case COMP_TX_ERR:
		dev_warn(&udev->dev, "Device not responding to set address.\n");
		ret = -EPROTO;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev, "ERROR: Incompatible device for address "
				"device command.\n");
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful Address Device command\n");
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", virt_dev->cmd_status);
		xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
		ret = -EINVAL;
		break;
	}
	if (ret)
		return ret;
	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
	xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
			udev->slot_id,
			&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
			(unsigned long long)
			le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
			(unsigned long long)virt_dev->out_ctx->dma);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	/* Use the kernel-assigned address for the device; store the
	 * xHC-assigned address locally.
	 */
	virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
		+ 1;
	/* Zero the input context control for later use */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;

	xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);

	return 0;
}

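/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */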
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}
	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
	if (tt->multi)
		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub "
				"TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 *
		 * xHCI 1.0: this field shall be 0 if the device is not a
		 * high-speed hub.
		 */
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
			slot_ctx->tt_info |=
				cpu_to_le32(TT_THINK_TIME(think_time));
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
				"TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");
	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);

	xhci_free_command(xhci, config_cmd);
	return ret;
}

int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* The microframe index counts 125 us intervals; shift by 3 for
	 * 1 ms frames.
	 */
	return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
}

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
#ifdef CONFIG_PCI
	int retval = 0;

	retval = xhci_register_pci();
	if (retval < 0) {
		printk(KERN_DEBUG "Problem registering PCI driver.\n");
		return retval;
	}
#endif
	/*
	 * Check the compiler-generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* Stream contexts, TRBs, and event ring segment table entries are
	 * each four 32-bit fields.
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	return 0;
}
module_init(xhci_hcd_init);

static void __exit xhci_hcd_cleanup(void)
{
#ifdef CONFIG_PCI
	xhci_unregister_pci();
#endif
}
module_exit(xhci_hcd_cleanup);