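/*
 * xHCI host controller driver - memory management.
 *
 * Allocation and teardown of the data structures the host controller
 * shares with the driver: ring segments, device and input contexts,
 * stream context arrays, scratchpad buffers, and the event ring
 * segment table (ERST).
 */
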
#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>

#include "xhci.h"
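
/*
 * Allocate a generic ring segment from the segment pool, set its DMA
 * address, zero the TRBs, and leave the private next pointer unlinked.
 * With cycle_state == 0 the cycle bit of every TRB is set so that the
 * driver's first pass over the ring owns it.
 */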
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
					unsigned int cycle_state, gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t	dma;
	int		i;

	seg = kzalloc(sizeof *seg, flags);
	if (!seg)
		return NULL;

	seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}

	memset(seg->trbs, 0, SEGMENT_SIZE);
	/* If the cycle state is zero, set the cycle bit to 1 for all the TRBs */
	if (cycle_state == 0) {
		for (i = 0; i < TRBS_PER_SEGMENT; i++)
			seg->trbs[i].link.control |= TRB_CYCLE;
	}
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}

static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (seg->trbs) {
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	kfree(seg);
}

/* Free every segment on a circularly linked ring, starting with @first. */
static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
				struct xhci_segment *first)
{
	struct xhci_segment *seg;

	seg = first->next;
	while (seg != first) {
		struct xhci_segment *next = seg->next;
		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first);
}
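
/*
 * Make the prev segment point to the next segment.  For every ring type
 * except the event ring (which the hardware produces), the last TRB of
 * prev becomes a Link TRB pointing at next's DMA address.  The chain
 * bit is set on the Link TRB only on hosts that need it: 0.95-style
 * quirky hosts, and AMD 0.96 hosts on isochronous rings.
 */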
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
		struct xhci_segment *next, enum xhci_ring_type type)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (type != TYPE_EVENT) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
			cpu_to_le64(next->dma);

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		/* Always set the chain bit with 0.95 hardware */
		/* Set chain bit for isoc rings on AMD 0.96 host */
		if (xhci_link_trb_quirk(xhci) ||
				(type == TYPE_ISOC &&
				 (xhci->quirks & XHCI_AMD_0x96_HOST)))
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}
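
/*
 * Splice the segments from @first to @last into @ring just past the
 * current enqueue segment, and move the link TRB that carries the
 * cycle-toggle bit if the enqueue segment used to be the last one.
 */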
static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *first, struct xhci_segment *last,
		unsigned int num_segs)
{
	struct xhci_segment *next;

	if (!ring || !first || !last)
		return;

	next = ring->enq_seg->next;
	xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
	xhci_link_segments(xhci, last, next, ring->type);
	ring->num_segs += num_segs;
	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;

	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
		ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
			&= ~cpu_to_le32(LINK_TOGGLE);
		last->trbs[TRBS_PER_SEGMENT-1].link.control
			|= cpu_to_le32(LINK_TOGGLE);
		ring->last_seg = last;
	}
}
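
/* Free the ring and all of its segments */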
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	if (!ring)
		return;

	if (ring->first_seg)
		xhci_free_segments_for_ring(xhci, ring->first_seg);

	kfree(ring);
}

static void xhci_initialize_ring_info(struct xhci_ring *ring,
					unsigned int cycle_state)
{
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/*
	 * The ring is initialized to 0.  The producer must write 1 to the
	 * cycle bit to hand ownership of the TRB to the host, so the producer
	 * and consumer cycle states both start at 1 for a fresh ring.
	 *
	 * New rings are initialized with cycle state equal to 1; if we are
	 * handling ring expansion, set the cycle state equal to the old ring.
	 */
	ring->cycle_state = cycle_state;
	/* Not necessary for new rings, but needed for re-initialized rings */
	ring->enq_updates = 0;
	ring->deq_updates = 0;

	/*
	 * Each segment has a link TRB, and leave an extra TRB for SW
	 * accounting purposes.
	 */
	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
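
/* Allocate segments and link them into a ring */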
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
		struct xhci_segment **first, struct xhci_segment **last,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_segment *prev;

	prev = xhci_segment_alloc(xhci, cycle_state, flags);
	if (!prev)
		return -ENOMEM;
	num_segs--;

	*first = prev;
	while (num_segs > 0) {
		struct xhci_segment	*next;

		next = xhci_segment_alloc(xhci, cycle_state, flags);
		if (!next) {
			/*
			 * The chain is not circularly linked yet, so
			 * xhci_free_segments_for_ring() would walk off the
			 * end; free the partial chain by hand instead.
			 */
			prev = *first;
			while (prev) {
				next = prev->next;
				xhci_segment_free(xhci, prev);
				prev = next;
			}
			return -ENOMEM;
		}
		xhci_link_segments(xhci, prev, next, type);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, *first, type);
	*last = prev;

	return 0;
}
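
/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.  For every ring type except
 * the event ring, set the cycle toggle bit on the link TRB of the last
 * segment so the hardware knows when the ring wraps.
 */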
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_ring	*ring;
	int ret;

	ring = kzalloc(sizeof *(ring), flags);
	if (!ring)
		return NULL;

	ring->num_segs = num_segs;
	INIT_LIST_HEAD(&ring->td_list);
	ring->type = type;
	if (num_segs == 0)
		return ring;

	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
			&ring->last_seg, num_segs, cycle_state, type, flags);
	if (ret)
		goto fail;

	/* Only the event ring does not use link TRBs */
	if (type != TYPE_EVENT) {
		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
			cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring, cycle_state);
	return ring;

fail:
	xhci_ring_free(xhci, ring);
	return NULL;
}

void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		unsigned int ep_index)
{
	int rings_cached;

	rings_cached = virt_dev->num_rings_cached;
	if (rings_cached < XHCI_MAX_RINGS_CACHED) {
		virt_dev->ring_cache[rings_cached] =
			virt_dev->eps[ep_index].ring;
		virt_dev->num_rings_cached++;
		xhci_dbg(xhci, "Cached old ring, %d ring%s cached\n",
				virt_dev->num_rings_cached,
				(virt_dev->num_rings_cached > 1) ? "s" : "");
	} else {
		xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
		xhci_dbg(xhci, "Ring cache full (%d rings), freeing ring\n",
				virt_dev->num_rings_cached);
	}
	virt_dev->eps[ep_index].ring = NULL;
}
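
/*
 * Zero an endpoint ring (except for link TRBs) and move the enqueue and
 * dequeue pointers back to the beginning of the ring.
 */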
static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
			struct xhci_ring *ring, unsigned int cycle_state,
			enum xhci_ring_type type)
{
	struct xhci_segment	*seg = ring->first_seg;
	int i;

	do {
		memset(seg->trbs, 0,
				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
		if (cycle_state == 0) {
			for (i = 0; i < TRBS_PER_SEGMENT; i++)
				seg->trbs[i].link.control |= TRB_CYCLE;
		}
		/* All endpoint rings have link TRBs */
		xhci_link_segments(xhci, seg, seg->next, type);
		seg = seg->next;
	} while (seg != ring->first_seg);
	ring->type = type;
	xhci_initialize_ring_info(ring, cycle_state);
	/*
	 * The ring is already empty, with a dequeue pointer equal to the
	 * enqueue pointer, so reinitializing the TD list head is enough.
	 */
	INIT_LIST_HEAD(&ring->td_list);
}
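
/*
 * Expand a ring so it holds at least @num_trbs more TRBs.  The new
 * segments are allocated first and only spliced in on success, so the
 * ring is left untouched if allocation fails.
 */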
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
				unsigned int num_trbs, gfp_t flags)
{
	struct xhci_segment	*first;
	struct xhci_segment	*last;
	unsigned int		num_segs;
	unsigned int		num_segs_needed;
	int			ret;

	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
				(TRBS_PER_SEGMENT - 1);

	/* Allocate the number of segments we need, or double the ring size */
	num_segs = ring->num_segs > num_segs_needed ?
			ring->num_segs : num_segs_needed;

	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
			num_segs, ring->cycle_state, ring->type, flags);
	if (ret)
		return -ENOMEM;

	xhci_link_rings(xhci, ring, first, last, num_segs);
	xhci_dbg(xhci, "ring expansion succeeded, now has %d segments\n",
			ring->num_segs);

	return 0;
}
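
/*
 * Device contexts are either 32 or 64 bytes per entry, depending on
 * HCC_64BYTE_CONTEXT.  An input context carries one extra entry (the
 * input control context) in front of the slot and endpoint contexts.
 */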
#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)

static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
						    int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	memset(ctx->bytes, 0, ctx->size);
	return ctx;
}

static void xhci_free_container_ctx(struct xhci_hcd *xhci,
			     struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}

struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
					      struct xhci_container_ctx *ctx)
{
	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
	return (struct xhci_input_control_ctx *)ctx->bytes;
}

struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
					struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	/* Skip the input control context at the front of an input context */
	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}

struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/* Increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}


/***************** Streams structures manipulation *************************/

static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
		dma_free_coherent(&pdev->dev,
				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
				stream_ctx, dma);
	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
		dma_pool_free(xhci->small_streams_pool,
				stream_ctx, dma);
	else
		dma_pool_free(xhci->medium_streams_pool,
				stream_ctx, dma);
}
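
/*
 * The stream context array for an endpoint with bulk streams must be
 * allocated in one contiguous block.  It can be several kilobytes, so
 * the allocator is chosen by size: dma_alloc_coherent() for large
 * arrays, and a dma_pool for small and medium ones.
 */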
static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs, dma_addr_t *dma,
		gfp_t mem_flags)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
		return dma_alloc_coherent(&pdev->dev,
				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
				dma, mem_flags);
	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_alloc(xhci->small_streams_pool,
				mem_flags, dma);
	else
		return dma_pool_alloc(xhci->medium_streams_pool,
				mem_flags, dma);
}

struct xhci_ring *xhci_dma_to_transfer_ring(
		struct xhci_virt_ep *ep,
		u64 address)
{
	if (ep->ep_state & EP_HAS_STREAMS)
		return radix_tree_lookup(&ep->stream_info->trb_address_map,
				address >> SEGMENT_SHIFT);
	return ep->ring;
}

/* Only use this when you know stream_info is valid */
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static struct xhci_ring *dma_to_stream_ring(
		struct xhci_stream_info *stream_info,
		u64 address)
{
	return radix_tree_lookup(&stream_info->trb_address_map,
			address >> SEGMENT_SHIFT);
}
#endif	/* CONFIG_USB_XHCI_HCD_DEBUGGING */

struct xhci_ring *xhci_stream_id_to_ring(
		struct xhci_virt_device *dev,
		unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep = &dev->eps[ep_index];

	if (stream_id == 0)
		return ep->ring;
	if (!ep->stream_info)
		return NULL;

	if (stream_id > ep->stream_info->num_streams)
		return NULL;
	return ep->stream_info->stream_rings[stream_id];
}

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static int xhci_test_radix_tree(struct xhci_hcd *xhci,
		unsigned int num_streams,
		struct xhci_stream_info *stream_info)
{
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	u64 addr;

	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		struct xhci_ring *mapped_ring;
		int trb_size = sizeof(union xhci_trb);

		cur_ring = stream_info->stream_rings[cur_stream];
		for (addr = cur_ring->first_seg->dma;
				addr < cur_ring->first_seg->dma + SEGMENT_SIZE;
				addr += trb_size) {
			mapped_ring = dma_to_stream_ring(stream_info, addr);
			if (cur_ring != mapped_ring) {
				xhci_warn(xhci, "WARN: DMA address 0x%08llx "
						"didn't map to stream ID %u; "
						"mapped to ring %p\n",
						(unsigned long long) addr,
						cur_stream,
						mapped_ring);
				return -EINVAL;
			}
		}
		/* One TRB after the end of the ring segment shouldn't return a
		 * pointer to the current ring (although it may be a part of a
		 * different ring).
		 */
		mapped_ring = dma_to_stream_ring(stream_info, addr);
		if (mapped_ring != cur_ring) {
			/* One TRB before should also fail */
			addr = cur_ring->first_seg->dma - trb_size;
			mapped_ring = dma_to_stream_ring(stream_info, addr);
		}
		if (mapped_ring == cur_ring) {
			xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx "
					"mapped to valid stream ID %u; "
					"mapped ring = %p\n",
					(unsigned long long) addr,
					cur_stream,
					mapped_ring);
			return -EINVAL;
		}
	}
	return 0;
}
#endif	/* CONFIG_USB_XHCI_HCD_DEBUGGING */
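
/*
 * Change an endpoint's internal structure so it supports stream IDs.
 * The number of requested streams includes stream 0, which cannot be
 * used by device drivers.
 *
 * The stream context array must be a power of two in size, so it may
 * hold more entries than the number of streams the driver asked for.
 *
 * A radix tree maps the upper bits of each ring segment's DMA address
 * to the stream ring it belongs to, because completion events only
 * report a TRB's DMA address, not the stream ID it came from.
 */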
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		unsigned int num_streams, gfp_t mem_flags)
{
	struct xhci_stream_info *stream_info;
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	unsigned long key;
	u64 addr;
	int ret;

	xhci_dbg(xhci, "Allocating %u streams and %u stream context array entries.\n",
			num_streams, num_stream_ctxs);
	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
		return NULL;
	}
	xhci->cmd_ring_reserved_trbs++;

	stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
	if (!stream_info)
		goto cleanup_trbs;

	stream_info->num_streams = num_streams;
	stream_info->num_stream_ctxs = num_stream_ctxs;

	/* Initialize the array of virtual pointers to stream rings. */
	stream_info->stream_rings = kzalloc(
			sizeof(struct xhci_ring *)*num_streams,
			mem_flags);
	if (!stream_info->stream_rings)
		goto cleanup_info;

	/* Initialize the array of DMA addresses for stream rings for the HW. */
	stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
			num_stream_ctxs, &stream_info->ctx_array_dma,
			mem_flags);
	if (!stream_info->stream_ctx_array)
		goto cleanup_ctx;
	memset(stream_info->stream_ctx_array, 0,
			sizeof(struct xhci_stream_ctx)*num_stream_ctxs);

	/* Allocate everything needed to free the stream rings later */
	stream_info->free_streams_command =
		xhci_alloc_command(xhci, true, true, mem_flags);
	if (!stream_info->free_streams_command)
		goto cleanup_ctx_array;

	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);

	/*
	 * Allocate rings for all the streams that the driver will use,
	 * and add their segment DMA addresses to the radix tree.
	 * Stream 0 is reserved.
	 */
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		stream_info->stream_rings[cur_stream] =
			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, mem_flags);
		cur_ring = stream_info->stream_rings[cur_stream];
		if (!cur_ring)
			goto cleanup_rings;
		cur_ring->stream_id = cur_stream;
		/* Set deq ptr, cycle bit, and stream context type */
		addr = cur_ring->first_seg->dma |
			SCT_FOR_CTX(SCT_PRI_TR) |
			cur_ring->cycle_state;
		stream_info->stream_ctx_array[cur_stream].stream_ring =
			cpu_to_le64(addr);
		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
				cur_stream, (unsigned long long) addr);

		key = (unsigned long)
			(cur_ring->first_seg->dma >> SEGMENT_SHIFT);
		ret = radix_tree_insert(&stream_info->trb_address_map,
				key, cur_ring);
		if (ret) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
			goto cleanup_rings;
		}
	}
	/*
	 * Leave the stream context array entries we just set up untouched
	 * until a Configure Endpoint command runs.
	 */

#if XHCI_DEBUG
	/* Do a little test on the radix tree to make sure it returns the
	 * correct values.
	 */
	if (xhci_test_radix_tree(xhci, num_streams, stream_info))
		goto cleanup_rings;
#endif

	return stream_info;

cleanup_rings:
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			addr = cur_ring->first_seg->dma;
			radix_tree_delete(&stream_info->trb_address_map,
					addr >> SEGMENT_SHIFT);
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx_array:
	/* Free the stream context array too, or it would leak on error. */
	xhci_free_stream_ctx(xhci, num_stream_ctxs,
			stream_info->stream_ctx_array,
			stream_info->ctx_array_dma);
cleanup_ctx:
	kfree(stream_info->stream_rings);
cleanup_info:
	kfree(stream_info);
cleanup_trbs:
	xhci->cmd_ring_reserved_trbs--;
	return NULL;
}
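
/*
 * Sets the MaxPStreams field and the Linear Stream Array field.
 * Sets the dequeue pointer to the stream context array.
 */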
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_stream_info *stream_info)
{
	u32 max_primary_streams;
	/* MaxPStreams is the number of stream context array entries, not the
	 * number we're actually using.  Must be in 2^(MaxPstreams + 1) format.
	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9.
	 */
	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
	xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n",
			1 << (max_primary_streams + 1));
	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
				       | EP_HAS_LSA);
	ep_ctx->deq  = cpu_to_le64(stream_info->ctx_array_dma);
}
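
/*
 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
 * Reinstalls the "normal" endpoint ring (at its current dequeue mark,
 * not at the beginning of the ring).
 */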
void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_virt_ep *ep)
{
	dma_addr_t addr;
	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
	ep_ctx->deq  = cpu_to_le64(addr | ep->ring->cycle_state);
}
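
/*
 * Frees all stream contexts associated with the endpoint.
 *
 * Caller should fix the endpoint context streams fields.
 */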
void xhci_free_stream_info(struct xhci_hcd *xhci,
		struct xhci_stream_info *stream_info)
{
	int cur_stream;
	struct xhci_ring *cur_ring;
	dma_addr_t addr;

	if (!stream_info)
		return;

	for (cur_stream = 1; cur_stream < stream_info->num_streams;
			cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			addr = cur_ring->first_seg->dma;
			radix_tree_delete(&stream_info->trb_address_map,
					addr >> SEGMENT_SHIFT);
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
	xhci->cmd_ring_reserved_trbs--;
	if (stream_info->stream_ctx_array)
		xhci_free_stream_ctx(xhci,
				stream_info->num_stream_ctxs,
				stream_info->stream_ctx_array,
				stream_info->ctx_array_dma);

	kfree(stream_info->stream_rings);
	kfree(stream_info);
}


/***************** Device context manipulation *************************/

static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	init_timer(&ep->stop_cmd_timer);
	ep->stop_cmd_timer.data = (unsigned long) ep;
	ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
	ep->xhci = xhci;
}

static void xhci_free_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int slot_id)
{
	struct list_head *tt_list_head;
	struct xhci_tt_bw_info *tt_info, *next;
	bool slot_found = false;

	/* If the device never made it past the Set Address stage,
	 * it may not have the real_port set correctly.
	 */
	if (virt_dev->real_port == 0 ||
			virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "Bad real port.\n");
		return;
	}

	tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		/* Multi-TT hubs will have more than one entry */
		if (tt_info->slot_id == slot_id) {
			slot_found = true;
			list_del(&tt_info->tt_list);
			kfree(tt_info);
		} else if (slot_found) {
			break;
		}
	}
}

int xhci_alloc_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_tt_bw_info		*tt_info;
	unsigned int			num_ports;
	int				i, j;

	/* A multi-TT hub has one TT per downstream port */
	if (!tt->multi)
		num_ports = 1;
	else
		num_ports = hdev->maxchild;

	for (i = 0; i < num_ports; i++) {
		struct xhci_interval_bw_table *bw_table;

		tt_info = kzalloc(sizeof(*tt_info), mem_flags);
		if (!tt_info)
			goto free_tts;
		INIT_LIST_HEAD(&tt_info->tt_list);

		/* Add the TT to the root port's bandwidth domain list */
		list_add(&tt_info->tt_list,
				&xhci->rh_bw[virt_dev->real_port - 1].tts);
		tt_info->slot_id = virt_dev->udev->slot_id;
		if (tt->multi)
			tt_info->ttport = i+1;
		bw_table = &tt_info->bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}
	return 0;

free_tts:
	xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
	return -ENOMEM;
}
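
/* All the xhci_tds in the ring's TD list should be freed at this point.
 * Should be called with xhci->lock held if there is any chance the TT lists
 * will touch the bandwidth table lists.
 */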
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;
	int old_active_eps = 0;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];
	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
	if (!dev)
		return;

	if (dev->tt_info)
		old_active_eps = dev->tt_info->active_eps;

	for (i = 0; i < 31; ++i) {
		if (dev->eps[i].ring)
			xhci_ring_free(xhci, dev->eps[i].ring);
		if (dev->eps[i].stream_info)
			xhci_free_stream_info(xhci,
					dev->eps[i].stream_info);
		/* Endpoints on the TT/root port bandwidth lists should have
		 * been removed when this device was deconfigured; warn if
		 * they were not.
		 */
		if (!list_empty(&dev->eps[i].bw_endpoint_list))
			xhci_warn(xhci, "Slot %u endpoint %u "
					"not removed from BW list!\n",
					slot_id, i);
	}
	/* If this is a hub, free the TT(s) from the TT list */
	xhci_free_tt_info(xhci, dev, slot_id);
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, dev, old_active_eps);

	if (dev->ring_cache) {
		for (i = 0; i < dev->num_rings_cached; i++)
			xhci_ring_free(xhci, dev->ring_cache[i]);
		kfree(dev->ring_cache);
	}

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}

int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
	if (!xhci->devs[slot_id])
		return 0;
	dev = xhci->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for the address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->in_ctx->dma);

	/* Initialize the cancellation list and watchdog timers for each ep */
	for (i = 0; i < 31; i++) {
		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
	}

	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, flags);
	if (!dev->eps[0].ring)
		goto fail;

	/* Allocate pointers to the ring cache */
	dev->ring_cache = kzalloc(
			sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
			flags);
	if (!dev->ring_cache)
		goto fail;
	dev->num_rings_cached = 0;

	init_completion(&dev->cmd_completion);
	INIT_LIST_HEAD(&dev->cmd_list);
	dev->udev = udev;

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
		 slot_id,
		 &xhci->dcbaa->dev_context_ptrs[slot_id],
		 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));

	return 1;
fail:
	xhci_free_virt_device(xhci, slot_id);
	return 0;
}

void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx	*ep0_ctx;
	struct xhci_ring	*ep_ring;

	virt_dev = xhci->devs[udev->slot_id];
	ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
	ep_ring = virt_dev->eps[0].ring;
	/*
	 * FIXME we don't keep track of the dequeue pointer very well after a
	 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
	 * host to our enqueue pointer.  This should only be called after a
	 * configured device has reset, so all control transfers should have
	 * been completed or cancelled before the reset.
	 */
	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
							ep_ring->enqueue)
				   | ep_ring->cycle_state);
}
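
/*
 * The xHCI roothub may have ports of differing speeds in any order in
 * the port status registers.  The xHC hardware numbers all its ports
 * consecutively, but the USB core splits them between two virtual root
 * hubs (one USB 2.0, one USB 3.0), each with its own port numbering.
 * Walk the similar-speed ports to translate the core's "fake" port
 * number into the real hardware port number.
 */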
static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct usb_device *top_dev;
	unsigned int num_similar_speed_ports;
	unsigned int faked_port_num;
	int i;

	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	faked_port_num = top_dev->portnum;
	for (i = 0, num_similar_speed_ports = 0;
			i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
		u8 port_speed = xhci->port_array[i];

		/*
		 * Skip ports that don't have known speeds, or have duplicate
		 * Extended Capabilities port speed entries.
		 */
		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
			continue;

		/*
		 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
		 * 1.1 ports are under the USB 2.0 hub.  If the port speed
		 * matches the device speed, it's a similar speed port.
		 */
		if ((port_speed == 0x03) == (udev->speed == USB_SPEED_SUPER))
			num_similar_speed_ports++;
		if (num_similar_speed_ports == faked_port_num)
			/* Roothub ports are numbered from 1 to N */
			return i+1;
	}
	return 0;
}

/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx	*ep0_ctx;
	struct xhci_slot_ctx	*slot_ctx;
	u32			port_num;
	struct usb_device *top_dev;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		break;
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		BUG();
	}
	/* Find the root hub port this device is under */
	port_num = xhci_find_real_port_number(xhci, udev);
	if (!port_num)
		return -EINVAL;
	slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
	/* Set the port number in the virtual_device to the faked port number */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	dev->fake_port = top_dev->portnum;
	dev->real_port = port_num;
	xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);

	/* Find the right bandwidth table that this device will be a part of.
	 * If this is a full speed device attached directly to a root port (or
	 * a descendant of one), it counts as a primary bandwidth domain, not a
	 * secondary bandwidth domain under a TT.  An xhci_tt_info structure
	 * will never be created for the HS root hub.
	 */
	if (!udev->tt || !udev->tt->hub->parent) {
		dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
	} else {
		struct xhci_root_port_bw_info *rh_bw;
		struct xhci_tt_bw_info *tt_bw;

		rh_bw = &xhci->rh_bw[port_num - 1];
		/* Find the right TT. */
		list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
			if (tt_bw->slot_id != udev->tt->hub->slot_id)
				continue;

			if (!dev->udev->tt->multi ||
					(udev->tt->multi &&
					 tt_bw->ttport == dev->udev->ttport)) {
				dev->bw_table = &tt_bw->bw_table;
				dev->tt_info = tt_bw;
				break;
			}
		}
		if (!dev->tt_info)
			xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
	}

	/* Is this a LS/FS device under an external HS hub? */
	if (udev->tt && udev->tt->hub->parent) {
		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
						(udev->ttport << 8));
		if (udev->tt->multi)
			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
	/* Set the default max packet size for endpoint 0, based on speed */
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(512));
		break;
	case USB_SPEED_HIGH:
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(64));
		break;
	case USB_SPEED_LOW:
		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(8));
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* New speed? */
		BUG();
	}
	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3));

	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
				   dev->eps[0].ring->cycle_state);

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}
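
/*
 * Convert interval expressed as 2^(bInterval - 1) == interval into
 * straight exponent value 2^n == interval.
 */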
static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval;

	interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
	if (interval != ep->desc.bInterval - 1)
		dev_warn(&udev->dev,
			 "ep %#x - rounding interval to %d %sframes\n",
			 ep->desc.bEndpointAddress,
			 1 << interval,
			 udev->speed == USB_SPEED_FULL ? "" : "micro");

	if (udev->speed == USB_SPEED_FULL) {
		/*
		 * Full speed isoc endpoints specify interval in frames,
		 * not microframes.  We are using microframes everywhere,
		 * so adjust accordingly.
		 */
		interval += 3;	/* 1 frame = 2^3 uframes */
	}

	return interval;
}

/*
 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
 * microframes, rounded down to nearest power of 2.
 */
static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
		struct usb_host_endpoint *ep, unsigned int desc_interval,
		unsigned int min_exponent, unsigned int max_exponent)
{
	unsigned int interval;

	interval = fls(desc_interval) - 1;
	interval = clamp_val(interval, min_exponent, max_exponent);
	if ((1 << interval) != desc_interval)
		dev_warn(&udev->dev,
			 "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
			 ep->desc.bEndpointAddress,
			 1 << interval,
			 desc_interval);

	return interval;
}

static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval, 0, 15);
}

/* Frames are expressed as bInterval * 8 microframes */
static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval * 8, 3, 10);
}

/*
 * Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval
 * field is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if the
 * interval field is zero.
 */
static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
		    usb_endpoint_xfer_bulk(&ep->desc)) {
			interval = xhci_parse_microframe_interval(udev, ep);
			break;
		}
		/* Fall through - SS and HS isoc/int use the same decoding */

	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
		}
		break;

	case USB_SPEED_FULL:
		if (usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
			break;
		}
		/*
		 * Fall through for interrupt endpoint interval decoding
		 * since it uses the same rules as low speed interrupt
		 * endpoints.
		 */

	case USB_SPEED_LOW:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {

			interval = xhci_parse_frame_interval(udev, ep);
		}
		break;

	default:
		BUG();
	}
	return EP_INTERVAL(interval);
}

/*
 * The "Mult" field in the endpoint context is only set for SuperSpeed
 * isoc endpoints; it holds the number of bursts per interval, minus one.
 * It must be 0 for devices below SuperSpeed and for non-isoc endpoints.
 */
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (udev->speed != USB_SPEED_SUPER ||
			!usb_endpoint_xfer_isoc(&ep->desc))
		return 0;
	return ep->ss_ep_comp.bmAttributes;
}

static u32 xhci_get_endpoint_type(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int in;
	u32 type;

	in = usb_endpoint_dir_in(&ep->desc);
	if (usb_endpoint_xfer_control(&ep->desc)) {
		type = EP_TYPE(CTRL_EP);
	} else if (usb_endpoint_xfer_bulk(&ep->desc)) {
		if (in)
			type = EP_TYPE(BULK_IN_EP);
		else
			type = EP_TYPE(BULK_OUT_EP);
	} else if (usb_endpoint_xfer_isoc(&ep->desc)) {
		if (in)
			type = EP_TYPE(ISOC_IN_EP);
		else
			type = EP_TYPE(ISOC_OUT_EP);
	} else if (usb_endpoint_xfer_int(&ep->desc)) {
		if (in)
			type = EP_TYPE(INT_IN_EP);
		else
			type = EP_TYPE(INT_OUT_EP);
	} else {
		BUG();
	}
	return type;
}

/*
 * Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and mult size.
 */
static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int max_burst;
	int max_packet;

	/* Only applies for interrupt or isochronous endpoints */
	if (usb_endpoint_xfer_control(&ep->desc) ||
			usb_endpoint_xfer_bulk(&ep->desc))
		return 0;

	if (udev->speed == USB_SPEED_SUPER)
		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);

	max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
	max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
	/* A 0 in the max burst field means one transaction per ESIT */
	return max_packet * (max_burst + 1);
}
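
/* Set up an endpoint with one ring segment.  Do not allocate stream rings. */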
int xhci_endpoint_init(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *udev,
		struct usb_host_endpoint *ep,
		gfp_t mem_flags)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	unsigned int max_burst;
	enum xhci_ring_type type;
	u32 max_esit_payload;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	type = usb_endpoint_type(&ep->desc);
	/* Set up the endpoint ring */
	virt_dev->eps[ep_index].new_ring =
		xhci_ring_alloc(xhci, 2, 1, type, mem_flags);
	if (!virt_dev->eps[ep_index].new_ring) {
		/* Attempt to use the ring cache */
		if (virt_dev->num_rings_cached == 0)
			return -ENOMEM;
		/* Pop the most recently cached ring off the cache */
		virt_dev->num_rings_cached--;
		virt_dev->eps[ep_index].new_ring =
			virt_dev->ring_cache[virt_dev->num_rings_cached];
		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
					1, type);
	}
	virt_dev->eps[ep_index].skip = false;
	ep_ring = virt_dev->eps[ep_index].new_ring;
	ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state);

	ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep)
				      | EP_MULT(xhci_get_endpoint_mult(udev, ep)));

	/*
	 * Allow 3 retries for everything but isoc, set CErr = 3 in the
	 * endpoint context; isoc endpoints are not retried, so CErr = 0.
	 */
	if (!usb_endpoint_xfer_isoc(&ep->desc))
		ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(3));
	else
		ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(0));

	ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep));

	/* Set the max packet size and max burst */
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		max_packet = usb_endpoint_maxp(&ep->desc);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
		/* dig out max burst from ep companion desc */
		max_packet = ep->ss_ep_comp.bMaxBurst;
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet));
		break;
	case USB_SPEED_HIGH:
		/* bits 11:12 specify the number of additional transaction
		 * opportunities per microframe (USB 2.0, section 9.6.6)
		 */
		if (usb_endpoint_xfer_isoc(&ep->desc) ||
				usb_endpoint_xfer_int(&ep->desc)) {
			max_burst = (usb_endpoint_maxp(&ep->desc)
				     & 0x1800) >> 11;
			ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst));
		}
		/* Fall through */
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
		break;
	default:
		BUG();
	}
	max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
	ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));

	/*
	 * XXX no idea how to calculate the average TRB buffer length for bulk
	 * endpoints, as the driver gives us no clue how big each scatter
	 * gather list entry (or buffer) is going to be.
	 *
	 * For isochronous and interrupt endpoints, set it to the max
	 * available, until there is new API in the USB core to allow drivers
	 * to declare how much bandwidth they actually need.
	 *
	 * The xHCI 1.0 specification says the Average TRB Length should be
	 * set to 8 for control endpoints.
	 */
	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
		ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
	else
		ep_ctx->tx_info |=
			 cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload));

	/* FIXME Debug endpoint context */
	return 0;
}

void xhci_endpoint_zero(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq = 0;
	ep_ctx->tx_info = 0;
	/* Don't free the endpoint ring until the set interface or
	 * configuration request succeeds.
	 */
}

void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
{
	bw_info->ep_interval = 0;
	bw_info->mult = 0;
	bw_info->num_packets = 0;
	bw_info->max_packet_size = 0;
	bw_info->type = 0;
	bw_info->max_esit_payload = 0;
}

void xhci_update_bw_info(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_input_control_ctx *ctrl_ctx,
		struct xhci_virt_device *virt_dev)
{
	struct xhci_bw_info *bw_info;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_type;
	int i;

	for (i = 1; i < 31; ++i) {
		bw_info = &virt_dev->eps[i].bw_info;

		/* We can't tell what endpoint type is being dropped, but
		 * unconditionally clearing the bandwidth info for non-periodic
		 * endpoints should be harmless because the info will never be
		 * set in the first place.
		 */
		if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
			/* Dropped endpoint */
			xhci_clear_endpoint_bw_info(bw_info);
			continue;
		}

		if (EP_IS_ADDED(ctrl_ctx, i)) {
			ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
			ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));

			/* Ignore non-periodic endpoints */
			if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
					ep_type != ISOC_IN_EP &&
					ep_type != INT_IN_EP)
				continue;

			/* Added or changed endpoint */
			bw_info->ep_interval = CTX_TO_EP_INTERVAL(
					le32_to_cpu(ep_ctx->ep_info));
			/* Number of packets and mult are zero-based in the
			 * input context, but we want one-based for the
			 * interval table.
			 */
			bw_info->mult = CTX_TO_EP_MULT(
					le32_to_cpu(ep_ctx->ep_info)) + 1;
			bw_info->num_packets = CTX_TO_MAX_BURST(
					le32_to_cpu(ep_ctx->ep_info2)) + 1;
			bw_info->max_packet_size = MAX_PACKET_DECODED(
					le32_to_cpu(ep_ctx->ep_info2));
			bw_info->type = ep_type;
			bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
					le32_to_cpu(ep_ctx->tx_info));
		}
	}
}

/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and
 * then issue a configure endpoint command.
 */
void xhci_endpoint_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}

/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the slot and then
 * issue a configure endpoint command.  Only the context entries field
 * matters, but we'll copy the whole thing anyway.
 */
void xhci_slot_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}
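
/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */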
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);

	if (!num_sp)
		return 0;

	xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
	if (!xhci->scratchpad)
		goto fail_sp;

	xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
				     num_sp * sizeof(u64),
				     &xhci->scratchpad->sp_dma, flags);
	if (!xhci->scratchpad->sp_array)
		goto fail_sp2;

	xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
	if (!xhci->scratchpad->sp_buffers)
		goto fail_sp3;

	xhci->scratchpad->sp_dma_buffers =
		kzalloc(sizeof(dma_addr_t) * num_sp, flags);
	if (!xhci->scratchpad->sp_dma_buffers)
		goto fail_sp4;

	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
	for (i = 0; i < num_sp; i++) {
		dma_addr_t dma;
		void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
					       flags);
		if (!buf)
			goto fail_sp5;

		xhci->scratchpad->sp_array[i] = dma;
		xhci->scratchpad->sp_buffers[i] = buf;
		xhci->scratchpad->sp_dma_buffers[i] = dma;
	}

	return 0;

 fail_sp5:
	for (i = i - 1; i >= 0; i--) {
		dma_free_coherent(dev, xhci->page_size,
				    xhci->scratchpad->sp_buffers[i],
				    xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);

 fail_sp4:
	kfree(xhci->scratchpad->sp_buffers);

 fail_sp3:
	dma_free_coherent(dev, num_sp * sizeof(u64),
			    xhci->scratchpad->sp_array,
			    xhci->scratchpad->sp_dma);

 fail_sp2:
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;

 fail_sp:
	return -ENOMEM;
}

static void scratchpad_free(struct xhci_hcd *xhci)
{
	int num_sp;
	int i;
	struct pci_dev	*pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (!xhci->scratchpad)
		return;

	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	for (i = 0; i < num_sp; i++) {
		dma_free_coherent(&pdev->dev, xhci->page_size,
				    xhci->scratchpad->sp_buffers[i],
				    xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);
	kfree(xhci->scratchpad->sp_buffers);
	dma_free_coherent(&pdev->dev, num_sp * sizeof(u64),
			    xhci->scratchpad->sp_array,
			    xhci->scratchpad->sp_dma);
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;
}

struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
		bool allocate_in_ctx, bool allocate_completion,
		gfp_t mem_flags)
{
	struct xhci_command *command;

	command = kzalloc(sizeof(*command), mem_flags);
	if (!command)
		return NULL;

	if (allocate_in_ctx) {
		command->in_ctx =
			xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
						 mem_flags);
		if (!command->in_ctx) {
			kfree(command);
			return NULL;
		}
	}

	if (allocate_completion) {
		command->completion =
			kzalloc(sizeof(struct completion), mem_flags);
		if (!command->completion) {
			xhci_free_container_ctx(xhci, command->in_ctx);
			kfree(command);
			return NULL;
		}
		init_completion(command->completion);
	}

	command->status = 0;
	INIT_LIST_HEAD(&command->cmd_list);
	return command;
}

void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
{
	if (urb_priv) {
		kfree(urb_priv->td[0]);
		kfree(urb_priv);
	}
}

void xhci_free_command(struct xhci_hcd *xhci,
		struct xhci_command *command)
{
	xhci_free_container_ctx(xhci,
			command->in_ctx);
	kfree(command->completion);
	kfree(command);
}

void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct pci_dev	*pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	struct dev_info	*dev_info, *next;
	struct xhci_cd  *cur_cd, *next_cd;
	unsigned long	flags;
	int size;
	int i, j, num_ports;

	/* Free the Event Ring Segment Table and the actual Event Ring */
	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
	if (xhci->erst.entries)
		dma_free_coherent(&pdev->dev, size,
				xhci->erst.entries, xhci->erst.erst_dma_addr);
	xhci->erst.entries = NULL;
	xhci_dbg(xhci, "Freed ERST\n");
	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg(xhci, "Freed event ring\n");

	if (xhci->lpm_command)
		xhci_free_command(xhci, xhci->lpm_command);
	xhci->cmd_ring_reserved_trbs = 0;
	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg(xhci, "Freed command ring\n");
	list_for_each_entry_safe(cur_cd, next_cd,
			&xhci->cancel_cmd_list, cancel_cmd_list) {
		list_del(&cur_cd->cancel_cmd_list);
		kfree(cur_cd);
	}

	for (i = 1; i < MAX_HC_SLOTS; ++i)
		xhci_free_virt_device(xhci, i);

	if (xhci->segment_pool)
		dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg(xhci, "Freed segment pool\n");

	if (xhci->device_pool)
		dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg(xhci, "Freed device context pool\n");

	if (xhci->small_streams_pool)
		dma_pool_destroy(xhci->small_streams_pool);
	xhci->small_streams_pool = NULL;
	xhci_dbg(xhci, "Freed small stream array pool\n");

	if (xhci->medium_streams_pool)
		dma_pool_destroy(xhci->medium_streams_pool);
	xhci->medium_streams_pool = NULL;
	xhci_dbg(xhci, "Freed medium stream array pool\n");

	if (xhci->dcbaa)
		dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;

	scratchpad_free(xhci);

	spin_lock_irqsave(&xhci->lock, flags);
	list_for_each_entry_safe(dev_info, next, &xhci->lpm_failed_devs, list) {
		list_del(&dev_info->list);
		kfree(dev_info);
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	for (i = 0; i < num_ports; i++) {
		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
			struct list_head *ep = &bwt->interval_bw[j].endpoints;
			while (!list_empty(ep))
				list_del_init(ep->next);
		}
	}

	for (i = 0; i < num_ports; i++) {
		struct xhci_tt_bw_info *tt, *n;
		list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
			list_del(&tt->tt_list);
			kfree(tt);
		}
	}

	xhci->num_usb2_ports = 0;
	xhci->num_usb3_ports = 0;
	xhci->num_active_eps = 0;
	kfree(xhci->usb2_ports);
	kfree(xhci->usb3_ports);
	kfree(xhci->port_array);
	kfree(xhci->rh_bw);

	xhci->page_size = 0;
	xhci->page_shift = 0;
	xhci->bus_state[0].bus_suspended = 0;
	xhci->bus_state[1].bus_suspended = 0;
}

static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *input_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t input_dma,
		struct xhci_segment *result_seg,
		char *test_name, int test_number)
{
	unsigned long long start_dma;
	unsigned long long end_dma;
	struct xhci_segment *seg;

	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);

	seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
	if (seg != result_seg) {
		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
				test_name, test_number);
		xhci_warn(xhci, "Tested TRB math w/ seg %p and "
				"input DMA 0x%llx\n",
				input_seg,
				(unsigned long long) input_dma);
		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
				"ending TRB %p (0x%llx DMA)\n",
				start_trb, start_dma,
				end_trb, end_dma);
		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
				result_seg, seg);
		return -1;
	}
	return 0;
}
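
/* TRB math checks for xhci_trb_virt_to_dma(), just in case we suck at math. */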
static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
{
	struct {
		dma_addr_t		input_dma;
		struct xhci_segment	*result_seg;
	} simple_test_vector [] = {
		/* A zeroed DMA field should fail */
		{ 0, NULL },
		/* One TRB before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 16, NULL },
		/* One byte before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 1, NULL },
		/* Starting TRB should succeed */
		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
		/* Ending TRB should succeed */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
			xhci->event_ring->first_seg },
		/* One byte after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
		/* One TRB after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
		/* An address of all ones should fail */
		{ (dma_addr_t) (~0), NULL },
	};
	struct {
		struct xhci_segment	*input_seg;
		union xhci_trb		*start_trb;
		union xhci_trb		*end_trb;
		dma_addr_t		input_dma;
		struct xhci_segment	*result_seg;
	} complex_test_vector [] = {
		/* Test feeding a valid DMA address from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid start and end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->cmd_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[0],
			.end_trb = &xhci->event_ring->first_seg->trbs[3],
			.input_dma = xhci->event_ring->first_seg->dma + 4*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[3],
			.end_trb = &xhci->event_ring->first_seg->trbs[6],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
			.result_seg = NULL,
		},
		/* TRB not in this ring, and we have a wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
	};

	unsigned int num_tests;
	int i, ret;

	num_tests = ARRAY_SIZE(simple_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				xhci->event_ring->first_seg,
				xhci->event_ring->first_seg->trbs,
				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
				simple_test_vector[i].input_dma,
				simple_test_vector[i].result_seg,
				"Simple", i);
		if (ret < 0)
			return ret;
	}

	num_tests = ARRAY_SIZE(complex_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				complex_test_vector[i].input_seg,
				complex_test_vector[i].start_trb,
				complex_test_vector[i].end_trb,
				complex_test_vector[i].input_dma,
				complex_test_vector[i].result_seg,
				"Complex", i);
		if (ret < 0)
			return ret;
	}
	xhci_dbg(xhci, "TRB math tests passed.\n");
	return 0;
}

static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
	u64 temp;
	dma_addr_t deq;

	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
			xhci->event_ring->dequeue);
	if (deq == 0 && !in_interrupt())
		xhci_warn(xhci, "WARN something wrong with SW event ring "
				"dequeue ptr.\n");
	/* Update HC event ring dequeue pointer */
	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp &= ERST_PTR_MASK;
	/* Don't clear the EHB bit (which is RW1C) because
	 * there might be more events to service.
	 */
	temp &= ~ERST_EHB;
	xhci_dbg(xhci, "// Write event ring dequeue pointer, "
			"preserving EHB bit\n");
	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
			&xhci->ir_set->erst_dequeue);
}

static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
		__le32 __iomem *addr, u8 major_revision)
{
	u32 temp, port_offset, port_count;
	int i;

	if (major_revision > 0x03) {
		xhci_warn(xhci, "Ignoring unknown port speed, "
				"Ext Cap %p, revision = 0x%x\n",
				addr, major_revision);
		/* Ignoring port protocol we can't understand. FIXME */
		return;
	}

	/* Port offset and count in the third dword */
	temp = xhci_readl(xhci, addr + 2);
	port_offset = XHCI_EXT_PORT_OFF(temp);
	port_count = XHCI_EXT_PORT_COUNT(temp);
	xhci_dbg(xhci, "Ext Cap %p, port offset = %u, "
			"count = %u, revision = 0x%x\n",
			addr, port_offset, port_count, major_revision);
	/* Port count includes the current port offset */
	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
		/* WTF? "Valid values are '1' to MaxPorts" */
		return;

	/* Check the host's USB2 LPM capability */
	if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
			(temp & XHCI_L1C)) {
		xhci_dbg(xhci, "xHCI 0.96: support USB2 software lpm\n");
		xhci->sw_lpm_support = 1;
	}

	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
		xhci_dbg(xhci, "xHCI 1.0: support USB2 software lpm\n");
		xhci->sw_lpm_support = 1;
		if (temp & XHCI_HLC) {
			xhci_dbg(xhci, "xHCI 1.0: support USB2 hardware lpm\n");
			xhci->hw_lpm_support = 1;
		}
	}

	port_offset--;
	for (i = port_offset; i < (port_offset + port_count); i++) {
		/* Duplicate entry.  Ignore the port if the revisions differ. */
		if (xhci->port_array[i] != 0) {
			xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
					" port %u\n", addr, i);
			xhci_warn(xhci, "Port was marked as USB %u, "
					"duplicated as USB %u\n",
					xhci->port_array[i], major_revision);
			/* Only adjust the roothub port counts if we haven't
			 * found a similar duplicate.
			 */
			if (xhci->port_array[i] != major_revision &&
				xhci->port_array[i] != DUPLICATE_ENTRY) {
				if (xhci->port_array[i] == 0x03)
					xhci->num_usb3_ports--;
				else
					xhci->num_usb2_ports--;
				xhci->port_array[i] = DUPLICATE_ENTRY;
			}
			/* FIXME: Should we disable the port? */
			continue;
		}
		xhci->port_array[i] = major_revision;
		if (major_revision == 0x03)
			xhci->num_usb3_ports++;
		else
			xhci->num_usb2_ports++;
	}
	/* FIXME: Should we disable ports not in the Extended Capabilities? */
}
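
/*
 * Scan the Extended Capabilities for the "Supported Protocol Capabilities"
 * that tell us how many ports the host controller has, and which ports
 * belong to the USB 2.0 roothub versus the USB 3.0 roothub.
 */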
static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
{
	__le32 __iomem *addr;
	u32 offset;
	unsigned int num_ports;
	int i, j, port_index;

	addr = &xhci->cap_regs->hcc_params;
	offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr));
	if (offset == 0) {
		xhci_err(xhci, "No Extended Capability registers, "
				"unable to set up roothub.\n");
		return -ENODEV;
	}

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags);
	if (!xhci->port_array)
		return -ENOMEM;

	xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags);
	if (!xhci->rh_bw)
		return -ENOMEM;
	for (i = 0; i < num_ports; i++) {
		struct xhci_interval_bw_table *bw_table;

		INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
		bw_table = &xhci->rh_bw[i].bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}

	/*
	 * For whatever reason, the first capability offset is from the
	 * capability register base, not from the HCCPARAMS register.
	 */
	addr = &xhci->cap_regs->hc_capbase + offset;
	while (1) {
		u32 cap_id;

		cap_id = xhci_readl(xhci, addr);
		if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
			xhci_add_in_port(xhci, num_ports, addr,
					(u8) XHCI_EXT_PORT_MAJOR(cap_id));
		offset = XHCI_EXT_CAPS_NEXT(cap_id);
		if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports)
				== num_ports)
			break;
		/*
		 * Once you're into the Extended Capabilities, the offset is
		 * always relative to the register holding the offset.
		 */
		addr += offset;
	}

	if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) {
		xhci_warn(xhci, "No ports on the roothubs?\n");
		return -ENODEV;
	}
	xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n",
			xhci->num_usb2_ports, xhci->num_usb3_ports);

	/* Place limits on the number of roothub ports so that the hub
	 * descriptors aren't longer than the USB core will allocate.
	 */
	if (xhci->num_usb3_ports > 15) {
		xhci_dbg(xhci, "Limiting USB 3.0 roothub ports to 15.\n");
		xhci->num_usb3_ports = 15;
	}
	if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
		xhci_dbg(xhci, "Limiting USB 2.0 roothub ports to %u.\n",
				USB_MAXCHILDREN);
		xhci->num_usb2_ports = USB_MAXCHILDREN;
	}

	/*
	 * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
	 * Not sure how the USB core will handle a hub with no ports...
	 */
	if (xhci->num_usb2_ports) {
		xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)*
				xhci->num_usb2_ports, flags);
		if (!xhci->usb2_ports)
			return -ENOMEM;

		port_index = 0;
		for (i = 0; i < num_ports; i++) {
			if (xhci->port_array[i] == 0x03 ||
					xhci->port_array[i] == 0 ||
					xhci->port_array[i] == DUPLICATE_ENTRY)
				continue;

			xhci->usb2_ports[port_index] =
				&xhci->op_regs->port_status_base +
				NUM_PORT_REGS*i;
			xhci_dbg(xhci, "USB 2.0 port at index %u, "
					"addr = %p\n", i,
					xhci->usb2_ports[port_index]);
			port_index++;
			if (port_index == xhci->num_usb2_ports)
				break;
		}
	}
	if (xhci->num_usb3_ports) {
		xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
				xhci->num_usb3_ports, flags);
		if (!xhci->usb3_ports)
			return -ENOMEM;

		port_index = 0;
		for (i = 0; i < num_ports; i++)
			if (xhci->port_array[i] == 0x03) {
				xhci->usb3_ports[port_index] =
					&xhci->op_regs->port_status_base +
					NUM_PORT_REGS*i;
				xhci_dbg(xhci, "USB 3.0 port at index %u, "
						"addr = %p\n", i,
						xhci->usb3_ports[port_index]);
				port_index++;
				if (port_index == xhci->num_usb3_ports)
					break;
			}
	}
	return 0;
}

int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t	dma;
	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int	val, val2;
	u64		val_64;
	struct xhci_segment	*seg;
	u32 page_size, temp;
	int i;

	page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
	xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	if (i < 16)
		xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);

	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
	xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
			(unsigned int) val);
	val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
			(unsigned int) val);
	xhci_writel(xhci, val, &xhci->op_regs->config_reg);

	/*
	 * The Device Context Base Address Array must be physically
	 * contiguous and 64-byte aligned.
	 */
	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
			GFP_KERNEL);
	if (!xhci->dcbaa)
		goto fail;
	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
	xhci->dcbaa->dma = dma;
	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);

	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments,
	 * so pick the greater alignment need.
	 */
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			SEGMENT_SIZE, 64, xhci->page_size);

	/* 2112 bytes covers the largest (64-byte) input context */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			2112, 64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;

	/* Linear stream context arrays don't have any boundary restrictions,
	 * and only need to be 16-byte aligned.
	 */
	xhci->small_streams_pool =
		dma_pool_create("xHCI 256 byte stream ctx arrays",
			dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
	xhci->medium_streams_pool =
		dma_pool_create("xHCI 1KB stream ctx arrays",
			dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
	 * will be allocated with dma_alloc_coherent().
	 */
	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
		goto fail;

	/* Set up the command ring to have one segment for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
	if (!xhci->cmd_ring)
		goto fail;
	INIT_LIST_HEAD(&xhci->cancel_cmd_list);
	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
	xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
			(unsigned long long)xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags);
	if (!xhci->lpm_command)
		goto fail;

	/* Reserve one command ring TRB for disabling LPM.
	 * Since the USB core grabs the shared usb_bus bandwidth mutex before
	 * disabling LPM, we only need to reserve one TRB for all devices.
	 */
	xhci->cmd_ring_reserved_trbs++;

	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
			" from cap regs base addr\n", val);
	xhci->dba = (void __iomem *) xhci->cap_regs + val;
	xhci_dbg_regs(xhci);
	xhci_print_run_regs(xhci);
	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = &xhci->run_regs->ir_set[0];

	/*
	 * Event ring setup: allocate a normal ring, but also set up
	 * the event ring segment table (ERST).
	 */
	xhci_dbg(xhci, "// Allocating event ring\n");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
						flags);
	if (!xhci->event_ring)
		goto fail;
	if (xhci_check_trb_in_td_math(xhci, flags) < 0)
		goto fail;

	xhci->erst.entries = dma_alloc_coherent(dev,
			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
			GFP_KERNEL);
	if (!xhci->erst.entries)
		goto fail;
	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
			(unsigned long long)dma);

	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
	xhci->erst.num_entries = ERST_NUM_SEGS;
	xhci->erst.erst_dma_addr = dma;
	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
			xhci->erst.num_entries,
			xhci->erst.entries,
			(unsigned long long)xhci->erst.erst_dma_addr);

	/* set ring base address and size for each segment table entry */
	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
		entry->seg_addr = cpu_to_le64(seg->dma);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}

	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
			val);
	xhci_writel(xhci, val, &xhci->ir_set->erst_size);

	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
	/* set the segment table base address */
	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
			(unsigned long long)xhci->erst.erst_dma_addr);
	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

	/* Set the event ring dequeue address */
	xhci_set_hc_event_deq(xhci);
	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
	xhci_print_ir_set(xhci, 0);

	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1ms minimum between interrupts).
	 */
	init_completion(&xhci->addr_dev);
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		xhci->devs[i] = NULL;
	for (i = 0; i < USB_MAXCHILDREN; ++i) {
		xhci->bus_state[0].resume_done[i] = 0;
		xhci->bus_state[1].resume_done[i] = 0;
	}

	if (scratchpad_alloc(xhci, flags))
		goto fail;
	if (xhci_setup_port_arrays(xhci, flags))
		goto fail;

	INIT_LIST_HEAD(&xhci->lpm_failed_devs);

	/*
	 * Enable Device Notification Events so the HC lets us know when a
	 * USB 3.0 device does a function remote wake.
	 */
	temp = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	temp &= ~DEV_NOTE_MASK;
	temp |= DEV_NOTE_FWAKE;
	xhci_writel(xhci, temp, &xhci->op_regs->dev_notification);

	return 0;

fail:
	xhci_warn(xhci, "Couldn't initialize memory\n");
	xhci_halt(xhci);
	xhci_reset(xhci);
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}