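/*
 * xHCI host controller driver - memory management.
 *
 * Allocation and teardown of rings, segments, device contexts, stream
 * arrays, scratchpad buffers, and roothub port structures.
 */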
#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-debugfs.h"
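
/*
 * Allocate a generic ring segment from the ring pool, set the DMA address,
 * and zero the TRBs (xHCI 4.11.1.1 requires all Command and Transfer TRB
 * fields to be initialized to zero).  The private next pointer starts NULL.
 */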
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
					       unsigned int cycle_state,
					       unsigned int max_packet,
					       gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t dma;
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));
	if (!seg)
		return NULL;

	seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}

	if (max_packet) {
		seg->bounce_buf = kzalloc_node(max_packet, flags,
					       dev_to_node(dev));
		if (!seg->bounce_buf) {
			dma_pool_free(xhci->segment_pool, seg->trbs, dma);
			kfree(seg);
			return NULL;
		}
	}
	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
	if (cycle_state == 0) {
		for (i = 0; i < TRBS_PER_SEGMENT; i++)
			seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
	}
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}

static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (seg->trbs) {
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	kfree(seg->bounce_buf);
	kfree(seg);
}

static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
					struct xhci_segment *first)
{
	struct xhci_segment *seg;

	seg = first->next;
	while (seg != first) {
		struct xhci_segment *next = seg->next;

		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first);
}
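
/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */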
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
			       struct xhci_segment *next,
			       enum xhci_ring_type type)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (type != TYPE_EVENT) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
			cpu_to_le64(next->dma);

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		/* Always set the chain bit with 0.95 hardware */
		/* Set chain bit for isoc rings on AMD 0.96 host */
		if (xhci_link_trb_quirk(xhci) ||
		    (type == TYPE_ISOC &&
		     (xhci->quirks & XHCI_AMD_0x96_HOST)))
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}
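
/*
 * Link the ring to the new segments.
 * Set Toggle Cycle for the new ring if needed.
 */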
static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
			    struct xhci_segment *first,
			    struct xhci_segment *last,
			    unsigned int num_segs)
{
	struct xhci_segment *next;

	if (!ring || !first || !last)
		return;

	next = ring->enq_seg->next;
	xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
	xhci_link_segments(xhci, last, next, ring->type);
	ring->num_segs += num_segs;
	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;

	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
		ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
			&= ~cpu_to_le32(LINK_TOGGLE);
		last->trbs[TRBS_PER_SEGMENT-1].link.control
			|= cpu_to_le32(LINK_TOGGLE);
		ring->last_seg = last;
	}
}
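
/*
 * The host doesn't tell us which stream ring an event TRB came from, so stream
 * rings keep a radix tree that maps the upper bits of a TRB's DMA address
 * (its segment) back to the ring that owns the segment.  The helpers below
 * maintain that mapping as segments are added and removed.
 */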
static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
				       struct xhci_ring *ring,
				       struct xhci_segment *seg,
				       gfp_t mem_flags)
{
	unsigned long key;
	int ret;

	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);

	/* Skip any segments that were already added. */
	if (radix_tree_lookup(trb_address_map, key))
		return 0;

	ret = radix_tree_maybe_preload(mem_flags);
	if (ret)
		return ret;
	ret = radix_tree_insert(trb_address_map, key, ring);
	radix_tree_preload_end();
	return ret;
}

static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
					struct xhci_segment *seg)
{
	unsigned long key;

	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
	if (radix_tree_lookup(trb_address_map, key))
		radix_tree_delete(trb_address_map, key);
}

static int xhci_update_stream_segment_mapping(
		struct radix_tree_root *trb_address_map,
		struct xhci_ring *ring,
		struct xhci_segment *first_seg,
		struct xhci_segment *last_seg,
		gfp_t mem_flags)
{
	struct xhci_segment *seg;
	struct xhci_segment *failed_seg;
	int ret;

	if (WARN_ON_ONCE(trb_address_map == NULL))
		return 0;

	seg = first_seg;
	do {
		ret = xhci_insert_segment_mapping(trb_address_map,
						  ring, seg, mem_flags);
		if (ret)
			goto remove_streams;
		if (seg == last_seg)
			return 0;
		seg = seg->next;
	} while (seg != first_seg);

	return 0;

remove_streams:
	failed_seg = seg;
	seg = first_seg;
	do {
		xhci_remove_segment_mapping(trb_address_map, seg);
		if (seg == failed_seg)
			return ret;
		seg = seg->next;
	} while (seg != first_seg);

	return ret;
}

static void xhci_remove_stream_mapping(struct xhci_ring *ring)
{
	struct xhci_segment *seg;

	if (WARN_ON_ONCE(ring->trb_address_map == NULL))
		return;

	seg = ring->first_seg;
	do {
		xhci_remove_segment_mapping(ring->trb_address_map, seg);
		seg = seg->next;
	} while (seg != ring->first_seg);
}

static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
{
	return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
			ring->first_seg, ring->last_seg, mem_flags);
}
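
/* Free the ring and, for stream rings, drop its radix tree mappings. */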
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	if (!ring)
		return;

	trace_xhci_ring_free(ring);

	if (ring->first_seg) {
		if (ring->type == TYPE_STREAM)
			xhci_remove_stream_mapping(ring);
		xhci_free_segments_for_ring(xhci, ring->first_seg);
	}

	kfree(ring);
}

static void xhci_initialize_ring_info(struct xhci_ring *ring,
				      unsigned int cycle_state)
{
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/* The ring is initialized to 0. The producer must write 1 to the cycle
	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
	 *
	 * New rings are initialized with cycle state equal to 1; if we are
	 * handling ring expansion, set the cycle state equal to the old ring.
	 */
	ring->cycle_state = cycle_state;

	/*
	 * Each segment has a link TRB, and leave an extra TRB for SW
	 * accounting purposes.
	 */
	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
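
/* Allocate segments and link them for a ring */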
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
		struct xhci_segment **first, struct xhci_segment **last,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
	struct xhci_segment *prev;

	prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
	if (!prev)
		return -ENOMEM;
	num_segs--;

	*first = prev;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
		if (!next) {
			prev = *first;
			while (prev) {
				next = prev->next;
				xhci_segment_free(xhci, prev);
				prev = next;
			}
			return -ENOMEM;
		}
		xhci_link_segments(xhci, prev, next, type);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, *first, type);
	*last = prev;

	return 0;
}
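
/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */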
struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
	struct xhci_ring *ring;
	int ret;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	ring = kzalloc_node(sizeof(*ring), flags, dev_to_node(dev));
	if (!ring)
		return NULL;

	ring->num_segs = num_segs;
	ring->bounce_buf_len = max_packet;
	INIT_LIST_HEAD(&ring->td_list);
	ring->type = type;
	if (num_segs == 0)
		return ring;

	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
			&ring->last_seg, num_segs, cycle_state, type,
			max_packet, flags);
	if (ret)
		goto fail;

	/* Only event ring does not use link TRB */
	if (type != TYPE_EVENT) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
			cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring, cycle_state);
	trace_xhci_ring_alloc(ring);
	return ring;

fail:
	kfree(ring);
	return NULL;
}

void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
			     struct xhci_virt_device *virt_dev,
			     unsigned int ep_index)
{
	xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
	virt_dev->eps[ep_index].ring = NULL;
}
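
/*
 * Expand an existing ring.
 * Allocate a new ring which has same segment numbers and link the two rings.
 */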
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
			unsigned int num_trbs, gfp_t flags)
{
	struct xhci_segment *first;
	struct xhci_segment *last;
	unsigned int num_segs;
	unsigned int num_segs_needed;
	int ret;

	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
				(TRBS_PER_SEGMENT - 1);

	/* Allocate the number of segments we need, or double the ring size */
	num_segs = ring->num_segs > num_segs_needed ?
			ring->num_segs : num_segs_needed;

	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
			num_segs, ring->cycle_state, ring->type,
			ring->bounce_buf_len, flags);
	if (ret)
		return -ENOMEM;

	if (ring->type == TYPE_STREAM)
		ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
						ring, first, last, flags);
	if (ret) {
		struct xhci_segment *next;

		do {
			next = first->next;
			xhci_segment_free(xhci, first);
			if (first == last)
				break;
			first = next;
		} while (true);
		return ret;
	}

	xhci_link_rings(xhci, ring, first, last, num_segs);
	trace_xhci_ring_expansion(ring);
	xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
			"ring expansion succeeded, now has %d segments",
			ring->num_segs);

	return 0;
}

struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
						    int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
		return NULL;

	ctx = kzalloc_node(sizeof(*ctx), flags, dev_to_node(dev));
	if (!ctx)
		return NULL;

	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}

void xhci_free_container_ctx(struct xhci_hcd *xhci,
			     struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}

struct xhci_input_control_ctx *xhci_get_input_control_ctx(
		struct xhci_container_ctx *ctx)
{
	if (ctx->type != XHCI_CTX_TYPE_INPUT)
		return NULL;

	return (struct xhci_input_control_ctx *)ctx->bytes;
}

struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
					struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}

struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
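
/***************** Streams structures manipulation *************************/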

static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;

	if (size > MEDIUM_STREAM_ARRAY_SIZE)
		dma_free_coherent(dev, size, stream_ctx, dma);
	else if (size <= SMALL_STREAM_ARRAY_SIZE)
		dma_pool_free(xhci->small_streams_pool, stream_ctx, dma);
	else
		dma_pool_free(xhci->medium_streams_pool, stream_ctx, dma);
}
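
/*
 * The stream context array for each endpoint with bulk streams enabled can
 * vary in size, based on how many streams the endpoint supports and how many
 * the driver asks for.  The array must be a power of two in length, so it is
 * allocated from the small or medium stream dma_pool when it fits, and with
 * dma_alloc_coherent() otherwise.
 */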
static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs, dma_addr_t *dma,
		gfp_t mem_flags)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;

	if (size > MEDIUM_STREAM_ARRAY_SIZE)
		return dma_alloc_coherent(dev, size, dma, mem_flags);
	else if (size <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_alloc(xhci->small_streams_pool,
				      mem_flags, dma);
	else
		return dma_pool_alloc(xhci->medium_streams_pool,
				      mem_flags, dma);
}

struct xhci_ring *xhci_dma_to_transfer_ring(
		struct xhci_virt_ep *ep,
		u64 address)
{
	if (ep->ep_state & EP_HAS_STREAMS)
		return radix_tree_lookup(&ep->stream_info->trb_address_map,
				address >> TRB_SEGMENT_SHIFT);
	return ep->ring;
}

struct xhci_ring *xhci_stream_id_to_ring(
		struct xhci_virt_device *dev,
		unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep = &dev->eps[ep_index];

	if (stream_id == 0)
		return ep->ring;
	if (!ep->stream_info)
		return NULL;

	if (stream_id >= ep->stream_info->num_streams)
		return NULL;
	return ep->stream_info->stream_rings[stream_id];
}
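
/*
 * Change an endpoint's internal structure so it supports stream IDs.  The
 * number of requested streams includes stream 0, which cannot be used by
 * device drivers.
 *
 * The number of stream contexts in the stream context array may be bigger than
 * the number of streams the driver wants to use.  This is because the number of
 * stream context array entries must be a power of two.
 */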
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		unsigned int num_streams,
		unsigned int max_packet, gfp_t mem_flags)
{
	struct xhci_stream_info *stream_info;
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	u64 addr;
	int ret;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	xhci_dbg(xhci, "Allocating %u streams and %u stream context array entries.\n",
			num_streams, num_stream_ctxs);
	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
		return NULL;
	}
	xhci->cmd_ring_reserved_trbs++;

	stream_info = kzalloc_node(sizeof(*stream_info), mem_flags,
			dev_to_node(dev));
	if (!stream_info)
		goto cleanup_trbs;

	stream_info->num_streams = num_streams;
	stream_info->num_stream_ctxs = num_stream_ctxs;

	/* Initialize the array of virtual pointers to stream rings. */
	stream_info->stream_rings = kcalloc_node(
			num_streams, sizeof(struct xhci_ring *), mem_flags,
			dev_to_node(dev));
	if (!stream_info->stream_rings)
		goto cleanup_info;

	/* Initialize the array of DMA addresses for stream rings for the HW. */
	stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
			num_stream_ctxs, &stream_info->ctx_array_dma,
			mem_flags);
	if (!stream_info->stream_ctx_array)
		goto cleanup_ctx;
	memset(stream_info->stream_ctx_array, 0,
			sizeof(struct xhci_stream_ctx)*num_stream_ctxs);

	/* Allocate everything needed to free the stream rings later */
	stream_info->free_streams_command =
		xhci_alloc_command_with_ctx(xhci, true, mem_flags);
	if (!stream_info->free_streams_command)
		goto cleanup_ctx;

	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);

	/*
	 * Allocate rings for all the streams that the driver will use, and add
	 * their segment DMA addresses to the radix tree.  Stream 0 is reserved.
	 */
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		stream_info->stream_rings[cur_stream] =
			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
					mem_flags);
		cur_ring = stream_info->stream_rings[cur_stream];
		if (!cur_ring)
			goto cleanup_rings;
		cur_ring->stream_id = cur_stream;
		cur_ring->trb_address_map = &stream_info->trb_address_map;
		/* Set deq ptr, cycle bit, and stream context type */
		addr = cur_ring->first_seg->dma |
			SCT_FOR_CTX(SCT_PRI_TR) |
			cur_ring->cycle_state;
		stream_info->stream_ctx_array[cur_stream].stream_ring =
			cpu_to_le64(addr);
		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
				cur_stream, (unsigned long long) addr);

		ret = xhci_update_stream_mapping(cur_ring, mem_flags);
		if (ret) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
			goto cleanup_rings;
		}
	}

	return stream_info;

cleanup_rings:
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
	kfree(stream_info->stream_rings);
cleanup_info:
	kfree(stream_info);
cleanup_trbs:
	xhci->cmd_ring_reserved_trbs--;
	return NULL;
}
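
/*
 * Sets the MaxPStreams field and the Linear Stream Array field.
 * Sets the dequeue pointer to the stream context array.
 */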
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_stream_info *stream_info)
{
	u32 max_primary_streams;
	/* MaxPStreams is the number of stream context array entries, not the
	 * number we're actually using.  Must be in 2^(MaxPstreams + 1) format.
	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc.
	 */
	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			"Setting number of stream ctx array entries to %u",
			1 << (max_primary_streams + 1));
	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
				       | EP_HAS_LSA);
	ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
}
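
/*
 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
 * not at the beginning of the ring).
 */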
void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx,
					struct xhci_virt_ep *ep)
{
	dma_addr_t addr;

	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
	ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
}
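
/*
 * Frees all stream contexts associated with the endpoint.
 *
 * Caller should fix the endpoint context streams fields.
 */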
void xhci_free_stream_info(struct xhci_hcd *xhci,
			   struct xhci_stream_info *stream_info)
{
	int cur_stream;
	struct xhci_ring *cur_ring;

	if (!stream_info)
		return;

	for (cur_stream = 1; cur_stream < stream_info->num_streams;
			cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
	xhci->cmd_ring_reserved_trbs--;
	if (stream_info->stream_ctx_array)
		xhci_free_stream_ctx(xhci,
				stream_info->num_stream_ctxs,
				stream_info->stream_ctx_array,
				stream_info->ctx_array_dma);

	kfree(stream_info->stream_rings);
	kfree(stream_info);
}
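
/* Set up the endpoint's stop command watchdog timer. */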
static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
				     struct xhci_virt_ep *ep)
{
	timer_setup(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog,
		    0);
	ep->xhci = xhci;
}

static void xhci_free_tt_info(struct xhci_hcd *xhci,
			      struct xhci_virt_device *virt_dev,
			      int slot_id)
{
	struct list_head *tt_list_head;
	struct xhci_tt_bw_info *tt_info, *next;
	bool slot_found = false;

	/* If the device never made it past the Set Address stage,
	 * it may not have the real_port set correctly.
	 */
	if (virt_dev->real_port == 0 ||
			virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "Bad real port.\n");
		return;
	}

	tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		/* Multi-TT hubs will have more than one entry */
		if (tt_info->slot_id == slot_id) {
			slot_found = true;
			list_del(&tt_info->tt_list);
			kfree(tt_info);
		} else if (slot_found) {
			break;
		}
	}
}

int xhci_alloc_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_tt_bw_info *tt_info;
	unsigned int num_ports;
	int i, j;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	if (!tt->multi)
		num_ports = 1;
	else
		num_ports = hdev->maxchild;

	for (i = 0; i < num_ports; i++, tt_info++) {
		struct xhci_interval_bw_table *bw_table;

		tt_info = kzalloc_node(sizeof(*tt_info), mem_flags,
				dev_to_node(dev));
		if (!tt_info)
			goto free_tts;
		INIT_LIST_HEAD(&tt_info->tt_list);
		list_add(&tt_info->tt_list,
				&xhci->rh_bw[virt_dev->real_port - 1].tts);
		tt_info->slot_id = virt_dev->udev->slot_id;
		if (tt->multi)
			tt_info->ttport = i+1;
		bw_table = &tt_info->bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}
	return 0;

free_tts:
	xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
	return -ENOMEM;
}
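
/* All the xhci_tds in the ring's TD list should be freed at this point.
 * Should be called with xhci->lock held if there is any chance the TT lists
 * will be manipulated by the configure endpoint, allocate device, or update
 * hub functions while this function is removing the TT entries from the list.
 */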
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;
	int old_active_eps = 0;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];

	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
	if (!dev)
		return;

	trace_xhci_free_virt_device(dev);

	if (dev->tt_info)
		old_active_eps = dev->tt_info->active_eps;

	for (i = 0; i < 31; i++) {
		if (dev->eps[i].ring)
			xhci_ring_free(xhci, dev->eps[i].ring);
		if (dev->eps[i].stream_info)
			xhci_free_stream_info(xhci,
					dev->eps[i].stream_info);
		/* Endpoints on the TT/root port lists should have been removed
		 * when usb_disable_device() was called for the device.
		 * We can't drop them anyway, because the udev might have gone
		 * away by this point, and we can't tell what speed it was.
		 */
		if (!list_empty(&dev->eps[i].bw_endpoint_list))
			xhci_warn(xhci, "Slot %u endpoint %u not removed from BW list!\n",
					slot_id, i);
	}
	/* If this is a hub, free the TT bandwidth structure */
	xhci_free_tt_info(xhci, dev, slot_id);
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, dev, old_active_eps);

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	if (dev->udev && dev->udev->slot_id)
		dev->udev->slot_id = 0;
	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}
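
/*
 * Free a virt_device structure.
 * If the virt_device added a tt_info (a hub) and has children pointing to
 * that tt_info, then free the child first. Recursive.
 * We can't rely on udev at this point to find child-parent relationships.
 */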
void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *vdev;
	struct list_head *tt_list_head;
	struct xhci_tt_bw_info *tt_info, *next;
	int i;

	vdev = xhci->devs[slot_id];
	if (!vdev)
		return;

	if (vdev->real_port == 0 ||
			vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "Bad vdev->real_port.\n");
		goto out;
	}

	tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		/* is this a hub device that added a tt_info to the tts list */
		if (tt_info->slot_id == slot_id) {
			/* are any devices using this tt_info? */
			for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
				vdev = xhci->devs[i];
				if (vdev && (vdev->tt_info == tt_info))
					xhci_free_virt_devices_depth_first(
						xhci, i);
			}
		}
	}
out:
	/* we are now at a leaf device */
	xhci_debugfs_remove_slot(xhci, slot_id);
	xhci_free_virt_device(xhci, slot_id);
}

int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	dev = kzalloc(sizeof(*dev), flags);
	if (!dev)
		return 0;

	/* Allocate the (output) device context that will be used in the HC. */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->in_ctx->dma);

	/* Initialize the cancellation list and watchdog timers for each ep */
	for (i = 0; i < 31; i++) {
		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
	}

	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
	if (!dev->eps[0].ring)
		goto fail;

	dev->udev = udev;

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
			slot_id,
			&xhci->dcbaa->dev_context_ptrs[slot_id],
			le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));

	trace_xhci_alloc_virt_device(dev);

	xhci->devs[slot_id] = dev;

	return 1;
fail:
	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);
	kfree(dev);

	return 0;
}

void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_ring *ep_ring;

	virt_dev = xhci->devs[udev->slot_id];
	ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
	ep_ring = virt_dev->eps[0].ring;
	/*
	 * FIXME we don't keep track of the dequeue pointer very well after a
	 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
	 * host to our enqueue pointer.  This should only be called after a
	 * configured device has reset, so all control transfers should have
	 * been completed or cancelled before the reset.
	 */
	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
							ep_ring->enqueue)
				   | ep_ring->cycle_state);
}
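
/*
 * The xHCI roothub may have ports of differing speeds in any order in the port
 * status registers.
 *
 * The xHCI hardware wants to know the roothub port number that the USB device
 * is attached to (or the roothub port its ancestor hub is attached to).  All we
 * know is the index of that port under either the USB 2.0 or the USB 3.0
 * roothub, but that doesn't give us the real index into the HW port status
 * registers.  Call xhci_find_raw_port_number() to get the real index.
 */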
static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct usb_device *top_dev;
	struct usb_hcd *hcd;

	if (udev->speed >= USB_SPEED_SUPER)
		hcd = xhci->shared_hcd;
	else
		hcd = xhci->main_hcd;

	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;

	return xhci_find_raw_port_number(hcd, top_dev->portnum);
}
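
/* Setup an xHCI virtual device for a Set Address command */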
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_slot_ctx *slot_ctx;
	u32 port_num;
	u32 max_packets;
	struct usb_device *top_dev;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
	switch (udev->speed) {
	case USB_SPEED_SUPER_PLUS:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
		max_packets = MAX_PACKET(512);
		break;
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		max_packets = MAX_PACKET(512);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		max_packets = MAX_PACKET(64);
		break;
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		max_packets = MAX_PACKET(64);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		max_packets = MAX_PACKET(8);
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		return -EINVAL;
	}
	/* Find the root hub port this device is under */
	port_num = xhci_find_real_port_number(xhci, udev);
	if (!port_num)
		return -EINVAL;
	slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
	/* Set the port number in the virtual_device to the faked port number */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	dev->fake_port = top_dev->portnum;
	dev->real_port = port_num;
	xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);

	/* Find the right bandwidth table that this device will be a part of.
	 * If this is a full speed device attached directly to a root port (or a
	 * descendant of one), it counts as a primary bandwidth domain, not a
	 * secondary bandwidth domain under a TT.  An xhci_tt_info structure
	 * will never be created for the HS root hub.
	 */
	if (!udev->tt || !udev->tt->hub->parent) {
		dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
	} else {
		struct xhci_root_port_bw_info *rh_bw;
		struct xhci_tt_bw_info *tt_bw;

		rh_bw = &xhci->rh_bw[port_num - 1];
		/* Find the right TT. */
		list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
			if (tt_bw->slot_id != udev->tt->hub->slot_id)
				continue;

			if (!dev->udev->tt->multi ||
					(udev->tt->multi &&
					 tt_bw->ttport == dev->udev->ttport)) {
				dev->bw_table = &tt_bw->bw_table;
				dev->tt_info = tt_bw;
				break;
			}
		}
		if (!dev->tt_info)
			xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
	}

	/* Is this a LS/FS device under an external HS hub? */
	if (udev->tt && udev->tt->hub->parent) {
		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
						(udev->ttport << 8));
		if (udev->tt->multi)
			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));

	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
					 max_packets);

	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
				   dev->eps[0].ring->cycle_state);

	trace_xhci_setup_addressable_virt_device(dev);

	return 0;
}
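
/*
 * Convert interval expressed as 2^(bInterval - 1) == interval into
 * straight exponent value 2^n == interval.
 */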
static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval;

	interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
	if (interval != ep->desc.bInterval - 1)
		dev_warn(&udev->dev,
			 "ep %#x - rounding interval to %d %sframes\n",
			 ep->desc.bEndpointAddress,
			 1 << interval,
			 udev->speed == USB_SPEED_FULL ? "" : "micro");

	if (udev->speed == USB_SPEED_FULL) {
		/*
		 * Full speed isoc endpoints specify interval in frames,
		 * not microframes. We are using microframes everywhere,
		 * so adjust accordingly.
		 */
		interval += 3;	/* 1 frame = 2^3 uframes */
	}

	return interval;
}
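
/*
 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
 * microframes, rounded down to nearest power of 2.
 */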
static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
		struct usb_host_endpoint *ep, unsigned int desc_interval,
		unsigned int min_exponent, unsigned int max_exponent)
{
	unsigned int interval;

	interval = fls(desc_interval) - 1;
	interval = clamp_val(interval, min_exponent, max_exponent);
	if ((1 << interval) != desc_interval)
		dev_dbg(&udev->dev,
			"ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
			ep->desc.bEndpointAddress,
			1 << interval,
			desc_interval);

	return interval;
}

static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (ep->desc.bInterval == 0)
		return 0;
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval, 0, 15);
}

static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval * 8, 3, 10);
}
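
/*
 * Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval field
 * is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
 * is set to 0.
 */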
static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
		    usb_endpoint_xfer_bulk(&ep->desc)) {
			interval = xhci_parse_microframe_interval(udev, ep);
			break;
		}
		/* Fall through - SS and HS isoc/int have same decoding */

	case USB_SPEED_SUPER_PLUS:
	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
		}
		break;

	case USB_SPEED_FULL:
		if (usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
			break;
		}
		/*
		 * Fall through for interrupt endpoint interval decoding
		 * since it uses the same rules as low speed interrupt
		 * endpoints.
		 */

	case USB_SPEED_LOW:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {

			interval = xhci_parse_frame_interval(udev, ep);
		}
		break;

	default:
		BUG();
	}
	return interval;
}
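
/*
 * The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
 * High speed endpoint descriptors can define "the number of additional
 * transaction opportunities per microframe", but that goes in the Max Burst
 * endpoint context field.
 */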
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (udev->speed < USB_SPEED_SUPER ||
			!usb_endpoint_xfer_isoc(&ep->desc))
		return 0;
	return ep->ss_ep_comp.bmAttributes;
}

static u32 xhci_get_endpoint_max_burst(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	/* Super speed and Plus have max burst in ep companion desc */
	if (udev->speed >= USB_SPEED_SUPER)
		return ep->ss_ep_comp.bMaxBurst;

	if (udev->speed == USB_SPEED_HIGH &&
	    (usb_endpoint_xfer_isoc(&ep->desc) ||
	     usb_endpoint_xfer_int(&ep->desc)))
		return usb_endpoint_maxp_mult(&ep->desc) - 1;

	return 0;
}

static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep)
{
	int in;

	in = usb_endpoint_dir_in(&ep->desc);

	switch (usb_endpoint_type(&ep->desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		return CTRL_EP;
	case USB_ENDPOINT_XFER_BULK:
		return in ? BULK_IN_EP : BULK_OUT_EP;
	case USB_ENDPOINT_XFER_ISOC:
		return in ? ISOC_IN_EP : ISOC_OUT_EP;
	case USB_ENDPOINT_XFER_INT:
		return in ? INT_IN_EP : INT_OUT_EP;
	}
	return 0;
}
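
/* Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and mult size.
 */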
static u32 xhci_get_max_esit_payload(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int max_burst;
	int max_packet;

	/* Only applies for interrupt or isochronous endpoints */
	if (usb_endpoint_xfer_control(&ep->desc) ||
			usb_endpoint_xfer_bulk(&ep->desc))
		return 0;

	/* SuperSpeedPlus Isoc ep sending over 48k per esit */
	if ((udev->speed >= USB_SPEED_SUPER_PLUS) &&
	    USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes))
		return le32_to_cpu(ep->ssp_isoc_ep_comp.dwBytesPerInterval);
	/* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per esit */
	else if (udev->speed >= USB_SPEED_SUPER)
		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);

	max_packet = usb_endpoint_maxp(&ep->desc);
	max_burst = usb_endpoint_maxp_mult(&ep->desc);

	return max_packet * max_burst;
}
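
/* Set up an endpoint with one ring segment.  Do not allocate stream rings.
 * Drivers will have to call usb_alloc_streams() to do that.
 */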
int xhci_endpoint_init(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *udev,
		struct usb_host_endpoint *ep,
		gfp_t mem_flags)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	enum xhci_ring_type ring_type;
	u32 max_esit_payload;
	u32 endpoint_type;
	unsigned int max_burst;
	unsigned int interval;
	unsigned int mult;
	unsigned int avg_trb_len;
	unsigned int err_count = 0;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	endpoint_type = xhci_get_endpoint_type(ep);
	if (!endpoint_type)
		return -EINVAL;

	ring_type = usb_endpoint_type(&ep->desc);

	/*
	 * Get values to fill the endpoint context, mostly from the ep
	 * descriptor.  The average TRB buffer length for bulk endpoints is
	 * unclear as we have no clue on scatter gather list entry size.  For
	 * Isoc and Int, set it to max available.
	 */
	max_esit_payload = xhci_get_max_esit_payload(udev, ep);
	interval = xhci_get_endpoint_interval(udev, ep);

	/* Periodic endpoint bInterval limit quirk */
	if (usb_endpoint_xfer_int(&ep->desc) ||
	    usb_endpoint_xfer_isoc(&ep->desc)) {
		if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) &&
		    udev->speed >= USB_SPEED_HIGH &&
		    interval >= 7) {
			interval = 6;
		}
	}

	mult = xhci_get_endpoint_mult(udev, ep);
	max_packet = usb_endpoint_maxp(&ep->desc);
	max_burst = xhci_get_endpoint_max_burst(udev, ep);
	avg_trb_len = max_esit_payload;

	/* Allow 3 retries for everything but isoc, set CErr = 3 */
	if (!usb_endpoint_xfer_isoc(&ep->desc))
		err_count = 3;
	/* HS bulk max packet should be 512 */
	if (usb_endpoint_xfer_bulk(&ep->desc) && udev->speed == USB_SPEED_HIGH)
		max_packet = 512;
	/* xHCI 1.0 and 1.1 indicate that ctrl ep avg TRB Length should be 8 */
	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
		avg_trb_len = 8;
	/* xhci 1.1 with LEC support doesn't use mult field, use RsvdZ */
	if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2))
		mult = 0;

	/* Set up the endpoint ring */
	virt_dev->eps[ep_index].new_ring =
		xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
	if (!virt_dev->eps[ep_index].new_ring)
		return -ENOMEM;

	virt_dev->eps[ep_index].skip = false;
	ep_ring = virt_dev->eps[ep_index].new_ring;

	/* Fill the endpoint context */
	ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
				      EP_INTERVAL(interval) |
				      EP_MULT(mult));
	ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
				       MAX_PACKET(max_packet) |
				       MAX_BURST(max_burst) |
				       ERROR_COUNT(err_count));
	ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma |
				  ep_ring->cycle_state);

	ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
				      EP_AVG_TRB_LENGTH(avg_trb_len));

	return 0;
}

void xhci_endpoint_zero(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq = 0;
	ep_ctx->tx_info = 0;
	/* Don't free the endpoint ring until the set interface or configuration
	 * request succeeds or fails.
	 */
}

void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
{
	bw_info->ep_interval = 0;
	bw_info->mult = 0;
	bw_info->num_packets = 0;
	bw_info->max_packet_size = 0;
	bw_info->type = 0;
	bw_info->max_esit_payload = 0;
}

void xhci_update_bw_info(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_input_control_ctx *ctrl_ctx,
		struct xhci_virt_device *virt_dev)
{
	struct xhci_bw_info *bw_info;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_type;
	int i;

	for (i = 1; i < 31; i++) {
		bw_info = &virt_dev->eps[i].bw_info;

		/* We can't tell what endpoint type is being dropped, but
		 * unconditionally clearing the bandwidth info for non-periodic
		 * endpoints should be harmless because the info will never be
		 * set in the first place.
		 */
		if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
			/* Dropped endpoint */
			xhci_clear_endpoint_bw_info(bw_info);
			continue;
		}

		if (EP_IS_ADDED(ctrl_ctx, i)) {
			ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
			ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));

			/* Ignore non-periodic endpoints */
			if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
					ep_type != ISOC_IN_EP &&
					ep_type != INT_IN_EP)
				continue;

			/* Added or changed endpoint */
			bw_info->ep_interval = CTX_TO_EP_INTERVAL(
					le32_to_cpu(ep_ctx->ep_info));
			/* Number of packets and mult are zero-based in the
			 * input context, but we want one-based for the
			 * interval table.
			 */
			bw_info->mult = CTX_TO_EP_MULT(
					le32_to_cpu(ep_ctx->ep_info)) + 1;
			bw_info->num_packets = CTX_TO_MAX_BURST(
					le32_to_cpu(ep_ctx->ep_info2)) + 1;
			bw_info->max_packet_size = MAX_PACKET_DECODED(
					le32_to_cpu(ep_ctx->ep_info2));
			bw_info->type = ep_type;
			bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
					le32_to_cpu(ep_ctx->tx_info));
		}
	}
}
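
/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.
 */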
void xhci_endpoint_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
	if (xhci->quirks & XHCI_MTK_HOST) {
		in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0];
		in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1];
	}
}
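
/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the slot and then
 * issue a configure endpoint command.  Only the context entries field matters,
 * but we'll copy the whole thing anyway.
 */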
void xhci_slot_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}
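
/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */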
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Allocating %d scratchpad buffers", num_sp);

	if (!num_sp)
		return 0;

	xhci->scratchpad = kzalloc_node(sizeof(*xhci->scratchpad), flags,
				dev_to_node(dev));
	if (!xhci->scratchpad)
		goto fail_sp;

	xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
				     num_sp * sizeof(u64),
				     &xhci->scratchpad->sp_dma, flags);
	if (!xhci->scratchpad->sp_array)
		goto fail_sp2;

	xhci->scratchpad->sp_buffers = kcalloc_node(num_sp, sizeof(void *),
					flags, dev_to_node(dev));
	if (!xhci->scratchpad->sp_buffers)
		goto fail_sp3;

	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
	for (i = 0; i < num_sp; i++) {
		dma_addr_t dma;
		void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
					       flags);
		if (!buf)
			goto fail_sp4;

		xhci->scratchpad->sp_array[i] = dma;
		xhci->scratchpad->sp_buffers[i] = buf;
	}

	return 0;

 fail_sp4:
	for (i = i - 1; i >= 0; i--) {
		dma_free_coherent(dev, xhci->page_size,
				  xhci->scratchpad->sp_buffers[i],
				  xhci->scratchpad->sp_array[i]);
	}

	kfree(xhci->scratchpad->sp_buffers);

 fail_sp3:
	dma_free_coherent(dev, num_sp * sizeof(u64),
			  xhci->scratchpad->sp_array,
			  xhci->scratchpad->sp_dma);

 fail_sp2:
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;

 fail_sp:
	return -ENOMEM;
}

static void scratchpad_free(struct xhci_hcd *xhci)
{
	int num_sp;
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	if (!xhci->scratchpad)
		return;

	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	for (i = 0; i < num_sp; i++) {
		dma_free_coherent(dev, xhci->page_size,
				  xhci->scratchpad->sp_buffers[i],
				  xhci->scratchpad->sp_array[i]);
	}
	kfree(xhci->scratchpad->sp_buffers);
	dma_free_coherent(dev, num_sp * sizeof(u64),
			  xhci->scratchpad->sp_array,
			  xhci->scratchpad->sp_dma);
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;
}

struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
		bool allocate_completion, gfp_t mem_flags)
{
	struct xhci_command *command;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	command = kzalloc_node(sizeof(*command), mem_flags, dev_to_node(dev));
	if (!command)
		return NULL;

	if (allocate_completion) {
		command->completion =
			kzalloc_node(sizeof(struct completion), mem_flags,
				dev_to_node(dev));
		if (!command->completion) {
			kfree(command);
			return NULL;
		}
		init_completion(command->completion);
	}

	command->status = 0;
	INIT_LIST_HEAD(&command->cmd_list);
	return command;
}

struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
		bool allocate_completion, gfp_t mem_flags)
{
	struct xhci_command *command;

	command = xhci_alloc_command(xhci, allocate_completion, mem_flags);
	if (!command)
		return NULL;

	command->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
						   mem_flags);
	if (!command->in_ctx) {
		kfree(command->completion);
		kfree(command);
		return NULL;
	}
	return command;
}

void xhci_urb_free_priv(struct urb_priv *urb_priv)
{
	kfree(urb_priv);
}

void xhci_free_command(struct xhci_hcd *xhci,
		       struct xhci_command *command)
{
	xhci_free_container_ctx(xhci, command->in_ctx);
	kfree(command->completion);
	kfree(command);
}

int xhci_alloc_erst(struct xhci_hcd *xhci,
		    struct xhci_ring *evt_ring,
		    struct xhci_erst *erst,
		    gfp_t flags)
{
	size_t size;
	unsigned int val;
	struct xhci_segment *seg;
	struct xhci_erst_entry *entry;

	size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
	erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
					   size, &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = evt_ring->num_segs;

	seg = evt_ring->first_seg;
	for (val = 0; val < evt_ring->num_segs; val++) {
		entry = &erst->entries[val];
		entry->seg_addr = cpu_to_le64(seg->dma);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}

	return 0;
}

void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
{
	size_t size;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	size = sizeof(struct xhci_erst_entry) * (erst->num_entries);
	if (erst->entries)
		dma_free_coherent(dev, size,
				erst->entries,
				erst->erst_dma_addr);
	erst->entries = NULL;
}

void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	int i, j, num_ports;

	cancel_delayed_work_sync(&xhci->cmd_timer);

	/* Free the Event Ring Segment Table and the actual Event Ring */
	xhci_free_erst(xhci, &xhci->erst);

	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");

	if (xhci->lpm_command)
		xhci_free_command(xhci, xhci->lpm_command);
	xhci->lpm_command = NULL;
	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
	xhci_cleanup_command_queue(xhci);

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	for (i = 0; i < num_ports && xhci->rh_bw; i++) {
		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;

		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
			struct list_head *ep = &bwt->interval_bw[j].endpoints;

			while (!list_empty(ep))
				list_del_init(ep->next);
		}
	}

	for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
		xhci_free_virt_devices_depth_first(xhci, i);

	dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");

	dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");

	dma_pool_destroy(xhci->small_streams_pool);
	xhci->small_streams_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Freed small stream array pool");

	dma_pool_destroy(xhci->medium_streams_pool);
	xhci->medium_streams_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Freed medium stream array pool");

	if (xhci->dcbaa)
		dma_free_coherent(dev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;

	scratchpad_free(xhci);

	if (!xhci->rh_bw)
		goto no_bw;

	for (i = 0; i < num_ports; i++) {
		struct xhci_tt_bw_info *tt, *n;

		list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
			list_del(&tt->tt_list);
			kfree(tt);
		}
	}

no_bw:
	xhci->cmd_ring_reserved_trbs = 0;
	xhci->usb2_rhub.num_ports = 0;
	xhci->usb3_rhub.num_ports = 0;
	xhci->num_active_eps = 0;
	kfree(xhci->usb2_rhub.ports);
	kfree(xhci->usb3_rhub.ports);
	kfree(xhci->hw_ports);
	kfree(xhci->rh_bw);
	kfree(xhci->ext_caps);

	xhci->usb2_rhub.ports = NULL;
	xhci->usb3_rhub.ports = NULL;
	xhci->hw_ports = NULL;
	xhci->rh_bw = NULL;
	xhci->ext_caps = NULL;

	xhci->page_size = 0;
	xhci->page_shift = 0;
	xhci->usb2_rhub.bus_state.bus_suspended = 0;
	xhci->usb3_rhub.bus_state.bus_suspended = 0;
}

static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *input_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t input_dma,
		struct xhci_segment *result_seg,
		char *test_name, int test_number)
{
	unsigned long long start_dma;
	unsigned long long end_dma;
	struct xhci_segment *seg;

	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);

	seg = trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, false);
	if (seg != result_seg) {
		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
				test_name, test_number);
		xhci_warn(xhci, "Tested TRB math w/ seg %p and input DMA 0x%llx\n",
				input_seg,
				(unsigned long long) input_dma);
		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), ending TRB %p (0x%llx DMA)\n",
				start_trb, start_dma,
				end_trb, end_dma);
		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
				result_seg, seg);
		trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma,
			  true);
		return -1;
	}
	return 0;
}
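
/* TRB math checks for xhci_trb_virt_to_dma(), used by command and event rings. */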
static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
{
	struct {
		dma_addr_t input_dma;
		struct xhci_segment *result_seg;
	} simple_test_vector [] = {
		/* A zeroed DMA field should fail */
		{ 0, NULL },
		/* One TRB before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 16, NULL },
		/* One byte before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 1, NULL },
		/* Starting TRB should succeed */
		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
		/* Ending TRB should succeed */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
			xhci->event_ring->first_seg },
		/* One byte after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
		/* One TRB after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
		/* An address of all ones should fail */
		{ (dma_addr_t) (~0), NULL },
	};
	struct {
		struct xhci_segment *input_seg;
		union xhci_trb *start_trb;
		union xhci_trb *end_trb;
		dma_addr_t input_dma;
		struct xhci_segment *result_seg;
	} complex_test_vector [] = {
		/* Test feeding a valid DMA address from a different ring */
		{ .input_seg = xhci->event_ring->first_seg,
		  .start_trb = xhci->event_ring->first_seg->trbs,
		  .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
		  .input_dma = xhci->cmd_ring->first_seg->dma,
		  .result_seg = NULL,
		},
		/* Test feeding a valid end TRB from a different ring */
		{ .input_seg = xhci->event_ring->first_seg,
		  .start_trb = xhci->event_ring->first_seg->trbs,
		  .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
		  .input_dma = xhci->cmd_ring->first_seg->dma,
		  .result_seg = NULL,
		},
		/* Test feeding a valid start and end TRB from a different ring */
		{ .input_seg = xhci->event_ring->first_seg,
		  .start_trb = xhci->cmd_ring->first_seg->trbs,
		  .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
		  .input_dma = xhci->cmd_ring->first_seg->dma,
		  .result_seg = NULL,
		},
		/* TRB in this ring, but after this TD */
		{ .input_seg = xhci->event_ring->first_seg,
		  .start_trb = &xhci->event_ring->first_seg->trbs[0],
		  .end_trb = &xhci->event_ring->first_seg->trbs[3],
		  .input_dma = xhci->event_ring->first_seg->dma + 4*16,
		  .result_seg = NULL,
		},
		/* TRB in this ring, but before this TD */
		{ .input_seg = xhci->event_ring->first_seg,
		  .start_trb = &xhci->event_ring->first_seg->trbs[3],
		  .end_trb = &xhci->event_ring->first_seg->trbs[6],
		  .input_dma = xhci->event_ring->first_seg->dma + 2*16,
		  .result_seg = NULL,
		},
		/* TRB in this ring, but after this wrapped TD */
		{ .input_seg = xhci->event_ring->first_seg,
		  .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
		  .end_trb = &xhci->event_ring->first_seg->trbs[1],
		  .input_dma = xhci->event_ring->first_seg->dma + 2*16,
		  .result_seg = NULL,
		},
		/* TRB in this ring, but before this wrapped TD */
		{ .input_seg = xhci->event_ring->first_seg,
		  .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
		  .end_trb = &xhci->event_ring->first_seg->trbs[1],
		  .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
		  .result_seg = NULL,
		},
		/* TRB not in this ring, and we have a wrapped TD */
		{ .input_seg = xhci->event_ring->first_seg,
		  .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
		  .end_trb = &xhci->event_ring->first_seg->trbs[1],
		  .input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
		  .result_seg = NULL,
		},
	};

	unsigned int num_tests;
	int i, ret;

	num_tests = ARRAY_SIZE(simple_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				xhci->event_ring->first_seg,
				xhci->event_ring->first_seg->trbs,
				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
				simple_test_vector[i].input_dma,
				simple_test_vector[i].result_seg,
				"Simple", i);
		if (ret < 0)
			return ret;
	}

	num_tests = ARRAY_SIZE(complex_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				complex_test_vector[i].input_seg,
				complex_test_vector[i].start_trb,
				complex_test_vector[i].end_trb,
				complex_test_vector[i].input_dma,
				complex_test_vector[i].result_seg,
				"Complex", i);
		if (ret < 0)
			return ret;
	}
	xhci_dbg(xhci, "TRB math tests passed.\n");
	return 0;
}

static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
	u64 temp;
	dma_addr_t deq;

	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
			xhci->event_ring->dequeue);
	if (deq == 0 && !in_interrupt())
		xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr.\n");
	/* Update HC event ring dequeue pointer */
	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp &= ERST_PTR_MASK;
	/* Don't clear the EHB bit (which is RW1C) because
	 * there might be more events to service.
	 */
	temp &= ~ERST_EHB;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Write event ring dequeue pointer, preserving EHB bit");
	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
			&xhci->ir_set->erst_dequeue);
}

static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
		__le32 __iomem *addr, int max_caps)
{
	u32 temp, port_offset, port_count;
	int i;
	u8 major_revision, minor_revision;
	struct xhci_hub *rhub;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	temp = readl(addr);
	major_revision = XHCI_EXT_PORT_MAJOR(temp);
	minor_revision = XHCI_EXT_PORT_MINOR(temp);

	if (major_revision == 0x03) {
		rhub = &xhci->usb3_rhub;
	} else if (major_revision <= 0x02) {
		rhub = &xhci->usb2_rhub;
	} else {
		xhci_warn(xhci, "Ignoring unknown port speed, Ext Cap %p, revision = 0x%x\n",
				addr, major_revision);
		/* Ignoring port protocol we can't understand. FIXME */
		return;
	}
	rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);

	if (rhub->min_rev < minor_revision)
		rhub->min_rev = minor_revision;

	/* Port offset and count in the third dword, see section 7.2 */
	temp = readl(addr + 2);
	port_offset = XHCI_EXT_PORT_OFF(temp);
	port_count = XHCI_EXT_PORT_COUNT(temp);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Ext Cap %p, port offset = %u, count = %u, revision = 0x%x",
			addr, port_offset, port_count, major_revision);
	/* Port count includes the current port offset */
	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
		return;

	rhub->psi_count = XHCI_EXT_PORT_PSIC(temp);
	if (rhub->psi_count) {
		rhub->psi = kcalloc_node(rhub->psi_count, sizeof(*rhub->psi),
				    GFP_KERNEL, dev_to_node(dev));
		if (!rhub->psi)
			rhub->psi_count = 0;

		rhub->psi_uid_count++;
		for (i = 0; i < rhub->psi_count; i++) {
			rhub->psi[i] = readl(addr + 4 + i);

			/* count unique ID values, two consecutive entries can
			 * have the same ID if link is asymmetric
			 */
			if (i && (XHCI_EXT_PORT_PSIV(rhub->psi[i]) !=
				  XHCI_EXT_PORT_PSIV(rhub->psi[i - 1])))
				rhub->psi_uid_count++;

			xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
				  XHCI_EXT_PORT_PSIV(rhub->psi[i]),
				  XHCI_EXT_PORT_PSIE(rhub->psi[i]),
				  XHCI_EXT_PORT_PLT(rhub->psi[i]),
				  XHCI_EXT_PORT_PFD(rhub->psi[i]),
				  XHCI_EXT_PORT_LP(rhub->psi[i]),
				  XHCI_EXT_PORT_PSIM(rhub->psi[i]));
		}
	}
	/* cache usb2 port capabilities */
	if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
		xhci->ext_caps[xhci->num_ext_caps++] = temp;

	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03) &&
		 (temp & XHCI_HLC)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			       "xHCI 1.0: support USB2 hardware lpm");
		xhci->hw_lpm_support = 1;
	}

	port_offset--;
	for (i = port_offset; i < (port_offset + port_count); i++) {
		struct xhci_port *hw_port = &xhci->hw_ports[i];

		/* Duplicate entry.  Ignore the port if the revisions differ. */
		if (hw_port->rhub) {
			xhci_warn(xhci, "Duplicate port entry, Ext Cap %p, port %u\n",
					addr, i);
			xhci_warn(xhci, "Port was marked as USB %u, duplicated as USB %u\n",
					hw_port->rhub->maj_rev, major_revision);
			/* Only adjust the roothub port counts if we haven't
			 * found a similar duplicate.
			 */
			if (hw_port->rhub != rhub &&
				 hw_port->hcd_portnum != DUPLICATE_ENTRY) {
				hw_port->rhub->num_ports--;
				hw_port->hcd_portnum = DUPLICATE_ENTRY;
			}
			continue;
		}
		hw_port->rhub = rhub;
		rhub->num_ports++;
	}
}

static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
		struct xhci_hub *rhub, gfp_t flags)
{
	int port_index = 0;
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	if (!rhub->num_ports)
		return;
	rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports),
			flags, dev_to_node(dev));
	if (!rhub->ports)
		return;

	for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
		if (xhci->hw_ports[i].rhub != rhub ||
		    xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
			continue;
		xhci->hw_ports[i].hcd_portnum = port_index;
		rhub->ports[port_index] = &xhci->hw_ports[i];
		port_index++;
		if (port_index == rhub->num_ports)
			break;
	}
}
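/*
 * Scan the Extended Capabilities for "Supported Protocol Capabilities" that
 * tell us which protocol (USB 2 or USB 3) each port speaks, then build the
 * per-roothub port arrays.
 */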
static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
{
	void __iomem *base;
	u32 offset;
	unsigned int num_ports;
	int i, j;
	int cap_count = 0;
	u32 cap_start;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	xhci->hw_ports = kcalloc_node(num_ports, sizeof(*xhci->hw_ports),
				      flags, dev_to_node(dev));
	if (!xhci->hw_ports)
		return -ENOMEM;

	for (i = 0; i < num_ports; i++) {
		xhci->hw_ports[i].addr = &xhci->op_regs->port_status_base +
			NUM_PORT_REGS * i;
		xhci->hw_ports[i].hw_portnum = i;
	}

	xhci->rh_bw = kcalloc_node(num_ports, sizeof(*xhci->rh_bw), flags,
				   dev_to_node(dev));
	if (!xhci->rh_bw)
		return -ENOMEM;
	for (i = 0; i < num_ports; i++) {
		struct xhci_interval_bw_table *bw_table;

		INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
		bw_table = &xhci->rh_bw[i].bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}
	base = &xhci->cap_regs->hc_capbase;

	cap_start = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_PROTOCOL);
	if (!cap_start) {
		xhci_err(xhci, "No Extended Capability registers, unable to set up roothub\n");
		return -ENODEV;
	}

	offset = cap_start;

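	/* count extended protocol capability entries for later caching */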
	while (offset) {
		cap_count++;
		offset = xhci_find_next_ext_cap(base, offset,
						XHCI_EXT_CAPS_PROTOCOL);
	}

	xhci->ext_caps = kcalloc_node(cap_count, sizeof(*xhci->ext_caps),
				      flags, dev_to_node(dev));
	if (!xhci->ext_caps)
		return -ENOMEM;

	offset = cap_start;

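	/* parse each protocol capability and assign its ports to a roothub */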
	while (offset) {
		xhci_add_in_port(xhci, num_ports, base + offset, cap_count);
		if (xhci->usb2_rhub.num_ports + xhci->usb3_rhub.num_ports ==
		    num_ports)
			break;
		offset = xhci_find_next_ext_cap(base, offset,
						XHCI_EXT_CAPS_PROTOCOL);
	}
	if (xhci->usb2_rhub.num_ports == 0 && xhci->usb3_rhub.num_ports == 0) {
		xhci_warn(xhci, "No ports on the roothubs?\n");
		return -ENODEV;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Found %u USB 2.0 ports and %u USB 3.0 ports.",
		       xhci->usb2_rhub.num_ports, xhci->usb3_rhub.num_ports);

	/* Place limits on the number of roothub ports so that the hub
	 * descriptors aren't longer than the USB core will allocate.
	 */
	if (xhci->usb3_rhub.num_ports > USB_SS_MAXPORTS) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			       "Limiting USB 3.0 roothub ports to %u.",
			       USB_SS_MAXPORTS);
		xhci->usb3_rhub.num_ports = USB_SS_MAXPORTS;
	}
	if (xhci->usb2_rhub.num_ports > USB_MAXCHILDREN) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			       "Limiting USB 2.0 roothub ports to %u.",
			       USB_MAXCHILDREN);
		xhci->usb2_rhub.num_ports = USB_MAXCHILDREN;
	}

	/*
	 * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
	 * Not sure how the USB core will handle a hub with no ports...
	 */
	xhci_create_rhub_port_array(xhci, &xhci->usb2_rhub, flags);
	xhci_create_rhub_port_array(xhci, &xhci->usb3_rhub, flags);

	return 0;
}

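/*
 * Allocate and initialize the xHC's data structures: DCBAA, DMA pools,
 * command ring, event ring and ERST, scratchpad buffers, and roothub port
 * arrays.  On failure, everything allocated so far is torn down.
 */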
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t dma;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	unsigned int val, val2;
	u64 val_64;
	u32 page_size, temp;
	int i, ret;

	INIT_LIST_HEAD(&xhci->cmd_list);

	/* init command timeout work */
	INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
	init_completion(&xhci->cmd_ring_stop_completion);

	page_size = readl(&xhci->op_regs->page_size);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Supported page size register = 0x%x", page_size);
	/* bit i of the low 16 bits set means a page size of 2^(i + 12) */
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	if (i < 16)
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			       "Supported page size of %iK", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "HCD page size set to %iK", xhci->page_size / 1024);

	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// xHC can handle at most %d device slots.", val);
	val2 = readl(&xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Setting Max device slots reg = 0x%x.", val);
	writel(val, &xhci->op_regs->config_reg);

	/*
	 * Section 5.4.6 - the Device Context Base Address Array must be
	 * "physically contiguous and 64-byte (cache line) aligned".
	 */
	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
					 flags);
	if (!xhci->dcbaa)
		goto fail;
	memset(xhci->dcbaa, 0, sizeof(*xhci->dcbaa));
	xhci->dcbaa->dma = dma;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Device context base array address = 0x%llx (DMA), %p (virt)",
		       (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);

	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments
	 * and our use of dma addresses in the trb_address_map radix tree needs
	 * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
	 */
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);

	/* 2112 bytes covers the largest input context with 64-byte CSZ */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			2112, 64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;

	/* Linear stream context arrays don't have any boundary restrictions,
	 * and only need to be 16-byte aligned.
	 */
	xhci->small_streams_pool =
		dma_pool_create("xHCI 256 byte stream ctx arrays",
				dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
	xhci->medium_streams_pool =
		dma_pool_create("xHCI 1KB stream ctx arrays",
				dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
	 * will be allocated with dma_alloc_coherent()
	 */

	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
		goto fail;

	/* Set up the command ring to have one segment for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Allocated command ring at %p", xhci->cmd_ring);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
		       (unsigned long long)xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
		 xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Setting command ring address to 0x%016llx", val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);

	xhci->lpm_command = xhci_alloc_command_with_ctx(xhci, true, flags);
	if (!xhci->lpm_command)
		goto fail;

	/* Reserve one command ring TRB for disabling LPM.
	 * Since the USB core grabs the shared usb_bus bandwidth mutex before
	 * disabling LPM, we only need to reserve one TRB for all devices.
	 */
	xhci->cmd_ring_reserved_trbs++;

	val = readl(&xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Doorbell array is located at offset 0x%x from cap regs base addr",
		       val);
	xhci->dba = (void __iomem *) xhci->cap_regs + val;

	xhci->ir_set = &xhci->run_regs->ir_set[0];

	/*
	 * Event ring setup: Allocate a normal ring, but also setup
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
					   0, flags);
	if (!xhci->event_ring)
		goto fail;
	if (xhci_check_trb_in_td_math(xhci) < 0)
		goto fail;

	ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags);
	if (ret)
		goto fail;

	/* set ERST count with the number of entries in the segment table */
	val = readl(&xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Write ERST size = %i to ir_set 0 (some bits preserved)",
		       val);
	writel(val, &xhci->ir_set->erst_size);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Set ERST entries to point to event ring.");
	/* set the segment table base address */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Set ERST base address for ir_set 0 = 0x%llx",
		       (unsigned long long)xhci->erst.erst_dma_addr);
	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

	/* Set the event ring dequeue address */
	xhci_set_hc_event_deq(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Wrote ERST address to ir_set 0.");

	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1 ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */
	for (i = 0; i < MAX_HC_SLOTS; i++)
		xhci->devs[i] = NULL;
	for (i = 0; i < USB_MAXCHILDREN; i++) {
		xhci->usb2_rhub.bus_state.resume_done[i] = 0;
		xhci->usb3_rhub.bus_state.resume_done[i] = 0;
		/* Only the USB 2.0 completions will ever be used. */
		init_completion(&xhci->usb2_rhub.bus_state.rexit_done[i]);
	}

	if (scratchpad_alloc(xhci, flags))
		goto fail;
	if (xhci_setup_port_arrays(xhci, flags))
		goto fail;

	/* Enable USB 3.0 device notifications for function remote wake, which
	 * is necessary for allowing USB 3.0 devices to do remote wakeup from
	 * U3 (device suspend).
	 */
	temp = readl(&xhci->op_regs->dev_notification);
	temp &= ~DEV_NOTE_MASK;
	temp |= DEV_NOTE_FWAKE;
	writel(temp, &xhci->op_regs->dev_notification);

	return 0;

fail:
	xhci_halt(xhci);
	xhci_reset(xhci);
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}
