/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 * Author: Sarah Sharp
 *
 * Memory management for the xHCI host controller driver: ring segments,
 * transfer/command/event rings, device and input contexts, stream contexts,
 * scratchpad buffers and root hub port bookkeeping are allocated and freed
 * here.
 */
23#include <linux/usb.h>
24#include <linux/pci.h>
25#include <linux/slab.h>
26#include <linux/dmapool.h>
27#include <linux/dma-mapping.h>
28
29#include "xhci.h"
30#include "xhci-trace.h"
31
32
33
34
35
36
37
38
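/*
 * Allocate one ring segment.  The xhci_segment bookkeeping structure comes
 * from the slab allocator; the TRB array comes from the DMA-coherent segment
 * pool and is zeroed.  If the caller passes cycle_state == 0, the cycle bit
 * of every TRB is set so ownership starts out inverted for this segment.
 */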
39static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
40 unsigned int cycle_state, gfp_t flags)
41{
42 struct xhci_segment *seg;
43 dma_addr_t dma;
44 int i;
45
46 seg = kzalloc(sizeof *seg, flags);
47 if (!seg)
48 return NULL;
49
50 seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
51 if (!seg->trbs) {
52 kfree(seg);
53 return NULL;
54 }
55
56 memset(seg->trbs, 0, TRB_SEGMENT_SIZE);
57
58 if (cycle_state == 0) {
59 for (i = 0; i < TRBS_PER_SEGMENT; i++)
60 seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
61 }
62 seg->dma = dma;
63 seg->next = NULL;
64
65 return seg;
66}
67
68static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
69{
70 if (seg->trbs) {
71 dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
72 seg->trbs = NULL;
73 }
74 kfree(seg);
75}
76
77static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
78 struct xhci_segment *first)
79{
80 struct xhci_segment *seg;
81
82 seg = first->next;
83 while (seg != first) {
84 struct xhci_segment *next = seg->next;
85 xhci_segment_free(xhci, seg);
86 seg = next;
87 }
88 xhci_segment_free(xhci, first);
89}
90
91
92
93
94
95
96
97
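/*
 * Make the link TRB of @prev point at @next.  Event ring segments are chained
 * through the ERST rather than link TRBs, so they only get the ->next
 * pointer.  For all other ring types the last TRB of @prev becomes a Link
 * TRB; the chain bit is also set when the host needs it (see
 * xhci_link_trb_quirk(), and AMD 0.96 hosts on isochronous rings).
 */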
98static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
99 struct xhci_segment *next, enum xhci_ring_type type)
100{
101 u32 val;
102
103 if (!prev || !next)
104 return;
105 prev->next = next;
106 if (type != TYPE_EVENT) {
107 prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
108 cpu_to_le64(next->dma);
109
110
111 val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
112 val &= ~TRB_TYPE_BITMASK;
113 val |= TRB_TYPE(TRB_LINK);
114
115
116 if (xhci_link_trb_quirk(xhci) ||
117 (type == TYPE_ISOC &&
118 (xhci->quirks & XHCI_AMD_0x96_HOST)))
119 val |= TRB_CHAIN;
120 prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
121 }
122}
123
124
125
126
127
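/*
 * Splice a chain of freshly allocated segments (@first .. @last) into @ring
 * just after the current enqueue segment, then update the segment count and
 * the free-TRB count.  If the enqueue segment was the last segment, the
 * LINK_TOGGLE bit is moved from the old last segment to the new one so the
 * producer still flips its cycle state exactly once per trip around the ring.
 */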
128static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
129 struct xhci_segment *first, struct xhci_segment *last,
130 unsigned int num_segs)
131{
132 struct xhci_segment *next;
133
134 if (!ring || !first || !last)
135 return;
136
137 next = ring->enq_seg->next;
138 xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
139 xhci_link_segments(xhci, last, next, ring->type);
140 ring->num_segs += num_segs;
141 ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
142
143 if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
144 ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
145 &= ~cpu_to_le32(LINK_TOGGLE);
146 last->trbs[TRBS_PER_SEGMENT-1].link.control
147 |= cpu_to_le32(LINK_TOGGLE);
148 ring->last_seg = last;
149 }
150}
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
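/*
 * Stream rings are looked up by DMA address when a transfer event arrives,
 * using a radix tree keyed on the TRB address shifted down to segment
 * granularity (TRB_SEGMENT_SHIFT).  The helpers below add and remove the
 * per-segment entries in that tree.
 */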
183static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
184 struct xhci_ring *ring,
185 struct xhci_segment *seg,
186 gfp_t mem_flags)
187{
188 unsigned long key;
189 int ret;
190
191 key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
192
193 if (radix_tree_lookup(trb_address_map, key))
194 return 0;
195
196 ret = radix_tree_maybe_preload(mem_flags);
197 if (ret)
198 return ret;
199 ret = radix_tree_insert(trb_address_map,
200 key, ring);
201 radix_tree_preload_end();
202 return ret;
203}
204
205static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
206 struct xhci_segment *seg)
207{
208 unsigned long key;
209
210 key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
211 if (radix_tree_lookup(trb_address_map, key))
212 radix_tree_delete(trb_address_map, key);
213}
214
215static int xhci_update_stream_segment_mapping(
216 struct radix_tree_root *trb_address_map,
217 struct xhci_ring *ring,
218 struct xhci_segment *first_seg,
219 struct xhci_segment *last_seg,
220 gfp_t mem_flags)
221{
222 struct xhci_segment *seg;
223 struct xhci_segment *failed_seg;
224 int ret;
225
226 if (WARN_ON_ONCE(trb_address_map == NULL))
227 return 0;
228
229 seg = first_seg;
230 do {
231 ret = xhci_insert_segment_mapping(trb_address_map,
232 ring, seg, mem_flags);
233 if (ret)
234 goto remove_streams;
235 if (seg == last_seg)
236 return 0;
237 seg = seg->next;
238 } while (seg != first_seg);
239
240 return 0;
241
242remove_streams:
243 failed_seg = seg;
244 seg = first_seg;
245 do {
246 xhci_remove_segment_mapping(trb_address_map, seg);
247 if (seg == failed_seg)
248 return ret;
249 seg = seg->next;
250 } while (seg != first_seg);
251
252 return ret;
253}
254
255static void xhci_remove_stream_mapping(struct xhci_ring *ring)
256{
257 struct xhci_segment *seg;
258
259 if (WARN_ON_ONCE(ring->trb_address_map == NULL))
260 return;
261
262 seg = ring->first_seg;
263 do {
264 xhci_remove_segment_mapping(ring->trb_address_map, seg);
265 seg = seg->next;
266 } while (seg != ring->first_seg);
267}
268
269static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
270{
271 return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
272 ring->first_seg, ring->last_seg, mem_flags);
273}
274
275
276void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
277{
278 if (!ring)
279 return;
280
281 if (ring->first_seg) {
282 if (ring->type == TYPE_STREAM)
283 xhci_remove_stream_mapping(ring);
284 xhci_free_segments_for_ring(xhci, ring->first_seg);
285 }
286
287 kfree(ring);
288}
289
290static void xhci_initialize_ring_info(struct xhci_ring *ring,
291 unsigned int cycle_state)
292{
293
294 ring->enqueue = ring->first_seg->trbs;
295 ring->enq_seg = ring->first_seg;
296 ring->dequeue = ring->enqueue;
297 ring->deq_seg = ring->first_seg;
298
299
300
301
302
303
304
305 ring->cycle_state = cycle_state;
306
307 ring->enq_updates = 0;
308 ring->deq_updates = 0;
309
310
311
312
313
314 ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
315}
316
317
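/*
 * Allocate and link together @num_segs segments, returning the first and
 * last segment of the chain.  The last segment links back to the first;
 * the caller decides which link TRB, if any, gets the toggle-cycle bit.
 */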
318static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
319 struct xhci_segment **first, struct xhci_segment **last,
320 unsigned int num_segs, unsigned int cycle_state,
321 enum xhci_ring_type type, gfp_t flags)
322{
323 struct xhci_segment *prev;
324
325 prev = xhci_segment_alloc(xhci, cycle_state, flags);
326 if (!prev)
327 return -ENOMEM;
328 num_segs--;
329
330 *first = prev;
331 while (num_segs > 0) {
332 struct xhci_segment *next;
333
334 next = xhci_segment_alloc(xhci, cycle_state, flags);
335 if (!next) {
336 prev = *first;
337 while (prev) {
338 next = prev->next;
339 xhci_segment_free(xhci, prev);
340 prev = next;
341 }
342 return -ENOMEM;
343 }
344 xhci_link_segments(xhci, prev, next, type);
345
346 prev = next;
347 num_segs--;
348 }
349 xhci_link_segments(xhci, prev, *first, type);
350 *last = prev;
351
352 return 0;
353}
354
355
356
357
358
359
360
361
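/*
 * Create a new ring with zeroed TRBs.  @num_segs may be 0 for a ring whose
 * segments will be attached later.  For non-event rings the link TRB of the
 * last segment gets the toggle-cycle bit so the producer knows when it has
 * wrapped around.
 */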
362static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
363 unsigned int num_segs, unsigned int cycle_state,
364 enum xhci_ring_type type, gfp_t flags)
365{
366 struct xhci_ring *ring;
367 int ret;
368
	ring = kzalloc(sizeof(*ring), flags);
370 if (!ring)
371 return NULL;
372
373 ring->num_segs = num_segs;
374 INIT_LIST_HEAD(&ring->td_list);
375 ring->type = type;
376 if (num_segs == 0)
377 return ring;
378
379 ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
380 &ring->last_seg, num_segs, cycle_state, type, flags);
381 if (ret)
382 goto fail;
383
384
385 if (type != TYPE_EVENT) {
386
387 ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
388 cpu_to_le32(LINK_TOGGLE);
389 }
390 xhci_initialize_ring_info(ring, cycle_state);
391 return ring;
392
393fail:
394 kfree(ring);
395 return NULL;
396}
397
398void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
399 struct xhci_virt_device *virt_dev,
400 unsigned int ep_index)
401{
402 int rings_cached;
403
404 rings_cached = virt_dev->num_rings_cached;
405 if (rings_cached < XHCI_MAX_RINGS_CACHED) {
406 virt_dev->ring_cache[rings_cached] =
407 virt_dev->eps[ep_index].ring;
408 virt_dev->num_rings_cached++;
409 xhci_dbg(xhci, "Cached old ring, "
410 "%d ring%s cached\n",
411 virt_dev->num_rings_cached,
412 (virt_dev->num_rings_cached > 1) ? "s" : "");
413 } else {
414 xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
415 xhci_dbg(xhci, "Ring cache full (%d rings), "
416 "freeing ring\n",
417 virt_dev->num_rings_cached);
418 }
419 virt_dev->eps[ep_index].ring = NULL;
420}
421
422
423
424
425static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
426 struct xhci_ring *ring, unsigned int cycle_state,
427 enum xhci_ring_type type)
428{
429 struct xhci_segment *seg = ring->first_seg;
430 int i;
431
432 do {
433 memset(seg->trbs, 0,
434 sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
435 if (cycle_state == 0) {
436 for (i = 0; i < TRBS_PER_SEGMENT; i++)
437 seg->trbs[i].link.control |=
438 cpu_to_le32(TRB_CYCLE);
439 }
440
441 xhci_link_segments(xhci, seg, seg->next, type);
442 seg = seg->next;
443 } while (seg != ring->first_seg);
444 ring->type = type;
445 xhci_initialize_ring_info(ring, cycle_state);
446
447
448
449 INIT_LIST_HEAD(&ring->td_list);
450}
451
452
453
454
455
456
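/*
 * Grow a ring so it can take at least @num_trbs more TRBs.  The number of
 * new segments is max(current segment count, segments needed), i.e. the ring
 * at least doubles, which keeps repeated small expansions cheap.  Stream
 * rings also get their new segments added to the DMA-to-ring radix tree.
 */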
457int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
458 unsigned int num_trbs, gfp_t flags)
459{
460 struct xhci_segment *first;
461 struct xhci_segment *last;
462 unsigned int num_segs;
463 unsigned int num_segs_needed;
464 int ret;
465
466 num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
467 (TRBS_PER_SEGMENT - 1);
468
469
470 num_segs = ring->num_segs > num_segs_needed ?
471 ring->num_segs : num_segs_needed;
472
473 ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
474 num_segs, ring->cycle_state, ring->type, flags);
475 if (ret)
476 return -ENOMEM;
477
478 if (ring->type == TYPE_STREAM)
479 ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
480 ring, first, last, flags);
481 if (ret) {
482 struct xhci_segment *next;
483 do {
484 next = first->next;
485 xhci_segment_free(xhci, first);
486 if (first == last)
487 break;
488 first = next;
489 } while (true);
490 return ret;
491 }
492
493 xhci_link_rings(xhci, ring, first, last, num_segs);
494 xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
			"ring expansion succeeded, now has %d segments",
496 ring->num_segs);
497
498 return 0;
499}
500
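/*
 * Device contexts come from the device_pool.  An output (device) context is
 * 1024 bytes with 32-byte contexts or 2048 bytes with 64-byte contexts
 * (HCC_64BYTE_CONTEXT); an input context is one context entry larger to make
 * room for the input control context at offset 0.
 */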
501#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
502
503static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
504 int type, gfp_t flags)
505{
506 struct xhci_container_ctx *ctx;
507
508 if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
509 return NULL;
510
511 ctx = kzalloc(sizeof(*ctx), flags);
512 if (!ctx)
513 return NULL;
514
515 ctx->type = type;
516 ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
517 if (type == XHCI_CTX_TYPE_INPUT)
518 ctx->size += CTX_SIZE(xhci->hcc_params);
519
520 ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
521 if (!ctx->bytes) {
522 kfree(ctx);
523 return NULL;
524 }
525 memset(ctx->bytes, 0, ctx->size);
526 return ctx;
527}
528
529static void xhci_free_container_ctx(struct xhci_hcd *xhci,
530 struct xhci_container_ctx *ctx)
531{
532 if (!ctx)
533 return;
534 dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
535 kfree(ctx);
536}
537
538struct xhci_input_control_ctx *xhci_get_input_control_ctx(
539 struct xhci_container_ctx *ctx)
540{
541 if (ctx->type != XHCI_CTX_TYPE_INPUT)
542 return NULL;
543
544 return (struct xhci_input_control_ctx *)ctx->bytes;
545}
546
547struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
548 struct xhci_container_ctx *ctx)
549{
550 if (ctx->type == XHCI_CTX_TYPE_DEVICE)
551 return (struct xhci_slot_ctx *)ctx->bytes;
552
553 return (struct xhci_slot_ctx *)
554 (ctx->bytes + CTX_SIZE(xhci->hcc_params));
555}
556
557struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
558 struct xhci_container_ctx *ctx,
559 unsigned int ep_index)
560{
561
562 ep_index++;
563 if (ctx->type == XHCI_CTX_TYPE_INPUT)
564 ep_index++;
565
566 return (struct xhci_ep_ctx *)
567 (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
568}
569
570
571
572
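/*
 * Stream context arrays come from one of three places depending on their
 * size: the small or medium stream DMA pools when they fit, or
 * dma_alloc_coherent() for anything bigger than MEDIUM_STREAM_ARRAY_SIZE.
 * The free path here must mirror the allocation in xhci_alloc_stream_ctx().
 */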
573static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
574 unsigned int num_stream_ctxs,
575 struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
576{
577 struct device *dev = xhci_to_hcd(xhci)->self.controller;
578 size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
579
	if (size > MEDIUM_STREAM_ARRAY_SIZE)
		dma_free_coherent(dev, size, stream_ctx, dma);
	else if (size <= SMALL_STREAM_ARRAY_SIZE)
		dma_pool_free(xhci->small_streams_pool, stream_ctx, dma);
	else
		dma_pool_free(xhci->medium_streams_pool, stream_ctx, dma);
589}
590
591
592
593
594
595
596
597
598
599
600
601static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
602 unsigned int num_stream_ctxs, dma_addr_t *dma,
603 gfp_t mem_flags)
604{
605 struct device *dev = xhci_to_hcd(xhci)->self.controller;
606 size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
607
608 if (size > MEDIUM_STREAM_ARRAY_SIZE)
609 return dma_alloc_coherent(dev, size,
610 dma, mem_flags);
611 else if (size <= SMALL_STREAM_ARRAY_SIZE)
612 return dma_pool_alloc(xhci->small_streams_pool,
613 mem_flags, dma);
614 else
615 return dma_pool_alloc(xhci->medium_streams_pool,
616 mem_flags, dma);
617}
618
619struct xhci_ring *xhci_dma_to_transfer_ring(
620 struct xhci_virt_ep *ep,
621 u64 address)
622{
623 if (ep->ep_state & EP_HAS_STREAMS)
624 return radix_tree_lookup(&ep->stream_info->trb_address_map,
625 address >> TRB_SEGMENT_SHIFT);
626 return ep->ring;
627}
628
629struct xhci_ring *xhci_stream_id_to_ring(
630 struct xhci_virt_device *dev,
631 unsigned int ep_index,
632 unsigned int stream_id)
633{
634 struct xhci_virt_ep *ep = &dev->eps[ep_index];
635
636 if (stream_id == 0)
637 return ep->ring;
638 if (!ep->stream_info)
639 return NULL;
640
641 if (stream_id > ep->stream_info->num_streams)
642 return NULL;
643 return ep->stream_info->stream_rings[stream_id];
644}
645
646
647
648
649
650
651
652
653
654
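/*
 * Allocate everything an endpoint with streams needs: the stream_info
 * bookkeeping, the array of stream ring pointers, the hardware stream
 * context array, a command reserved for freeing the streams later, and one
 * ring per stream (stream 0 is reserved and gets no ring).  Each ring's
 * first segment address is written into the stream context array and the
 * ring is added to the DMA-to-ring radix tree.
 */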
655struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
656 unsigned int num_stream_ctxs,
657 unsigned int num_streams, gfp_t mem_flags)
658{
659 struct xhci_stream_info *stream_info;
660 u32 cur_stream;
661 struct xhci_ring *cur_ring;
662 u64 addr;
663 int ret;
664
665 xhci_dbg(xhci, "Allocating %u streams and %u "
666 "stream context array entries.\n",
667 num_streams, num_stream_ctxs);
668 if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
669 xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
670 return NULL;
671 }
672 xhci->cmd_ring_reserved_trbs++;
673
674 stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
675 if (!stream_info)
676 goto cleanup_trbs;
677
678 stream_info->num_streams = num_streams;
679 stream_info->num_stream_ctxs = num_stream_ctxs;
680
681
682 stream_info->stream_rings = kzalloc(
683 sizeof(struct xhci_ring *)*num_streams,
684 mem_flags);
685 if (!stream_info->stream_rings)
686 goto cleanup_info;
687
688
689 stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
690 num_stream_ctxs, &stream_info->ctx_array_dma,
691 mem_flags);
	if (!stream_info->stream_ctx_array)
		goto cleanup_ring_array;
694 memset(stream_info->stream_ctx_array, 0,
695 sizeof(struct xhci_stream_ctx)*num_stream_ctxs);
696
697
698 stream_info->free_streams_command =
699 xhci_alloc_command(xhci, true, true, mem_flags);
700 if (!stream_info->free_streams_command)
701 goto cleanup_ctx;
702
703 INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
704
705
706
707
708
709 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
710 stream_info->stream_rings[cur_stream] =
711 xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, mem_flags);
712 cur_ring = stream_info->stream_rings[cur_stream];
713 if (!cur_ring)
714 goto cleanup_rings;
715 cur_ring->stream_id = cur_stream;
716 cur_ring->trb_address_map = &stream_info->trb_address_map;
717
718 addr = cur_ring->first_seg->dma |
719 SCT_FOR_CTX(SCT_PRI_TR) |
720 cur_ring->cycle_state;
721 stream_info->stream_ctx_array[cur_stream].stream_ring =
722 cpu_to_le64(addr);
723 xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
724 cur_stream, (unsigned long long) addr);
725
726 ret = xhci_update_stream_mapping(cur_ring, mem_flags);
727 if (ret) {
728 xhci_ring_free(xhci, cur_ring);
729 stream_info->stream_rings[cur_stream] = NULL;
730 goto cleanup_rings;
731 }
732 }
733
734
735
736
737
738
739
740 return stream_info;
741
742cleanup_rings:
743 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
744 cur_ring = stream_info->stream_rings[cur_stream];
745 if (cur_ring) {
746 xhci_ring_free(xhci, cur_ring);
747 stream_info->stream_rings[cur_stream] = NULL;
748 }
749 }
750 xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
	xhci_free_stream_ctx(xhci,
			stream_info->num_stream_ctxs,
			stream_info->stream_ctx_array,
			stream_info->ctx_array_dma);
cleanup_ring_array:
	kfree(stream_info->stream_rings);
753cleanup_info:
754 kfree(stream_info);
755cleanup_trbs:
756 xhci->cmd_ring_reserved_trbs--;
757 return NULL;
758}
759
760
761
762
763void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
764 struct xhci_ep_ctx *ep_ctx,
765 struct xhci_stream_info *stream_info)
766{
767 u32 max_primary_streams;
768
769
770
771
772 max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
773 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
774 "Setting number of stream ctx array entries to %u",
775 1 << (max_primary_streams + 1));
776 ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
777 ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
778 | EP_HAS_LSA);
779 ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
780}
781
782
783
784
785
786
787void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx,
788 struct xhci_virt_ep *ep)
789{
790 dma_addr_t addr;
791 ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
792 addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
793 ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
794}
795
796
797
798
799
800void xhci_free_stream_info(struct xhci_hcd *xhci,
801 struct xhci_stream_info *stream_info)
802{
803 int cur_stream;
804 struct xhci_ring *cur_ring;
805
806 if (!stream_info)
807 return;
808
809 for (cur_stream = 1; cur_stream < stream_info->num_streams;
810 cur_stream++) {
811 cur_ring = stream_info->stream_rings[cur_stream];
812 if (cur_ring) {
813 xhci_ring_free(xhci, cur_ring);
814 stream_info->stream_rings[cur_stream] = NULL;
815 }
816 }
817 xhci_free_command(xhci, stream_info->free_streams_command);
818 xhci->cmd_ring_reserved_trbs--;
819 if (stream_info->stream_ctx_array)
820 xhci_free_stream_ctx(xhci,
821 stream_info->num_stream_ctxs,
822 stream_info->stream_ctx_array,
823 stream_info->ctx_array_dma);
824
825 kfree(stream_info->stream_rings);
826 kfree(stream_info);
827}
828
829
830
831
832static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
833 struct xhci_virt_ep *ep)
834{
835 setup_timer(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog,
836 (unsigned long)ep);
837 ep->xhci = xhci;
838}
839
840static void xhci_free_tt_info(struct xhci_hcd *xhci,
841 struct xhci_virt_device *virt_dev,
842 int slot_id)
843{
844 struct list_head *tt_list_head;
845 struct xhci_tt_bw_info *tt_info, *next;
846 bool slot_found = false;
847
848
849
850
851 if (virt_dev->real_port == 0 ||
852 virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
853 xhci_dbg(xhci, "Bad real port.\n");
854 return;
855 }
856
857 tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
858 list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
859
860 if (tt_info->slot_id == slot_id) {
861 slot_found = true;
862 list_del(&tt_info->tt_list);
863 kfree(tt_info);
864 } else if (slot_found) {
865 break;
866 }
867 }
868}
869
870int xhci_alloc_tt_info(struct xhci_hcd *xhci,
871 struct xhci_virt_device *virt_dev,
872 struct usb_device *hdev,
873 struct usb_tt *tt, gfp_t mem_flags)
874{
875 struct xhci_tt_bw_info *tt_info;
876 unsigned int num_ports;
877 int i, j;
878
879 if (!tt->multi)
880 num_ports = 1;
881 else
882 num_ports = hdev->maxchild;
883
884 for (i = 0; i < num_ports; i++, tt_info++) {
885 struct xhci_interval_bw_table *bw_table;
886
887 tt_info = kzalloc(sizeof(*tt_info), mem_flags);
888 if (!tt_info)
889 goto free_tts;
890 INIT_LIST_HEAD(&tt_info->tt_list);
891 list_add(&tt_info->tt_list,
892 &xhci->rh_bw[virt_dev->real_port - 1].tts);
893 tt_info->slot_id = virt_dev->udev->slot_id;
894 if (tt->multi)
895 tt_info->ttport = i+1;
896 bw_table = &tt_info->bw_table;
897 for (j = 0; j < XHCI_MAX_INTERVAL; j++)
898 INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
899 }
900 return 0;
901
902free_tts:
903 xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
904 return -ENOMEM;
905}
906
907
908
909
910
911
912
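/*
 * Tear down a virtual device: clear its DCBAA entry, free every endpoint's
 * ring and stream info, drop its TT bandwidth bookkeeping, free the cached
 * rings and the input/output contexts, and finally the device structure
 * itself.
 */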
913void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
914{
915 struct xhci_virt_device *dev;
916 int i;
917 int old_active_eps = 0;
918
919
920 if (slot_id == 0 || !xhci->devs[slot_id])
921 return;
922
923 dev = xhci->devs[slot_id];
924 xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
925 if (!dev)
926 return;
927
928 if (dev->tt_info)
929 old_active_eps = dev->tt_info->active_eps;
930
931 for (i = 0; i < 31; ++i) {
932 if (dev->eps[i].ring)
933 xhci_ring_free(xhci, dev->eps[i].ring);
934 if (dev->eps[i].stream_info)
935 xhci_free_stream_info(xhci,
936 dev->eps[i].stream_info);
937
938
939
940
941
942 if (!list_empty(&dev->eps[i].bw_endpoint_list))
943 xhci_warn(xhci, "Slot %u endpoint %u "
944 "not removed from BW list!\n",
945 slot_id, i);
946 }
947
948 xhci_free_tt_info(xhci, dev, slot_id);
949
950 xhci_update_tt_active_eps(xhci, dev, old_active_eps);
951
952 if (dev->ring_cache) {
953 for (i = 0; i < dev->num_rings_cached; i++)
954 xhci_ring_free(xhci, dev->ring_cache[i]);
955 kfree(dev->ring_cache);
956 }
957
958 if (dev->in_ctx)
959 xhci_free_container_ctx(xhci, dev->in_ctx);
960 if (dev->out_ctx)
961 xhci_free_container_ctx(xhci, dev->out_ctx);
962
963 kfree(xhci->devs[slot_id]);
964 xhci->devs[slot_id] = NULL;
965}
966
967int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
968 struct usb_device *udev, gfp_t flags)
969{
970 struct xhci_virt_device *dev;
971 int i;
972
973
974 if (slot_id == 0 || xhci->devs[slot_id]) {
975 xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
976 return 0;
977 }
978
979 xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
980 if (!xhci->devs[slot_id])
981 return 0;
982 dev = xhci->devs[slot_id];
983
984
985 dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
986 if (!dev->out_ctx)
987 goto fail;
988
989 xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
990 (unsigned long long)dev->out_ctx->dma);
991
992
993 dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
994 if (!dev->in_ctx)
995 goto fail;
996
997 xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
998 (unsigned long long)dev->in_ctx->dma);
999
1000
1001 for (i = 0; i < 31; i++) {
1002 xhci_init_endpoint_timer(xhci, &dev->eps[i]);
1003 INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
1004 INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
1005 }
1006
1007
1008 dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, flags);
1009 if (!dev->eps[0].ring)
1010 goto fail;
1011
1012
1013 dev->ring_cache = kzalloc(
1014 sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
1015 flags);
1016 if (!dev->ring_cache)
1017 goto fail;
1018 dev->num_rings_cached = 0;
1019
1020 init_completion(&dev->cmd_completion);
1021 dev->udev = udev;
1022
1023
1024 xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
1025 xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
1026 slot_id,
1027 &xhci->dcbaa->dev_context_ptrs[slot_id],
1028 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
1029
1030 return 1;
1031fail:
1032 xhci_free_virt_device(xhci, slot_id);
1033 return 0;
1034}
1035
1036void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
1037 struct usb_device *udev)
1038{
1039 struct xhci_virt_device *virt_dev;
1040 struct xhci_ep_ctx *ep0_ctx;
1041 struct xhci_ring *ep_ring;
1042
1043 virt_dev = xhci->devs[udev->slot_id];
1044 ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
1045 ep_ring = virt_dev->eps[0].ring;
1046
1047
1048
1049
1050
1051
1052
1053 ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
1054 ep_ring->enqueue)
1055 | ep_ring->cycle_state);
1056}
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
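/*
 * Walk up the topology to the device attached directly to a root hub port
 * and translate that port into the xHC's raw port number.  SuperSpeed
 * devices hang off the shared USB 3.0 hcd, everything else off the primary
 * hcd.
 */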
1069static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
1070 struct usb_device *udev)
1071{
1072 struct usb_device *top_dev;
1073 struct usb_hcd *hcd;
1074
1075 if (udev->speed == USB_SPEED_SUPER)
1076 hcd = xhci->shared_hcd;
1077 else
1078 hcd = xhci->main_hcd;
1079
1080 for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
1081 top_dev = top_dev->parent)
1082 ;
1083
1084 return xhci_find_raw_port_number(hcd, top_dev->portnum);
1085}
1086
1087
1088int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
1089{
1090 struct xhci_virt_device *dev;
1091 struct xhci_ep_ctx *ep0_ctx;
1092 struct xhci_slot_ctx *slot_ctx;
1093 u32 port_num;
1094 u32 max_packets;
1095 struct usb_device *top_dev;
1096
1097 dev = xhci->devs[udev->slot_id];
1098
1099 if (udev->slot_id == 0 || !dev) {
1100 xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
1101 udev->slot_id);
1102 return -EINVAL;
1103 }
1104 ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
1105 slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
1106
1107
1108 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
1109 switch (udev->speed) {
1110 case USB_SPEED_SUPER:
1111 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
1112 max_packets = MAX_PACKET(512);
1113 break;
1114 case USB_SPEED_HIGH:
1115 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
1116 max_packets = MAX_PACKET(64);
1117 break;
1118
1119 case USB_SPEED_FULL:
1120 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
1121 max_packets = MAX_PACKET(64);
1122 break;
1123 case USB_SPEED_LOW:
1124 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
1125 max_packets = MAX_PACKET(8);
1126 break;
1127 case USB_SPEED_WIRELESS:
1128 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
1131 default:
1132
1133 return -EINVAL;
1134 }
1135
1136 port_num = xhci_find_real_port_number(xhci, udev);
1137 if (!port_num)
1138 return -EINVAL;
1139 slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
1140
1141 for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
1142 top_dev = top_dev->parent)
1143 ;
1144 dev->fake_port = top_dev->portnum;
1145 dev->real_port = port_num;
1146 xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
1147 xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
1148
1149
1150
1151
1152
1153
1154
1155 if (!udev->tt || !udev->tt->hub->parent) {
1156 dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
1157 } else {
1158 struct xhci_root_port_bw_info *rh_bw;
1159 struct xhci_tt_bw_info *tt_bw;
1160
1161 rh_bw = &xhci->rh_bw[port_num - 1];
1162
1163 list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
1164 if (tt_bw->slot_id != udev->tt->hub->slot_id)
1165 continue;
1166
1167 if (!dev->udev->tt->multi ||
1168 (udev->tt->multi &&
1169 tt_bw->ttport == dev->udev->ttport)) {
1170 dev->bw_table = &tt_bw->bw_table;
1171 dev->tt_info = tt_bw;
1172 break;
1173 }
1174 }
1175 if (!dev->tt_info)
1176 xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
1177 }
1178
1179
1180 if (udev->tt && udev->tt->hub->parent) {
1181 slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
1182 (udev->ttport << 8));
1183 if (udev->tt->multi)
1184 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
1185 }
1186 xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
1187 xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
1188
1189
1190
1191 ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
1192
1193
1194 ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
1195 max_packets);
1196
1197 ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
1198 dev->eps[0].ring->cycle_state);
1199
1200
1201
1202 return 0;
1203}
1204
1205
1206
1207
1208
1209
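/*
 * bInterval for high/super-speed periodic endpoints is already an exponent
 * (1-16); convert it to the xHC's 0-based exponent, warning if it had to be
 * clamped.  Full-speed isochronous endpoints count bInterval in frames, so
 * 3 is added to convert frames into 125 us microframe units (1 frame = 2^3
 * microframes).  For example, a full-speed isoc endpoint with bInterval = 4
 * (period 2^(4-1) = 8 frames) ends up as exponent 3 + 3 = 6, i.e. 64
 * microframes = 8 ms.
 */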
1210static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
1211 struct usb_host_endpoint *ep)
1212{
1213 unsigned int interval;
1214
1215 interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
1216 if (interval != ep->desc.bInterval - 1)
1217 dev_warn(&udev->dev,
1218 "ep %#x - rounding interval to %d %sframes\n",
1219 ep->desc.bEndpointAddress,
1220 1 << interval,
1221 udev->speed == USB_SPEED_FULL ? "" : "micro");
1222
1223 if (udev->speed == USB_SPEED_FULL) {
1224
1225
1226
1227
1228
1229 interval += 3;
1230 }
1231
1232 return interval;
1233}
1234
1235
1236
1237
1238
1239static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
1240 struct usb_host_endpoint *ep, unsigned int desc_interval,
1241 unsigned int min_exponent, unsigned int max_exponent)
1242{
1243 unsigned int interval;
1244
1245 interval = fls(desc_interval) - 1;
1246 interval = clamp_val(interval, min_exponent, max_exponent);
1247 if ((1 << interval) != desc_interval)
1248 dev_warn(&udev->dev,
1249 "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
1250 ep->desc.bEndpointAddress,
1251 1 << interval,
1252 desc_interval);
1253
1254 return interval;
1255}
1256
1257static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
1258 struct usb_host_endpoint *ep)
1259{
1260 if (ep->desc.bInterval == 0)
1261 return 0;
1262 return xhci_microframes_to_exponent(udev, ep,
1263 ep->desc.bInterval, 0, 15);
1264}
1265
1266
1267static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
1268 struct usb_host_endpoint *ep)
1269{
1270 return xhci_microframes_to_exponent(udev, ep,
1271 ep->desc.bInterval * 8, 3, 10);
1272}
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
1283 struct usb_host_endpoint *ep)
1284{
1285 unsigned int interval = 0;
1286
1287 switch (udev->speed) {
1288 case USB_SPEED_HIGH:
1289
1290 if (usb_endpoint_xfer_control(&ep->desc) ||
1291 usb_endpoint_xfer_bulk(&ep->desc)) {
1292 interval = xhci_parse_microframe_interval(udev, ep);
1293 break;
1294 }
1295
1296
1297 case USB_SPEED_SUPER:
1298 if (usb_endpoint_xfer_int(&ep->desc) ||
1299 usb_endpoint_xfer_isoc(&ep->desc)) {
1300 interval = xhci_parse_exponent_interval(udev, ep);
1301 }
1302 break;
1303
1304 case USB_SPEED_FULL:
1305 if (usb_endpoint_xfer_isoc(&ep->desc)) {
1306 interval = xhci_parse_exponent_interval(udev, ep);
1307 break;
1308 }
1309
1310
1311
1312
1313
1314
1315 case USB_SPEED_LOW:
1316 if (usb_endpoint_xfer_int(&ep->desc) ||
1317 usb_endpoint_xfer_isoc(&ep->desc)) {
1318
1319 interval = xhci_parse_frame_interval(udev, ep);
1320 }
1321 break;
1322
1323 default:
1324 BUG();
1325 }
1326 return EP_INTERVAL(interval);
1327}
1328
1329
1330
1331
1332
1333
1334static u32 xhci_get_endpoint_mult(struct usb_device *udev,
1335 struct usb_host_endpoint *ep)
1336{
1337 if (udev->speed != USB_SPEED_SUPER ||
1338 !usb_endpoint_xfer_isoc(&ep->desc))
1339 return 0;
1340 return ep->ss_ep_comp.bmAttributes;
1341}
1342
1343static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep)
1344{
1345 int in;
1346 u32 type;
1347
1348 in = usb_endpoint_dir_in(&ep->desc);
1349 if (usb_endpoint_xfer_control(&ep->desc)) {
1350 type = EP_TYPE(CTRL_EP);
1351 } else if (usb_endpoint_xfer_bulk(&ep->desc)) {
1352 if (in)
1353 type = EP_TYPE(BULK_IN_EP);
1354 else
1355 type = EP_TYPE(BULK_OUT_EP);
1356 } else if (usb_endpoint_xfer_isoc(&ep->desc)) {
1357 if (in)
1358 type = EP_TYPE(ISOC_IN_EP);
1359 else
1360 type = EP_TYPE(ISOC_OUT_EP);
1361 } else if (usb_endpoint_xfer_int(&ep->desc)) {
1362 if (in)
1363 type = EP_TYPE(INT_IN_EP);
1364 else
1365 type = EP_TYPE(INT_OUT_EP);
1366 } else {
1367 type = 0;
1368 }
1369 return type;
1370}
1371
1372
1373
1374
1375
1376static u32 xhci_get_max_esit_payload(struct usb_device *udev,
1377 struct usb_host_endpoint *ep)
1378{
1379 int max_burst;
1380 int max_packet;
1381
1382
1383 if (usb_endpoint_xfer_control(&ep->desc) ||
1384 usb_endpoint_xfer_bulk(&ep->desc))
1385 return 0;
1386
1387 if (udev->speed == USB_SPEED_SUPER)
1388 return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
1389
1390 max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
1391 max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
1392
1393 return max_packet * (max_burst + 1);
1394}
1395
1396
1397
1398
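/*
 * Fill in an endpoint's input context before a configure-endpoint command:
 * allocate a transfer ring (falling back to the ring cache if the allocation
 * fails), then set the interval, mult, error count, max packet size, max
 * burst and max ESIT payload fields from the endpoint descriptor and the
 * device's speed.
 */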
1399int xhci_endpoint_init(struct xhci_hcd *xhci,
1400 struct xhci_virt_device *virt_dev,
1401 struct usb_device *udev,
1402 struct usb_host_endpoint *ep,
1403 gfp_t mem_flags)
1404{
1405 unsigned int ep_index;
1406 struct xhci_ep_ctx *ep_ctx;
1407 struct xhci_ring *ep_ring;
1408 unsigned int max_packet;
1409 unsigned int max_burst;
1410 enum xhci_ring_type type;
1411 u32 max_esit_payload;
1412 u32 endpoint_type;
1413
1414 ep_index = xhci_get_endpoint_index(&ep->desc);
1415 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1416
1417 endpoint_type = xhci_get_endpoint_type(ep);
1418 if (!endpoint_type)
1419 return -EINVAL;
1420 ep_ctx->ep_info2 = cpu_to_le32(endpoint_type);
1421
1422 type = usb_endpoint_type(&ep->desc);
1423
1424 virt_dev->eps[ep_index].new_ring =
1425 xhci_ring_alloc(xhci, 2, 1, type, mem_flags);
1426 if (!virt_dev->eps[ep_index].new_ring) {
1427
1428 if (virt_dev->num_rings_cached == 0)
1429 return -ENOMEM;
1430 virt_dev->num_rings_cached--;
1431 virt_dev->eps[ep_index].new_ring =
1432 virt_dev->ring_cache[virt_dev->num_rings_cached];
1433 virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
1434 xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
1435 1, type);
1436 }
1437 virt_dev->eps[ep_index].skip = false;
1438 ep_ring = virt_dev->eps[ep_index].new_ring;
1439 ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state);
1440
1441 ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep)
1442 | EP_MULT(xhci_get_endpoint_mult(udev, ep)));
1443
1444
1445
1446
1447
1448
1449 if (!usb_endpoint_xfer_isoc(&ep->desc))
1450 ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(3));
1451 else
1452 ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(0));
1453
1454
1455 max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
1456 max_burst = 0;
1457 switch (udev->speed) {
1458 case USB_SPEED_SUPER:
1459
1460 max_burst = ep->ss_ep_comp.bMaxBurst;
1461 break;
1462 case USB_SPEED_HIGH:
1463
1464 if (usb_endpoint_xfer_bulk(&ep->desc))
1465 max_packet = 512;
1466
1467
1468
1469 if (usb_endpoint_xfer_isoc(&ep->desc) ||
1470 usb_endpoint_xfer_int(&ep->desc)) {
1471 max_burst = (usb_endpoint_maxp(&ep->desc)
1472 & 0x1800) >> 11;
1473 }
1474 break;
1475 case USB_SPEED_FULL:
1476 case USB_SPEED_LOW:
1477 break;
1478 default:
1479 BUG();
1480 }
1481 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) |
1482 MAX_BURST(max_burst));
1483 max_esit_payload = xhci_get_max_esit_payload(udev, ep);
1484 ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
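	/*
	 * The average TRB length is a scheduling hint for the host
	 * controller.  Per the xHCI 1.0 spec, control endpoints should
	 * report 8 here; for other endpoint types the max ESIT payload is
	 * used as a rough estimate.
	 */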
1504 if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
1505 ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
1506 else
1507 ep_ctx->tx_info |=
1508 cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload));
1509
1510
1511 return 0;
1512}
1513
1514void xhci_endpoint_zero(struct xhci_hcd *xhci,
1515 struct xhci_virt_device *virt_dev,
1516 struct usb_host_endpoint *ep)
1517{
1518 unsigned int ep_index;
1519 struct xhci_ep_ctx *ep_ctx;
1520
1521 ep_index = xhci_get_endpoint_index(&ep->desc);
1522 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1523
1524 ep_ctx->ep_info = 0;
1525 ep_ctx->ep_info2 = 0;
1526 ep_ctx->deq = 0;
1527 ep_ctx->tx_info = 0;
1528
1529
1530
1531}
1532
1533void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
1534{
1535 bw_info->ep_interval = 0;
1536 bw_info->mult = 0;
1537 bw_info->num_packets = 0;
1538 bw_info->max_packet_size = 0;
1539 bw_info->type = 0;
1540 bw_info->max_esit_payload = 0;
1541}
1542
1543void xhci_update_bw_info(struct xhci_hcd *xhci,
1544 struct xhci_container_ctx *in_ctx,
1545 struct xhci_input_control_ctx *ctrl_ctx,
1546 struct xhci_virt_device *virt_dev)
1547{
1548 struct xhci_bw_info *bw_info;
1549 struct xhci_ep_ctx *ep_ctx;
1550 unsigned int ep_type;
1551 int i;
1552
1553 for (i = 1; i < 31; ++i) {
1554 bw_info = &virt_dev->eps[i].bw_info;
1555
1556
1557
1558
1559
1560
1561 if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
1562
1563 xhci_clear_endpoint_bw_info(bw_info);
1564 continue;
1565 }
1566
1567 if (EP_IS_ADDED(ctrl_ctx, i)) {
1568 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
1569 ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
1570
1571
1572 if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
1573 ep_type != ISOC_IN_EP &&
1574 ep_type != INT_IN_EP)
1575 continue;
1576
1577
1578 bw_info->ep_interval = CTX_TO_EP_INTERVAL(
1579 le32_to_cpu(ep_ctx->ep_info));
1580
1581
1582
1583
1584 bw_info->mult = CTX_TO_EP_MULT(
1585 le32_to_cpu(ep_ctx->ep_info)) + 1;
1586 bw_info->num_packets = CTX_TO_MAX_BURST(
1587 le32_to_cpu(ep_ctx->ep_info2)) + 1;
1588 bw_info->max_packet_size = MAX_PACKET_DECODED(
1589 le32_to_cpu(ep_ctx->ep_info2));
1590 bw_info->type = ep_type;
1591 bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
1592 le32_to_cpu(ep_ctx->tx_info));
1593 }
1594 }
1595}
1596
1597
1598
1599
1600
1601void xhci_endpoint_copy(struct xhci_hcd *xhci,
1602 struct xhci_container_ctx *in_ctx,
1603 struct xhci_container_ctx *out_ctx,
1604 unsigned int ep_index)
1605{
1606 struct xhci_ep_ctx *out_ep_ctx;
1607 struct xhci_ep_ctx *in_ep_ctx;
1608
1609 out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1610 in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
1611
1612 in_ep_ctx->ep_info = out_ep_ctx->ep_info;
1613 in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
1614 in_ep_ctx->deq = out_ep_ctx->deq;
1615 in_ep_ctx->tx_info = out_ep_ctx->tx_info;
1616}
1617
1618
1619
1620
1621
1622
1623void xhci_slot_copy(struct xhci_hcd *xhci,
1624 struct xhci_container_ctx *in_ctx,
1625 struct xhci_container_ctx *out_ctx)
1626{
1627 struct xhci_slot_ctx *in_slot_ctx;
1628 struct xhci_slot_ctx *out_slot_ctx;
1629
1630 in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1631 out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);
1632
1633 in_slot_ctx->dev_info = out_slot_ctx->dev_info;
1634 in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
1635 in_slot_ctx->tt_info = out_slot_ctx->tt_info;
1636 in_slot_ctx->dev_state = out_slot_ctx->dev_state;
1637}
1638
1639
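/*
 * The controller may require scratchpad buffers (HCS_MAX_SCRATCHPAD) for its
 * own internal use.  Allocate an array of DMA addresses, point DCBAA entry 0
 * at it, and back each entry with one page-sized DMA-coherent buffer.
 */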
1640static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
1641{
1642 int i;
1643 struct device *dev = xhci_to_hcd(xhci)->self.controller;
1644 int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
1645
1646 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1647 "Allocating %d scratchpad buffers", num_sp);
1648
1649 if (!num_sp)
1650 return 0;
1651
1652 xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
1653 if (!xhci->scratchpad)
1654 goto fail_sp;
1655
1656 xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
1657 num_sp * sizeof(u64),
1658 &xhci->scratchpad->sp_dma, flags);
1659 if (!xhci->scratchpad->sp_array)
1660 goto fail_sp2;
1661
1662 xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
1663 if (!xhci->scratchpad->sp_buffers)
1664 goto fail_sp3;
1665
1666 xhci->scratchpad->sp_dma_buffers =
1667 kzalloc(sizeof(dma_addr_t) * num_sp, flags);
1668
1669 if (!xhci->scratchpad->sp_dma_buffers)
1670 goto fail_sp4;
1671
1672 xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
1673 for (i = 0; i < num_sp; i++) {
1674 dma_addr_t dma;
1675 void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
1676 flags);
1677 if (!buf)
1678 goto fail_sp5;
1679
1680 xhci->scratchpad->sp_array[i] = dma;
1681 xhci->scratchpad->sp_buffers[i] = buf;
1682 xhci->scratchpad->sp_dma_buffers[i] = dma;
1683 }
1684
1685 return 0;
1686
1687 fail_sp5:
1688 for (i = i - 1; i >= 0; i--) {
1689 dma_free_coherent(dev, xhci->page_size,
1690 xhci->scratchpad->sp_buffers[i],
1691 xhci->scratchpad->sp_dma_buffers[i]);
1692 }
1693 kfree(xhci->scratchpad->sp_dma_buffers);
1694
1695 fail_sp4:
1696 kfree(xhci->scratchpad->sp_buffers);
1697
1698 fail_sp3:
1699 dma_free_coherent(dev, num_sp * sizeof(u64),
1700 xhci->scratchpad->sp_array,
1701 xhci->scratchpad->sp_dma);
1702
1703 fail_sp2:
1704 kfree(xhci->scratchpad);
1705 xhci->scratchpad = NULL;
1706
1707 fail_sp:
1708 return -ENOMEM;
1709}
1710
1711static void scratchpad_free(struct xhci_hcd *xhci)
1712{
1713 int num_sp;
1714 int i;
1715 struct device *dev = xhci_to_hcd(xhci)->self.controller;
1716
1717 if (!xhci->scratchpad)
1718 return;
1719
1720 num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
1721
1722 for (i = 0; i < num_sp; i++) {
1723 dma_free_coherent(dev, xhci->page_size,
1724 xhci->scratchpad->sp_buffers[i],
1725 xhci->scratchpad->sp_dma_buffers[i]);
1726 }
1727 kfree(xhci->scratchpad->sp_dma_buffers);
1728 kfree(xhci->scratchpad->sp_buffers);
1729 dma_free_coherent(dev, num_sp * sizeof(u64),
1730 xhci->scratchpad->sp_array,
1731 xhci->scratchpad->sp_dma);
1732 kfree(xhci->scratchpad);
1733 xhci->scratchpad = NULL;
1734}
1735
1736struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
1737 bool allocate_in_ctx, bool allocate_completion,
1738 gfp_t mem_flags)
1739{
1740 struct xhci_command *command;
1741
1742 command = kzalloc(sizeof(*command), mem_flags);
1743 if (!command)
1744 return NULL;
1745
1746 if (allocate_in_ctx) {
1747 command->in_ctx =
1748 xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
1749 mem_flags);
1750 if (!command->in_ctx) {
1751 kfree(command);
1752 return NULL;
1753 }
1754 }
1755
1756 if (allocate_completion) {
1757 command->completion =
1758 kzalloc(sizeof(struct completion), mem_flags);
1759 if (!command->completion) {
1760 xhci_free_container_ctx(xhci, command->in_ctx);
1761 kfree(command);
1762 return NULL;
1763 }
1764 init_completion(command->completion);
1765 }
1766
1767 command->status = 0;
1768 INIT_LIST_HEAD(&command->cmd_list);
1769 return command;
1770}
1771
1772void xhci_urb_free_priv(struct urb_priv *urb_priv)
1773{
1774 if (urb_priv) {
1775 kfree(urb_priv->td[0]);
1776 kfree(urb_priv);
1777 }
1778}
1779
1780void xhci_free_command(struct xhci_hcd *xhci,
1781 struct xhci_command *command)
1782{
1783 xhci_free_container_ctx(xhci,
1784 command->in_ctx);
1785 kfree(command->completion);
1786 kfree(command);
1787}
1788
1789void xhci_mem_cleanup(struct xhci_hcd *xhci)
1790{
1791 struct device *dev = xhci_to_hcd(xhci)->self.controller;
1792 int size;
1793 int i, j, num_ports;
1794
1795 del_timer_sync(&xhci->cmd_timer);
1796
1797
1798 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
1799 if (xhci->erst.entries)
1800 dma_free_coherent(dev, size,
1801 xhci->erst.entries, xhci->erst.erst_dma_addr);
1802 xhci->erst.entries = NULL;
1803 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
1804 if (xhci->event_ring)
1805 xhci_ring_free(xhci, xhci->event_ring);
1806 xhci->event_ring = NULL;
1807 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
1808
1809 if (xhci->lpm_command)
1810 xhci_free_command(xhci, xhci->lpm_command);
1811 xhci->lpm_command = NULL;
1812 if (xhci->cmd_ring)
1813 xhci_ring_free(xhci, xhci->cmd_ring);
1814 xhci->cmd_ring = NULL;
1815 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
1816 xhci_cleanup_command_queue(xhci);
1817
1818 num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1819 for (i = 0; i < num_ports && xhci->rh_bw; i++) {
1820 struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
1821 for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
1822 struct list_head *ep = &bwt->interval_bw[j].endpoints;
1823 while (!list_empty(ep))
1824 list_del_init(ep->next);
1825 }
1826 }
1827
1828 for (i = 1; i < MAX_HC_SLOTS; ++i)
1829 xhci_free_virt_device(xhci, i);
1830
1831 if (xhci->segment_pool)
1832 dma_pool_destroy(xhci->segment_pool);
1833 xhci->segment_pool = NULL;
1834 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");
1835
1836 if (xhci->device_pool)
1837 dma_pool_destroy(xhci->device_pool);
1838 xhci->device_pool = NULL;
1839 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");
1840
1841 if (xhci->small_streams_pool)
1842 dma_pool_destroy(xhci->small_streams_pool);
1843 xhci->small_streams_pool = NULL;
1844 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1845 "Freed small stream array pool");
1846
1847 if (xhci->medium_streams_pool)
1848 dma_pool_destroy(xhci->medium_streams_pool);
1849 xhci->medium_streams_pool = NULL;
1850 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1851 "Freed medium stream array pool");
1852
1853 if (xhci->dcbaa)
1854 dma_free_coherent(dev, sizeof(*xhci->dcbaa),
1855 xhci->dcbaa, xhci->dcbaa->dma);
1856 xhci->dcbaa = NULL;
1857
1858 scratchpad_free(xhci);
1859
1860 if (!xhci->rh_bw)
1861 goto no_bw;
1862
1863 for (i = 0; i < num_ports; i++) {
1864 struct xhci_tt_bw_info *tt, *n;
1865 list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
1866 list_del(&tt->tt_list);
1867 kfree(tt);
1868 }
1869 }
1870
1871no_bw:
1872 xhci->cmd_ring_reserved_trbs = 0;
1873 xhci->num_usb2_ports = 0;
1874 xhci->num_usb3_ports = 0;
1875 xhci->num_active_eps = 0;
1876 kfree(xhci->usb2_ports);
1877 kfree(xhci->usb3_ports);
1878 kfree(xhci->port_array);
1879 kfree(xhci->rh_bw);
1880 kfree(xhci->ext_caps);
1881
1882 xhci->page_size = 0;
1883 xhci->page_shift = 0;
1884 xhci->bus_state[0].bus_suspended = 0;
1885 xhci->bus_state[1].bus_suspended = 0;
1886}
1887
1888static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
1889 struct xhci_segment *input_seg,
1890 union xhci_trb *start_trb,
1891 union xhci_trb *end_trb,
1892 dma_addr_t input_dma,
1893 struct xhci_segment *result_seg,
1894 char *test_name, int test_number)
1895{
1896 unsigned long long start_dma;
1897 unsigned long long end_dma;
1898 struct xhci_segment *seg;
1899
1900 start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
1901 end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);
1902
1903 seg = trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, false);
1904 if (seg != result_seg) {
1905 xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
1906 test_name, test_number);
1907 xhci_warn(xhci, "Tested TRB math w/ seg %p and "
1908 "input DMA 0x%llx\n",
1909 input_seg,
1910 (unsigned long long) input_dma);
1911 xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
1912 "ending TRB %p (0x%llx DMA)\n",
1913 start_trb, start_dma,
1914 end_trb, end_dma);
1915 xhci_warn(xhci, "Expected seg %p, got seg %p\n",
1916 result_seg, seg);
1917 trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma,
1918 true);
1919 return -1;
1920 }
1921 return 0;
1922}
1923
1924
1925static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
1926{
1927 struct {
1928 dma_addr_t input_dma;
1929 struct xhci_segment *result_seg;
1930 } simple_test_vector [] = {
1931
1932 { 0, NULL },
1933
1934 { xhci->event_ring->first_seg->dma - 16, NULL },
1935
1936 { xhci->event_ring->first_seg->dma - 1, NULL },
1937
1938 { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
1939
1940 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
1941 xhci->event_ring->first_seg },
1942
1943 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
1944
1945 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
1946
1947 { (dma_addr_t) (~0), NULL },
1948 };
1949 struct {
1950 struct xhci_segment *input_seg;
1951 union xhci_trb *start_trb;
1952 union xhci_trb *end_trb;
1953 dma_addr_t input_dma;
1954 struct xhci_segment *result_seg;
1955 } complex_test_vector [] = {
1956
1957 { .input_seg = xhci->event_ring->first_seg,
1958 .start_trb = xhci->event_ring->first_seg->trbs,
1959 .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1960 .input_dma = xhci->cmd_ring->first_seg->dma,
1961 .result_seg = NULL,
1962 },
1963
1964 { .input_seg = xhci->event_ring->first_seg,
1965 .start_trb = xhci->event_ring->first_seg->trbs,
1966 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1967 .input_dma = xhci->cmd_ring->first_seg->dma,
1968 .result_seg = NULL,
1969 },
1970
1971 { .input_seg = xhci->event_ring->first_seg,
1972 .start_trb = xhci->cmd_ring->first_seg->trbs,
1973 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1974 .input_dma = xhci->cmd_ring->first_seg->dma,
1975 .result_seg = NULL,
1976 },
1977
1978 { .input_seg = xhci->event_ring->first_seg,
1979 .start_trb = &xhci->event_ring->first_seg->trbs[0],
1980 .end_trb = &xhci->event_ring->first_seg->trbs[3],
1981 .input_dma = xhci->event_ring->first_seg->dma + 4*16,
1982 .result_seg = NULL,
1983 },
1984
1985 { .input_seg = xhci->event_ring->first_seg,
1986 .start_trb = &xhci->event_ring->first_seg->trbs[3],
1987 .end_trb = &xhci->event_ring->first_seg->trbs[6],
1988 .input_dma = xhci->event_ring->first_seg->dma + 2*16,
1989 .result_seg = NULL,
1990 },
1991
1992 { .input_seg = xhci->event_ring->first_seg,
1993 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
1994 .end_trb = &xhci->event_ring->first_seg->trbs[1],
1995 .input_dma = xhci->event_ring->first_seg->dma + 2*16,
1996 .result_seg = NULL,
1997 },
1998
1999 { .input_seg = xhci->event_ring->first_seg,
2000 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
2001 .end_trb = &xhci->event_ring->first_seg->trbs[1],
2002 .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
2003 .result_seg = NULL,
2004 },
2005
2006 { .input_seg = xhci->event_ring->first_seg,
2007 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
2008 .end_trb = &xhci->event_ring->first_seg->trbs[1],
2009 .input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
2010 .result_seg = NULL,
2011 },
2012 };
2013
2014 unsigned int num_tests;
2015 int i, ret;
2016
2017 num_tests = ARRAY_SIZE(simple_test_vector);
2018 for (i = 0; i < num_tests; i++) {
2019 ret = xhci_test_trb_in_td(xhci,
2020 xhci->event_ring->first_seg,
2021 xhci->event_ring->first_seg->trbs,
2022 &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
2023 simple_test_vector[i].input_dma,
2024 simple_test_vector[i].result_seg,
2025 "Simple", i);
2026 if (ret < 0)
2027 return ret;
2028 }
2029
2030 num_tests = ARRAY_SIZE(complex_test_vector);
2031 for (i = 0; i < num_tests; i++) {
2032 ret = xhci_test_trb_in_td(xhci,
2033 complex_test_vector[i].input_seg,
2034 complex_test_vector[i].start_trb,
2035 complex_test_vector[i].end_trb,
2036 complex_test_vector[i].input_dma,
2037 complex_test_vector[i].result_seg,
2038 "Complex", i);
2039 if (ret < 0)
2040 return ret;
2041 }
2042 xhci_dbg(xhci, "TRB math tests passed.\n");
2043 return 0;
2044}
2045
2046static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
2047{
2048 u64 temp;
2049 dma_addr_t deq;
2050
2051 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2052 xhci->event_ring->dequeue);
2053 if (deq == 0 && !in_interrupt())
2054 xhci_warn(xhci, "WARN something wrong with SW event ring "
2055 "dequeue ptr.\n");
2056
2057 temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2058 temp &= ERST_PTR_MASK;
2059
2060
2061
2062 temp &= ~ERST_EHB;
2063 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2064 "// Write event ring dequeue pointer, "
2065 "preserving EHB bit");
2066 xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
2067 &xhci->ir_set->erst_dequeue);
2068}
2069
2070static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
2071 __le32 __iomem *addr, u8 major_revision, int max_caps)
2072{
2073 u32 temp, port_offset, port_count;
2074 int i;
2075
2076 if (major_revision > 0x03) {
2077 xhci_warn(xhci, "Ignoring unknown port speed, "
2078 "Ext Cap %p, revision = 0x%x\n",
2079 addr, major_revision);
2080
2081 return;
2082 }
2083
2084
2085 temp = readl(addr + 2);
2086 port_offset = XHCI_EXT_PORT_OFF(temp);
2087 port_count = XHCI_EXT_PORT_COUNT(temp);
2088 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2089 "Ext Cap %p, port offset = %u, "
2090 "count = %u, revision = 0x%x",
2091 addr, port_offset, port_count, major_revision);
2092
2093 if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
2094
2095 return;
2096
2097
2098 if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
2099 xhci->ext_caps[xhci->num_ext_caps++] = temp;
2100
2101
2102 if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
2103 (temp & XHCI_L1C)) {
2104 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2105 "xHCI 0.96: support USB2 software lpm");
2106 xhci->sw_lpm_support = 1;
2107 }
2108
2109 if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
2110 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2111 "xHCI 1.0: support USB2 software lpm");
2112 xhci->sw_lpm_support = 1;
2113 if (temp & XHCI_HLC) {
2114 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2115 "xHCI 1.0: support USB2 hardware lpm");
2116 xhci->hw_lpm_support = 1;
2117 }
2118 }
2119
2120 port_offset--;
2121 for (i = port_offset; i < (port_offset + port_count); i++) {
2122
2123 if (xhci->port_array[i] != 0) {
2124 xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
2125 " port %u\n", addr, i);
2126 xhci_warn(xhci, "Port was marked as USB %u, "
2127 "duplicated as USB %u\n",
2128 xhci->port_array[i], major_revision);
2129
2130
2131
2132 if (xhci->port_array[i] != major_revision &&
2133 xhci->port_array[i] != DUPLICATE_ENTRY) {
2134 if (xhci->port_array[i] == 0x03)
2135 xhci->num_usb3_ports--;
2136 else
2137 xhci->num_usb2_ports--;
2138 xhci->port_array[i] = DUPLICATE_ENTRY;
2139 }
2140
2141 continue;
2142 }
2143 xhci->port_array[i] = major_revision;
2144 if (major_revision == 0x03)
2145 xhci->num_usb3_ports++;
2146 else
2147 xhci->num_usb2_ports++;
2148 }
2149
2150}
2151
2152
2153
2154
2155
2156
2157
2158
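/*
 * Scan the extended capability list for Supported Protocol capabilities and
 * sort the root hub ports into the USB 2.0 and USB 3.0 port arrays.  This
 * also sets up the per-port bandwidth tables and records each protocol
 * capability register in xhci->ext_caps.
 */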
2159static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
2160{
2161 __le32 __iomem *addr, *tmp_addr;
2162 u32 offset, tmp_offset;
2163 unsigned int num_ports;
2164 int i, j, port_index;
2165 int cap_count = 0;
2166
2167 addr = &xhci->cap_regs->hcc_params;
2168 offset = XHCI_HCC_EXT_CAPS(readl(addr));
2169 if (offset == 0) {
2170 xhci_err(xhci, "No Extended Capability registers, "
2171 "unable to set up roothub.\n");
2172 return -ENODEV;
2173 }
2174
2175 num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
2176 xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags);
2177 if (!xhci->port_array)
2178 return -ENOMEM;
2179
2180 xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags);
2181 if (!xhci->rh_bw)
2182 return -ENOMEM;
2183 for (i = 0; i < num_ports; i++) {
2184 struct xhci_interval_bw_table *bw_table;
2185
2186 INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
2187 bw_table = &xhci->rh_bw[i].bw_table;
2188 for (j = 0; j < XHCI_MAX_INTERVAL; j++)
2189 INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
2190 }
2191
2192
2193
2194
2195
2196
2197 addr = &xhci->cap_regs->hc_capbase + offset;
2198
2199 tmp_addr = addr;
2200 tmp_offset = offset;
2201
2202
2203 do {
2204 u32 cap_id;
2205 cap_id = readl(tmp_addr);
2206 if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
2207 cap_count++;
2208 tmp_offset = XHCI_EXT_CAPS_NEXT(cap_id);
2209 tmp_addr += tmp_offset;
2210 } while (tmp_offset);
2211
2212 xhci->ext_caps = kzalloc(sizeof(*xhci->ext_caps) * cap_count, flags);
2213 if (!xhci->ext_caps)
2214 return -ENOMEM;
2215
2216 while (1) {
2217 u32 cap_id;
2218
2219 cap_id = readl(addr);
2220 if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
2221 xhci_add_in_port(xhci, num_ports, addr,
2222 (u8) XHCI_EXT_PORT_MAJOR(cap_id),
2223 cap_count);
2224 offset = XHCI_EXT_CAPS_NEXT(cap_id);
2225 if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports)
2226 == num_ports)
2227 break;
2228
2229
2230
2231
2232 addr += offset;
2233 }
2234
2235 if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) {
2236 xhci_warn(xhci, "No ports on the roothubs?\n");
2237 return -ENODEV;
2238 }
2239 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2240 "Found %u USB 2.0 ports and %u USB 3.0 ports.",
2241 xhci->num_usb2_ports, xhci->num_usb3_ports);
2242
2243
2244
2245
2246 if (xhci->num_usb3_ports > 15) {
2247 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2248 "Limiting USB 3.0 roothub ports to 15.");
2249 xhci->num_usb3_ports = 15;
2250 }
2251 if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
2252 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2253 "Limiting USB 2.0 roothub ports to %u.",
2254 USB_MAXCHILDREN);
2255 xhci->num_usb2_ports = USB_MAXCHILDREN;
2256 }
2257
2258
2259
2260
2261
	if (xhci->num_usb2_ports) {
		xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)*
				xhci->num_usb2_ports, flags);
		if (!xhci->usb2_ports)
			return -ENOMEM;

		port_index = 0;
		for (i = 0; i < num_ports; i++) {
			if (xhci->port_array[i] == 0x03 ||
					xhci->port_array[i] == 0 ||
					xhci->port_array[i] == DUPLICATE_ENTRY)
				continue;

			xhci->usb2_ports[port_index] =
				&xhci->op_regs->port_status_base +
				NUM_PORT_REGS*i;
			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
					"USB 2.0 port at index %u, addr = %p",
					i, xhci->usb2_ports[port_index]);
			port_index++;
			if (port_index == xhci->num_usb2_ports)
				break;
		}
	}
	if (xhci->num_usb3_ports) {
		xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
				xhci->num_usb3_ports, flags);
		if (!xhci->usb3_ports)
			return -ENOMEM;

		port_index = 0;
		for (i = 0; i < num_ports; i++)
			if (xhci->port_array[i] == 0x03) {
				xhci->usb3_ports[port_index] =
					&xhci->op_regs->port_status_base +
					NUM_PORT_REGS*i;
				xhci_dbg_trace(xhci, trace_xhci_dbg_init,
						"USB 3.0 port at index %u, addr = %p",
						i, xhci->usb3_ports[port_index]);
				port_index++;
				if (port_index == xhci->num_usb3_ports)
					break;
			}
	}
	return 0;
}

int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t dma;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int val, val2;
	u64 val_64;
	struct xhci_segment *seg;
	u32 page_size, temp;
	int i;

	INIT_LIST_HEAD(&xhci->cmd_list);
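
	/* Set up the timer that aborts commands that take too long. */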
	setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
		    (unsigned long)xhci);

	page_size = readl(&xhci->op_regs->page_size);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Supported page size register = 0x%x", page_size);
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	if (i < 16)
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Supported page size of %iK", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
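
	/* The driver always uses 4K pages, whatever the controller reports. */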
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"HCD page size set to %iK", xhci->page_size / 1024);
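
	/* Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the maximum number of slots the HC can handle,
	 * preserving the other bits in the register.
	 */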
	val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// xHC can handle at most %d device slots.", val);
	val2 = readl(&xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting Max device slots reg = 0x%x.", val);
	writel(val, &xhci->op_regs->config_reg);
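
	/* Allocate the Device Context Base Address Array and point the
	 * controller at it.  dma_alloc_coherent() returns page-aligned
	 * memory, which satisfies the 64-byte alignment the spec requires.
	 */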
	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
			GFP_KERNEL);
	if (!xhci->dcbaa)
		goto fail;
	memset(xhci->dcbaa, 0, sizeof(*xhci->dcbaa));
	xhci->dcbaa->dma = dma;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Device context base array address = 0x%llx (DMA), %p (virt)",
			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
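
	/* Initialize the ring segment pool.  Segments are both sized and
	 * aligned to TRB_SEGMENT_SIZE, so the trb_address_map radix tree
	 * (keyed by dma >> TRB_SEGMENT_SHIFT) can map any TRB address back to
	 * its ring; the pool boundary keeps a segment within one HC page.
	 */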
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
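
	/* Pool for device and input contexts.  2112 bytes covers the largest
	 * case, an input context with 64-byte context entries (input control
	 * context + slot context + 31 endpoint contexts), 64-byte aligned.
	 */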
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			2112, 64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;
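
	/* Linear stream context arrays only need 16-byte alignment; keep
	 * separate pools for the small (256 byte) and medium (1KB) sizes.
	 */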
	xhci->small_streams_pool =
		dma_pool_create("xHCI 256 byte stream ctx arrays",
			dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
	xhci->medium_streams_pool =
		dma_pool_create("xHCI 1KB stream ctx arrays",
			dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
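
	/* Stream context arrays larger than MEDIUM_STREAM_ARRAY_SIZE are
	 * allocated with dma_alloc_coherent() when needed, not from a pool.
	 */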
	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
		goto fail;
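
	/* Set up the command ring, with a single segment for now. */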
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Allocated command ring at %p", xhci->cmd_ring);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
			(unsigned long long)xhci->cmd_ring->first_seg->dma);
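
	/* Set the command ring address in the Command Ring Control register,
	 * preserving the reserved bits and setting the ring cycle state.
	 */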
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting command ring address to 0x%016llx",
			(unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags);
	if (!xhci->lpm_command)
		goto fail;
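
	/* Reserve one TRB on the command ring for the LPM command allocated
	 * above, so it can always be queued.
	 */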
	xhci->cmd_ring_reserved_trbs++;

	val = readl(&xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Doorbell array is located at offset 0x%x from cap regs base addr",
			val);
	xhci->dba = (void __iomem *) xhci->cap_regs + val;
	xhci_dbg_regs(xhci);
	xhci_print_run_regs(xhci);

	xhci->ir_set = &xhci->run_regs->ir_set[0];
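
	/* Event ring setup: allocate the ring itself plus the Event Ring
	 * Segment Table (ERST) that describes its segments to the hardware.
	 */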
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
						flags);
	if (!xhci->event_ring)
		goto fail;
	if (xhci_check_trb_in_td_math(xhci) < 0)
		goto fail;

	xhci->erst.entries = dma_alloc_coherent(dev,
			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
			GFP_KERNEL);
	if (!xhci->erst.entries)
		goto fail;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Allocated event ring segment table at 0x%llx",
			(unsigned long long)dma);

	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
	xhci->erst.num_entries = ERST_NUM_SEGS;
	xhci->erst.erst_dma_addr = dma;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
			xhci->erst.num_entries,
			xhci->erst.entries,
			(unsigned long long)xhci->erst.erst_dma_addr);
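
	/* Point each ERST entry at one event ring segment and record the
	 * segment size in TRBs.
	 */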
	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
		entry->seg_addr = cpu_to_le64(seg->dma);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}
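
	/* Write the number of ERST entries to interrupter 0, preserving the
	 * reserved bits of the register.
	 */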
	val = readl(&xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Write ERST size = %i to ir_set 0 (some bits preserved)",
			val);
	writel(val, &xhci->ir_set->erst_size);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set ERST entries to point to event ring.");

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set ERST base address for ir_set 0 = 0x%llx",
			(unsigned long long)xhci->erst.erst_dma_addr);
	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
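
	/* Set the event ring dequeue address for interrupter 0. */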
	xhci_set_hc_event_deq(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Wrote ERST address to ir_set 0.");
	xhci_print_ir_set(xhci, 0);
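
	/* Initialize the remaining software state: per-slot virtual device
	 * pointers and roothub bus-state bookkeeping.
	 */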
	init_completion(&xhci->addr_dev);
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		xhci->devs[i] = NULL;
	for (i = 0; i < USB_MAXCHILDREN; ++i) {
		xhci->bus_state[0].resume_done[i] = 0;
		xhci->bus_state[1].resume_done[i] = 0;
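		/* Only the USB 2.0 roothub (bus_state[1]) ever waits on
		 * rexit_done, so only those completions are initialized.
		 */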
		init_completion(&xhci->bus_state[1].rexit_done[i]);
	}

	if (scratchpad_alloc(xhci, flags))
		goto fail;
	if (xhci_setup_port_arrays(xhci, flags))
		goto fail;
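
	/* Enable Function Wake device notifications, which USB 3.0 devices
	 * use to signal remote wakeup from U3 (device suspend).
	 */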
	temp = readl(&xhci->op_regs->dev_notification);
	temp &= ~DEV_NOTE_MASK;
	temp |= DEV_NOTE_FWAKE;
	writel(temp, &xhci->op_regs->dev_notification);

	return 0;

fail:
	xhci_warn(xhci, "Couldn't initialize memory\n");
	xhci_halt(xhci);
	xhci_reset(xhci);
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}