// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-debugfs.h"

/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
					       unsigned int cycle_state,
					       unsigned int max_packet,
					       gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t dma;
	int i;

	seg = kzalloc(sizeof *seg, flags);
	if (!seg)
		return NULL;

	seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}

	if (max_packet) {
		seg->bounce_buf = kzalloc(max_packet, flags);
		if (!seg->bounce_buf) {
			dma_pool_free(xhci->segment_pool, seg->trbs, dma);
			kfree(seg);
			return NULL;
		}
	}
	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
	if (cycle_state == 0) {
		for (i = 0; i < TRBS_PER_SEGMENT; i++)
			seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
	}
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}

static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (seg->trbs) {
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	kfree(seg->bounce_buf);
	kfree(seg);
}

static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
					struct xhci_segment *first)
{
	struct xhci_segment *seg;

	seg = first->next;
	while (seg != first) {
		struct xhci_segment *next = seg->next;

		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first);
}

/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
			       struct xhci_segment *next, enum xhci_ring_type type)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (type != TYPE_EVENT) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
			cpu_to_le64(next->dma);

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		/* Always set the chain bit with 0.95 hardware */
		/* Set chain bit for isoc rings on AMD 0.96 host */
		if (xhci_link_trb_quirk(xhci) ||
		    (type == TYPE_ISOC &&
		     (xhci->quirks & XHCI_AMD_0x96_HOST)))
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}

/*
 * Link the ring to the new segments.
 * Set Toggle Cycle for the new ring if needed.
 */
static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *first, struct xhci_segment *last,
		unsigned int num_segs)
{
	struct xhci_segment *next;

	if (!ring || !first || !last)
		return;

	next = ring->enq_seg->next;
	xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
	xhci_link_segments(xhci, last, next, ring->type);
	ring->num_segs += num_segs;
	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;

	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
		/* Move the Toggle Cycle bit from the old last segment to the new one */
		ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
			&= ~cpu_to_le32(LINK_TOGGLE);
		last->trbs[TRBS_PER_SEGMENT-1].link.control
			|= cpu_to_le32(LINK_TOGGLE);
		ring->last_seg = last;
	}
}

/*
 * The xHC reports TRB DMA addresses in transfer events without telling us
 * which stream ring they came from, so a radix tree is used to map the upper
 * bits of a TRB's DMA address (i.e. its segment) back to the ring that owns
 * that segment.  Every segment that belongs to a stream ring must be inserted
 * into the tree, and removed again when the segment is freed, or lookups done
 * during completion and cancellation handling would resolve to the wrong ring.
 */
static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
		struct xhci_ring *ring,
		struct xhci_segment *seg,
		gfp_t mem_flags)
{
	unsigned long key;
	int ret;

	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
	/* Skip any segments that were already added. */
	if (radix_tree_lookup(trb_address_map, key))
		return 0;

	ret = radix_tree_maybe_preload(mem_flags);
	if (ret)
		return ret;
	ret = radix_tree_insert(trb_address_map,
			key, ring);
	radix_tree_preload_end();
	return ret;
}

static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
		struct xhci_segment *seg)
{
	unsigned long key;

	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
	if (radix_tree_lookup(trb_address_map, key))
		radix_tree_delete(trb_address_map, key);
}

static int xhci_update_stream_segment_mapping(
		struct radix_tree_root *trb_address_map,
		struct xhci_ring *ring,
		struct xhci_segment *first_seg,
		struct xhci_segment *last_seg,
		gfp_t mem_flags)
{
	struct xhci_segment *seg;
	struct xhci_segment *failed_seg;
	int ret;

	if (WARN_ON_ONCE(trb_address_map == NULL))
		return 0;

	seg = first_seg;
	do {
		ret = xhci_insert_segment_mapping(trb_address_map,
				ring, seg, mem_flags);
		if (ret)
			goto remove_streams;
		if (seg == last_seg)
			return 0;
		seg = seg->next;
	} while (seg != first_seg);

	return 0;

remove_streams:
	failed_seg = seg;
	seg = first_seg;
	do {
		xhci_remove_segment_mapping(trb_address_map, seg);
		if (seg == failed_seg)
			return ret;
		seg = seg->next;
	} while (seg != first_seg);

	return ret;
}

static void xhci_remove_stream_mapping(struct xhci_ring *ring)
{
	struct xhci_segment *seg;

	if (WARN_ON_ONCE(ring->trb_address_map == NULL))
		return;

	seg = ring->first_seg;
	do {
		xhci_remove_segment_mapping(ring->trb_address_map, seg);
		seg = seg->next;
	} while (seg != ring->first_seg);
}

static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
{
	return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
			ring->first_seg, ring->last_seg, mem_flags);
}

/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	if (!ring)
		return;

	trace_xhci_ring_free(ring);

	if (ring->first_seg) {
		if (ring->type == TYPE_STREAM)
			xhci_remove_stream_mapping(ring);
		xhci_free_segments_for_ring(xhci, ring->first_seg);
	}

	kfree(ring);
}

static void xhci_initialize_ring_info(struct xhci_ring *ring,
				      unsigned int cycle_state)
{
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/*
	 * The ring is initialized to 0. The producer must write 1 to the cycle
	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
	 *
	 * New rings are initialized with cycle state equal to 1; if we are
	 * handling ring expansion, set the cycle state equal to the old ring.
	 */
	ring->cycle_state = cycle_state;

	/*
	 * Each segment has a link TRB, and leave one TRB unused so the enqueue
	 * pointer never catches up with the dequeue pointer; otherwise a full
	 * ring would be indistinguishable from an empty one.
	 */
	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}

/* Allocate segments and link them for a ring */
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
		struct xhci_segment **first, struct xhci_segment **last,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
	struct xhci_segment *prev;

	prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
	if (!prev)
		return -ENOMEM;
	num_segs--;

	*first = prev;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
		if (!next) {
			prev = *first;
			while (prev) {
				next = prev->next;
				xhci_segment_free(xhci, prev);
				prev = next;
			}
			return -ENOMEM;
		}
		xhci_link_segments(xhci, prev, next, type);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, *first, type);
	*last = prev;

	return 0;
}

/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
	struct xhci_ring *ring;
	int ret;

	ring = kzalloc(sizeof *(ring), flags);
	if (!ring)
		return NULL;

	ring->num_segs = num_segs;
	ring->bounce_buf_len = max_packet;
	INIT_LIST_HEAD(&ring->td_list);
	ring->type = type;
	if (num_segs == 0)
		return ring;

	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
			&ring->last_seg, num_segs, cycle_state, type,
			max_packet, flags);
	if (ret)
		goto fail;

	/* Only event ring does not use link TRB */
	if (type != TYPE_EVENT) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
			cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring, cycle_state);
	trace_xhci_ring_alloc(ring);
	return ring;

fail:
	kfree(ring);
	return NULL;
}

void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
			struct xhci_virt_device *virt_dev,
			unsigned int ep_index)
{
	xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
	virt_dev->eps[ep_index].ring = NULL;
}

/*
 * Expand an existing ring.
 * Allocate a new ring which has same segment numbers and link the two rings.
 */
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
				unsigned int num_trbs, gfp_t flags)
{
	struct xhci_segment *first;
	struct xhci_segment *last;
	unsigned int num_segs;
	unsigned int num_segs_needed;
	int ret;

	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
				(TRBS_PER_SEGMENT - 1);

	/* Allocate number of segments we needed, or double the ring size */
	num_segs = ring->num_segs > num_segs_needed ?
			ring->num_segs : num_segs_needed;

	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
			num_segs, ring->cycle_state, ring->type,
			ring->bounce_buf_len, flags);
	if (ret)
		return -ENOMEM;

	if (ring->type == TYPE_STREAM)
		ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
						ring, first, last, flags);
	if (ret) {
		struct xhci_segment *next;

		do {
			next = first->next;
			xhci_segment_free(xhci, first);
			if (first == last)
				break;
			first = next;
		} while (true);
		return ret;
	}

	xhci_link_rings(xhci, ring, first, last, num_segs);
	trace_xhci_ring_expansion(ring);
	xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
			"ring expansion succeeded, now has %d segments",
			ring->num_segs);

	return 0;
}

struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
						    int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx;

	if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
		return NULL;

	ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}

void xhci_free_container_ctx(struct xhci_hcd *xhci,
			     struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}

struct xhci_input_control_ctx *xhci_get_input_control_ctx(
					      struct xhci_container_ctx *ctx)
{
	if (ctx->type != XHCI_CTX_TYPE_INPUT)
		return NULL;

	return (struct xhci_input_control_ctx *)ctx->bytes;
}

struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
					struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}

struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}

/***************** Streams structures manipulation *************************/

static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;

	if (size > MEDIUM_STREAM_ARRAY_SIZE)
		dma_free_coherent(dev, size,
				stream_ctx, dma);
	else if (size <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_free(xhci->small_streams_pool,
				stream_ctx, dma);
	else
		return dma_pool_free(xhci->medium_streams_pool,
				stream_ctx, dma);
}

/*
 * The stream context array for each endpoint with bulk streams enabled can
 * vary in size, based on:
 *  - how many streams the endpoint supports,
 *  - the maximum primary stream array size the host controller supports,
 *  - and how many streams the device driver asks for.
 *
 * The stream context array must be a power of 2, and can be as small as
 * 64 bytes or as large as 1MB.
 */
static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs, dma_addr_t *dma,
		gfp_t mem_flags)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;

	if (size > MEDIUM_STREAM_ARRAY_SIZE)
		return dma_alloc_coherent(dev, size,
				dma, mem_flags);
	else if (size <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_alloc(xhci->small_streams_pool,
				mem_flags, dma);
	else
		return dma_pool_alloc(xhci->medium_streams_pool,
				mem_flags, dma);
}

struct xhci_ring *xhci_dma_to_transfer_ring(
		struct xhci_virt_ep *ep,
		u64 address)
{
	if (ep->ep_state & EP_HAS_STREAMS)
		return radix_tree_lookup(&ep->stream_info->trb_address_map,
				address >> TRB_SEGMENT_SHIFT);
	return ep->ring;
}

struct xhci_ring *xhci_stream_id_to_ring(
		struct xhci_virt_device *dev,
		unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep = &dev->eps[ep_index];

	if (stream_id == 0)
		return ep->ring;
	if (!ep->stream_info)
		return NULL;

	if (stream_id > ep->stream_info->num_streams)
		return NULL;
	return ep->stream_info->stream_rings[stream_id];
}

/*
 * Change an endpoint's internal structure so it supports stream IDs.  The
 * number of requested streams includes stream 0, which cannot be used by
 * device drivers.
 *
 * The number of stream contexts in the stream context array may be bigger than
 * the number of streams the driver wants to use.  This is because the number
 * of stream context array entries must be a power of two.
 */
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		unsigned int num_streams,
		unsigned int max_packet, gfp_t mem_flags)
{
	struct xhci_stream_info *stream_info;
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	u64 addr;
	int ret;

	xhci_dbg(xhci, "Allocating %u streams and %u stream context array entries.\n",
			num_streams, num_stream_ctxs);
	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
		return NULL;
	}
	xhci->cmd_ring_reserved_trbs++;

	stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
	if (!stream_info)
		goto cleanup_trbs;

	stream_info->num_streams = num_streams;
	stream_info->num_stream_ctxs = num_stream_ctxs;

	/* Initialize the array of virtual pointers to stream rings. */
	stream_info->stream_rings = kzalloc(
			sizeof(struct xhci_ring *)*num_streams,
			mem_flags);
	if (!stream_info->stream_rings)
		goto cleanup_info;

	/* Initialize the array of DMA addresses for the stream rings for the HW. */
	stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
			num_stream_ctxs, &stream_info->ctx_array_dma,
			mem_flags);
	if (!stream_info->stream_ctx_array)
		goto cleanup_ctx;
	memset(stream_info->stream_ctx_array, 0,
			sizeof(struct xhci_stream_ctx)*num_stream_ctxs);

	/* Allocate everything needed to free the stream rings later */
	stream_info->free_streams_command =
		xhci_alloc_command_with_ctx(xhci, true, mem_flags);
	if (!stream_info->free_streams_command)
		goto cleanup_ctx;

	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);

	/*
	 * Allocate rings for all the streams that the driver will use, and add
	 * their segment DMA addresses to the radix tree.  Stream 0 is reserved.
	 */
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		stream_info->stream_rings[cur_stream] =
			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
					mem_flags);
		cur_ring = stream_info->stream_rings[cur_stream];
		if (!cur_ring)
			goto cleanup_rings;
		cur_ring->stream_id = cur_stream;
		cur_ring->trb_address_map = &stream_info->trb_address_map;
		/* Set deq ptr, cycle bit, and stream context type */
		addr = cur_ring->first_seg->dma |
			SCT_FOR_CTX(SCT_PRI_TR) |
			cur_ring->cycle_state;
		stream_info->stream_ctx_array[cur_stream].stream_ring =
			cpu_to_le64(addr);
		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
				cur_stream, (unsigned long long) addr);

		ret = xhci_update_stream_mapping(cur_ring, mem_flags);
		if (ret) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
			goto cleanup_rings;
		}
	}
	/*
	 * Only a linear (primary) stream array is used; stream IDs 1 through
	 * num_streams - 1 are now backed by rings and present in the radix
	 * tree, and stream 0 remains reserved.
	 */
	return stream_info;

cleanup_rings:
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
	kfree(stream_info->stream_rings);
cleanup_info:
	kfree(stream_info);
cleanup_trbs:
	xhci->cmd_ring_reserved_trbs--;
	return NULL;
}

/*
 * Sets the MaxPStreams field and the Linear Stream Array field.
 * Sets the dequeue pointer to the stream context array.
 */
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_stream_info *stream_info)
{
	u32 max_primary_streams;
	/*
	 * MaxPStreams is the number of stream context array entries, not the
	 * number we're actually using.  Must be in 2^(MaxPstreams + 1) format.
	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc.
	 */
	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			"Setting number of stream ctx array entries to %u",
			1 << (max_primary_streams + 1));
	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
				       | EP_HAS_LSA);
	ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
}

/*
 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
 * not at the beginning of the ring).
 */
void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx,
		struct xhci_virt_ep *ep)
{
	dma_addr_t addr;

	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
	ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
}

/*
 * Frees all stream contexts associated with the endpoint.
 *
 * Caller should fix the endpoint context streams fields afterwards.
 */
void xhci_free_stream_info(struct xhci_hcd *xhci,
		struct xhci_stream_info *stream_info)
{
	int cur_stream;
	struct xhci_ring *cur_ring;

	if (!stream_info)
		return;

	for (cur_stream = 1; cur_stream < stream_info->num_streams;
			cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
	xhci->cmd_ring_reserved_trbs--;
	if (stream_info->stream_ctx_array)
		xhci_free_stream_ctx(xhci,
				stream_info->num_stream_ctxs,
				stream_info->stream_ctx_array,
				stream_info->ctx_array_dma);

	kfree(stream_info->stream_rings);
	kfree(stream_info);
}

/***************** Device context manipulation *************************/

static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	timer_setup(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog,
		    0);
	ep->xhci = xhci;
}

static void xhci_free_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int slot_id)
{
	struct list_head *tt_list_head;
	struct xhci_tt_bw_info *tt_info, *next;
	bool slot_found = false;

	/*
	 * If the device never made it past the Set Address stage, there may
	 * not be a valid real port number to look up the TT list with.
	 */
	if (virt_dev->real_port == 0 ||
			virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "Bad real port.\n");
		return;
	}

	tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		/* multi-TT hubs have more than one entry for the same slot */
		if (tt_info->slot_id == slot_id) {
			slot_found = true;
			list_del(&tt_info->tt_list);
			kfree(tt_info);
		} else if (slot_found) {
			break;
		}
	}
}

int xhci_alloc_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_tt_bw_info *tt_info;
	unsigned int num_ports;
	int i, j;

	if (!tt->multi)
		num_ports = 1;
	else
		num_ports = hdev->maxchild;

	for (i = 0; i < num_ports; i++, tt_info++) {
		struct xhci_interval_bw_table *bw_table;

		tt_info = kzalloc(sizeof(*tt_info), mem_flags);
		if (!tt_info)
			goto free_tts;
		INIT_LIST_HEAD(&tt_info->tt_list);
		list_add(&tt_info->tt_list,
				&xhci->rh_bw[virt_dev->real_port - 1].tts);
		tt_info->slot_id = virt_dev->udev->slot_id;
		if (tt->multi)
			tt_info->ttport = i+1;
		bw_table = &tt_info->bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}
	return 0;

free_tts:
	xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
	return -ENOMEM;
}

/*
 * All the xhci_tds in the ring's TD list should be freed at this point.
 * Should be called with xhci->lock held if there is any chance the TT lists
 * will be manipulated by the configure endpoint, allocate device, or update
 * hub functions while this function is removing the TT entries from the list.
 */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;
	int old_active_eps = 0;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];

	trace_xhci_free_virt_device(dev);

	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
	if (!dev)
		return;

	if (dev->tt_info)
		old_active_eps = dev->tt_info->active_eps;

	for (i = 0; i < 31; i++) {
		if (dev->eps[i].ring)
			xhci_ring_free(xhci, dev->eps[i].ring);
		if (dev->eps[i].stream_info)
			xhci_free_stream_info(xhci,
					dev->eps[i].stream_info);
		/*
		 * Endpoints on the TT/root port lists should have been removed
		 * when this device was deconfigured; warn if not.
		 */
		if (!list_empty(&dev->eps[i].bw_endpoint_list))
			xhci_warn(xhci, "Slot %u endpoint %u not removed from BW list!\n",
					slot_id, i);
	}
	/* If this is a hub, free the TT(s) from the TT list */
	xhci_free_tt_info(xhci, dev, slot_id);
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, dev, old_active_eps);

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	if (dev->udev && dev->udev->slot_id)
		dev->udev->slot_id = 0;
	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}

/*
 * Free a virt_device structure.
 * If the virt_device added a tt_info (a hub) and has children pointing to
 * that tt_info, then free the child first. Recursive.
 * We can't rely on udev at this point to find child-parent relationships.
 */
void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *vdev;
	struct list_head *tt_list_head;
	struct xhci_tt_bw_info *tt_info, *next;
	int i;

	vdev = xhci->devs[slot_id];
	if (!vdev)
		return;

	if (vdev->real_port == 0 ||
			vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "Bad vdev->real_port.\n");
		goto out;
	}

	tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		/* is this a hub device that added a tt_info to the tts list */
		if (tt_info->slot_id == slot_id) {
			/* search for any child devices using this tt_info */
			for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
				vdev = xhci->devs[i];
				if (vdev && (vdev->tt_info == tt_info))
					xhci_free_virt_devices_depth_first(
						xhci, i);
			}
		}
	}
out:
	/* we are now at a leaf device */
	xhci_debugfs_remove_slot(xhci, slot_id);
	xhci_free_virt_device(xhci, slot_id);
}

int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	dev = kzalloc(sizeof(*dev), flags);
	if (!dev)
		return 0;

	/* Allocate the (output) device context that will be used in the HC. */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->in_ctx->dma);

	/* Initialize the cancellation list and watchdog timers for each ep */
	for (i = 0; i < 31; i++) {
		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
	}

	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
	if (!dev->eps[0].ring)
		goto fail;

	dev->udev = udev;

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
			slot_id,
			&xhci->dcbaa->dev_context_ptrs[slot_id],
			le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));

	trace_xhci_alloc_virt_device(dev);

	xhci->devs[slot_id] = dev;

	return 1;
fail:
	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);
	kfree(dev);

	return 0;
}

void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_ring *ep_ring;

	virt_dev = xhci->devs[udev->slot_id];
	ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
	ep_ring = virt_dev->eps[0].ring;
	/*
	 * We don't keep track of the dequeue pointer very well after a
	 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
	 * host to our enqueue pointer.  This should only be called after a
	 * configured device has reset, so all control transfers should have
	 * been completed or cancelled before the reset.
	 */
	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
							ep_ring->enqueue)
				   | ep_ring->cycle_state);
}

/*
 * The xHCI roothub may have ports of differing speeds in any order in the port
 * status registers.
 *
 * The xHCI hardware wants to know the roothub port number that the USB device
 * is attached to (or the roothub port its ancestor hub is attached to).  All
 * we know is the index of that port under either the USB 2.0 or the USB 3.0
 * roothub, but that doesn't give us the real index into the HW port status
 * registers.  Call xhci_find_raw_port_number() to get the real index.
 */
static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct usb_device *top_dev;
	struct usb_hcd *hcd;

	if (udev->speed >= USB_SPEED_SUPER)
		hcd = xhci->shared_hcd;
	else
		hcd = xhci->main_hcd;

	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;

	return xhci_find_raw_port_number(hcd, top_dev->portnum);
}

1084
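/* Setup an xHCI virtual device for a Set Address command */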
1085int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
1086{
1087 struct xhci_virt_device *dev;
1088 struct xhci_ep_ctx *ep0_ctx;
1089 struct xhci_slot_ctx *slot_ctx;
1090 u32 port_num;
1091 u32 max_packets;
1092 struct usb_device *top_dev;
1093
1094 dev = xhci->devs[udev->slot_id];
1095
1096 if (udev->slot_id == 0 || !dev) {
1097 xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
1098 udev->slot_id);
1099 return -EINVAL;
1100 }
1101 ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
1102 slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
1103
1104
1105 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
1106 switch (udev->speed) {
1107 case USB_SPEED_SUPER_PLUS:
1108 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
1109 max_packets = MAX_PACKET(512);
1110 break;
1111 case USB_SPEED_SUPER:
1112 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
1113 max_packets = MAX_PACKET(512);
1114 break;
1115 case USB_SPEED_HIGH:
1116 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
1117 max_packets = MAX_PACKET(64);
1118 break;
1119
1120 case USB_SPEED_FULL:
1121 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
1122 max_packets = MAX_PACKET(64);
1123 break;
1124 case USB_SPEED_LOW:
1125 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
1126 max_packets = MAX_PACKET(8);
1127 break;
1128 case USB_SPEED_WIRELESS:
1129 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
1130 return -EINVAL;
1131 break;
1132 default:
1133
1134 return -EINVAL;
1135 }
1136
1137 port_num = xhci_find_real_port_number(xhci, udev);
1138 if (!port_num)
1139 return -EINVAL;
1140 slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
1141
1142 for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
1143 top_dev = top_dev->parent)
1144 ;
1145 dev->fake_port = top_dev->portnum;
1146 dev->real_port = port_num;
1147 xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
1148 xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
1149
1150
1151
1152
1153
1154
1155
1156 if (!udev->tt || !udev->tt->hub->parent) {
1157 dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
1158 } else {
1159 struct xhci_root_port_bw_info *rh_bw;
1160 struct xhci_tt_bw_info *tt_bw;
1161
1162 rh_bw = &xhci->rh_bw[port_num - 1];
1163
1164 list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
1165 if (tt_bw->slot_id != udev->tt->hub->slot_id)
1166 continue;
1167
1168 if (!dev->udev->tt->multi ||
1169 (udev->tt->multi &&
1170 tt_bw->ttport == dev->udev->ttport)) {
1171 dev->bw_table = &tt_bw->bw_table;
1172 dev->tt_info = tt_bw;
1173 break;
1174 }
1175 }
1176 if (!dev->tt_info)
1177 xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
1178 }
1179
1180
1181 if (udev->tt && udev->tt->hub->parent) {
1182 slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
1183 (udev->ttport << 8));
1184 if (udev->tt->multi)
1185 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
1186 }
1187 xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
1188 xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
1189
1190
1191
1192 ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
1193
1194
1195 ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
1196 max_packets);
1197
1198 ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
1199 dev->eps[0].ring->cycle_state);
1200
1201 trace_xhci_setup_addressable_virt_device(dev);
1202
1203
1204
1205 return 0;
1206}
1207
1208
1209
1210
1211
1212
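/*
 * Convert an interval expressed as 2^(bInterval - 1) == interval into
 * a straight exponent value 2^n == interval.
 */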
1213static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
1214 struct usb_host_endpoint *ep)
1215{
1216 unsigned int interval;
1217
1218 interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
1219 if (interval != ep->desc.bInterval - 1)
1220 dev_warn(&udev->dev,
1221 "ep %#x - rounding interval to %d %sframes\n",
1222 ep->desc.bEndpointAddress,
1223 1 << interval,
1224 udev->speed == USB_SPEED_FULL ? "" : "micro");
1225
1226 if (udev->speed == USB_SPEED_FULL) {
1227
1228
1229
1230
1231
1232 interval += 3;
1233 }
1234
1235 return interval;
1236}
1237
1238
1239
1240
1241
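/*
 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
 * microframes, rounded down to nearest power of 2.
 */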
1242static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
1243 struct usb_host_endpoint *ep, unsigned int desc_interval,
1244 unsigned int min_exponent, unsigned int max_exponent)
1245{
1246 unsigned int interval;
1247
1248 interval = fls(desc_interval) - 1;
1249 interval = clamp_val(interval, min_exponent, max_exponent);
1250 if ((1 << interval) != desc_interval)
1251 dev_dbg(&udev->dev,
1252 "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
1253 ep->desc.bEndpointAddress,
1254 1 << interval,
1255 desc_interval);
1256
1257 return interval;
1258}
1259
1260static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
1261 struct usb_host_endpoint *ep)
1262{
1263 if (ep->desc.bInterval == 0)
1264 return 0;
1265 return xhci_microframes_to_exponent(udev, ep,
1266 ep->desc.bInterval, 0, 15);
1267}
1268
1269
1270static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
1271 struct usb_host_endpoint *ep)
1272{
1273 return xhci_microframes_to_exponent(udev, ep,
1274 ep->desc.bInterval * 8, 3, 10);
1275}
1276
1277
1278
1279
1280
1281
1282
1283
1284
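/*
 * Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval
 * field is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
 * is set to 0.
 */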
1285static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
1286 struct usb_host_endpoint *ep)
1287{
1288 unsigned int interval = 0;
1289
1290 switch (udev->speed) {
1291 case USB_SPEED_HIGH:
1292
1293 if (usb_endpoint_xfer_control(&ep->desc) ||
1294 usb_endpoint_xfer_bulk(&ep->desc)) {
1295 interval = xhci_parse_microframe_interval(udev, ep);
1296 break;
1297 }
1298
1299
1300 case USB_SPEED_SUPER_PLUS:
1301 case USB_SPEED_SUPER:
1302 if (usb_endpoint_xfer_int(&ep->desc) ||
1303 usb_endpoint_xfer_isoc(&ep->desc)) {
1304 interval = xhci_parse_exponent_interval(udev, ep);
1305 }
1306 break;
1307
1308 case USB_SPEED_FULL:
1309 if (usb_endpoint_xfer_isoc(&ep->desc)) {
1310 interval = xhci_parse_exponent_interval(udev, ep);
1311 break;
1312 }
1313
1314
1315
1316
1317
1318
1319
1320 case USB_SPEED_LOW:
1321 if (usb_endpoint_xfer_int(&ep->desc) ||
1322 usb_endpoint_xfer_isoc(&ep->desc)) {
1323
1324 interval = xhci_parse_frame_interval(udev, ep);
1325 }
1326 break;
1327
1328 default:
1329 BUG();
1330 }
1331 return interval;
1332}
1333
1334
1335
1336
1337
1338
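/*
 * The "Mult" field in the endpoint context is only set for SuperSpeed isoc
 * endpoints.  High speed endpoint descriptors can define "the number of
 * additional transaction opportunities per microframe", but that goes in the
 * Max Burst endpoint context field instead.
 */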
1339static u32 xhci_get_endpoint_mult(struct usb_device *udev,
1340 struct usb_host_endpoint *ep)
1341{
1342 if (udev->speed < USB_SPEED_SUPER ||
1343 !usb_endpoint_xfer_isoc(&ep->desc))
1344 return 0;
1345 return ep->ss_ep_comp.bmAttributes;
1346}
1347
1348static u32 xhci_get_endpoint_max_burst(struct usb_device *udev,
1349 struct usb_host_endpoint *ep)
1350{
1351
1352 if (udev->speed >= USB_SPEED_SUPER)
1353 return ep->ss_ep_comp.bMaxBurst;
1354
1355 if (udev->speed == USB_SPEED_HIGH &&
1356 (usb_endpoint_xfer_isoc(&ep->desc) ||
1357 usb_endpoint_xfer_int(&ep->desc)))
1358 return usb_endpoint_maxp_mult(&ep->desc) - 1;
1359
1360 return 0;
1361}
1362
1363static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep)
1364{
1365 int in;
1366
1367 in = usb_endpoint_dir_in(&ep->desc);
1368
1369 switch (usb_endpoint_type(&ep->desc)) {
1370 case USB_ENDPOINT_XFER_CONTROL:
1371 return CTRL_EP;
1372 case USB_ENDPOINT_XFER_BULK:
1373 return in ? BULK_IN_EP : BULK_OUT_EP;
1374 case USB_ENDPOINT_XFER_ISOC:
1375 return in ? ISOC_IN_EP : ISOC_OUT_EP;
1376 case USB_ENDPOINT_XFER_INT:
1377 return in ? INT_IN_EP : INT_OUT_EP;
1378 }
1379 return 0;
1380}
1381
1382
1383
1384
1385
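/*
 * Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and mult size.
 */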
1386static u32 xhci_get_max_esit_payload(struct usb_device *udev,
1387 struct usb_host_endpoint *ep)
1388{
1389 int max_burst;
1390 int max_packet;
1391
1392
1393 if (usb_endpoint_xfer_control(&ep->desc) ||
1394 usb_endpoint_xfer_bulk(&ep->desc))
1395 return 0;
1396
1397
1398 if ((udev->speed >= USB_SPEED_SUPER_PLUS) &&
1399 USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes))
1400 return le32_to_cpu(ep->ssp_isoc_ep_comp.dwBytesPerInterval);
1401
1402 else if (udev->speed >= USB_SPEED_SUPER)
1403 return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
1404
1405 max_packet = usb_endpoint_maxp(&ep->desc);
1406 max_burst = usb_endpoint_maxp_mult(&ep->desc);
1407
1408 return max_packet * max_burst;
1409}
1410
1411
1412
1413
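/*
 * Set up an endpoint with one ring segment and fill in the input endpoint
 * context fields (interval, mult, max packet, max burst, error count, dequeue
 * pointer).  Stream rings are not allocated here.
 */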
1414int xhci_endpoint_init(struct xhci_hcd *xhci,
1415 struct xhci_virt_device *virt_dev,
1416 struct usb_device *udev,
1417 struct usb_host_endpoint *ep,
1418 gfp_t mem_flags)
1419{
1420 unsigned int ep_index;
1421 struct xhci_ep_ctx *ep_ctx;
1422 struct xhci_ring *ep_ring;
1423 unsigned int max_packet;
1424 enum xhci_ring_type ring_type;
1425 u32 max_esit_payload;
1426 u32 endpoint_type;
1427 unsigned int max_burst;
1428 unsigned int interval;
1429 unsigned int mult;
1430 unsigned int avg_trb_len;
1431 unsigned int err_count = 0;
1432
1433 ep_index = xhci_get_endpoint_index(&ep->desc);
1434 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1435
1436 endpoint_type = xhci_get_endpoint_type(ep);
1437 if (!endpoint_type)
1438 return -EINVAL;
1439
1440 ring_type = usb_endpoint_type(&ep->desc);
1441
1442
1443
1444
1445
1446
1447
1448 max_esit_payload = xhci_get_max_esit_payload(udev, ep);
1449 interval = xhci_get_endpoint_interval(udev, ep);
1450
1451
1452 if (usb_endpoint_xfer_int(&ep->desc) ||
1453 usb_endpoint_xfer_isoc(&ep->desc)) {
1454 if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) &&
1455 udev->speed >= USB_SPEED_HIGH &&
1456 interval >= 7) {
1457 interval = 6;
1458 }
1459 }
1460
1461 mult = xhci_get_endpoint_mult(udev, ep);
1462 max_packet = usb_endpoint_maxp(&ep->desc);
1463 max_burst = xhci_get_endpoint_max_burst(udev, ep);
1464 avg_trb_len = max_esit_payload;
1465
1466
1467
1468
1469 if (!usb_endpoint_xfer_isoc(&ep->desc))
1470 err_count = 3;
1471
1472 if (usb_endpoint_xfer_bulk(&ep->desc) && udev->speed == USB_SPEED_HIGH)
1473 max_packet = 512;
1474
1475 if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
1476 avg_trb_len = 8;
1477
1478 if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2))
1479 mult = 0;
1480
1481
1482 virt_dev->eps[ep_index].new_ring =
1483 xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
1484 if (!virt_dev->eps[ep_index].new_ring)
1485 return -ENOMEM;
1486
1487 virt_dev->eps[ep_index].skip = false;
1488 ep_ring = virt_dev->eps[ep_index].new_ring;
1489
1490
1491 ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
1492 EP_INTERVAL(interval) |
1493 EP_MULT(mult));
1494 ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
1495 MAX_PACKET(max_packet) |
1496 MAX_BURST(max_burst) |
1497 ERROR_COUNT(err_count));
1498 ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma |
1499 ep_ring->cycle_state);
1500
1501 ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
1502 EP_AVG_TRB_LENGTH(avg_trb_len));
1503
1504 return 0;
1505}
1506
1507void xhci_endpoint_zero(struct xhci_hcd *xhci,
1508 struct xhci_virt_device *virt_dev,
1509 struct usb_host_endpoint *ep)
1510{
1511 unsigned int ep_index;
1512 struct xhci_ep_ctx *ep_ctx;
1513
1514 ep_index = xhci_get_endpoint_index(&ep->desc);
1515 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1516
1517 ep_ctx->ep_info = 0;
1518 ep_ctx->ep_info2 = 0;
1519 ep_ctx->deq = 0;
1520 ep_ctx->tx_info = 0;
1521
1522
1523
1524}
1525
1526void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
1527{
1528 bw_info->ep_interval = 0;
1529 bw_info->mult = 0;
1530 bw_info->num_packets = 0;
1531 bw_info->max_packet_size = 0;
1532 bw_info->type = 0;
1533 bw_info->max_esit_payload = 0;
1534}
1535
1536void xhci_update_bw_info(struct xhci_hcd *xhci,
1537 struct xhci_container_ctx *in_ctx,
1538 struct xhci_input_control_ctx *ctrl_ctx,
1539 struct xhci_virt_device *virt_dev)
1540{
1541 struct xhci_bw_info *bw_info;
1542 struct xhci_ep_ctx *ep_ctx;
1543 unsigned int ep_type;
1544 int i;
1545
1546 for (i = 1; i < 31; i++) {
1547 bw_info = &virt_dev->eps[i].bw_info;
1548
1549
1550
1551
1552
1553
1554 if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
1555
1556 xhci_clear_endpoint_bw_info(bw_info);
1557 continue;
1558 }
1559
1560 if (EP_IS_ADDED(ctrl_ctx, i)) {
1561 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
1562 ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
1563
1564
1565 if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
1566 ep_type != ISOC_IN_EP &&
1567 ep_type != INT_IN_EP)
1568 continue;
1569
1570
1571 bw_info->ep_interval = CTX_TO_EP_INTERVAL(
1572 le32_to_cpu(ep_ctx->ep_info));
1573
1574
1575
1576
1577 bw_info->mult = CTX_TO_EP_MULT(
1578 le32_to_cpu(ep_ctx->ep_info)) + 1;
1579 bw_info->num_packets = CTX_TO_MAX_BURST(
1580 le32_to_cpu(ep_ctx->ep_info2)) + 1;
1581 bw_info->max_packet_size = MAX_PACKET_DECODED(
1582 le32_to_cpu(ep_ctx->ep_info2));
1583 bw_info->type = ep_type;
1584 bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
1585 le32_to_cpu(ep_ctx->tx_info));
1586 }
1587 }
1588}
1589
1590
1591
1592
1593
1594void xhci_endpoint_copy(struct xhci_hcd *xhci,
1595 struct xhci_container_ctx *in_ctx,
1596 struct xhci_container_ctx *out_ctx,
1597 unsigned int ep_index)
1598{
1599 struct xhci_ep_ctx *out_ep_ctx;
1600 struct xhci_ep_ctx *in_ep_ctx;
1601
1602 out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1603 in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
1604
1605 in_ep_ctx->ep_info = out_ep_ctx->ep_info;
1606 in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
1607 in_ep_ctx->deq = out_ep_ctx->deq;
1608 in_ep_ctx->tx_info = out_ep_ctx->tx_info;
1609}
1610
1611
1612
1613
1614
1615
1616void xhci_slot_copy(struct xhci_hcd *xhci,
1617 struct xhci_container_ctx *in_ctx,
1618 struct xhci_container_ctx *out_ctx)
1619{
1620 struct xhci_slot_ctx *in_slot_ctx;
1621 struct xhci_slot_ctx *out_slot_ctx;
1622
1623 in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1624 out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);
1625
1626 in_slot_ctx->dev_info = out_slot_ctx->dev_info;
1627 in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
1628 in_slot_ctx->tt_info = out_slot_ctx->tt_info;
1629 in_slot_ctx->dev_state = out_slot_ctx->dev_state;
1630}
1631
1632
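/*
 * Allocate the scratchpad buffer array plus one page-sized buffer per entry
 * for the xHC's private use; entry 0 of the DCBAA points to the array.
 */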
1633static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
1634{
1635 int i;
1636 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1637 int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
1638
1639 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1640 "Allocating %d scratchpad buffers", num_sp);
1641
1642 if (!num_sp)
1643 return 0;
1644
1645 xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
1646 if (!xhci->scratchpad)
1647 goto fail_sp;
1648
1649 xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
1650 num_sp * sizeof(u64),
1651 &xhci->scratchpad->sp_dma, flags);
1652 if (!xhci->scratchpad->sp_array)
1653 goto fail_sp2;
1654
1655 xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
1656 if (!xhci->scratchpad->sp_buffers)
1657 goto fail_sp3;
1658
1659 xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
1660 for (i = 0; i < num_sp; i++) {
1661 dma_addr_t dma;
1662 void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma,
1663 flags);
1664 if (!buf)
1665 goto fail_sp4;
1666
1667 xhci->scratchpad->sp_array[i] = dma;
1668 xhci->scratchpad->sp_buffers[i] = buf;
1669 }
1670
1671 return 0;
1672
1673 fail_sp4:
1674 for (i = i - 1; i >= 0; i--) {
1675 dma_free_coherent(dev, xhci->page_size,
1676 xhci->scratchpad->sp_buffers[i],
1677 xhci->scratchpad->sp_array[i]);
1678 }
1679
1680 kfree(xhci->scratchpad->sp_buffers);
1681
1682 fail_sp3:
1683 dma_free_coherent(dev, num_sp * sizeof(u64),
1684 xhci->scratchpad->sp_array,
1685 xhci->scratchpad->sp_dma);
1686
1687 fail_sp2:
1688 kfree(xhci->scratchpad);
1689 xhci->scratchpad = NULL;
1690
1691 fail_sp:
1692 return -ENOMEM;
1693}
1694
1695static void scratchpad_free(struct xhci_hcd *xhci)
1696{
1697 int num_sp;
1698 int i;
1699 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1700
1701 if (!xhci->scratchpad)
1702 return;
1703
1704 num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
1705
1706 for (i = 0; i < num_sp; i++) {
1707 dma_free_coherent(dev, xhci->page_size,
1708 xhci->scratchpad->sp_buffers[i],
1709 xhci->scratchpad->sp_array[i]);
1710 }
1711 kfree(xhci->scratchpad->sp_buffers);
1712 dma_free_coherent(dev, num_sp * sizeof(u64),
1713 xhci->scratchpad->sp_array,
1714 xhci->scratchpad->sp_dma);
1715 kfree(xhci->scratchpad);
1716 xhci->scratchpad = NULL;
1717}
1718
1719struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
1720 bool allocate_completion, gfp_t mem_flags)
1721{
1722 struct xhci_command *command;
1723
1724 command = kzalloc(sizeof(*command), mem_flags);
1725 if (!command)
1726 return NULL;
1727
1728 if (allocate_completion) {
1729 command->completion =
1730 kzalloc(sizeof(struct completion), mem_flags);
1731 if (!command->completion) {
1732 kfree(command);
1733 return NULL;
1734 }
1735 init_completion(command->completion);
1736 }
1737
1738 command->status = 0;
1739 INIT_LIST_HEAD(&command->cmd_list);
1740 return command;
1741}
1742
1743struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
1744 bool allocate_completion, gfp_t mem_flags)
1745{
1746 struct xhci_command *command;
1747
1748 command = xhci_alloc_command(xhci, allocate_completion, mem_flags);
1749 if (!command)
1750 return NULL;
1751
1752 command->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
1753 mem_flags);
1754 if (!command->in_ctx) {
1755 kfree(command->completion);
1756 kfree(command);
1757 return NULL;
1758 }
1759 return command;
1760}
1761
1762void xhci_urb_free_priv(struct urb_priv *urb_priv)
1763{
1764 kfree(urb_priv);
1765}
1766
1767void xhci_free_command(struct xhci_hcd *xhci,
1768 struct xhci_command *command)
1769{
1770 xhci_free_container_ctx(xhci,
1771 command->in_ctx);
1772 kfree(command->completion);
1773 kfree(command);
1774}
1775
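/*
 * Allocate the event ring segment table (ERST) and point each entry at one
 * segment of @evt_ring.  Typically called from xhci_mem_init(), roughly as
 * xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags), before the
 * ERST base and size registers are programmed.
 */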
1776int xhci_alloc_erst(struct xhci_hcd *xhci,
1777 struct xhci_ring *evt_ring,
1778 struct xhci_erst *erst,
1779 gfp_t flags)
1780{
1781 size_t size;
1782 unsigned int val;
1783 struct xhci_segment *seg;
1784 struct xhci_erst_entry *entry;
1785
1786 size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
1787 erst->entries = dma_zalloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
1788 size, &erst->erst_dma_addr, flags);
1789 if (!erst->entries)
1790 return -ENOMEM;
1791
1792 erst->num_entries = evt_ring->num_segs;
1793
1794 seg = evt_ring->first_seg;
1795 for (val = 0; val < evt_ring->num_segs; val++) {
1796 entry = &erst->entries[val];
1797 entry->seg_addr = cpu_to_le64(seg->dma);
1798 entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
1799 entry->rsvd = 0;
1800 seg = seg->next;
1801 }
1802
1803 return 0;
1804}
1805
1806void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
1807{
1808 size_t size;
1809 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1810
1811 size = sizeof(struct xhci_erst_entry) * (erst->num_entries);
1812 if (erst->entries)
1813 dma_free_coherent(dev, size,
1814 erst->entries,
1815 erst->erst_dma_addr);
1816 erst->entries = NULL;
1817}
1818
1819void xhci_mem_cleanup(struct xhci_hcd *xhci)
1820{
1821 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1822 int i, j, num_ports;
1823
1824 cancel_delayed_work_sync(&xhci->cmd_timer);
1825
1826 xhci_free_erst(xhci, &xhci->erst);
1827
1828 if (xhci->event_ring)
1829 xhci_ring_free(xhci, xhci->event_ring);
1830 xhci->event_ring = NULL;
1831 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
1832
1833 if (xhci->lpm_command)
1834 xhci_free_command(xhci, xhci->lpm_command);
1835 xhci->lpm_command = NULL;
1836 if (xhci->cmd_ring)
1837 xhci_ring_free(xhci, xhci->cmd_ring);
1838 xhci->cmd_ring = NULL;
1839 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
1840 xhci_cleanup_command_queue(xhci);
1841
1842 num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1843 for (i = 0; i < num_ports && xhci->rh_bw; i++) {
1844 struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
1845 for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
1846 struct list_head *ep = &bwt->interval_bw[j].endpoints;
1847 while (!list_empty(ep))
1848 list_del_init(ep->next);
1849 }
1850 }
1851
1852 for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
1853 xhci_free_virt_devices_depth_first(xhci, i);
1854
1855 dma_pool_destroy(xhci->segment_pool);
1856 xhci->segment_pool = NULL;
1857 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");
1858
1859 dma_pool_destroy(xhci->device_pool);
1860 xhci->device_pool = NULL;
1861 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");
1862
1863 dma_pool_destroy(xhci->small_streams_pool);
1864 xhci->small_streams_pool = NULL;
1865 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1866 "Freed small stream array pool");
1867
1868 dma_pool_destroy(xhci->medium_streams_pool);
1869 xhci->medium_streams_pool = NULL;
1870 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1871 "Freed medium stream array pool");
1872
1873 if (xhci->dcbaa)
1874 dma_free_coherent(dev, sizeof(*xhci->dcbaa),
1875 xhci->dcbaa, xhci->dcbaa->dma);
1876 xhci->dcbaa = NULL;
1877
1878 scratchpad_free(xhci);
1879
1880 if (!xhci->rh_bw)
1881 goto no_bw;
1882
1883 for (i = 0; i < num_ports; i++) {
1884 struct xhci_tt_bw_info *tt, *n;
1885 list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
1886 list_del(&tt->tt_list);
1887 kfree(tt);
1888 }
1889 }
1890
1891no_bw:
1892 xhci->cmd_ring_reserved_trbs = 0;
1893 xhci->num_usb2_ports = 0;
1894 xhci->num_usb3_ports = 0;
1895 xhci->num_active_eps = 0;
1896 kfree(xhci->usb2_ports);
1897 kfree(xhci->usb3_ports);
1898 kfree(xhci->port_array);
1899 kfree(xhci->rh_bw);
1900 kfree(xhci->ext_caps);
1901
1902 xhci->usb2_ports = NULL;
1903 xhci->usb3_ports = NULL;
1904 xhci->port_array = NULL;
1905 xhci->rh_bw = NULL;
1906 xhci->ext_caps = NULL;
1907
1908 xhci->page_size = 0;
1909 xhci->page_shift = 0;
1910 xhci->bus_state[0].bus_suspended = 0;
1911 xhci->bus_state[1].bus_suspended = 0;
1912}
1913
1914static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
1915 struct xhci_segment *input_seg,
1916 union xhci_trb *start_trb,
1917 union xhci_trb *end_trb,
1918 dma_addr_t input_dma,
1919 struct xhci_segment *result_seg,
1920 char *test_name, int test_number)
1921{
1922 unsigned long long start_dma;
1923 unsigned long long end_dma;
1924 struct xhci_segment *seg;
1925
1926 start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
1927 end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);
1928
1929 seg = trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, false);
1930 if (seg != result_seg) {
1931 xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
1932 test_name, test_number);
1933 xhci_warn(xhci, "Tested TRB math w/ seg %p and "
1934 "input DMA 0x%llx\n",
1935 input_seg,
1936 (unsigned long long) input_dma);
1937 xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
1938 "ending TRB %p (0x%llx DMA)\n",
1939 start_trb, start_dma,
1940 end_trb, end_dma);
1941 xhci_warn(xhci, "Expected seg %p, got seg %p\n",
1942 result_seg, seg);
1943 trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma,
1944 true);
1945 return -1;
1946 }
1947 return 0;
1948}
1949
1950
1951static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
1952{
1953 struct {
1954 dma_addr_t input_dma;
1955 struct xhci_segment *result_seg;
1956 } simple_test_vector [] = {
1957
1958 { 0, NULL },
1959
1960 { xhci->event_ring->first_seg->dma - 16, NULL },
1961
1962 { xhci->event_ring->first_seg->dma - 1, NULL },
1963
1964 { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
1965
1966 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
1967 xhci->event_ring->first_seg },
1968
1969 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
1970
1971 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
1972
1973 { (dma_addr_t) (~0), NULL },
1974 };
1975 struct {
1976 struct xhci_segment *input_seg;
1977 union xhci_trb *start_trb;
1978 union xhci_trb *end_trb;
1979 dma_addr_t input_dma;
1980 struct xhci_segment *result_seg;
1981 } complex_test_vector [] = {
1982
1983 { .input_seg = xhci->event_ring->first_seg,
1984 .start_trb = xhci->event_ring->first_seg->trbs,
1985 .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1986 .input_dma = xhci->cmd_ring->first_seg->dma,
1987 .result_seg = NULL,
1988 },
1989
1990 { .input_seg = xhci->event_ring->first_seg,
1991 .start_trb = xhci->event_ring->first_seg->trbs,
1992 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1993 .input_dma = xhci->cmd_ring->first_seg->dma,
1994 .result_seg = NULL,
1995 },
1996
1997 { .input_seg = xhci->event_ring->first_seg,
1998 .start_trb = xhci->cmd_ring->first_seg->trbs,
1999 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
2000 .input_dma = xhci->cmd_ring->first_seg->dma,
2001 .result_seg = NULL,
2002 },
2003
2004 { .input_seg = xhci->event_ring->first_seg,
2005 .start_trb = &xhci->event_ring->first_seg->trbs[0],
2006 .end_trb = &xhci->event_ring->first_seg->trbs[3],
2007 .input_dma = xhci->event_ring->first_seg->dma + 4*16,
2008 .result_seg = NULL,
2009 },
2010
2011 { .input_seg = xhci->event_ring->first_seg,
2012 .start_trb = &xhci->event_ring->first_seg->trbs[3],
2013 .end_trb = &xhci->event_ring->first_seg->trbs[6],
2014 .input_dma = xhci->event_ring->first_seg->dma + 2*16,
2015 .result_seg = NULL,
2016 },
2017
2018 { .input_seg = xhci->event_ring->first_seg,
2019 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
2020 .end_trb = &xhci->event_ring->first_seg->trbs[1],
2021 .input_dma = xhci->event_ring->first_seg->dma + 2*16,
2022 .result_seg = NULL,
2023 },
2024
2025 { .input_seg = xhci->event_ring->first_seg,
2026 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
2027 .end_trb = &xhci->event_ring->first_seg->trbs[1],
2028 .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
2029 .result_seg = NULL,
2030 },
2031
2032 { .input_seg = xhci->event_ring->first_seg,
2033 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
2034 .end_trb = &xhci->event_ring->first_seg->trbs[1],
2035 .input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
2036 .result_seg = NULL,
2037 },
2038 };
2039
2040 unsigned int num_tests;
2041 int i, ret;
2042
2043 num_tests = ARRAY_SIZE(simple_test_vector);
2044 for (i = 0; i < num_tests; i++) {
2045 ret = xhci_test_trb_in_td(xhci,
2046 xhci->event_ring->first_seg,
2047 xhci->event_ring->first_seg->trbs,
2048 &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
2049 simple_test_vector[i].input_dma,
2050 simple_test_vector[i].result_seg,
2051 "Simple", i);
2052 if (ret < 0)
2053 return ret;
2054 }
2055
2056 num_tests = ARRAY_SIZE(complex_test_vector);
2057 for (i = 0; i < num_tests; i++) {
2058 ret = xhci_test_trb_in_td(xhci,
2059 complex_test_vector[i].input_seg,
2060 complex_test_vector[i].start_trb,
2061 complex_test_vector[i].end_trb,
2062 complex_test_vector[i].input_dma,
2063 complex_test_vector[i].result_seg,
2064 "Complex", i);
2065 if (ret < 0)
2066 return ret;
2067 }
2068 xhci_dbg(xhci, "TRB math tests passed.\n");
2069 return 0;
2070}
2071
2072static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
2073{
2074 u64 temp;
2075 dma_addr_t deq;
2076
2077 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2078 xhci->event_ring->dequeue);
2079 if (deq == 0 && !in_interrupt())
2080 xhci_warn(xhci, "WARN something wrong with SW event ring "
2081 "dequeue ptr.\n");
2082
2083 temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2084 temp &= ERST_PTR_MASK;
2085
2086
2087
2088 temp &= ~ERST_EHB;
2089 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2090 "// Write event ring dequeue pointer, "
2091 "preserving EHB bit");
2092 xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
2093 &xhci->ir_set->erst_dequeue);
2094}
2095
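/*
 * Parse one "Supported Protocol Capability" extended capability: record which
 * roothub (USB 2 or USB 3) owns the ports it describes and note any protocol
 * speed ID (PSI) values it advertises.
 */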
2096static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
2097 __le32 __iomem *addr, int max_caps)
2098{
2099 u32 temp, port_offset, port_count;
2100 int i;
2101 u8 major_revision, minor_revision;
2102 struct xhci_hub *rhub;
2103
2104 temp = readl(addr);
2105 major_revision = XHCI_EXT_PORT_MAJOR(temp);
2106 minor_revision = XHCI_EXT_PORT_MINOR(temp);
2107
2108 if (major_revision == 0x03) {
2109 rhub = &xhci->usb3_rhub;
2110 } else if (major_revision <= 0x02) {
2111 rhub = &xhci->usb2_rhub;
2112 } else {
2113 xhci_warn(xhci, "Ignoring unknown port speed, "
2114 "Ext Cap %p, revision = 0x%x\n",
2115 addr, major_revision);
2116
2117 return;
2118 }
2119 rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
2120
2121 if (rhub->min_rev < minor_revision)
2122 rhub->min_rev = minor_revision;
2123
2124
2125 temp = readl(addr + 2);
2126 port_offset = XHCI_EXT_PORT_OFF(temp);
2127 port_count = XHCI_EXT_PORT_COUNT(temp);
2128 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2129 "Ext Cap %p, port offset = %u, "
2130 "count = %u, revision = 0x%x",
2131 addr, port_offset, port_count, major_revision);
2132
2133 if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
2134
2135 return;
2136
2137 rhub->psi_count = XHCI_EXT_PORT_PSIC(temp);
2138 if (rhub->psi_count) {
2139 rhub->psi = kcalloc(rhub->psi_count, sizeof(*rhub->psi),
2140 GFP_KERNEL);
2141 if (!rhub->psi)
2142 rhub->psi_count = 0;
2143
2144 rhub->psi_uid_count++;
2145 for (i = 0; i < rhub->psi_count; i++) {
2146 rhub->psi[i] = readl(addr + 4 + i);
2147
2148
2149
2150
2151 if (i && (XHCI_EXT_PORT_PSIV(rhub->psi[i]) !=
2152 XHCI_EXT_PORT_PSIV(rhub->psi[i - 1])))
2153 rhub->psi_uid_count++;
2154
2155 xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
2156 XHCI_EXT_PORT_PSIV(rhub->psi[i]),
2157 XHCI_EXT_PORT_PSIE(rhub->psi[i]),
2158 XHCI_EXT_PORT_PLT(rhub->psi[i]),
2159 XHCI_EXT_PORT_PFD(rhub->psi[i]),
2160 XHCI_EXT_PORT_LP(rhub->psi[i]),
2161 XHCI_EXT_PORT_PSIM(rhub->psi[i]));
2162 }
2163 }
2164
2165 if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
2166 xhci->ext_caps[xhci->num_ext_caps++] = temp;
2167
2168
2169 if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
2170 (temp & XHCI_L1C)) {
2171 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2172 "xHCI 0.96: support USB2 software lpm");
2173 xhci->sw_lpm_support = 1;
2174 }
2175
2176 if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
2177 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2178 "xHCI 1.0: support USB2 software lpm");
2179 xhci->sw_lpm_support = 1;
2180 if (temp & XHCI_HLC) {
2181 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2182 "xHCI 1.0: support USB2 hardware lpm");
2183 xhci->hw_lpm_support = 1;
2184 }
2185 }
2186
2187 port_offset--;
2188 for (i = port_offset; i < (port_offset + port_count); i++) {
2189
2190 if (xhci->port_array[i] != 0) {
2191 xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
2192 " port %u\n", addr, i);
2193 xhci_warn(xhci, "Port was marked as USB %u, "
2194 "duplicated as USB %u\n",
2195 xhci->port_array[i], major_revision);
2196
2197
2198
2199 if (xhci->port_array[i] != major_revision &&
2200 xhci->port_array[i] != DUPLICATE_ENTRY) {
2201 if (xhci->port_array[i] == 0x03)
2202 xhci->num_usb3_ports--;
2203 else
2204 xhci->num_usb2_ports--;
2205 xhci->port_array[i] = DUPLICATE_ENTRY;
2206 }
2207
2208 continue;
2209 }
2210 xhci->port_array[i] = major_revision;
2211 if (major_revision == 0x03)
2212 xhci->num_usb3_ports++;
2213 else
2214 xhci->num_usb2_ports++;
2215 }
2216
2217}
2218
2219
2220
2221
2222
2223
2224
2225
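/*
 * Scan the Extended Capabilities for the "Supported Protocol Capabilities"
 * that tell us which speed each port is supposed to be.  We can't count on
 * the port speed bits in the PORTSC register being correct until a device is
 * connected, but we need to set up the two fake roothubs now.
 */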
static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
{
	void __iomem *base;
	u32 offset;
	unsigned int num_ports;
	int i, j, port_index;
	int cap_count = 0;
	u32 cap_start;

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags);
	if (!xhci->port_array)
		return -ENOMEM;

	xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags);
	if (!xhci->rh_bw)
		return -ENOMEM;
	for (i = 0; i < num_ports; i++) {
		struct xhci_interval_bw_table *bw_table;

		INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
		bw_table = &xhci->rh_bw[i].bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}
	base = &xhci->cap_regs->hc_capbase;

	cap_start = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_PROTOCOL);
	if (!cap_start) {
		xhci_err(xhci, "No Extended Capability registers, unable to set up roothub\n");
		return -ENODEV;
	}

	offset = cap_start;

	/* count extended protocol capability entries for later caching */
	while (offset) {
		cap_count++;
		offset = xhci_find_next_ext_cap(base, offset,
						XHCI_EXT_CAPS_PROTOCOL);
	}

	xhci->ext_caps = kzalloc(sizeof(*xhci->ext_caps) * cap_count, flags);
	if (!xhci->ext_caps)
		return -ENOMEM;

	offset = cap_start;

	/* add each supported protocol capability; stop once all ports are mapped */
	while (offset) {
		xhci_add_in_port(xhci, num_ports, base + offset, cap_count);
		if (xhci->num_usb2_ports + xhci->num_usb3_ports == num_ports)
			break;
		offset = xhci_find_next_ext_cap(base, offset,
						XHCI_EXT_CAPS_PROTOCOL);
	}

	if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) {
		xhci_warn(xhci, "No ports on the roothubs?\n");
		return -ENODEV;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Found %u USB 2.0 ports and %u USB 3.0 ports.",
			xhci->num_usb2_ports, xhci->num_usb3_ports);

	/*
	 * Place limits on the number of roothub ports so that the hub
	 * descriptor isn't dynamically sized.
	 */
	if (xhci->num_usb3_ports > USB_SS_MAXPORTS) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Limiting USB 3.0 roothub ports to %u.",
				USB_SS_MAXPORTS);
		xhci->num_usb3_ports = USB_SS_MAXPORTS;
	}
	if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Limiting USB 2.0 roothub ports to %u.",
				USB_MAXCHILDREN);
		xhci->num_usb2_ports = USB_MAXCHILDREN;
	}

	/*
	 * A root hub can expose ports of both speeds, so keep two separate
	 * arrays of port register addresses, one per protocol.
	 */
	if (xhci->num_usb2_ports) {
		xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)*
				xhci->num_usb2_ports, flags);
		if (!xhci->usb2_ports)
			return -ENOMEM;

		port_index = 0;
		for (i = 0; i < num_ports; i++) {
			if (xhci->port_array[i] == 0x03 ||
			    xhci->port_array[i] == 0 ||
			    xhci->port_array[i] == DUPLICATE_ENTRY)
				continue;

			xhci->usb2_ports[port_index] =
				&xhci->op_regs->port_status_base +
				NUM_PORT_REGS*i;
			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
					"USB 2.0 port at index %u, "
					"addr = %p", i,
					xhci->usb2_ports[port_index]);
			port_index++;
			if (port_index == xhci->num_usb2_ports)
				break;
		}
	}
	if (xhci->num_usb3_ports) {
		xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
				xhci->num_usb3_ports, flags);
		if (!xhci->usb3_ports)
			return -ENOMEM;

		port_index = 0;
		for (i = 0; i < num_ports; i++)
			if (xhci->port_array[i] == 0x03) {
				xhci->usb3_ports[port_index] =
					&xhci->op_regs->port_status_base +
					NUM_PORT_REGS*i;
				xhci_dbg_trace(xhci, trace_xhci_dbg_init,
						"USB 3.0 port at index %u, "
						"addr = %p", i,
						xhci->usb3_ports[port_index]);
				port_index++;
				if (port_index == xhci->num_usb3_ports)
					break;
			}
	}
	return 0;
}
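/*
 * Allocate and initialize the controller's core data structures: the device
 * context base address array, DMA pools, command and event rings, the event
 * ring segment table, scratchpad buffers, and the root hub port arrays.
 */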
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t dma;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	unsigned int val, val2;
	u64 val_64;
	u32 page_size, temp;
	int i, ret;

	INIT_LIST_HEAD(&xhci->cmd_list);

	/* init command timeout work */
	INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
	init_completion(&xhci->cmd_ring_stop_completion);

	page_size = readl(&xhci->op_regs->page_size);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Supported page size register = 0x%x", page_size);
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	if (i < 16)
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Supported page size of %iK", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"HCD page size set to %iK", xhci->page_size / 1024);

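	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */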
	val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// xHC can handle at most %d device slots.", val);
	val2 = readl(&xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting Max device slots reg = 0x%x.", val);
	writel(val, &xhci->op_regs->config_reg);

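	/*
	 * The Device Context Base Address Array must be physically
	 * contiguous and 64-byte aligned, so allocate it coherently.
	 */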
	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
			flags);
	if (!xhci->dcbaa)
		goto fail;
	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
	xhci->dcbaa->dma = dma;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Device context base array address = 0x%llx (DMA), %p (virt)",
			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);

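	/*
	 * Initialize the ring segment pool.  A ring is a contiguous block of
	 * TRBs; using TRB_SEGMENT_SIZE alignment satisfies the command
	 * ring's 64-byte alignment requirement and keeps each segment within
	 * a single entry of the TRB address radix tree.
	 */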
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);

	/* input/output device contexts; these must be 64-byte aligned */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			2112, 64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;

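	/*
	 * Linear stream context arrays don't have any boundary restrictions,
	 * and only need to be 16-byte aligned.
	 */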
	xhci->small_streams_pool =
		dma_pool_create("xHCI 256 byte stream ctx arrays",
			dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
	xhci->medium_streams_pool =
		dma_pool_create("xHCI 1KB stream ctx arrays",
			dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);

	/*
	 * Stream context arrays bigger than MEDIUM_STREAM_ARRAY_SIZE are
	 * allocated outside these pools.
	 */
	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
		goto fail;

	/* Set up the command ring to have one segment for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Allocated command ring at %p", xhci->cmd_ring);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
			(unsigned long long)xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting command ring address to 0x%016llx", val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);

	xhci->lpm_command = xhci_alloc_command_with_ctx(xhci, true, flags);
	if (!xhci->lpm_command)
		goto fail;

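	/*
	 * Reserve one command ring TRB for disabling LPM.  The USB core
	 * serializes LPM disable, so a single reserved TRB is enough for
	 * all devices.
	 */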
	xhci->cmd_ring_reserved_trbs++;

	val = readl(&xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Doorbell array is located at offset 0x%x"
			" from cap regs base addr", val);
	xhci->dba = (void __iomem *) xhci->cap_regs + val;

	xhci->ir_set = &xhci->run_regs->ir_set[0];

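	/*
	 * Event ring setup: allocate a normal ring, then hook it up to the
	 * event ring segment table (ERST) and interrupter register set 0.
	 */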
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
					   0, flags);
	if (!xhci->event_ring)
		goto fail;
	if (xhci_check_trb_in_td_math(xhci) < 0)
		goto fail;

	ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags);
	if (ret)
		goto fail;

	/* set ERST count with the number of entries in the segment table */
	val = readl(&xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Write ERST size = %i to ir_set 0 (some bits preserved)",
			val);
	writel(val, &xhci->ir_set->erst_size);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set ERST entries to point to event ring.");
	/* set the segment table base address */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set ERST base address for ir_set 0 = 0x%llx",
			(unsigned long long)xhci->erst.erst_dma_addr);
	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

	/* Set the event ring dequeue address */
	xhci_set_hc_event_deq(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Wrote ERST address to ir_set 0.");

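	/*
	 * No virtual devices exist yet; clear the device slot pointers and
	 * initialize the per-port resume bookkeeping for both bus states.
	 */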
	for (i = 0; i < MAX_HC_SLOTS; i++)
		xhci->devs[i] = NULL;
	for (i = 0; i < USB_MAXCHILDREN; i++) {
		xhci->bus_state[0].resume_done[i] = 0;
		xhci->bus_state[1].resume_done[i] = 0;
		/* Only the USB 2.0 completions will ever be used. */
		init_completion(&xhci->bus_state[1].rexit_done[i]);
	}

	if (scratchpad_alloc(xhci, flags))
		goto fail;
	if (xhci_setup_port_arrays(xhci, flags))
		goto fail;

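	/*
	 * Enable USB 3.0 device notifications for function remote wake,
	 * which is necessary for allowing USB 3.0 devices to do remote
	 * wakeup from U3 (device suspend).
	 */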
	temp = readl(&xhci->op_regs->dev_notification);
	temp &= ~DEV_NOTE_MASK;
	temp |= DEV_NOTE_FWAKE;
	writel(temp, &xhci->op_regs->dev_notification);

	return 0;

fail:
	xhci_halt(xhci);
	xhci_reset(xhci);
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}