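/*
 * xHCI host controller driver -- memory management for rings, device and
 * input contexts, stream arrays, scratchpad buffers, and the event ring
 * segment table (ERST).
 */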
#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-debugfs.h"
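
/*
 * Allocate a generic ring segment from the segment pool, set its DMA address,
 * zero its TRBs, and initialize the private next pointer to NULL.
 */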
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
					       unsigned int cycle_state,
					       unsigned int max_packet,
					       gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t dma;
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));
	if (!seg)
		return NULL;

	seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}

	if (max_packet) {
		seg->bounce_buf = kzalloc_node(max_packet, flags,
					       dev_to_node(dev));
		if (!seg->bounce_buf) {
			dma_pool_free(xhci->segment_pool, seg->trbs, dma);
			kfree(seg);
			return NULL;
		}
	}
	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
	if (cycle_state == 0) {
		for (i = 0; i < TRBS_PER_SEGMENT; i++)
			seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
	}
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}

static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (seg->trbs) {
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	kfree(seg->bounce_buf);
	kfree(seg);
}

static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
					struct xhci_segment *first)
{
	struct xhci_segment *seg;

	seg = first->next;
	while (seg != first) {
		struct xhci_segment *next = seg->next;

		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first);
}
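
/*
 * Make the prev segment point to the next segment.  For non-event rings,
 * change the last TRB in the prev segment to be a Link TRB pointing at the
 * DMA address of the next segment, setting the chain bit when requested.
 */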
static void xhci_link_segments(struct xhci_segment *prev,
			       struct xhci_segment *next,
			       enum xhci_ring_type type, bool chain_links)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (type != TYPE_EVENT) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
			cpu_to_le64(next->dma);

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		if (chain_links)
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}
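
/*
 * Splice the segment chain first..last into the ring right after the
 * enqueue segment, and move the LINK_TOGGLE bit to the new last segment
 * if the enqueue segment was the last one.
 */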
static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *first, struct xhci_segment *last,
		unsigned int num_segs)
{
	struct xhci_segment *next;
	bool chain_links;

	if (!ring || !first || !last)
		return;

	/* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
	chain_links = !!(xhci_link_trb_quirk(xhci) ||
			 (ring->type == TYPE_ISOC &&
			  (xhci->quirks & XHCI_AMD_0x96_HOST)));

	next = ring->enq_seg->next;
	xhci_link_segments(ring->enq_seg, first, ring->type, chain_links);
	xhci_link_segments(last, next, ring->type, chain_links);
	ring->num_segs += num_segs;
	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;

	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
		ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
			&= ~cpu_to_le32(LINK_TOGGLE);
		last->trbs[TRBS_PER_SEGMENT-1].link.control
			|= cpu_to_le32(LINK_TOGGLE);
		ring->last_seg = last;
	}
}
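
/*
 * The radix tree maps the upper bits of a TRB's DMA address (the address
 * shifted right by TRB_SEGMENT_SHIFT) to the stream ring that owns the
 * segment, so a transfer event's TRB pointer can be resolved back to a ring.
 */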
static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
				       struct xhci_ring *ring,
				       struct xhci_segment *seg,
				       gfp_t mem_flags)
{
	unsigned long key;
	int ret;

	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);

	if (radix_tree_lookup(trb_address_map, key))
		return 0;

	ret = radix_tree_maybe_preload(mem_flags);
	if (ret)
		return ret;
	ret = radix_tree_insert(trb_address_map, key, ring);
	radix_tree_preload_end();
	return ret;
}

static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
					struct xhci_segment *seg)
{
	unsigned long key;

	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
	if (radix_tree_lookup(trb_address_map, key))
		radix_tree_delete(trb_address_map, key);
}

static int xhci_update_stream_segment_mapping(
		struct radix_tree_root *trb_address_map,
		struct xhci_ring *ring,
		struct xhci_segment *first_seg,
		struct xhci_segment *last_seg,
		gfp_t mem_flags)
{
	struct xhci_segment *seg;
	struct xhci_segment *failed_seg;
	int ret;

	if (WARN_ON_ONCE(trb_address_map == NULL))
		return 0;

	seg = first_seg;
	do {
		ret = xhci_insert_segment_mapping(trb_address_map,
						  ring, seg, mem_flags);
		if (ret)
			goto remove_streams;
		if (seg == last_seg)
			return 0;
		seg = seg->next;
	} while (seg != first_seg);

	return 0;

remove_streams:
	failed_seg = seg;
	seg = first_seg;
	do {
		xhci_remove_segment_mapping(trb_address_map, seg);
		if (seg == failed_seg)
			return ret;
		seg = seg->next;
	} while (seg != first_seg);

	return ret;
}

static void xhci_remove_stream_mapping(struct xhci_ring *ring)
{
	struct xhci_segment *seg;

	if (WARN_ON_ONCE(ring->trb_address_map == NULL))
		return;

	seg = ring->first_seg;
	do {
		xhci_remove_segment_mapping(ring->trb_address_map, seg);
		seg = seg->next;
	} while (seg != ring->first_seg);
}

static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
{
	return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
			ring->first_seg, ring->last_seg, mem_flags);
}
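
/* Free segments, remove any stream DMA mapping, and free the ring itself. */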
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	if (!ring)
		return;

	trace_xhci_ring_free(ring);

	if (ring->first_seg) {
		if (ring->type == TYPE_STREAM)
			xhci_remove_stream_mapping(ring);
		xhci_free_segments_for_ring(xhci, ring->first_seg);
	}

	kfree(ring);
}

void xhci_initialize_ring_info(struct xhci_ring *ring,
			       unsigned int cycle_state)
{
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;

	/*
	 * The ring is initialized to 0; the producer must write 1 to the
	 * cycle bit to hand ownership of a TRB to the consumer.
	 */
	ring->cycle_state = cycle_state;

	/*
	 * Each segment has a Link TRB, and leave an extra TRB of headroom
	 * so the enqueue pointer never catches up with the dequeue pointer.
	 */
	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
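
/* Allocate num_segs segments and link them into a circular chain. */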
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
		struct xhci_segment **first, struct xhci_segment **last,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
	struct xhci_segment *prev;
	bool chain_links;

	/* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
	chain_links = !!(xhci_link_trb_quirk(xhci) ||
			 (type == TYPE_ISOC &&
			  (xhci->quirks & XHCI_AMD_0x96_HOST)));

	prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
	if (!prev)
		return -ENOMEM;
	num_segs--;

	*first = prev;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
		if (!next) {
			prev = *first;
			while (prev) {
				next = prev->next;
				xhci_segment_free(xhci, prev);
				prev = next;
			}
			return -ENOMEM;
		}
		xhci_link_segments(prev, next, type, chain_links);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(prev, *first, type, chain_links);
	*last = prev;

	return 0;
}
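
/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 */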
struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
	struct xhci_ring *ring;
	int ret;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	ring = kzalloc_node(sizeof(*ring), flags, dev_to_node(dev));
	if (!ring)
		return NULL;

	ring->num_segs = num_segs;
	ring->bounce_buf_len = max_packet;
	INIT_LIST_HEAD(&ring->td_list);
	ring->type = type;
	if (num_segs == 0)
		return ring;

	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
			&ring->last_seg, num_segs, cycle_state, type,
			max_packet, flags);
	if (ret)
		goto fail;

	/* Only event ring does not use link TRB */
	if (type != TYPE_EVENT) {
		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
			cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring, cycle_state);
	trace_xhci_ring_alloc(ring);
	return ring;

fail:
	kfree(ring);
	return NULL;
}

void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
			     struct xhci_virt_device *virt_dev,
			     unsigned int ep_index)
{
	xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
	virt_dev->eps[ep_index].ring = NULL;
}
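
/*
 * Expand an existing ring by allocating a new chain of segments and linking
 * it in, so the ring has room for at least num_trbs more TRBs.
 */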
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
			unsigned int num_trbs, gfp_t flags)
{
	struct xhci_segment *first;
	struct xhci_segment *last;
	unsigned int num_segs;
	unsigned int num_segs_needed;
	int ret;

	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
				(TRBS_PER_SEGMENT - 1);

	/* Allocate the number of segments we need, or double the ring size */
	num_segs = ring->num_segs > num_segs_needed ?
			ring->num_segs : num_segs_needed;

	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
			num_segs, ring->cycle_state, ring->type,
			ring->bounce_buf_len, flags);
	if (ret)
		return -ENOMEM;

	if (ring->type == TYPE_STREAM)
		ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
						ring, first, last, flags);
	if (ret) {
		struct xhci_segment *next;

		do {
			next = first->next;
			xhci_segment_free(xhci, first);
			if (first == last)
				break;
			first = next;
		} while (true);
		return ret;
	}

	xhci_link_rings(xhci, ring, first, last, num_segs);
	trace_xhci_ring_expansion(ring);
	xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
			"ring expansion succeed, now has %d segments",
			ring->num_segs);

	return 0;
}

struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
						    int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
		return NULL;

	ctx = kzalloc_node(sizeof(*ctx), flags, dev_to_node(dev));
	if (!ctx)
		return NULL;

	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}

void xhci_free_container_ctx(struct xhci_hcd *xhci,
			     struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}

struct xhci_input_control_ctx *xhci_get_input_control_ctx(
		struct xhci_container_ctx *ctx)
{
	if (ctx->type != XHCI_CTX_TYPE_INPUT)
		return NULL;

	return (struct xhci_input_control_ctx *)ctx->bytes;
}

struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
					struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}

struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
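
/*
 * Stream context arrays come from one of three places, depending on size:
 * a dma_pool for small arrays, a dma_pool for medium arrays, or
 * dma_alloc_coherent() for anything larger.
 */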
static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;

	if (size > MEDIUM_STREAM_ARRAY_SIZE)
		dma_free_coherent(dev, size, stream_ctx, dma);
	else if (size <= SMALL_STREAM_ARRAY_SIZE)
		dma_pool_free(xhci->small_streams_pool, stream_ctx, dma);
	else
		dma_pool_free(xhci->medium_streams_pool, stream_ctx, dma);
}

static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs, dma_addr_t *dma,
		gfp_t mem_flags)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;

	if (size > MEDIUM_STREAM_ARRAY_SIZE)
		return dma_alloc_coherent(dev, size, dma, mem_flags);
	else if (size <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_alloc(xhci->small_streams_pool,
				      mem_flags, dma);
	else
		return dma_pool_alloc(xhci->medium_streams_pool,
				      mem_flags, dma);
}

struct xhci_ring *xhci_dma_to_transfer_ring(
		struct xhci_virt_ep *ep,
		u64 address)
{
	if (ep->ep_state & EP_HAS_STREAMS)
		return radix_tree_lookup(&ep->stream_info->trb_address_map,
					 address >> TRB_SEGMENT_SHIFT);
	return ep->ring;
}

struct xhci_ring *xhci_stream_id_to_ring(
		struct xhci_virt_device *dev,
		unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep = &dev->eps[ep_index];

	if (stream_id == 0)
		return ep->ring;
	if (!ep->stream_info)
		return NULL;

	if (stream_id >= ep->stream_info->num_streams)
		return NULL;
	return ep->stream_info->stream_rings[stream_id];
}
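
/*
 * Allocate everything an endpoint needs to support stream IDs: a ring per
 * stream, the stream context array, and the radix tree that maps ring DMA
 * addresses back to stream rings.  Stream ID 0 is reserved.
 */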
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		unsigned int num_streams,
		unsigned int max_packet, gfp_t mem_flags)
{
	struct xhci_stream_info *stream_info;
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	u64 addr;
	int ret;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	xhci_dbg(xhci, "Allocating %u streams and %u stream context array entries.\n",
		 num_streams, num_stream_ctxs);
	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
		return NULL;
	}
	xhci->cmd_ring_reserved_trbs++;

	stream_info = kzalloc_node(sizeof(*stream_info), mem_flags,
				   dev_to_node(dev));
	if (!stream_info)
		goto cleanup_trbs;

	stream_info->num_streams = num_streams;
	stream_info->num_stream_ctxs = num_stream_ctxs;

	/* Initialize the array of virtual pointers to stream rings */
	stream_info->stream_rings = kcalloc_node(
			num_streams, sizeof(struct xhci_ring *), mem_flags,
			dev_to_node(dev));
	if (!stream_info->stream_rings)
		goto cleanup_info;

	/* Initialize the array of DMA addresses for stream rings for the HW */
	stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
			num_stream_ctxs, &stream_info->ctx_array_dma,
			mem_flags);
	if (!stream_info->stream_ctx_array)
		goto cleanup_ring_array;
	memset(stream_info->stream_ctx_array, 0,
	       sizeof(struct xhci_stream_ctx)*num_stream_ctxs);

	/* Allocate everything needed to free the stream rings later */
	stream_info->free_streams_command =
		xhci_alloc_command_with_ctx(xhci, true, mem_flags);
	if (!stream_info->free_streams_command)
		goto cleanup_ctx;

	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);

	/*
	 * Allocate rings for all the streams that the driver will use, and
	 * add their segment DMA addresses to the radix tree.  Stream 0 is
	 * reserved.
	 */
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		stream_info->stream_rings[cur_stream] =
			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
					mem_flags);
		cur_ring = stream_info->stream_rings[cur_stream];
		if (!cur_ring)
			goto cleanup_rings;
		cur_ring->stream_id = cur_stream;
		cur_ring->trb_address_map = &stream_info->trb_address_map;
		/* Set deq ptr, cycle bit, and stream context type */
		addr = cur_ring->first_seg->dma |
			SCT_FOR_CTX(SCT_PRI_TR) |
			cur_ring->cycle_state;
		stream_info->stream_ctx_array[cur_stream].stream_ring =
			cpu_to_le64(addr);
		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
			 cur_stream, (unsigned long long) addr);

		if (xhci->quirks & XHCI_STREAM_QUIRK) {
			/*
			 * Set up a per-stream-ring timer so a stalled stream
			 * can be recovered by the stream timeout handler.
			 */
			timer_setup(&cur_ring->stream_timer,
				    xhci_stream_timeout, 0);
			cur_ring->xhci = xhci;
		}

		ret = xhci_update_stream_mapping(cur_ring, mem_flags);
		if (ret) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
			goto cleanup_rings;
		}
	}

	return stream_info;

cleanup_rings:
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
	xhci_free_stream_ctx(xhci, stream_info->num_stream_ctxs,
			     stream_info->stream_ctx_array,
			     stream_info->ctx_array_dma);
cleanup_ring_array:
	kfree(stream_info->stream_rings);
cleanup_info:
	kfree(stream_info);
cleanup_trbs:
	xhci->cmd_ring_reserved_trbs--;
	return NULL;
}
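
/*
 * Set up the endpoint context so the host controller knows where to find
 * the stream context array, and enable the Linear Stream Array.
 */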
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
				     struct xhci_ep_ctx *ep_ctx,
				     struct xhci_stream_info *stream_info)
{
	u32 max_primary_streams;

	/*
	 * The stream context array size must be a power of two; the
	 * MaxPStreams field encodes 2^(MaxPStreams + 1) entries.
	 */
	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			"Setting number of stream ctx array entries to %u",
			1 << (max_primary_streams + 1));
	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
				       | EP_HAS_LSA);
	ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
}
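
/*
 * Clear the stream fields and reinstall the endpoint's normal transfer ring
 * dequeue pointer after streams are disabled.
 */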
void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx,
					struct xhci_virt_ep *ep)
{
	dma_addr_t addr;

	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
	ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
}
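
/*
 * Free the stream rings, the stream context array, and the command that was
 * reserved for freeing the streams.  Drops the reserved command ring TRB.
 */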
void xhci_free_stream_info(struct xhci_hcd *xhci,
			   struct xhci_stream_info *stream_info)
{
	int cur_stream;
	struct xhci_ring *cur_ring;

	if (!stream_info)
		return;

	for (cur_stream = 1; cur_stream < stream_info->num_streams;
	     cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			/* Only rings that were allocated have a stream timer */
			if (xhci->quirks & XHCI_STREAM_QUIRK)
				del_timer_sync(&cur_ring->stream_timer);

			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
	xhci->cmd_ring_reserved_trbs--;
	if (stream_info->stream_ctx_array)
		xhci_free_stream_ctx(xhci,
				stream_info->num_stream_ctxs,
				stream_info->stream_ctx_array,
				stream_info->ctx_array_dma);

	kfree(stream_info->stream_rings);
	kfree(stream_info);
}
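
/* Initialize (but do not arm) an endpoint's Stop Endpoint command watchdog timer. */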
static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
				     struct xhci_virt_ep *ep)
{
	timer_setup(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog,
		    0);
	ep->xhci = xhci;
}

static void xhci_free_tt_info(struct xhci_hcd *xhci,
			      struct xhci_virt_device *virt_dev,
			      int slot_id)
{
	struct list_head *tt_list_head;
	struct xhci_tt_bw_info *tt_info, *next;
	bool slot_found = false;

	/*
	 * If the device never made it past the Set Address stage, it may not
	 * have a valid real port, and then there is no TT info to free.
	 */
	if (virt_dev->real_port == 0 ||
	    virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "Bad real port.\n");
		return;
	}

	tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		/* Multi-TT hubs will have more than one entry */
		if (tt_info->slot_id == slot_id) {
			slot_found = true;
			list_del(&tt_info->tt_list);
			kfree(tt_info);
		} else if (slot_found) {
			break;
		}
	}
}

int xhci_alloc_tt_info(struct xhci_hcd *xhci,
		       struct xhci_virt_device *virt_dev,
		       struct usb_device *hdev,
		       struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_tt_bw_info *tt_info;
	unsigned int num_ports;
	int i, j;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	if (!tt->multi)
		num_ports = 1;
	else
		num_ports = hdev->maxchild;

	for (i = 0; i < num_ports; i++, tt_info++) {
		struct xhci_interval_bw_table *bw_table;

		tt_info = kzalloc_node(sizeof(*tt_info), mem_flags,
				       dev_to_node(dev));
		if (!tt_info)
			goto free_tts;
		INIT_LIST_HEAD(&tt_info->tt_list);
		list_add(&tt_info->tt_list,
			 &xhci->rh_bw[virt_dev->real_port - 1].tts);
		tt_info->slot_id = virt_dev->udev->slot_id;
		if (tt->multi)
			tt_info->ttport = i+1;
		bw_table = &tt_info->bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}
	return 0;

free_tts:
	xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
	return -ENOMEM;
}
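
/*
 * Free a virt device: its rings, stream info, TT bandwidth info, and the
 * input and output contexts.  Clears the slot's DCBAA entry.
 */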
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;
	int old_active_eps = 0;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];

	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
	if (!dev)
		return;

	trace_xhci_free_virt_device(dev);

	if (dev->tt_info)
		old_active_eps = dev->tt_info->active_eps;

	for (i = 0; i < 31; i++) {
		if (dev->eps[i].ring)
			xhci_ring_free(xhci, dev->eps[i].ring);
		if (dev->eps[i].stream_info)
			xhci_free_stream_info(xhci,
					dev->eps[i].stream_info);
		/*
		 * Endpoints should already have been removed from the
		 * bandwidth lists when the device was disabled; warn if
		 * that is not the case.
		 */
		if (!list_empty(&dev->eps[i].bw_endpoint_list))
			xhci_warn(xhci,
				  "Slot %u endpoint %u not removed from BW list!\n",
				  slot_id, i);
	}
	/* If this is a hub, free the TT(s) from the TT list */
	xhci_free_tt_info(xhci, dev, slot_id);
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, dev, old_active_eps);

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	if (dev->udev && dev->udev->slot_id)
		dev->udev->slot_id = 0;
	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}
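
/*
 * Free a virt_device structure, recursively freeing any child devices that
 * hang off this device's TT first, so TT bandwidth info is torn down
 * bottom-up.
 */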
static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *vdev;
	struct list_head *tt_list_head;
	struct xhci_tt_bw_info *tt_info, *next;
	int i;

	vdev = xhci->devs[slot_id];
	if (!vdev)
		return;

	if (vdev->real_port == 0 ||
	    vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "Bad vdev->real_port.\n");
		goto out;
	}

	tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		/* is this a hub device that added a tt_info to the tts list */
		if (tt_info->slot_id == slot_id) {
			/* are any devices using this tt_info? */
			for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
				vdev = xhci->devs[i];
				if (vdev && (vdev->tt_info == tt_info))
					xhci_free_virt_devices_depth_first(
						xhci, i);
			}
		}
	}
out:
	/* we are now at a leaf device */
	xhci_debugfs_remove_slot(xhci, slot_id);
	xhci_free_virt_device(xhci, slot_id);
}

int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
			   struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	dev = kzalloc(sizeof(*dev), flags);
	if (!dev)
		return 0;

	/* Allocate the (output) device context that will be used in the HC */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
		 (unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for the address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
		 (unsigned long long)dev->in_ctx->dma);

	/* Initialize the cancellation list and watchdog timers for each ep */
	for (i = 0; i < 31; i++) {
		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
	}

	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
	if (!dev->eps[0].ring)
		goto fail;

	dev->udev = udev;

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
		 slot_id,
		 &xhci->dcbaa->dev_context_ptrs[slot_id],
		 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));

	trace_xhci_alloc_virt_device(dev);

	xhci->devs[slot_id] = dev;

	return 1;
fail:
	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);
	kfree(dev);

	return 0;
}

void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_ring *ep_ring;

	virt_dev = xhci->devs[udev->slot_id];
	ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
	ep_ring = virt_dev->eps[0].ring;
	/*
	 * FIXME we don't keep track of the dequeue pointer very well after a
	 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
	 * endpoint 0 to the current enqueue pointer here.
	 */
	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
							ep_ring->enqueue)
				   | ep_ring->cycle_state);
}
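
/*
 * The xHCI roothub may have ports of differing speeds in any order in the
 * returned port array.  Find the real hardware port number that matches the
 * device's topmost parent hub, using the HCD for the device's speed.
 */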
static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
				      struct usb_device *udev)
{
	struct usb_device *top_dev;
	struct usb_hcd *hcd;

	if (udev->speed >= USB_SPEED_SUPER)
		hcd = xhci->shared_hcd;
	else
		hcd = xhci->main_hcd;

	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
	     top_dev = top_dev->parent)
		/* Found device below root hub */;

	return xhci_find_raw_port_number(hcd, top_dev->portnum);
}

/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_slot_ctx *slot_ctx;
	u32 port_num;
	u32 max_packets;
	struct usb_device *top_dev;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
			  udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
	switch (udev->speed) {
	case USB_SPEED_SUPER_PLUS:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
		max_packets = MAX_PACKET(512);
		break;
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		max_packets = MAX_PACKET(512);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		max_packets = MAX_PACKET(64);
		break;
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		max_packets = MAX_PACKET(64);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		max_packets = MAX_PACKET(8);
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		return -EINVAL;
	}
	/* Find the root hub port this device is under */
	port_num = xhci_find_real_port_number(xhci, udev);
	if (!port_num)
		return -EINVAL;
	slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
	/* Set the port number in the virtual_device to the faked port number */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
	     top_dev = top_dev->parent)
		/* Found device below root hub */;
	dev->fake_port = top_dev->portnum;
	dev->real_port = port_num;
	xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);

	/*
	 * Find the right bandwidth table that this device will be a part of.
	 * If this is a full speed device attached directly to a root port (or
	 * a descendant of one), it counts as a primary bandwidth domain, not
	 * a secondary bandwidth domain under a TT.  An xhci_tt_info structure
	 * will never be created for the HS root hub.
	 */
	if (!udev->tt || !udev->tt->hub->parent) {
		dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
	} else {
		struct xhci_root_port_bw_info *rh_bw;
		struct xhci_tt_bw_info *tt_bw;

		rh_bw = &xhci->rh_bw[port_num - 1];
		/* Find the right TT bandwidth structure */
		list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
			if (tt_bw->slot_id != udev->tt->hub->slot_id)
				continue;

			if (!dev->udev->tt->multi ||
			    (udev->tt->multi &&
			     tt_bw->ttport == dev->udev->ttport)) {
				dev->bw_table = &tt_bw->bw_table;
				dev->tt_info = tt_bw;
				break;
			}
		}
		if (!dev->tt_info)
			xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
	}

	/* Is this a LS/FS device under an external HS hub? */
	if (udev->tt && udev->tt->hub->parent) {
		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
						(udev->ttport << 8));
		if (udev->tt->multi)
			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));

	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
					 max_packets);

	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
				   dev->eps[0].ring->cycle_state);

	trace_xhci_setup_addressable_virt_device(dev);

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}
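
/*
 * Convert interval expressed as 2^(bInterval - 1) == interval into
 * straight exponent value 2^n == interval.
 */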
static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
						 struct usb_host_endpoint *ep)
{
	unsigned int interval;

	interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
	if (interval != ep->desc.bInterval - 1)
		dev_warn(&udev->dev,
			 "ep %#x - rounding interval to %d %sframes\n",
			 ep->desc.bEndpointAddress,
			 1 << interval,
			 udev->speed == USB_SPEED_FULL ? "" : "micro");

	if (udev->speed == USB_SPEED_FULL) {
		/*
		 * Full speed isoc endpoints specify interval in frames,
		 * not microframes.  We are using microframes everywhere,
		 * so adjust accordingly.
		 */
		interval += 3;	/* 1 frame = 2^3 uframes */
	}

	return interval;
}

/*
 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
 * microframes, rounded down to the nearest power of 2.
 */
static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
		struct usb_host_endpoint *ep, unsigned int desc_interval,
		unsigned int min_exponent, unsigned int max_exponent)
{
	unsigned int interval;

	interval = fls(desc_interval) - 1;
	interval = clamp_val(interval, min_exponent, max_exponent);
	if ((1 << interval) != desc_interval)
		dev_dbg(&udev->dev,
			"ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
			ep->desc.bEndpointAddress,
			1 << interval,
			desc_interval);

	return interval;
}

static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (ep->desc.bInterval == 0)
		return 0;
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval, 0, 15);
}

static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval * 8, 3, 10);
}
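
/*
 * Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval
 * field is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if
 * interval is set to 0.
 */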
static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
					       struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
		    usb_endpoint_xfer_bulk(&ep->desc)) {
			interval = xhci_parse_microframe_interval(udev, ep);
			break;
		}
		fallthrough;

	case USB_SPEED_SUPER_PLUS:
	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
		}
		break;

	case USB_SPEED_FULL:
		if (usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
			break;
		}
		/*
		 * Fall through for interrupt endpoint interval decoding,
		 * since it uses the same rules as low speed interrupt
		 * endpoints.
		 */
		fallthrough;

	case USB_SPEED_LOW:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_frame_interval(udev, ep);
		}
		break;

	default:
		BUG();
	}
	return interval;
}

/*
 * The "Mult" field in the endpoint context is only set for SuperSpeed isoc
 * endpoints.  It is zero-based: 0 means one transaction per service interval.
 */
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
				  struct usb_host_endpoint *ep)
{
	if (udev->speed < USB_SPEED_SUPER ||
	    !usb_endpoint_xfer_isoc(&ep->desc))
		return 0;
	return ep->ss_ep_comp.bmAttributes;
}

static u32 xhci_get_endpoint_max_burst(struct usb_device *udev,
				       struct usb_host_endpoint *ep)
{
	/* Super speed and Plus have max burst in ep companion desc */
	if (udev->speed >= USB_SPEED_SUPER)
		return ep->ss_ep_comp.bMaxBurst;

	if (udev->speed == USB_SPEED_HIGH &&
	    (usb_endpoint_xfer_isoc(&ep->desc) ||
	     usb_endpoint_xfer_int(&ep->desc)))
		return usb_endpoint_maxp_mult(&ep->desc) - 1;

	return 0;
}

static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep)
{
	int in;

	in = usb_endpoint_dir_in(&ep->desc);

	switch (usb_endpoint_type(&ep->desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		return CTRL_EP;
	case USB_ENDPOINT_XFER_BULK:
		return in ? BULK_IN_EP : BULK_OUT_EP;
	case USB_ENDPOINT_XFER_ISOC:
		return in ? ISOC_IN_EP : ISOC_OUT_EP;
	case USB_ENDPOINT_XFER_INT:
		return in ? INT_IN_EP : INT_OUT_EP;
	}
	return 0;
}

/*
 * Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and mult size.
 */
static u32 xhci_get_max_esit_payload(struct usb_device *udev,
				     struct usb_host_endpoint *ep)
{
	int max_burst;
	int max_packet;

	/* Only applies for interrupt or isochronous endpoints */
	if (usb_endpoint_xfer_control(&ep->desc) ||
	    usb_endpoint_xfer_bulk(&ep->desc))
		return 0;

	/* SuperSpeedPlus Isoc ep sending over 48k per ESIT */
	if ((udev->speed >= USB_SPEED_SUPER_PLUS) &&
	    USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes))
		return le32_to_cpu(ep->ssp_isoc_ep_comp.dwBytesPerInterval);
	/* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per ESIT */
	else if (udev->speed >= USB_SPEED_SUPER)
		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);

	max_packet = usb_endpoint_maxp(&ep->desc);
	max_burst = usb_endpoint_maxp_mult(&ep->desc);

	return max_packet * max_burst;
}
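
/*
 * Set up an endpoint with one ring segment.  Do not allocate stream rings;
 * drivers ask for those separately with usb_alloc_streams().
 */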
int xhci_endpoint_init(struct xhci_hcd *xhci,
		       struct xhci_virt_device *virt_dev,
		       struct usb_device *udev,
		       struct usb_host_endpoint *ep,
		       gfp_t mem_flags)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	enum xhci_ring_type ring_type;
	u32 max_esit_payload;
	u32 endpoint_type;
	unsigned int max_burst;
	unsigned int interval;
	unsigned int mult;
	unsigned int avg_trb_len;
	unsigned int err_count = 0;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	endpoint_type = xhci_get_endpoint_type(ep);
	if (!endpoint_type)
		return -EINVAL;

	ring_type = usb_endpoint_type(&ep->desc);

	/*
	 * Get values to fill the endpoint context, mostly from the ep
	 * descriptor.  The average TRB buffer length for bulk endpoints is
	 * unclear as we have no clue on scatter gather list entry size.  For
	 * Isoc and Int, set it to max available.
	 */
	max_esit_payload = xhci_get_max_esit_payload(udev, ep);
	interval = xhci_get_endpoint_interval(udev, ep);

	/* Periodic endpoint bInterval limit quirk */
	if (usb_endpoint_xfer_int(&ep->desc) ||
	    usb_endpoint_xfer_isoc(&ep->desc)) {
		if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) &&
		    udev->speed >= USB_SPEED_HIGH &&
		    interval >= 7) {
			interval = 6;
		}
	}

	mult = xhci_get_endpoint_mult(udev, ep);
	max_packet = usb_endpoint_maxp(&ep->desc);
	max_burst = xhci_get_endpoint_max_burst(udev, ep);
	avg_trb_len = max_esit_payload;

	/* Allow 3 retries for everything but isoc, set CErr = 3 */
	if (!usb_endpoint_xfer_isoc(&ep->desc))
		err_count = 3;
	/* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */
	if (usb_endpoint_xfer_bulk(&ep->desc)) {
		if (udev->speed == USB_SPEED_HIGH)
			max_packet = 512;
		if (udev->speed == USB_SPEED_FULL) {
			max_packet = rounddown_pow_of_two(max_packet);
			max_packet = clamp_val(max_packet, 8, 64);
		}
	}
	/* xHCI 1.0 and 1.1 indicate that ctrl ep avg TRB Length should be 8 */
	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
		avg_trb_len = 8;
	/* xhci 1.1 with LEC support doesn't use mult field, use RsvdZ */
	if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2))
		mult = 0;

	/* Set up the endpoint ring */
	virt_dev->eps[ep_index].new_ring =
		xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
	if (!virt_dev->eps[ep_index].new_ring)
		return -ENOMEM;

	virt_dev->eps[ep_index].skip = false;
	ep_ring = virt_dev->eps[ep_index].new_ring;

	/* Fill the endpoint context */
	ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
				      EP_INTERVAL(interval) |
				      EP_MULT(mult));
	ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
				       MAX_PACKET(max_packet) |
				       MAX_BURST(max_burst) |
				       ERROR_COUNT(err_count));
	ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma |
				  ep_ring->cycle_state);

	ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
				      EP_AVG_TRB_LENGTH(avg_trb_len));

	return 0;
}

void xhci_endpoint_zero(struct xhci_hcd *xhci,
			struct xhci_virt_device *virt_dev,
			struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq = 0;
	ep_ctx->tx_info = 0;
	/*
	 * Don't free the endpoint ring until the set interface or
	 * configuration is done.
	 */
}

void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
{
	bw_info->ep_interval = 0;
	bw_info->mult = 0;
	bw_info->num_packets = 0;
	bw_info->max_packet_size = 0;
	bw_info->type = 0;
	bw_info->max_esit_payload = 0;
}

void xhci_update_bw_info(struct xhci_hcd *xhci,
			 struct xhci_container_ctx *in_ctx,
			 struct xhci_input_control_ctx *ctrl_ctx,
			 struct xhci_virt_device *virt_dev)
{
	struct xhci_bw_info *bw_info;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_type;
	int i;

	for (i = 1; i < 31; i++) {
		bw_info = &virt_dev->eps[i].bw_info;

		/*
		 * We can't tell what endpoint type is being dropped, but
		 * unconditionally clearing the bandwidth info for non-periodic
		 * endpoints should be harmless because the info will never be
		 * set in the first place.
		 */
		if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
			/* Dropped endpoint */
			xhci_clear_endpoint_bw_info(bw_info);
			continue;
		}

		if (EP_IS_ADDED(ctrl_ctx, i)) {
			ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
			ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));

			/* Ignore non-periodic endpoints */
			if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
			    ep_type != ISOC_IN_EP &&
			    ep_type != INT_IN_EP)
				continue;

			/* Added or changed endpoint */
			bw_info->ep_interval = CTX_TO_EP_INTERVAL(
					le32_to_cpu(ep_ctx->ep_info));
			/*
			 * Number of packets and mult are zero-based in the
			 * input context, but we want one-based for the
			 * interval table.
			 */
			bw_info->mult = CTX_TO_EP_MULT(
					le32_to_cpu(ep_ctx->ep_info)) + 1;
			bw_info->num_packets = CTX_TO_MAX_BURST(
					le32_to_cpu(ep_ctx->ep_info2)) + 1;
			bw_info->max_packet_size = MAX_PACKET_DECODED(
					le32_to_cpu(ep_ctx->ep_info2));
			bw_info->type = ep_type;
			bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
					le32_to_cpu(ep_ctx->tx_info));
		}
	}
}
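
/*
 * Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and
 * then issue a configure endpoint command.
 */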
void xhci_endpoint_copy(struct xhci_hcd *xhci,
			struct xhci_container_ctx *in_ctx,
			struct xhci_container_ctx *out_ctx,
			unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
	if (xhci->quirks & XHCI_MTK_HOST) {
		in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0];
		in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1];
	}
}
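
/*
 * Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint and
 * then issue a configure endpoint command.
 */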
void xhci_slot_copy(struct xhci_hcd *xhci,
		    struct xhci_container_ctx *in_ctx,
		    struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}
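
/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */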
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Allocating %d scratchpad buffers", num_sp);

	if (!num_sp)
		return 0;

	xhci->scratchpad = kzalloc_node(sizeof(*xhci->scratchpad), flags,
					dev_to_node(dev));
	if (!xhci->scratchpad)
		goto fail_sp;

	xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
				     num_sp * sizeof(u64),
				     &xhci->scratchpad->sp_dma, flags);
	if (!xhci->scratchpad->sp_array)
		goto fail_sp2;

	xhci->scratchpad->sp_buffers = kcalloc_node(num_sp, sizeof(void *),
					flags, dev_to_node(dev));
	if (!xhci->scratchpad->sp_buffers)
		goto fail_sp3;

	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
	for (i = 0; i < num_sp; i++) {
		dma_addr_t dma;
		void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
					       flags);
		if (!buf)
			goto fail_sp4;

		xhci->scratchpad->sp_array[i] = dma;
		xhci->scratchpad->sp_buffers[i] = buf;
	}

	return 0;

fail_sp4:
	for (i = i - 1; i >= 0; i--) {
		dma_free_coherent(dev, xhci->page_size,
				  xhci->scratchpad->sp_buffers[i],
				  xhci->scratchpad->sp_array[i]);
	}

	kfree(xhci->scratchpad->sp_buffers);

fail_sp3:
	dma_free_coherent(dev, num_sp * sizeof(u64),
			  xhci->scratchpad->sp_array,
			  xhci->scratchpad->sp_dma);

fail_sp2:
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;

fail_sp:
	return -ENOMEM;
}

static void scratchpad_free(struct xhci_hcd *xhci)
{
	int num_sp;
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	if (!xhci->scratchpad)
		return;

	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	for (i = 0; i < num_sp; i++) {
		dma_free_coherent(dev, xhci->page_size,
				  xhci->scratchpad->sp_buffers[i],
				  xhci->scratchpad->sp_array[i]);
	}
	kfree(xhci->scratchpad->sp_buffers);
	dma_free_coherent(dev, num_sp * sizeof(u64),
			  xhci->scratchpad->sp_array,
			  xhci->scratchpad->sp_dma);
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;
}

struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
					bool allocate_completion, gfp_t mem_flags)
{
	struct xhci_command *command;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	command = kzalloc_node(sizeof(*command), mem_flags, dev_to_node(dev));
	if (!command)
		return NULL;

	if (allocate_completion) {
		command->completion =
			kzalloc_node(sizeof(struct completion), mem_flags,
				     dev_to_node(dev));
		if (!command->completion) {
			kfree(command);
			return NULL;
		}
		init_completion(command->completion);
	}

	command->status = 0;
	INIT_LIST_HEAD(&command->cmd_list);
	return command;
}

struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
		bool allocate_completion, gfp_t mem_flags)
{
	struct xhci_command *command;

	command = xhci_alloc_command(xhci, allocate_completion, mem_flags);
	if (!command)
		return NULL;

	command->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
						   mem_flags);
	if (!command->in_ctx) {
		kfree(command->completion);
		kfree(command);
		return NULL;
	}
	return command;
}

void xhci_urb_free_priv(struct urb_priv *urb_priv)
{
	kfree(urb_priv);
}

void xhci_free_command(struct xhci_hcd *xhci,
		       struct xhci_command *command)
{
	xhci_free_container_ctx(xhci,
			command->in_ctx);
	kfree(command->completion);
	kfree(command);
}

int xhci_alloc_erst(struct xhci_hcd *xhci,
		    struct xhci_ring *evt_ring,
		    struct xhci_erst *erst,
		    gfp_t flags)
{
	size_t size;
	unsigned int val;
	struct xhci_segment *seg;
	struct xhci_erst_entry *entry;

	size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
	erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
					   size, &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = evt_ring->num_segs;

	seg = evt_ring->first_seg;
	for (val = 0; val < evt_ring->num_segs; val++) {
		entry = &erst->entries[val];
		entry->seg_addr = cpu_to_le64(seg->dma);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}

	return 0;
}

void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
{
	size_t size;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	size = sizeof(struct xhci_erst_entry) * (erst->num_entries);
	if (erst->entries)
		dma_free_coherent(dev, size,
				  erst->entries,
				  erst->erst_dma_addr);
	erst->entries = NULL;
}

void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	int i, j, num_ports;

	cancel_delayed_work_sync(&xhci->cmd_timer);

	xhci_free_erst(xhci, &xhci->erst);

	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");

	if (xhci->lpm_command)
		xhci_free_command(xhci, xhci->lpm_command);
	xhci->lpm_command = NULL;
	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
	xhci_cleanup_command_queue(xhci);

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	for (i = 0; i < num_ports && xhci->rh_bw; i++) {
		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;

		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
			struct list_head *ep = &bwt->interval_bw[j].endpoints;

			while (!list_empty(ep))
				list_del_init(ep->next);
		}
	}

	for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
		xhci_free_virt_devices_depth_first(xhci, i);

	dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");

	dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");

	dma_pool_destroy(xhci->small_streams_pool);
	xhci->small_streams_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Freed small stream array pool");

	dma_pool_destroy(xhci->medium_streams_pool);
	xhci->medium_streams_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Freed medium stream array pool");

	if (xhci->dcbaa)
		dma_free_coherent(dev, sizeof(*xhci->dcbaa),
				  xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;

	scratchpad_free(xhci);

	if (!xhci->rh_bw)
		goto no_bw;

	for (i = 0; i < num_ports; i++) {
		struct xhci_tt_bw_info *tt, *n;

		list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
			list_del(&tt->tt_list);
			kfree(tt);
		}
	}

no_bw:
	xhci->cmd_ring_reserved_trbs = 0;
	xhci->usb2_rhub.num_ports = 0;
	xhci->usb3_rhub.num_ports = 0;
	xhci->num_active_eps = 0;
	kfree(xhci->usb2_rhub.ports);
	kfree(xhci->usb3_rhub.ports);
	kfree(xhci->hw_ports);
	kfree(xhci->rh_bw);
	kfree(xhci->ext_caps);
	for (i = 0; i < xhci->num_port_caps; i++)
		kfree(xhci->port_caps[i].psi);
	kfree(xhci->port_caps);
	xhci->num_port_caps = 0;

	xhci->usb2_rhub.ports = NULL;
	xhci->usb3_rhub.ports = NULL;
	xhci->hw_ports = NULL;
	xhci->rh_bw = NULL;
	xhci->ext_caps = NULL;

	xhci->page_size = 0;
	xhci->page_shift = 0;
	xhci->usb2_rhub.bus_state.bus_suspended = 0;
	xhci->usb3_rhub.bus_state.bus_suspended = 0;
}

static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
			       struct xhci_segment *input_seg,
			       union xhci_trb *start_trb,
			       union xhci_trb *end_trb,
			       dma_addr_t input_dma,
			       struct xhci_segment *result_seg,
			       char *test_name, int test_number)
{
	unsigned long long start_dma;
	unsigned long long end_dma;
	struct xhci_segment *seg;

	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);

	seg = trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, false);
	if (seg != result_seg) {
		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
			  test_name, test_number);
		xhci_warn(xhci, "Tested TRB math w/ seg %p and input DMA 0x%llx\n",
			  input_seg,
			  (unsigned long long) input_dma);
		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), ending TRB %p (0x%llx DMA)\n",
			  start_trb, start_dma,
			  end_trb, end_dma);
		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
			  result_seg, seg);
		trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma,
			  true);
		return -1;
	}
	return 0;
}
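
/* TRB math checks for xhci_trb_virt_to_dma(), with the segment defines. */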
static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
{
	struct {
		dma_addr_t input_dma;
		struct xhci_segment *result_seg;
	} simple_test_vector [] = {
		/* A zeroed DMA field should fail */
		{ 0, NULL },
		/* One TRB before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 16, NULL },
		/* One byte before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 1, NULL },
		/* Starting TRB should succeed */
		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
		/* Ending TRB should succeed */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
			xhci->event_ring->first_seg },
		/* One byte after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
		/* One TRB after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
		/* An address of all ones should fail */
		{ (dma_addr_t) (~0), NULL },
	};
	struct {
		struct xhci_segment *input_seg;
		union xhci_trb *start_trb;
		union xhci_trb *end_trb;
		dma_addr_t input_dma;
		struct xhci_segment *result_seg;
	} complex_test_vector [] = {
		/* Test feeding a valid DMA address from a different ring */
		{ .input_seg = xhci->event_ring->first_seg,
		  .start_trb = xhci->event_ring->first_seg->trbs,
		  .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
		  .input_dma = xhci->cmd_ring->first_seg->dma,
		  .result_seg = NULL,
		},
		/* Test feeding a valid end TRB from a different ring */
		{ .input_seg = xhci->event_ring->first_seg,
		  .start_trb = xhci->event_ring->first_seg->trbs,
		  .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
		  .input_dma = xhci->cmd_ring->first_seg->dma,
		  .result_seg = NULL,
		},
		/* Test feeding a valid start and end TRB from a different ring */
		{ .input_seg = xhci->event_ring->first_seg,
		  .start_trb = xhci->cmd_ring->first_seg->trbs,
		  .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
		  .input_dma = xhci->cmd_ring->first_seg->dma,
		  .result_seg = NULL,
		},
		/* TRB in this ring, but after this TD */
		{ .input_seg = xhci->event_ring->first_seg,
		  .start_trb = &xhci->event_ring->first_seg->trbs[0],
		  .end_trb = &xhci->event_ring->first_seg->trbs[3],
		  .input_dma = xhci->event_ring->first_seg->dma + 4*16,
		  .result_seg = NULL,
		},
		/* TRB in this ring, but before this TD */
		{ .input_seg = xhci->event_ring->first_seg,
		  .start_trb = &xhci->event_ring->first_seg->trbs[3],
		  .end_trb = &xhci->event_ring->first_seg->trbs[6],
		  .input_dma = xhci->event_ring->first_seg->dma + 2*16,
		  .result_seg = NULL,
		},
		/* TRB in this ring, but after this wrapped TD */
		{ .input_seg = xhci->event_ring->first_seg,
		  .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
		  .end_trb = &xhci->event_ring->first_seg->trbs[1],
		  .input_dma = xhci->event_ring->first_seg->dma + 2*16,
		  .result_seg = NULL,
		},
		/* TRB in this ring, but before this wrapped TD */
		{ .input_seg = xhci->event_ring->first_seg,
		  .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
		  .end_trb = &xhci->event_ring->first_seg->trbs[1],
		  .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
		  .result_seg = NULL,
		},
		/* TRB not in this ring, and we have a wrapped TD */
		{ .input_seg = xhci->event_ring->first_seg,
		  .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
		  .end_trb = &xhci->event_ring->first_seg->trbs[1],
		  .input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
		  .result_seg = NULL,
		},
	};

	unsigned int num_tests;
	int i, ret;

	num_tests = ARRAY_SIZE(simple_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				xhci->event_ring->first_seg,
				xhci->event_ring->first_seg->trbs,
				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
				simple_test_vector[i].input_dma,
				simple_test_vector[i].result_seg,
				"Simple", i);
		if (ret < 0)
			return ret;
	}

	num_tests = ARRAY_SIZE(complex_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				complex_test_vector[i].input_seg,
				complex_test_vector[i].start_trb,
				complex_test_vector[i].end_trb,
				complex_test_vector[i].input_dma,
				complex_test_vector[i].result_seg,
				"Complex", i);
		if (ret < 0)
			return ret;
	}
	xhci_dbg(xhci, "TRB math tests passed.\n");
	return 0;
}

static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
	u64 temp;
	dma_addr_t deq;

	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
			xhci->event_ring->dequeue);
	if (deq == 0 && !in_interrupt())
		xhci_warn(xhci,
			  "WARN something wrong with SW event ring dequeue ptr.\n");
	/* Update HC event ring dequeue pointer */
	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp &= ERST_PTR_MASK;
	/*
	 * Don't clear the EHB bit (which is RW1C) because there might be more
	 * events to service.
	 */
	temp &= ~ERST_EHB;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Write event ring dequeue pointer, preserving EHB bit");
	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
			&xhci->ir_set->erst_dequeue);
}
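
/*
 * Parse one port protocol extended capability entry: record the supported
 * protocol (USB 2 or USB 3), minor revision, and protocol speed IDs, and
 * mark which hardware ports belong to which roothub.
 */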
static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
			     __le32 __iomem *addr, int max_caps)
{
	u32 temp, port_offset, port_count;
	int i;
	u8 major_revision, minor_revision;
	struct xhci_hub *rhub;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	struct xhci_port_cap *port_cap;

	temp = readl(addr);
	major_revision = XHCI_EXT_PORT_MAJOR(temp);
	minor_revision = XHCI_EXT_PORT_MINOR(temp);

	if (major_revision == 0x03) {
		rhub = &xhci->usb3_rhub;
	} else if (major_revision <= 0x02) {
		rhub = &xhci->usb2_rhub;
	} else {
		xhci_warn(xhci, "Ignoring unknown port speed, Ext Cap %p, revision = 0x%x\n",
			  addr, major_revision);
		/* Ignoring port protocol we can't understand.  FIXME */
		return;
	}
	rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);

	if (rhub->min_rev < minor_revision)
		rhub->min_rev = minor_revision;

	/* Port offset and count in the third dword, see section 7.2 */
	temp = readl(addr + 2);
	port_offset = XHCI_EXT_PORT_OFF(temp);
	port_count = XHCI_EXT_PORT_COUNT(temp);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Ext Cap %p, port offset = %u, count = %u, revision = 0x%x",
		       addr, port_offset, port_count, major_revision);

	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
		/* spec says valid port offsets run from 1 to MaxPorts; ignore bogus entries */
		return;

	port_cap = &xhci->port_caps[xhci->num_port_caps++];
	if (xhci->num_port_caps > max_caps)
		return;

	port_cap->maj_rev = major_revision;
	port_cap->min_rev = minor_revision;
	port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp);

	if (port_cap->psi_count) {
		port_cap->psi = kcalloc_node(port_cap->psi_count,
					     sizeof(*port_cap->psi),
					     GFP_KERNEL, dev_to_node(dev));
		if (!port_cap->psi)
			port_cap->psi_count = 0;

		port_cap->psi_uid_count++;
		for (i = 0; i < port_cap->psi_count; i++) {
			port_cap->psi[i] = readl(addr + 4 + i);

			/* count unique ID values, two consecutive entries can
			 * have the same ID if the link is asymmetric
			 */
			if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) !=
				  XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1])))
				port_cap->psi_uid_count++;

			xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
				 XHCI_EXT_PORT_PSIV(port_cap->psi[i]),
				 XHCI_EXT_PORT_PSIE(port_cap->psi[i]),
				 XHCI_EXT_PORT_PLT(port_cap->psi[i]),
				 XHCI_EXT_PORT_PFD(port_cap->psi[i]),
				 XHCI_EXT_PORT_LP(port_cap->psi[i]),
				 XHCI_EXT_PORT_PSIM(port_cap->psi[i]));
		}
	}

	/* cache usb2 port capabilities */
	if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
		xhci->ext_caps[xhci->num_ext_caps++] = temp;

	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03) &&
	    (temp & XHCI_HLC)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			       "xHCI 1.0: support USB2 hardware lpm");
		xhci->hw_lpm_support = 1;
	}

	port_offset--;
	for (i = port_offset; i < (port_offset + port_count); i++) {
		struct xhci_port *hw_port = &xhci->hw_ports[i];

		if (hw_port->rhub) {
			xhci_warn(xhci, "Duplicate port entry, Ext Cap %p, port %u\n",
				  addr, i);
			xhci_warn(xhci, "Port was marked as USB %u, duplicated as USB %u\n",
				  hw_port->rhub->maj_rev, major_revision);
			/* Only adjust the roothub port counts if we haven't
			 * found a similar duplicate.
			 */
			if (hw_port->rhub != rhub &&
			    hw_port->hcd_portnum != DUPLICATE_ENTRY) {
				hw_port->rhub->num_ports--;
				hw_port->hcd_portnum = DUPLICATE_ENTRY;
			}
			continue;
		}
		hw_port->rhub = rhub;
		hw_port->port_cap = port_cap;
		rhub->num_ports++;
	}
}

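/*
 * Build the roothub's dense ports[] array from the sparse hw_ports[] table,
 * skipping ports that belong to the other roothub or that were flagged as
 * duplicate capability entries.
 */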
static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
					struct xhci_hub *rhub, gfp_t flags)
{
	int port_index = 0;
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	if (!rhub->num_ports)
		return;
	rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports),
				   flags, dev_to_node(dev));
	if (!rhub->ports)
		return;
	for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
		if (xhci->hw_ports[i].rhub != rhub ||
		    xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
			continue;
		xhci->hw_ports[i].hcd_portnum = port_index;
		rhub->ports[port_index] = &xhci->hw_ports[i];
		port_index++;
		if (port_index == rhub->num_ports)
			break;
	}
}
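/*
 * Scan the Extended Capabilities for "Supported Protocol" entries to learn
 * what speeds each root hub port is supposed to run at, then set up the
 * per-roothub port arrays.  The port speed bits in PORTSC can't be trusted
 * until a device is actually connected, so this is driven purely by the
 * capability list.
 */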
static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
{
	void __iomem *base;
	u32 offset;
	unsigned int num_ports;
	int i, j;
	int cap_count = 0;
	u32 cap_start;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	xhci->hw_ports = kcalloc_node(num_ports, sizeof(*xhci->hw_ports),
				      flags, dev_to_node(dev));
	if (!xhci->hw_ports)
		return -ENOMEM;

	for (i = 0; i < num_ports; i++) {
		xhci->hw_ports[i].addr = &xhci->op_regs->port_status_base +
			NUM_PORT_REGS * i;
		xhci->hw_ports[i].hw_portnum = i;
	}

	xhci->rh_bw = kcalloc_node(num_ports, sizeof(*xhci->rh_bw), flags,
				   dev_to_node(dev));
	if (!xhci->rh_bw)
		return -ENOMEM;
	for (i = 0; i < num_ports; i++) {
		struct xhci_interval_bw_table *bw_table;

		INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
		bw_table = &xhci->rh_bw[i].bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}
	base = &xhci->cap_regs->hc_capbase;

	cap_start = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_PROTOCOL);
	if (!cap_start) {
		xhci_err(xhci, "No Extended Capability registers, unable to set up roothub\n");
		return -ENODEV;
	}

	offset = cap_start;

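	/* count extended protocol capability entries for later caching */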
	while (offset) {
		cap_count++;
		offset = xhci_find_next_ext_cap(base, offset,
						XHCI_EXT_CAPS_PROTOCOL);
	}

	xhci->ext_caps = kcalloc_node(cap_count, sizeof(*xhci->ext_caps),
				      flags, dev_to_node(dev));
	if (!xhci->ext_caps)
		return -ENOMEM;

	xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps),
				       flags, dev_to_node(dev));
	if (!xhci->port_caps)
		return -ENOMEM;

	offset = cap_start;

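	/* parse each protocol capability and assign its ports to a roothub */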
	while (offset) {
		xhci_add_in_port(xhci, num_ports, base + offset, cap_count);
		if (xhci->usb2_rhub.num_ports + xhci->usb3_rhub.num_ports ==
		    num_ports)
			break;
		offset = xhci_find_next_ext_cap(base, offset,
						XHCI_EXT_CAPS_PROTOCOL);
	}
	if (xhci->usb2_rhub.num_ports == 0 && xhci->usb3_rhub.num_ports == 0) {
		xhci_warn(xhci, "No ports on the roothubs?\n");
		return -ENODEV;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Found %u USB 2.0 ports and %u USB 3.0 ports.",
		       xhci->usb2_rhub.num_ports, xhci->usb3_rhub.num_ports);

	/* Place limits on the number of roothub ports so that the hub
	 * descriptors aren't longer than the USB core will allocate.
	 */
	if (xhci->usb3_rhub.num_ports > USB_SS_MAXPORTS) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			       "Limiting USB 3.0 roothub ports to %u.",
			       USB_SS_MAXPORTS);
		xhci->usb3_rhub.num_ports = USB_SS_MAXPORTS;
	}
	if (xhci->usb2_rhub.num_ports > USB_MAXCHILDREN) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			       "Limiting USB 2.0 roothub ports to %u.",
			       USB_MAXCHILDREN);
		xhci->usb2_rhub.num_ports = USB_MAXCHILDREN;
	}

	/*
	 * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
	 * Not sure how the USB core will handle a hub with no ports...
	 */
	xhci_create_rhub_port_array(xhci, &xhci->usb2_rhub, flags);
	xhci_create_rhub_port_array(xhci, &xhci->usb3_rhub, flags);

	return 0;
}

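/*
 * Allocate and initialize every host controller data structure: the device
 * context base address array, the segment and context DMA pools, the command
 * ring, the event ring with its segment table, scratchpad buffers, and the
 * roothub port arrays.  Runs once while the controller is being brought up.
 */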
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t dma;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	unsigned int val, val2;
	u64 val_64;
	u32 page_size, temp;
	int i, ret;

	INIT_LIST_HEAD(&xhci->cmd_list);

	/* init command timeout work */
	INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
	init_completion(&xhci->cmd_ring_stop_completion);

	page_size = readl(&xhci->op_regs->page_size);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Supported page size register = 0x%x", page_size);
	/* PAGESIZE is a bitmap; find the lowest supported page size bit */
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	if (i < 16)
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			       "Supported page size of %iK", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");

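	/* Use 4K pages, since that's common and the minimum the HC supports */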
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "HCD page size set to %iK", xhci->page_size / 1024);

	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// xHC can handle at most %d device slots.", val);
	val2 = readl(&xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Setting Max device slots reg = 0x%x.", val);
	writel(val, &xhci->op_regs->config_reg);

	/*
	 * The Device Context Base Address Array must be physically contiguous
	 * and 64-byte aligned; dma_alloc_coherent satisfies both.
	 */
	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
					 flags);
	if (!xhci->dcbaa)
		goto fail;
	xhci->dcbaa->dma = dma;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Device context base array address = 0x%llx (DMA), %p (virt)",
		       (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);

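	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments
	 * and our use of dma addresses in the trb_address_map radix tree needs
	 * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
	 */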
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);

	/* 2112 bytes covers the largest input context: 33 entries of 64 bytes */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			2112, 64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;

	/* Linear stream context arrays don't have any boundary restrictions,
	 * and only need to be 16-byte aligned.
	 */
	xhci->small_streams_pool =
		dma_pool_create("xHCI 256 byte stream ctx arrays",
				dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
	xhci->medium_streams_pool =
		dma_pool_create("xHCI 1KB stream ctx arrays",
				dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);

	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
	 * will be allocated with dma_alloc_coherent()
	 */
	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
		goto fail;

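	/* Set up the command ring to have one segment for now. */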
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Allocated command ring at %p", xhci->cmd_ring);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
		       (unsigned long long)xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Setting command ring address to 0x%016llx", val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);

	xhci->lpm_command = xhci_alloc_command_with_ctx(xhci, true, flags);
	if (!xhci->lpm_command)
		goto fail;

	/* Reserve one command ring TRB for disabling LPM.
	 * Since the USB core grabs the shared usb_bus bandwidth mutex before
	 * disabling LPM, we only need to reserve one TRB for all devices.
	 */
	xhci->cmd_ring_reserved_trbs++;

	val = readl(&xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Doorbell array is located at offset 0x%x from cap regs base addr",
		       val);
	xhci->dba = (void __iomem *) xhci->cap_regs + val;

	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = &xhci->run_regs->ir_set[0];

	/*
	 * Event ring setup: Allocate a normal ring, but also setup
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
					   0, flags);
	if (!xhci->event_ring)
		goto fail;
	if (xhci_check_trb_in_td_math(xhci) < 0)
		goto fail;

	ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags);
	if (ret)
		goto fail;

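	/* set the ERST count with the number of entries in the segment table */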
	val = readl(&xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Write ERST size = %i to ir_set 0 (some bits preserved)",
		       val);
	writel(val, &xhci->ir_set->erst_size);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Set ERST entries to point to event ring.");
	/* set the segment table base address */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Set ERST base address for ir_set 0 = 0x%llx",
		       (unsigned long long)xhci->erst.erst_dma_addr);
	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

	/* Set the event ring dequeue address */
	xhci_set_hc_event_deq(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Wrote ERST address to ir_set 0.");

	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1 ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */
	for (i = 0; i < MAX_HC_SLOTS; i++)
		xhci->devs[i] = NULL;
	for (i = 0; i < USB_MAXCHILDREN; i++) {
		xhci->usb2_rhub.bus_state.resume_done[i] = 0;
		xhci->usb3_rhub.bus_state.resume_done[i] = 0;
		/* rexit is used for USB 2.0 port resume, u3exit for USB 3.0 U3 wakeup */
		init_completion(&xhci->usb2_rhub.bus_state.rexit_done[i]);
		init_completion(&xhci->usb3_rhub.bus_state.u3exit_done[i]);
	}

	if (scratchpad_alloc(xhci, flags))
		goto fail;
	if (xhci_setup_port_arrays(xhci, flags))
		goto fail;

	/* Enable USB 3.0 device notifications for function remote wake, which
	 * is necessary for allowing USB 3.0 devices to do remote wakeup from
	 * U3 (device suspend).
	 */
	temp = readl(&xhci->op_regs->dev_notification);
	temp &= ~DEV_NOTE_MASK;
	temp |= DEV_NOTE_FWAKE;
	writel(temp, &xhci->op_regs->dev_notification);

	return 0;

fail:
	xhci_halt(xhci);
	xhci_reset(xhci);
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}