/*
 * xHCI host controller driver -- memory management.
 *
 * Allocation and teardown of the DMA-visible structures the xHC uses:
 * ring segments, device and input contexts, stream context arrays,
 * scratchpad buffers, and the event ring segment table.
 */
#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"

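/*
 * Allocate a generic ring segment from the ring pool, set the DMA address,
 * initialize the segment to zero, and set the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */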
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
		unsigned int cycle_state, gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t dma;
	int i;

	seg = kzalloc(sizeof(*seg), flags);
	if (!seg)
		return NULL;

	seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}

	memset(seg->trbs, 0, TRB_SEGMENT_SIZE);

	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
	if (cycle_state == 0) {
		for (i = 0; i < TRBS_PER_SEGMENT; i++)
			/* TRB fields are little-endian; convert for BE CPUs */
			seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
	}
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}

static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (seg->trbs) {
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	kfree(seg);
}

static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
		struct xhci_segment *first)
{
	struct xhci_segment *seg;

	seg = first->next;
	while (seg != first) {
		struct xhci_segment *next = seg->next;

		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first);
}

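/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */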
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
		struct xhci_segment *next, enum xhci_ring_type type)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (type != TYPE_EVENT) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
			cpu_to_le64(next->dma);

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		/* Always set the chain bit with 0.95 hardware */
		/* Set chain bit for isoc rings on AMD 0.96 host */
		if (xhci_link_trb_quirk(xhci) ||
				(type == TYPE_ISOC &&
				 (xhci->quirks & XHCI_AMD_0x96_HOST)))
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}

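/*
 * Link the ring to the new segments.
 * Set Toggle Cycle for the new ring if needed.
 */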
static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *first, struct xhci_segment *last,
		unsigned int num_segs)
{
	struct xhci_segment *next;

	if (!ring || !first || !last)
		return;

	next = ring->enq_seg->next;
	xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
	xhci_link_segments(xhci, last, next, ring->type);
	ring->num_segs += num_segs;
	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;

	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
		/* Move the Toggle Cycle bit from the old last segment to the new one */
		ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
			&= ~cpu_to_le32(LINK_TOGGLE);
		last->trbs[TRBS_PER_SEGMENT-1].link.control
			|= cpu_to_le32(LINK_TOGGLE);
		ring->last_seg = last;
	}
}

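/*
 * Free a ring, including all of its segments.  Rings that were allocated with
 * zero segments just free the xhci_ring structure itself.
 */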
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	if (!ring)
		return;

	if (ring->first_seg)
		xhci_free_segments_for_ring(xhci, ring->first_seg);

	kfree(ring);
}

static void xhci_initialize_ring_info(struct xhci_ring *ring,
		unsigned int cycle_state)
{
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/* The ring is initialized to 0. The producer must write 1 to the cycle
	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
	 *
	 * New rings are initialized with cycle state equal to 1; if we are
	 * handling ring expansion, set the cycle state equal to the old ring.
	 */
	ring->cycle_state = cycle_state;
	/* Not necessary for new rings, but needed for re-initialized rings */
	ring->enq_updates = 0;
	ring->deq_updates = 0;

	/*
	 * Each segment has a link TRB, and leave an extra TRB for SW
	 * accounting purpose
	 */
	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}

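/* Allocate segments and link them for a ring */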
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
		struct xhci_segment **first, struct xhci_segment **last,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_segment *prev;

	prev = xhci_segment_alloc(xhci, cycle_state, flags);
	if (!prev)
		return -ENOMEM;
	num_segs--;

	*first = prev;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc(xhci, cycle_state, flags);
		if (!next) {
			/* Free whatever we allocated so far */
			prev = *first;
			while (prev) {
				next = prev->next;
				xhci_segment_free(xhci, prev);
				prev = next;
			}
			return -ENOMEM;
		}
		xhci_link_segments(xhci, prev, next, type);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, *first, type);
	*last = prev;

	return 0;
}

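/**
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */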
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_ring *ring;
	int ret;

	ring = kzalloc(sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ring->num_segs = num_segs;
	INIT_LIST_HEAD(&ring->td_list);
	ring->type = type;
	if (num_segs == 0)
		return ring;

	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
			&ring->last_seg, num_segs, cycle_state, type, flags);
	if (ret)
		goto fail;

	/* Only event ring does not use link TRBs */
	if (type != TYPE_EVENT) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
			cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring, cycle_state);
	return ring;

fail:
	kfree(ring);
	return NULL;
}

void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		unsigned int ep_index)
{
	int rings_cached;

	rings_cached = virt_dev->num_rings_cached;
	if (rings_cached < XHCI_MAX_RINGS_CACHED) {
		virt_dev->ring_cache[rings_cached] =
			virt_dev->eps[ep_index].ring;
		virt_dev->num_rings_cached++;
		xhci_dbg(xhci, "Cached old ring, %d ring%s cached\n",
				virt_dev->num_rings_cached,
				(virt_dev->num_rings_cached > 1) ? "s" : "");
	} else {
		xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
		xhci_dbg(xhci, "Ring cache full (%d rings), freeing ring\n",
				virt_dev->num_rings_cached);
	}
	virt_dev->eps[ep_index].ring = NULL;
}

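/*
 * Zero an endpoint ring (except for link TRBs) and move the enqueue and
 * dequeue pointers to the beginning of the ring.
 */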
static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
		struct xhci_ring *ring, unsigned int cycle_state,
		enum xhci_ring_type type)
{
	struct xhci_segment *seg = ring->first_seg;
	int i;

	do {
		memset(seg->trbs, 0,
				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
		if (cycle_state == 0) {
			for (i = 0; i < TRBS_PER_SEGMENT; i++)
				seg->trbs[i].link.control |=
					cpu_to_le32(TRB_CYCLE);
		}
		/* All endpoint rings have link TRBs */
		xhci_link_segments(xhci, seg, seg->next, type);
		seg = seg->next;
	} while (seg != ring->first_seg);
	ring->type = type;
	xhci_initialize_ring_info(ring, cycle_state);
	/* td list should be empty since all URBs have been cancelled,
	 * but just in case...
	 */
	INIT_LIST_HEAD(&ring->td_list);
}

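/*
 * Expand an existing ring.
 * Allocate a new ring which has same segment numbers and link the two rings.
 */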
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs, gfp_t flags)
{
	struct xhci_segment *first;
	struct xhci_segment *last;
	unsigned int num_segs;
	unsigned int num_segs_needed;
	int ret;

	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
			(TRBS_PER_SEGMENT - 1);

	/* Allocate number of segments we needed, or double the ring size */
	num_segs = ring->num_segs > num_segs_needed ?
			ring->num_segs : num_segs_needed;

	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
			num_segs, ring->cycle_state, ring->type, flags);
	if (ret)
		return -ENOMEM;

	xhci_link_rings(xhci, ring, first, last, num_segs);
	xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
			"ring expansion succeeded, now has %d segments",
			ring->num_segs);

	return 0;
}

#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)

static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
		int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx;

	if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
		return NULL;

	ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	memset(ctx->bytes, 0, ctx->size);
	return ctx;
}

static void xhci_free_container_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}

395
396struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
397 struct xhci_container_ctx *ctx)
398{
399 if (ctx->type != XHCI_CTX_TYPE_INPUT)
400 return NULL;
401
402 return (struct xhci_input_control_ctx *)ctx->bytes;
403}
404
405struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
406 struct xhci_container_ctx *ctx)
407{
408 if (ctx->type == XHCI_CTX_TYPE_DEVICE)
409 return (struct xhci_slot_ctx *)ctx->bytes;
410
411 return (struct xhci_slot_ctx *)
412 (ctx->bytes + CTX_SIZE(xhci->hcc_params));
413}
414
415struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
416 struct xhci_container_ctx *ctx,
417 unsigned int ep_index)
418{
419
420 ep_index++;
421 if (ctx->type == XHCI_CTX_TYPE_INPUT)
422 ep_index++;
423
424 return (struct xhci_ep_ctx *)
425 (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
426}
/***************** Streams structures manipulation *************************/

static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
		dma_free_coherent(&pdev->dev,
				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
				stream_ctx, dma);
	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
		dma_pool_free(xhci->small_streams_pool,
				stream_ctx, dma);
	else
		dma_pool_free(xhci->medium_streams_pool,
				stream_ctx, dma);
}

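/*
 * The stream context array for an endpoint with bulk streams will hold
 * "num_stream_ctxs" entries.  The stream context array size must be a power
 * of two, so the number of entries may be bigger than the number of streams
 * the driver actually uses.  See section 4.12.8 for details.
 */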
static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs, dma_addr_t *dma,
		gfp_t mem_flags)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
		return dma_alloc_coherent(&pdev->dev,
				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
				dma, mem_flags);
	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_alloc(xhci->small_streams_pool,
				mem_flags, dma);
	else
		return dma_pool_alloc(xhci->medium_streams_pool,
				mem_flags, dma);
}

struct xhci_ring *xhci_dma_to_transfer_ring(
		struct xhci_virt_ep *ep,
		u64 address)
{
	if (ep->ep_state & EP_HAS_STREAMS)
		return radix_tree_lookup(&ep->stream_info->trb_address_map,
				address >> TRB_SEGMENT_SHIFT);
	return ep->ring;
}

struct xhci_ring *xhci_stream_id_to_ring(
		struct xhci_virt_device *dev,
		unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep = &dev->eps[ep_index];

	if (stream_id == 0)
		return ep->ring;
	if (!ep->stream_info)
		return NULL;

	if (stream_id > ep->stream_info->num_streams)
		return NULL;
	return ep->stream_info->stream_rings[stream_id];
}

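/*
 * Change an endpoint's internal structure so it supports stream IDs.  The
 * number of requested streams includes stream 0, which cannot be used by
 * device drivers.
 *
 * The number of stream contexts in the stream context array may be bigger than
 * the number of streams the driver wants to use.  This is because the number
 * of stream context array entries must be a power of two.
 *
 * We need a radix tree for mapping physical addresses of TRBs to which stream
 * ID they belong to.  We need to do this because the host controller won't
 * tell us which stream ring the TRB came from.
 */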
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		unsigned int num_streams, gfp_t mem_flags)
{
	struct xhci_stream_info *stream_info;
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	unsigned long key;
	u64 addr;
	int ret;

	xhci_dbg(xhci, "Allocating %u streams and %u stream context array entries.\n",
			num_streams, num_stream_ctxs);
	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
		return NULL;
	}
	xhci->cmd_ring_reserved_trbs++;

	stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
	if (!stream_info)
		goto cleanup_trbs;

	stream_info->num_streams = num_streams;
	stream_info->num_stream_ctxs = num_stream_ctxs;

	/* Initialize the array of virtual pointers to stream rings. */
	stream_info->stream_rings = kzalloc(
			sizeof(struct xhci_ring *)*num_streams,
			mem_flags);
	if (!stream_info->stream_rings)
		goto cleanup_info;

	/* Initialize the array of DMA addresses for the stream rings for the HW. */
	stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
			num_stream_ctxs, &stream_info->ctx_array_dma,
			mem_flags);
	if (!stream_info->stream_ctx_array)
		goto cleanup_ctx;
	memset(stream_info->stream_ctx_array, 0,
			sizeof(struct xhci_stream_ctx)*num_stream_ctxs);

	/* Allocate everything needed to free the stream rings later */
	stream_info->free_streams_command =
		xhci_alloc_command(xhci, true, true, mem_flags);
	if (!stream_info->free_streams_command)
		goto cleanup_ctx_array;

	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);

	/* Allocate rings for all the streams that the driver will use,
	 * and add their segment DMA addresses to the radix tree.
	 * Stream 0 is reserved.
	 */
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		stream_info->stream_rings[cur_stream] =
			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, mem_flags);
		cur_ring = stream_info->stream_rings[cur_stream];
		if (!cur_ring)
			goto cleanup_rings;
		cur_ring->stream_id = cur_stream;
		/* Set deq ptr, cycle bit, and stream context type */
		addr = cur_ring->first_seg->dma |
			SCT_FOR_CTX(SCT_PRI_TR) |
			cur_ring->cycle_state;
		stream_info->stream_ctx_array[cur_stream].stream_ring =
			cpu_to_le64(addr);
		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
				cur_stream, (unsigned long long) addr);

		key = (unsigned long)
			(cur_ring->first_seg->dma >> TRB_SEGMENT_SHIFT);
		ret = radix_tree_insert(&stream_info->trb_address_map,
				key, cur_ring);
		if (ret) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
			goto cleanup_rings;
		}
	}
	/* Leave the streams disabled in the endpoint context until the caller
	 * issues a configure endpoint command with the streams enabled; see
	 * xhci_setup_streams_ep_input_ctx().
	 */
	return stream_info;

cleanup_rings:
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			addr = cur_ring->first_seg->dma;
			radix_tree_delete(&stream_info->trb_address_map,
					addr >> TRB_SEGMENT_SHIFT);
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx_array:
	/* Free the stream context array too, or it would leak on this path */
	xhci_free_stream_ctx(xhci, num_stream_ctxs,
			stream_info->stream_ctx_array,
			stream_info->ctx_array_dma);
cleanup_ctx:
	kfree(stream_info->stream_rings);
cleanup_info:
	kfree(stream_info);
cleanup_trbs:
	xhci->cmd_ring_reserved_trbs--;
	return NULL;
}

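/*
 * Sets the MaxPStreams field and the Linear Stream Array field.
 * Sets the dequeue pointer to the stream context array.
 */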
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_stream_info *stream_info)
{
	u32 max_primary_streams;
	/* MaxPStreams is the number of stream context array entries, not the
	 * number we're actually using.  Must be in 2^(MaxPstreams + 1) format.
	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc.
	 */
	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			"Setting number of stream ctx array entries to %u",
			1 << (max_primary_streams + 1));
	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
			| EP_HAS_LSA);
	ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
}

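/*
 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
 * not at the beginning of the ring).
 */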
void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_virt_ep *ep)
{
	dma_addr_t addr;

	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
	ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
}

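/*
 * Frees all stream contexts associated with the endpoint,
 * including the stream context array and the stream rings.
 *
 * Caller should fix the endpoint context streams fields.
 */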
void xhci_free_stream_info(struct xhci_hcd *xhci,
		struct xhci_stream_info *stream_info)
{
	int cur_stream;
	struct xhci_ring *cur_ring;
	dma_addr_t addr;

	if (!stream_info)
		return;

	for (cur_stream = 1; cur_stream < stream_info->num_streams;
			cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			addr = cur_ring->first_seg->dma;
			radix_tree_delete(&stream_info->trb_address_map,
					addr >> TRB_SEGMENT_SHIFT);
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
	xhci->cmd_ring_reserved_trbs--;
	if (stream_info->stream_ctx_array)
		xhci_free_stream_ctx(xhci,
				stream_info->num_stream_ctxs,
				stream_info->stream_ctx_array,
				stream_info->ctx_array_dma);

	/* stream_info was already NULL-checked above */
	kfree(stream_info->stream_rings);
	kfree(stream_info);
}


/***************** Device context manipulation *************************/

static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	init_timer(&ep->stop_cmd_timer);
	ep->stop_cmd_timer.data = (unsigned long) ep;
	ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
	ep->xhci = xhci;
}

static void xhci_free_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int slot_id)
{
	struct list_head *tt_list_head;
	struct xhci_tt_bw_info *tt_info, *next;
	bool slot_found = false;

	/* If the device never made it past the Set Address stage,
	 * it may not have the real_port set correctly.
	 */
	if (virt_dev->real_port == 0 ||
			virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "Bad real port.\n");
		return;
	}

	tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		/* Multi-TT hubs will have more than one entry */
		if (tt_info->slot_id == slot_id) {
			slot_found = true;
			list_del(&tt_info->tt_list);
			kfree(tt_info);
		} else if (slot_found) {
			/* Entries for a slot are contiguous, so we're done */
			break;
		}
	}
}

int xhci_alloc_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_tt_bw_info *tt_info;
	unsigned int num_ports;
	int i, j;

	if (!tt->multi)
		num_ports = 1;
	else
		num_ports = hdev->maxchild;

	for (i = 0; i < num_ports; i++, tt_info++) {
		struct xhci_interval_bw_table *bw_table;

		tt_info = kzalloc(sizeof(*tt_info), mem_flags);
		if (!tt_info)
			goto free_tts;
		INIT_LIST_HEAD(&tt_info->tt_list);
		list_add(&tt_info->tt_list,
				&xhci->rh_bw[virt_dev->real_port - 1].tts);
		tt_info->slot_id = virt_dev->udev->slot_id;
		if (tt->multi)
			tt_info->ttport = i+1;
		bw_table = &tt_info->bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}
	return 0;

free_tts:
	xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
	return -ENOMEM;
}

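/* All the xhci_tds in the ring's TD list should be freed at this point.
 * Should be called with xhci->lock held if there is any chance the TT lists
 * will be manipulated by the driver's bandwidth algorithms.
 */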
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;
	int old_active_eps = 0;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];
	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
	if (!dev)
		return;

	if (dev->tt_info)
		old_active_eps = dev->tt_info->active_eps;

	for (i = 0; i < 31; ++i) {
		if (dev->eps[i].ring)
			xhci_ring_free(xhci, dev->eps[i].ring);
		if (dev->eps[i].stream_info)
			xhci_free_stream_info(xhci,
					dev->eps[i].stream_info);
		/* Endpoints on the TT/root port lists should have been removed
		 * when usb_disable_device() was called for the device.
		 * We can't drop them anyway, because the udev might have gone
		 * away by this point, and we can't tell what speed it was.
		 */
		if (!list_empty(&dev->eps[i].bw_endpoint_list))
			xhci_warn(xhci, "Slot %u endpoint %u not removed from BW list!\n",
					slot_id, i);
	}
	/* If this is a hub, free the TT bandwidth structures */
	xhci_free_tt_info(xhci, dev, slot_id);
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, dev, old_active_eps);

	if (dev->ring_cache) {
		for (i = 0; i < dev->num_rings_cached; i++)
			xhci_ring_free(xhci, dev->ring_cache[i]);
		kfree(dev->ring_cache);
	}

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}

int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
	if (!xhci->devs[slot_id])
		return 0;
	dev = xhci->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->in_ctx->dma);

	/* Initialize the cancellation list and watchdog timers for each ep */
	for (i = 0; i < 31; i++) {
		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
	}

	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, flags);
	if (!dev->eps[0].ring)
		goto fail;

	/* Allocate pointers to the ring cache */
	dev->ring_cache = kzalloc(
			sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
			flags);
	if (!dev->ring_cache)
		goto fail;
	dev->num_rings_cached = 0;

	init_completion(&dev->cmd_completion);
	INIT_LIST_HEAD(&dev->cmd_list);
	dev->udev = udev;

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
			slot_id,
			&xhci->dcbaa->dev_context_ptrs[slot_id],
			le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));

	return 1;
fail:
	xhci_free_virt_device(xhci, slot_id);
	return 0;
}

937
938void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
939 struct usb_device *udev)
940{
941 struct xhci_virt_device *virt_dev;
942 struct xhci_ep_ctx *ep0_ctx;
943 struct xhci_ring *ep_ring;
944
945 virt_dev = xhci->devs[udev->slot_id];
946 ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
947 ep_ring = virt_dev->eps[0].ring;
948
949
950
951
952
953
954
955 ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
956 ep_ring->enqueue)
957 | ep_ring->cycle_state);
958}
959
960
961
962
963
964
965
966
967
968
969
970
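/*
 * The xHCI hardware wants to know the roothub port number that the USB device
 * is attached to (or the roothub port its ancestor hub is attached to).  All
 * we know is the index of that port under either the USB 2.0 or the USB 3.0
 * roothub, but that doesn't give us the real index into the HW port status
 * registers.  Call xhci_find_raw_port_number() to get the real index.
 */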
static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct usb_device *top_dev;
	struct usb_hcd *hcd;

	if (udev->speed == USB_SPEED_SUPER)
		hcd = xhci->shared_hcd;
	else
		hcd = xhci->main_hcd;

	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;

	return xhci_find_raw_port_number(hcd, top_dev->portnum);
}

/* Set up an xHCI virtual device for an Address Device command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_slot_ctx *slot_ctx;
	u32 port_num;
	u32 max_packets;
	struct usb_device *top_dev;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		max_packets = MAX_PACKET(512);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		max_packets = MAX_PACKET(64);
		break;
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		max_packets = MAX_PACKET(64);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		max_packets = MAX_PACKET(8);
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		return -EINVAL;
	}
	/* Find the root hub port this device is under */
	port_num = xhci_find_real_port_number(xhci, udev);
	if (!port_num)
		return -EINVAL;
	slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
	/* Set the port number in the virtual device to the faked port number */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	dev->fake_port = top_dev->portnum;
	dev->real_port = port_num;
	xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);

	/* Find the right bandwidth table that this device will be a part of.
	 * If this is a full speed device attached directly to a root port (or
	 * a descendant of one), it counts as a primary bandwidth domain, not a
	 * secondary bandwidth domain under a TT.
	 */
	if (!udev->tt || !udev->tt->hub->parent) {
		dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
	} else {
		struct xhci_root_port_bw_info *rh_bw;
		struct xhci_tt_bw_info *tt_bw;

		rh_bw = &xhci->rh_bw[port_num - 1];
		/* Find the right TT. */
		list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
			if (tt_bw->slot_id != udev->tt->hub->slot_id)
				continue;

			if (!dev->udev->tt->multi ||
					(udev->tt->multi &&
					 tt_bw->ttport == dev->udev->ttport)) {
				dev->bw_table = &tt_bw->bw_table;
				dev->tt_info = tt_bw;
				break;
			}
		}
		if (!dev->tt_info)
			xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
	}

	/* Is this a LS/FS device under an external HS hub? */
	if (udev->tt && udev->tt->hub->parent) {
		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
				(udev->ttport << 8));
		if (udev->tt->multi)
			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));

	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
			max_packets);

	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
			dev->eps[0].ring->cycle_state);

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}

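/*
 * Convert interval expressed as 2^(bInterval - 1) == interval into
 * straight exponent value 2^n == interval.
 */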
static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval;

	interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
	if (interval != ep->desc.bInterval - 1)
		dev_warn(&udev->dev,
				"ep %#x - rounding interval to %d %sframes\n",
				ep->desc.bEndpointAddress,
				1 << interval,
				udev->speed == USB_SPEED_FULL ? "" : "micro");

	if (udev->speed == USB_SPEED_FULL) {
		/*
		 * Full speed isoc endpoints specify interval in frames,
		 * not microframes. We are using microframes everywhere,
		 * so adjust accordingly.
		 */
		interval += 3;	/* 1 frame = 2^3 uframes */
	}

	return interval;
}

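/*
 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
 * microframes, rounded down to nearest power of 2.
 */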
static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
		struct usb_host_endpoint *ep, unsigned int desc_interval,
		unsigned int min_exponent, unsigned int max_exponent)
{
	unsigned int interval;

	interval = fls(desc_interval) - 1;
	interval = clamp_val(interval, min_exponent, max_exponent);
	if ((1 << interval) != desc_interval)
		dev_warn(&udev->dev,
				"ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
				ep->desc.bEndpointAddress,
				1 << interval,
				desc_interval);

	return interval;
}

static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (ep->desc.bInterval == 0)
		return 0;
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval, 0, 15);
}

static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval * 8, 3, 10);
}

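/*
 * Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval
 * field is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if
 * interval = 0.
 */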
static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
		    usb_endpoint_xfer_bulk(&ep->desc)) {
			interval = xhci_parse_microframe_interval(udev, ep);
			break;
		}
		/* Fall through - SS and HS isoc/int have same decoding */

	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
		}
		break;

	case USB_SPEED_FULL:
		if (usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
			break;
		}
		/*
		 * Fall through for interrupt endpoint interval decoding
		 * since it uses the same rules as low speed interrupt
		 * endpoints.
		 */

	case USB_SPEED_LOW:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_frame_interval(udev, ep);
		}
		break;

	default:
		BUG();
	}
	return EP_INTERVAL(interval);
}

/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc
 * endpoints.  High speed endpoint descriptors can define "the number of
 * additional transaction opportunities per microframe", but that goes in the
 * Max Burst endpoint context field.
 */
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (udev->speed != USB_SPEED_SUPER ||
			!usb_endpoint_xfer_isoc(&ep->desc))
		return 0;
	return ep->ss_ep_comp.bmAttributes;
}

static u32 xhci_get_endpoint_type(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int in;
	u32 type;

	in = usb_endpoint_dir_in(&ep->desc);
	if (usb_endpoint_xfer_control(&ep->desc)) {
		type = EP_TYPE(CTRL_EP);
	} else if (usb_endpoint_xfer_bulk(&ep->desc)) {
		if (in)
			type = EP_TYPE(BULK_IN_EP);
		else
			type = EP_TYPE(BULK_OUT_EP);
	} else if (usb_endpoint_xfer_isoc(&ep->desc)) {
		if (in)
			type = EP_TYPE(ISOC_IN_EP);
		else
			type = EP_TYPE(ISOC_OUT_EP);
	} else if (usb_endpoint_xfer_int(&ep->desc)) {
		if (in)
			type = EP_TYPE(INT_IN_EP);
		else
			type = EP_TYPE(INT_OUT_EP);
	} else {
		type = 0;
	}
	return type;
}

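/*
 * Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and mult size.
 */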
static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int max_burst;
	int max_packet;

	/* Only applies for interrupt or isochronous endpoints */
	if (usb_endpoint_xfer_control(&ep->desc) ||
			usb_endpoint_xfer_bulk(&ep->desc))
		return 0;

	if (udev->speed == USB_SPEED_SUPER)
		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);

	max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
	max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
	/* A zero max burst means one transfer per service interval */
	return max_packet * (max_burst + 1);
}

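/* Set up an endpoint with one ring segment.  Do not allocate stream rings.
 * Drivers will have to call usb_alloc_streams() to do that.
 */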
int xhci_endpoint_init(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *udev,
		struct usb_host_endpoint *ep,
		gfp_t mem_flags)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	unsigned int max_burst;
	enum xhci_ring_type type;
	u32 max_esit_payload;
	u32 endpoint_type;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	endpoint_type = xhci_get_endpoint_type(udev, ep);
	if (!endpoint_type)
		return -EINVAL;
	ep_ctx->ep_info2 = cpu_to_le32(endpoint_type);

	/* The USB endpoint transfer type values line up with the
	 * xhci_ring_type values for the corresponding transfer rings.
	 */
	type = usb_endpoint_type(&ep->desc);
	/* Set up the endpoint ring */
	virt_dev->eps[ep_index].new_ring =
		xhci_ring_alloc(xhci, 2, 1, type, mem_flags);
	if (!virt_dev->eps[ep_index].new_ring) {
		/* Allocating a new ring failed; try the cache */
		if (virt_dev->num_rings_cached == 0)
			return -ENOMEM;
		virt_dev->eps[ep_index].new_ring =
			virt_dev->ring_cache[virt_dev->num_rings_cached];
		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
		virt_dev->num_rings_cached--;
		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
					1, type);
	}
	virt_dev->eps[ep_index].skip = false;
	ep_ring = virt_dev->eps[ep_index].new_ring;
	ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state);

	ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep)
			| EP_MULT(xhci_get_endpoint_mult(udev, ep)));

	/* FIXME dig Mult and streams info out of ep companion desc */

	/* Allow 3 retries for everything but isoc;
	 * CErr shall be set to 0 for Isoch endpoints.
	 */
	if (!usb_endpoint_xfer_isoc(&ep->desc))
		ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(3));
	else
		ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(0));

	/* Set the max packet size and max burst */
	max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
	max_burst = 0;
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		/* dig out max burst from ep companion desc */
		max_burst = ep->ss_ep_comp.bMaxBurst;
		break;
	case USB_SPEED_HIGH:
		/* Some devices get this wrong */
		if (usb_endpoint_xfer_bulk(&ep->desc))
			max_packet = 512;
		/* bits 11:12 specify the number of additional transaction
		 * opportunities per microframe (USB 2.0, section 9.6.6)
		 */
		if (usb_endpoint_xfer_isoc(&ep->desc) ||
				usb_endpoint_xfer_int(&ep->desc)) {
			max_burst = (usb_endpoint_maxp(&ep->desc)
					& 0x1800) >> 11;
		}
		break;
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		break;
	default:
		BUG();
	}
	ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) |
			MAX_BURST(max_burst));
	max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
	ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));

	/*
	 * XXX no support for streams.
	 *
	 * The Average TRB Length should estimate how much data is transferred
	 * per TRB, so the xHC can schedule bandwidth.  For periodic endpoints,
	 * the max ESIT payload is a reasonable estimate.  xHCI 1.0 (sections
	 * 4.14.1.1 and 6.2.3) says the Average TRB Length for control
	 * endpoints should be 8, the size of a setup packet.
	 */
	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
		ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
	else
		ep_ctx->tx_info |=
			cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload));

	/* FIXME Debug endpoint context */
	return 0;
}

void xhci_endpoint_zero(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq = 0;
	ep_ctx->tx_info = 0;
	/* Don't free the endpoint ring until the set interface or
	 * configuration request succeeds.
	 */
}

void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
{
	bw_info->ep_interval = 0;
	bw_info->mult = 0;
	bw_info->num_packets = 0;
	bw_info->max_packet_size = 0;
	bw_info->type = 0;
	bw_info->max_esit_payload = 0;
}

1446
1447void xhci_update_bw_info(struct xhci_hcd *xhci,
1448 struct xhci_container_ctx *in_ctx,
1449 struct xhci_input_control_ctx *ctrl_ctx,
1450 struct xhci_virt_device *virt_dev)
1451{
1452 struct xhci_bw_info *bw_info;
1453 struct xhci_ep_ctx *ep_ctx;
1454 unsigned int ep_type;
1455 int i;
1456
1457 for (i = 1; i < 31; ++i) {
1458 bw_info = &virt_dev->eps[i].bw_info;
1459
1460
1461
1462
1463
1464
1465 if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
1466
1467 xhci_clear_endpoint_bw_info(bw_info);
1468 continue;
1469 }
1470
1471 if (EP_IS_ADDED(ctrl_ctx, i)) {
1472 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
1473 ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
1474
1475
1476 if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
1477 ep_type != ISOC_IN_EP &&
1478 ep_type != INT_IN_EP)
1479 continue;
1480
1481
1482 bw_info->ep_interval = CTX_TO_EP_INTERVAL(
1483 le32_to_cpu(ep_ctx->ep_info));
1484
1485
1486
1487
1488 bw_info->mult = CTX_TO_EP_MULT(
1489 le32_to_cpu(ep_ctx->ep_info)) + 1;
1490 bw_info->num_packets = CTX_TO_MAX_BURST(
1491 le32_to_cpu(ep_ctx->ep_info2)) + 1;
1492 bw_info->max_packet_size = MAX_PACKET_DECODED(
1493 le32_to_cpu(ep_ctx->ep_info2));
1494 bw_info->type = ep_type;
1495 bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
1496 le32_to_cpu(ep_ctx->tx_info));
1497 }
1498 }
1499}
1500
1501
1502
1503
1504
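/*
 * Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and
 * then issue a configure endpoint command.
 */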
void xhci_endpoint_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}

/*
 * Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the slot and then
 * issue a configure endpoint command.
 */
void xhci_slot_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}

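/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */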
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Allocating %d scratchpad buffers", num_sp);

	if (!num_sp)
		return 0;

	xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
	if (!xhci->scratchpad)
		goto fail_sp;

	xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
			num_sp * sizeof(u64),
			&xhci->scratchpad->sp_dma, flags);
	if (!xhci->scratchpad->sp_array)
		goto fail_sp2;

	xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
	if (!xhci->scratchpad->sp_buffers)
		goto fail_sp3;

	xhci->scratchpad->sp_dma_buffers =
		kzalloc(sizeof(dma_addr_t) * num_sp, flags);
	if (!xhci->scratchpad->sp_dma_buffers)
		goto fail_sp4;

	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
	for (i = 0; i < num_sp; i++) {
		dma_addr_t dma;
		void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
				flags);
		if (!buf)
			goto fail_sp5;

		xhci->scratchpad->sp_array[i] = dma;
		xhci->scratchpad->sp_buffers[i] = buf;
		xhci->scratchpad->sp_dma_buffers[i] = dma;
	}

	return 0;

 fail_sp5:
	for (i = i - 1; i >= 0; i--) {
		dma_free_coherent(dev, xhci->page_size,
				xhci->scratchpad->sp_buffers[i],
				xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);

 fail_sp4:
	kfree(xhci->scratchpad->sp_buffers);

 fail_sp3:
	dma_free_coherent(dev, num_sp * sizeof(u64),
			xhci->scratchpad->sp_array,
			xhci->scratchpad->sp_dma);

 fail_sp2:
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;

 fail_sp:
	return -ENOMEM;
}

static void scratchpad_free(struct xhci_hcd *xhci)
{
	int num_sp;
	int i;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (!xhci->scratchpad)
		return;

	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	for (i = 0; i < num_sp; i++) {
		dma_free_coherent(&pdev->dev, xhci->page_size,
				xhci->scratchpad->sp_buffers[i],
				xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);
	kfree(xhci->scratchpad->sp_buffers);
	dma_free_coherent(&pdev->dev, num_sp * sizeof(u64),
			xhci->scratchpad->sp_array,
			xhci->scratchpad->sp_dma);
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;
}

struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
		bool allocate_in_ctx, bool allocate_completion,
		gfp_t mem_flags)
{
	struct xhci_command *command;

	command = kzalloc(sizeof(*command), mem_flags);
	if (!command)
		return NULL;

	if (allocate_in_ctx) {
		command->in_ctx =
			xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
					mem_flags);
		if (!command->in_ctx) {
			kfree(command);
			return NULL;
		}
	}

	if (allocate_completion) {
		command->completion =
			kzalloc(sizeof(struct completion), mem_flags);
		if (!command->completion) {
			xhci_free_container_ctx(xhci, command->in_ctx);
			kfree(command);
			return NULL;
		}
		init_completion(command->completion);
	}

	command->status = 0;
	INIT_LIST_HEAD(&command->cmd_list);
	return command;
}

void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
{
	if (urb_priv) {
		kfree(urb_priv->td[0]);
		kfree(urb_priv);
	}
}

void xhci_free_command(struct xhci_hcd *xhci,
		struct xhci_command *command)
{
	xhci_free_container_ctx(xhci,
			command->in_ctx);
	kfree(command->completion);
	kfree(command);
}

void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	struct xhci_cd *cur_cd, *next_cd;
	int size;
	int i, j, num_ports;

	/* Free the Event Ring Segment Table and the actual Event Ring */
	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
	if (xhci->erst.entries)
		dma_free_coherent(&pdev->dev, size,
				xhci->erst.entries, xhci->erst.erst_dma_addr);
	xhci->erst.entries = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");

	if (xhci->lpm_command)
		xhci_free_command(xhci, xhci->lpm_command);
	xhci->cmd_ring_reserved_trbs = 0;
	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
	list_for_each_entry_safe(cur_cd, next_cd,
			&xhci->cancel_cmd_list, cancel_cmd_list) {
		list_del(&cur_cd->cancel_cmd_list);
		kfree(cur_cd);
	}

	for (i = 1; i < MAX_HC_SLOTS; ++i)
		xhci_free_virt_device(xhci, i);

	if (xhci->segment_pool)
		dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");

	if (xhci->device_pool)
		dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");

	if (xhci->small_streams_pool)
		dma_pool_destroy(xhci->small_streams_pool);
	xhci->small_streams_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Freed small stream array pool");

	if (xhci->medium_streams_pool)
		dma_pool_destroy(xhci->medium_streams_pool);
	xhci->medium_streams_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Freed medium stream array pool");

	if (xhci->dcbaa)
		dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;

	scratchpad_free(xhci);

	if (!xhci->rh_bw)
		goto no_bw;

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	for (i = 0; i < num_ports; i++) {
		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
			struct list_head *ep = &bwt->interval_bw[j].endpoints;
			while (!list_empty(ep))
				list_del_init(ep->next);
		}
	}

	for (i = 0; i < num_ports; i++) {
		struct xhci_tt_bw_info *tt, *n;
		list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
			list_del(&tt->tt_list);
			kfree(tt);
		}
	}

no_bw:
	xhci->num_usb2_ports = 0;
	xhci->num_usb3_ports = 0;
	xhci->num_active_eps = 0;
	kfree(xhci->usb2_ports);
	kfree(xhci->usb3_ports);
	kfree(xhci->port_array);
	kfree(xhci->rh_bw);
	kfree(xhci->ext_caps);

	xhci->page_size = 0;
	xhci->page_shift = 0;
	xhci->bus_state[0].bus_suspended = 0;
	xhci->bus_state[1].bus_suspended = 0;
}

static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *input_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t input_dma,
		struct xhci_segment *result_seg,
		char *test_name, int test_number)
{
	unsigned long long start_dma;
	unsigned long long end_dma;
	struct xhci_segment *seg;

	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);

	seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
	if (seg != result_seg) {
		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
				test_name, test_number);
		xhci_warn(xhci, "Tested TRB math w/ seg %p and input DMA 0x%llx\n",
				input_seg,
				(unsigned long long) input_dma);
		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), ending TRB %p (0x%llx DMA)\n",
				start_trb, start_dma,
				end_trb, end_dma);
		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
				result_seg, seg);
		return -1;
	}
	return 0;
}

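/* TRB math checks for xhci_trb_virt_to_dma(), with some test vectors */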
static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
{
	struct {
		dma_addr_t		input_dma;
		struct xhci_segment	*result_seg;
	} simple_test_vector [] = {
		/* A zeroed DMA field should fail */
		{ 0, NULL },
		/* One TRB before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 16, NULL },
		/* One byte before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 1, NULL },
		/* Starting TRB should succeed */
		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
		/* Ending TRB should succeed */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
			xhci->event_ring->first_seg },
		/* One byte after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
		/* One TRB after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
		/* An address of all ones should fail */
		{ (dma_addr_t) (~0), NULL },
	};
	struct {
		struct xhci_segment	*input_seg;
		union xhci_trb		*start_trb;
		union xhci_trb		*end_trb;
		dma_addr_t		input_dma;
		struct xhci_segment	*result_seg;
	} complex_test_vector [] = {
		/* Test feeding a valid DMA address from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid start and end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->cmd_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[0],
			.end_trb = &xhci->event_ring->first_seg->trbs[3],
			.input_dma = xhci->event_ring->first_seg->dma + 4*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[3],
			.end_trb = &xhci->event_ring->first_seg->trbs[6],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
			.result_seg = NULL,
		},
		/* TRB not in this ring, and we have a wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
	};

	unsigned int num_tests;
	int i, ret;

	num_tests = ARRAY_SIZE(simple_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				xhci->event_ring->first_seg,
				xhci->event_ring->first_seg->trbs,
				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
				simple_test_vector[i].input_dma,
				simple_test_vector[i].result_seg,
				"Simple", i);
		if (ret < 0)
			return ret;
	}

	num_tests = ARRAY_SIZE(complex_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				complex_test_vector[i].input_seg,
				complex_test_vector[i].start_trb,
				complex_test_vector[i].end_trb,
				complex_test_vector[i].input_dma,
				complex_test_vector[i].result_seg,
				"Complex", i);
		if (ret < 0)
			return ret;
	}
	xhci_dbg(xhci, "TRB math tests passed.\n");
	return 0;
}

static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
	u64 temp;
	dma_addr_t deq;

	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
			xhci->event_ring->dequeue);
	if (deq == 0 && !in_interrupt())
		xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr.\n");
	/* Update HC event ring dequeue pointer */
	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp &= ERST_PTR_MASK;
	/* Don't clear the EHB bit (which is RW1C) because
	 * there might be more events to service.
	 */
	temp &= ~ERST_EHB;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Write event ring dequeue pointer, preserving EHB bit");
	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
			&xhci->ir_set->erst_dequeue);
}

static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
		__le32 __iomem *addr, u8 major_revision, int max_caps)
{
	u32 temp, port_offset, port_count;
	int i;

	if (major_revision > 0x03) {
		xhci_warn(xhci, "Ignoring unknown port speed, Ext Cap %p, revision = 0x%x\n",
				addr, major_revision);
		/* Ignoring port protocol we can't understand. FIXME */
		return;
	}

	/* Port offset and count in the third dword, see section 7.2 */
	temp = xhci_readl(xhci, addr + 2);
	port_offset = XHCI_EXT_PORT_OFF(temp);
	port_count = XHCI_EXT_PORT_COUNT(temp);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Ext Cap %p, port offset = %u, count = %u, revision = 0x%x",
			addr, port_offset, port_count, major_revision);
	/* Port count includes the current port offset */
	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
		/* WTF? "Valid values are '1' to MaxPorts" */
		return;

	/* cache usb2 port capabilities */
	if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
		xhci->ext_caps[xhci->num_ext_caps++] = temp;

	/* Check the host's USB2 LPM capability */
	if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
			(temp & XHCI_L1C)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI 0.96: support USB2 software lpm");
		xhci->sw_lpm_support = 1;
	}

	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI 1.0: support USB2 software lpm");
		xhci->sw_lpm_support = 1;
		if (temp & XHCI_HLC) {
			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
					"xHCI 1.0: support USB2 hardware lpm");
			xhci->hw_lpm_support = 1;
		}
	}

	port_offset--;
	for (i = port_offset; i < (port_offset + port_count); i++) {
		/* Duplicate entry.  Ignore the port if the revisions differ. */
		if (xhci->port_array[i] != 0) {
			xhci_warn(xhci, "Duplicate port entry, Ext Cap %p, port %u\n",
					addr, i);
			xhci_warn(xhci, "Port was marked as USB %u, duplicated as USB %u\n",
					xhci->port_array[i], major_revision);
			/* Only adjust the roothub port counts if we haven't
			 * found a similar duplicate.
			 */
			if (xhci->port_array[i] != major_revision &&
					xhci->port_array[i] != DUPLICATE_ENTRY) {
				if (xhci->port_array[i] == 0x03)
					xhci->num_usb3_ports--;
				else
					xhci->num_usb2_ports--;
				xhci->port_array[i] = DUPLICATE_ENTRY;
			}
			/* FIXME: Should we disable the port? */
			continue;
		}
		xhci->port_array[i] = major_revision;
		if (major_revision == 0x03)
			xhci->num_usb3_ports++;
		else
			xhci->num_usb2_ports++;
	}
	/* FIXME: Should we disable ports not in the Extended Capabilities? */
}

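/*
 * Scan the Extended Capabilities for the "Supported Protocol Capabilities"
 * and use them to fill in xhci->port_array, the per-port bandwidth tables,
 * and the usb2_ports/usb3_ports register arrays for the two roothubs.
 */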
static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
{
	__le32 __iomem *addr, *tmp_addr;
	u32 offset, tmp_offset;
	unsigned int num_ports;
	int i, j, port_index;
	int cap_count = 0;

	addr = &xhci->cap_regs->hcc_params;
	offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr));
	if (offset == 0) {
		xhci_err(xhci, "No Extended Capability registers, unable to set up roothub.\n");
		return -ENODEV;
	}

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags);
	if (!xhci->port_array)
		return -ENOMEM;

	xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags);
	if (!xhci->rh_bw)
		return -ENOMEM;
	for (i = 0; i < num_ports; i++) {
		struct xhci_interval_bw_table *bw_table;

		INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
		bw_table = &xhci->rh_bw[i].bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}

	/*
	 * For whatever reason, the first capability offset is from the
	 * capability register base, not from the HCCPARAMS register.
	 * See section 5.3.6 for offset calculation.
	 */
	addr = &xhci->cap_regs->hc_capbase + offset;

	tmp_addr = addr;
	tmp_offset = offset;

	/* count extended protocol capability entries for later caching */
	do {
		u32 cap_id;

		cap_id = xhci_readl(xhci, tmp_addr);
		if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
			cap_count++;
		tmp_offset = XHCI_EXT_CAPS_NEXT(cap_id);
		tmp_addr += tmp_offset;
	} while (tmp_offset);

	xhci->ext_caps = kzalloc(sizeof(*xhci->ext_caps) * cap_count, flags);
	if (!xhci->ext_caps)
		return -ENOMEM;

	while (1) {
		u32 cap_id;

		cap_id = xhci_readl(xhci, addr);
		if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
			xhci_add_in_port(xhci, num_ports, addr,
					(u8) XHCI_EXT_PORT_MAJOR(cap_id),
					cap_count);
		offset = XHCI_EXT_CAPS_NEXT(cap_id);
		if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports)
				== num_ports)
			break;
		/*
		 * Once you're into the Extended Capabilities, the offset is
		 * always relative to the register holding the offset.
		 */
		addr += offset;
	}

	if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) {
		xhci_warn(xhci, "No ports on the roothubs?\n");
		return -ENODEV;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Found %u USB 2.0 ports and %u USB 3.0 ports.",
			xhci->num_usb2_ports, xhci->num_usb3_ports);

	/* Place limits on the number of roothub ports so that the hub
	 * descriptor isn't too big.
	 */
	if (xhci->num_usb3_ports > 15) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Limiting USB 3.0 roothub ports to 15.");
		xhci->num_usb3_ports = 15;
	}
	if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Limiting USB 2.0 roothub ports to %u.",
				USB_MAXCHILDREN);
		xhci->num_usb2_ports = USB_MAXCHILDREN;
	}

	/*
	 * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
	 * Not sure how the USB core will handle a hub with no ports...
	 */
	if (xhci->num_usb2_ports) {
		xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)*
				xhci->num_usb2_ports, flags);
		if (!xhci->usb2_ports)
			return -ENOMEM;

		port_index = 0;
		for (i = 0; i < num_ports; i++) {
			if (xhci->port_array[i] == 0x03 ||
					xhci->port_array[i] == 0 ||
					xhci->port_array[i] == DUPLICATE_ENTRY)
				continue;

			xhci->usb2_ports[port_index] =
				&xhci->op_regs->port_status_base +
				NUM_PORT_REGS*i;
			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
					"USB 2.0 port at index %u, addr = %p",
					i, xhci->usb2_ports[port_index]);
			port_index++;
			if (port_index == xhci->num_usb2_ports)
				break;
		}
	}
	if (xhci->num_usb3_ports) {
		xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
				xhci->num_usb3_ports, flags);
		if (!xhci->usb3_ports)
			return -ENOMEM;

		port_index = 0;
		for (i = 0; i < num_ports; i++)
			if (xhci->port_array[i] == 0x03) {
				xhci->usb3_ports[port_index] =
					&xhci->op_regs->port_status_base +
					NUM_PORT_REGS*i;
				xhci_dbg_trace(xhci, trace_xhci_dbg_init,
						"USB 3.0 port at index %u, addr = %p",
						i, xhci->usb3_ports[port_index]);
				port_index++;
				if (port_index == xhci->num_usb3_ports)
					break;
			}
	}
	return 0;
}

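/*
 * Initialize memory for the main xHC data structures: the DCBAA, the command
 * ring, the event ring and ERST, the DMA pools for ring segments, contexts
 * and stream arrays, the scratchpad buffers, and the roothub port arrays.
 */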
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t dma;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int val, val2;
	u64 val_64;
	struct xhci_segment *seg;
	u32 page_size, temp;
	int i;

	INIT_LIST_HEAD(&xhci->cancel_cmd_list);

	page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Supported page size register = 0x%x", page_size);
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	if (i < 16)
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Supported page size of %iK", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"HCD page size set to %iK", xhci->page_size / 1024);

	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// xHC can handle at most %d device slots.", val);
	val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting Max device slots reg = 0x%x.", val);
	xhci_writel(xhci, val, &xhci->op_regs->config_reg);

	/*
	 * The Device Context Base Address Array must be physically
	 * contiguous and 64-byte (cache line) aligned.
	 */
	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
			GFP_KERNEL);
	if (!xhci->dcbaa)
		goto fail;
	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
	xhci->dcbaa->dma = dma;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Device context base array address = 0x%llx (DMA), %p (virt)",
			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);

	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments
	 * and our use of dma addresses in the trb_address_map radix tree needs
	 * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
	 */
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			TRB_SEGMENT_SIZE, 64, xhci->page_size);

	/* See Table 46 and Note on Figure 55 */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			2112, 64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;

	/* Linear stream context arrays don't have any boundary restrictions,
	 * and only need to be 16-byte aligned.
	 */
	xhci->small_streams_pool =
		dma_pool_create("xHCI 256 byte stream ctx arrays",
			dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
	xhci->medium_streams_pool =
		dma_pool_create("xHCI 1KB stream ctx arrays",
			dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
	 * will be allocated with dma_alloc_coherent().
	 */
	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
		goto fail;

	/* Set up the command ring to have one segment */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Allocated command ring at %p", xhci->cmd_ring);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
			(unsigned long long)xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting command ring address to 0x%llx",
			(unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags);
	if (!xhci->lpm_command)
		goto fail;

	/* Reserve one command ring TRB for disabling LPM.
	 * Since the USB core grabs the shared usb_bus bandwidth mutex before
	 * disabling LPM, we only need to reserve one TRB for all devices.
	 */
	xhci->cmd_ring_reserved_trbs++;

	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Doorbell array is located at offset 0x%x from cap regs base addr",
			val);
	xhci->dba = (void __iomem *) xhci->cap_regs + val;
	xhci_dbg_regs(xhci);
	xhci_print_run_regs(xhci);
	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = &xhci->run_regs->ir_set[0];

	/*
	 * Event ring setup: Allocate a normal ring, but also setup
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
			flags);
	if (!xhci->event_ring)
		goto fail;
	if (xhci_check_trb_in_td_math(xhci, flags) < 0)
		goto fail;

	xhci->erst.entries = dma_alloc_coherent(dev,
			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
			GFP_KERNEL);
	if (!xhci->erst.entries)
		goto fail;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Allocated event ring segment table at 0x%llx",
			(unsigned long long)dma);

	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
	xhci->erst.num_entries = ERST_NUM_SEGS;
	xhci->erst.erst_dma_addr = dma;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
			xhci->erst.num_entries,
			xhci->erst.entries,
			(unsigned long long)xhci->erst.erst_dma_addr);

	/* set ring base address and size for each segment table entry */
	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
		entry->seg_addr = cpu_to_le64(seg->dma);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}

	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Write ERST size = %i to ir_set 0 (some bits preserved)",
			val);
	xhci_writel(xhci, val, &xhci->ir_set->erst_size);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set ERST entries to point to event ring.");
	/* set the segment table base address */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set ERST base address for ir_set 0 = 0x%llx",
			(unsigned long long)xhci->erst.erst_dma_addr);
	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

	/* Set the event ring dequeue address */
	xhci_set_hc_event_deq(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Wrote ERST address to ir_set 0.");
	xhci_print_ir_set(xhci, 0);

	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */
	init_completion(&xhci->addr_dev);
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		xhci->devs[i] = NULL;
	for (i = 0; i < USB_MAXCHILDREN; ++i) {
		xhci->bus_state[0].resume_done[i] = 0;
		xhci->bus_state[1].resume_done[i] = 0;
		/* Only the USB 2.0 completions will ever be used. */
		init_completion(&xhci->bus_state[1].rexit_done[i]);
	}

	if (scratchpad_alloc(xhci, flags))
		goto fail;
	if (xhci_setup_port_arrays(xhci, flags))
		goto fail;

	/* Enable USB 3.0 device notifications for function remote wake, which
	 * is necessary for allowing USB 3.0 devices to do remote wakeup from
	 * U3 (device suspend).
	 */
	temp = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	temp &= ~DEV_NOTE_MASK;
	temp |= DEV_NOTE_FWAKE;
	xhci_writel(xhci, temp, &xhci->op_regs->dev_notification);

	return 0;

fail:
	xhci_warn(xhci, "Couldn't initialize memory\n");
	xhci_halt(xhci);
	xhci_reset(xhci);
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}