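/*
 * Queue head (QH) and queue transfer descriptor (QTD) management plus the
 * periodic (micro)frame scheduler for the DWC2 host controller driver.
 */
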
#include <linux/gcd.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"

/* Wait this long before releasing a periodic reservation */
#define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))

/* Wait this long (1 ms, in ns) before retrying a waiting non-periodic QH */
#define DWC2_RETRY_WAIT_DELAY (1 * NSEC_PER_MSEC)
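
/**
 * dwc2_periodic_channel_available() - Check whether a host channel is free
 * for a new periodic transfer.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller.
 *
 * Return: 0 if a channel is available, -ENOSPC otherwise.
 */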
static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg)
{
	/*
	 * Keep at least one host channel free for non-periodic transactions;
	 * the rest can be claimed by periodic transactions.
	 */
	int status;
	int num_channels;

	num_channels = hsotg->params.host_channels;
	if ((hsotg->periodic_channels + hsotg->non_periodic_channels <
	     num_channels) && (hsotg->periodic_channels < num_channels - 1)) {
		status = 0;
	} else {
		dev_dbg(hsotg->dev,
			"%s: Total channels: %d, Periodic: %d, Non-periodic: %d\n",
			__func__, num_channels,
			hsotg->periodic_channels, hsotg->non_periodic_channels);
		status = -ENOSPC;
	}

	return status;
}
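
/**
 * dwc2_check_periodic_bandwidth() - Check that there is enough periodic
 * bandwidth left for a new periodic transfer.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller.
 * @qh:    QH for the periodic transfer; qh->host_us is the time it needs.
 *
 * Return: 0 if there is enough bandwidth, -ENOSPC otherwise.
 */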
static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg,
					 struct dwc2_qh *qh)
{
	int status;
	s16 max_claimed_usecs;

	status = 0;

	if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
		/*
		 * High speed mode
		 * Max periodic usecs is 80% x 125 usec = 100 usec
		 */
		max_claimed_usecs = 100 - qh->host_us;
	} else {
		/*
		 * Full speed mode
		 * Max periodic usecs is 90% x 1000 usec = 900 usec
		 */
		max_claimed_usecs = 900 - qh->host_us;
	}

	if (hsotg->periodic_usecs > max_claimed_usecs) {
		dev_err(hsotg->dev,
			"%s: already claimed usecs %d, required usecs %d\n",
			__func__, hsotg->periodic_usecs, qh->host_us);
		status = -ENOSPC;
	}

	return status;
}
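
/**
 * pmap_schedule() - Reserve time in a periodic bitmap schedule.
 *
 * The schedule is a bitmap of @periods_in_map periods, each @bits_per_period
 * bits long.  Reserve @num_bits contiguous bits at the same offset in every
 * period matching @interval, searching from @start onward.
 *
 * @map:             Bitmap to schedule into.
 * @bits_per_period: Number of bits (time units) per period.
 * @periods_in_map:  Number of periods in the whole map.
 * @num_bits:        Number of bits to reserve in each scheduled period.
 * @interval:        How often to repeat, in periods; reduced with gcd() to a
 *                   divisor of @periods_in_map.
 * @start:           First bit offset to consider.
 * @only_one_period: If true, only search within the period containing @start.
 *
 * Return: The bit offset of the reservation, or -ENOSPC if nothing fits.
 */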
static int pmap_schedule(unsigned long *map, int bits_per_period,
			 int periods_in_map, int num_bits,
			 int interval, int start, bool only_one_period)
{
	int interval_bits;
	int to_reserve;
	int first_end;
	int i;

	if (num_bits > bits_per_period)
		return -ENOSPC;

	/* Adjust interval to something that evenly divides the map */
	interval = gcd(interval, periods_in_map);

	interval_bits = bits_per_period * interval;
	to_reserve = periods_in_map / interval;

	/* If start has gotten us past interval then we can't schedule */
	if (start >= interval_bits)
		return -ENOSPC;

	if (only_one_period)
		/* Must fit entirely within the period that contains start */
		first_end = (start / bits_per_period + 1) * bits_per_period;
	else
		/* Can fit anywhere in the first interval */
		first_end = interval_bits;

	/*
	 * Find a place for the first repetition, then check that the same
	 * place is free in every subsequent repetition.  If it isn't, bump
	 * the start position and search again.
	 */
	while (start + num_bits <= first_end) {
		int end;

		/* Need to stay within this period */
		end = (start / bits_per_period + 1) * bits_per_period;

		/* Look for num_bits free bits in this period from start on */
		start = bitmap_find_next_zero_area(map, end, start, num_bits,
						   0);

		/*
		 * start >= end means nothing fit in this period; move on to
		 * the next one (start is already updated).
		 */
		if (start >= end) {
			start = end;
			continue;
		}

		/* Check the same spot in every other repetition */
		for (i = 1; i < to_reserve; i++) {
			int ith_start = start + interval_bits * i;
			int ith_end = end + interval_bits * i;
			int ret;

			/* Use this as a dumb "check if bits are 0" */
			ret = bitmap_find_next_zero_area(
				map, ith_start + num_bits, ith_start, num_bits,
				0);

			/* This repetition is free too; keep checking */
			if (ret == ith_start)
				continue;

			/* Move start up for next time and exit the for loop */
			ith_start = bitmap_find_next_zero_area(
				map, ith_end, ith_start, num_bits, 0);
			if (ith_start >= ith_end)
				/* Need a whole new period next time */
				start = end;
			else
				start = ith_start - interval_bits * i;
			break;
		}

		/* If we didn't break out of the for loop, we have success */
		if (i == to_reserve)
			break;
	}

	if (start + num_bits > first_end)
		return -ENOSPC;

	for (i = 0; i < to_reserve; i++) {
		int ith_start = start + interval_bits * i;

		bitmap_set(map, ith_start, num_bits);
	}

	return start;
}
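
/**
 * pmap_unschedule() - Release bits previously reserved with pmap_schedule().
 *
 * Takes the same map parameters as pmap_schedule(); @start must be the value
 * that pmap_schedule() returned.
 */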
static void pmap_unschedule(unsigned long *map, int bits_per_period,
			    int periods_in_map, int num_bits,
			    int interval, int start)
{
	int interval_bits;
	int to_release;
	int i;

	/* Adjust interval the same way pmap_schedule() did when reserving */
	interval = gcd(interval, periods_in_map);

	interval_bits = bits_per_period * interval;
	to_release = periods_in_map / interval;

	for (i = 0; i < to_release; i++) {
		int ith_start = start + interval_bits * i;

		bitmap_clear(map, ith_start, num_bits);
	}
}
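
/**
 * dwc2_get_ls_map() - Get the low/full speed schedule bitmap for a QH.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller.
 * @qh:    QH for a periodic transfer that needs low speed scheduling.
 *
 * Return: The bitmap to schedule into (the per-port one for multi-TT hubs),
 *         or NULL if the QH has no TT structure.
 */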
static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh)
{
	unsigned long *map;

	/* Don't expect to be missing a TT and be doing low speed scheduling */
	if (WARN_ON(!qh->dwc_tt))
		return NULL;

	/* Get the map and adjust if this is a multi_tt hub */
	map = qh->dwc_tt->periodic_bitmaps;
	if (qh->dwc_tt->usb_tt->multi)
		map += DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1);

	return map;
}

#ifdef DWC2_PRINT_SCHEDULE
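
/**
 * cat_printf() - Append a formatted string to a fixed-size buffer.
 *
 * @buf:  Pointer to the current write position; advanced on return.
 * @size: Pointer to the space left in the buffer; decremented on return and
 *        set to 0 once the buffer is full.
 * @fmt:  printf()-style format string.
 */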
static __printf(3, 4)
void cat_printf(char **buf, size_t *size, const char *fmt, ...)
{
	va_list args;
	int i;

	if (*size == 0)
		return;

	va_start(args, fmt);
	i = vsnprintf(*buf, *size, fmt, args);
	va_end(args);

	if (i >= *size) {
		(*buf)[*size - 1] = '\0';
		*buf += *size;
		*size = 0;
	} else {
		*buf += i;
		*size -= i;
	}
}
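
/**
 * pmap_print() - Print the set regions of a periodic bitmap, one output line
 * per period, via the given print function.
 *
 * @map:             Bitmap to print.
 * @bits_per_period: Number of bits (time units) per period.
 * @periods_in_map:  Number of periods in the whole map.
 * @period_name:     Name of a period, like "uFrame", for the output.
 * @units:           Name of the time unit, like "us", for the output.
 * @print_fn:        Function called for each generated line.
 * @print_data:      Opaque data passed to @print_fn.
 */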
static void pmap_print(unsigned long *map, int bits_per_period,
		       int periods_in_map, const char *period_name,
		       const char *units,
		       void (*print_fn)(const char *str, void *data),
		       void *print_data)
{
	int period;

	for (period = 0; period < periods_in_map; period++) {
		char tmp[64];
		char *buf = tmp;
		size_t buf_size = sizeof(tmp);
		int period_start = period * bits_per_period;
		int period_end = period_start + bits_per_period;
		int start = 0;
		int count = 0;
		bool printed = false;
		int i;

		for (i = period_start; i < period_end + 1; i++) {
			/* Handle the case when the ith bit is set */
			if (i < period_end &&
			    bitmap_find_next_zero_area(map, i + 1,
						       i, 1, 0) != i) {
				if (count == 0)
					start = i - period_start;
				count++;
				continue;
			}

			/* ith bit isn't set; don't care if count == 0 */
			if (count == 0)
				continue;

			if (!printed)
				cat_printf(&buf, &buf_size, "%s %d: ",
					   period_name, period);
			else
				cat_printf(&buf, &buf_size, ", ");
			printed = true;

			cat_printf(&buf, &buf_size, "%d %s -%3d %s", start,
				   units, start + count - 1, units);
			count = 0;
		}

		if (printed)
			print_fn(tmp, print_data);
	}
}

struct dwc2_qh_print_data {
	struct dwc2_hsotg *hsotg;
	struct dwc2_qh *qh;
};
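
/* Print callback for pmap_print(); prefixes each line with the QH pointer */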
static void dwc2_qh_print(const char *str, void *data)
{
	struct dwc2_qh_print_data *print_data = data;

	dwc2_sch_dbg(print_data->hsotg, "QH=%p ...%s\n", print_data->qh, str);
}
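
/**
 * dwc2_qh_schedule_print() - Print the periodic schedule claimed by a QH:
 * the low/full speed map (if any) and the high speed map.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller.
 * @qh:    QH to print.
 */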
static void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg,
				   struct dwc2_qh *qh)
{
	struct dwc2_qh_print_data print_data = { hsotg, qh };
	int i;

	if (qh->schedule_low_speed) {
		unsigned long *map = dwc2_get_ls_map(hsotg, qh);

		dwc2_sch_dbg(hsotg, "QH=%p LS/FS trans: %d=>%d us @ %d us",
			     qh, qh->device_us,
			     DWC2_ROUND_US_TO_SLICE(qh->device_us),
			     DWC2_US_PER_SLICE * qh->ls_start_schedule_slice);

		if (map) {
			dwc2_sch_dbg(hsotg,
				     "QH=%p Whole low/full speed map %p now:\n",
				     qh, map);
			pmap_print(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
				   DWC2_LS_SCHEDULE_FRAMES, "Frame ", "slices",
				   dwc2_qh_print, &print_data);
		}
	}

	for (i = 0; i < qh->num_hs_transfers; i++) {
		struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + i;
		int uframe = trans_time->start_schedule_us /
			     DWC2_HS_PERIODIC_US_PER_UFRAME;
		int rel_us = trans_time->start_schedule_us %
			     DWC2_HS_PERIODIC_US_PER_UFRAME;

		dwc2_sch_dbg(hsotg,
			     "QH=%p HS trans #%d: %d us @ uFrame %d + %d us\n",
			     qh, i, trans_time->duration_us, uframe, rel_us);
	}
	if (qh->num_hs_transfers) {
		dwc2_sch_dbg(hsotg, "QH=%p Whole high speed map now:\n", qh);
		pmap_print(hsotg->hs_periodic_bitmap,
			   DWC2_HS_PERIODIC_US_PER_UFRAME,
			   DWC2_HS_SCHEDULE_UFRAMES, "uFrame", "us",
			   dwc2_qh_print, &print_data);
	}
}
#else
static inline void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg,
					  struct dwc2_qh *qh) {}
#endif
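
/**
 * dwc2_ls_pmap_schedule() - Schedule a QH in the low/full speed map.
 *
 * @hsotg:        The HCD state structure for the DWC OTG controller.
 * @qh:           QH for the periodic transfer.
 * @search_slice: Slice (in DWC2_US_PER_SLICE units) to start searching at.
 *
 * On success, stores the chosen slice in qh->ls_start_schedule_slice.
 *
 * Return: 0 on success, a negative error code otherwise.
 */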
static int dwc2_ls_pmap_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				 int search_slice)
{
	int slices = DIV_ROUND_UP(qh->device_us, DWC2_US_PER_SLICE);
	unsigned long *map = dwc2_get_ls_map(hsotg, qh);
	int slice;

	if (!map)
		return -EINVAL;

	/*
	 * Schedule on the low/full speed map using the device's own interval;
	 * the device must be low or full speed for us to get here at all.
	 */
	slice = pmap_schedule(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
			      DWC2_LS_SCHEDULE_FRAMES, slices,
			      qh->device_interval, search_slice, false);

	if (slice < 0)
		return slice;

	qh->ls_start_schedule_slice = slice;
	return 0;
}
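
/**
 * dwc2_ls_pmap_unschedule() - Undo a previous dwc2_ls_pmap_schedule().
 *
 * @hsotg: The HCD state structure for the DWC OTG controller.
 * @qh:    QH for the periodic transfer.
 */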
static void dwc2_ls_pmap_unschedule(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh)
{
	int slices = DIV_ROUND_UP(qh->device_us, DWC2_US_PER_SLICE);
	unsigned long *map = dwc2_get_ls_map(hsotg, qh);

	/* Schedule should have failed, so no worries about no error code */
	if (!map)
		return;

	pmap_unschedule(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
			DWC2_LS_SCHEDULE_FRAMES, slices, qh->device_interval,
			qh->ls_start_schedule_slice);
}
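
/**
 * dwc2_hs_pmap_schedule() - Schedule one of a QH's transfers in the high
 * speed map.
 *
 * @hsotg:           The HCD state structure for the DWC OTG controller.
 * @qh:              QH for the periodic transfer.
 * @only_one_period: Restrict the search to the single uFrame suggested by the
 *                   transfer's start_schedule_us (used for splits, which must
 *                   land in specific uFrames).
 * @index:           Index into qh->hs_transfers.
 *
 * On success, updates the transfer's start_schedule_us to the reserved time.
 *
 * Return: 0 on success, a negative error code otherwise.
 */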
static int dwc2_hs_pmap_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				 bool only_one_period, int index)
{
	struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + index;
	int us;

	us = pmap_schedule(hsotg->hs_periodic_bitmap,
			   DWC2_HS_PERIODIC_US_PER_UFRAME,
			   DWC2_HS_SCHEDULE_UFRAMES, trans_time->duration_us,
			   qh->host_interval, trans_time->start_schedule_us,
			   only_one_period);

	if (us < 0)
		return us;

	trans_time->start_schedule_us = us;
	return 0;
}
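
/**
 * dwc2_hs_pmap_unschedule() - Undo a previous dwc2_hs_pmap_schedule().
 *
 * @hsotg: The HCD state structure for the DWC OTG controller.
 * @qh:    QH for the periodic transfer.
 * @index: Index into qh->hs_transfers.
 */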
static void dwc2_hs_pmap_unschedule(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, int index)
{
	struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + index;

	pmap_unschedule(hsotg->hs_periodic_bitmap,
			DWC2_HS_PERIODIC_US_PER_UFRAME,
			DWC2_HS_SCHEDULE_UFRAMES, trans_time->duration_us,
			qh->host_interval, trans_time->start_schedule_us);
}
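
/**
 * dwc2_uframe_schedule_split() - Schedule a periodic split transfer.
 *
 * Finds a slot in the low/full speed schedule behind the TT, then tries to
 * place the matching start/complete split transactions in the high speed
 * schedule.  If the high speed side doesn't fit, the low speed reservation is
 * dropped and the search continues further along in the low speed schedule.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller.
 * @qh:    QH for the periodic transfer.
 *
 * Return: 0 on success, a negative error code otherwise.
 */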
static int dwc2_uframe_schedule_split(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh)
{
	int bytecount = qh->maxp_mult * qh->maxp;
	int ls_search_slice;
	int err = 0;
	int host_interval_in_sched;

	/*
	 * The interval (how often to repeat) in the actual host schedule.
	 * See pmap_schedule() for the gcd() explanation.
	 */
	host_interval_in_sched = gcd(qh->host_interval,
				     DWC2_HS_SCHEDULE_UFRAMES);

	/*
	 * Find a place in the low speed schedule first, then try to find
	 * matching time in the high speed schedule.  If the high speed side
	 * doesn't work out, bump the low speed search position and try again.
	 */
	ls_search_slice = 0;

	while (ls_search_slice < DWC2_LS_SCHEDULE_SLICES) {
		int start_s_uframe;
		int ssplit_s_uframe;
		int second_s_uframe;
		int rel_uframe;
		int first_count;
		int middle_count;
		int end_count;
		int first_data_bytes;
		int other_data_bytes;
		int i;

		if (qh->schedule_low_speed) {
			err = dwc2_ls_pmap_schedule(hsotg, qh, ls_search_slice);

			/*
			 * If the low speed schedule is full there's nothing
			 * more we can do; the looping below only helps when
			 * we got a low speed slot but couldn't find matching
			 * high speed time.
			 */
			if (err)
				return err;
		} else {
			/* Shouldn't be here without low speed scheduling */
			WARN_ON_ONCE(1);
		}

		/*
		 * Microframe (within the whole schedule) where the low speed
		 * transfer starts.
		 */
		start_s_uframe = qh->ls_start_schedule_slice /
				 DWC2_SLICES_PER_UFRAME;

		/* Get a number that's always 0 - 7 */
		rel_uframe = (start_s_uframe % 8);

		/*
		 * Starting in uFrame 7 would require issuing the start split
		 * in uFrame 6, which the split scheduling rules don't allow.
		 * Move on to the next full frame.
		 */
		if (rel_uframe == 7) {
			if (qh->schedule_low_speed)
				dwc2_ls_pmap_unschedule(hsotg, qh);
			ls_search_slice =
				(qh->ls_start_schedule_slice /
				 DWC2_LS_PERIODIC_SLICES_PER_FRAME + 1) *
				DWC2_LS_PERIODIC_SLICES_PER_FRAME;
			continue;
		}

		/*
		 * The start split is issued one microframe before the low
		 * speed transfer starts (modulo the schedule interval).  The
		 * remaining transfers follow from start_s_uframe + 1, except
		 * for isochronous OUT where data keeps going out starting at
		 * start_s_uframe itself.
		 */
		ssplit_s_uframe = (start_s_uframe +
				   host_interval_in_sched - 1) %
				  host_interval_in_sched;
		if (qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in)
			second_s_uframe = start_s_uframe;
		else
			second_s_uframe = start_s_uframe + 1;

		/*
		 * The first data transfer might not be all 188 bytes if the
		 * low speed transfer doesn't start on a microframe boundary.
		 */
		first_data_bytes = 188 -
			DIV_ROUND_UP(188 * (qh->ls_start_schedule_slice %
					    DWC2_SLICES_PER_UFRAME),
				     DWC2_SLICES_PER_UFRAME);
		if (first_data_bytes > bytecount)
			first_data_bytes = bytecount;
		other_data_bytes = bytecount - first_data_bytes;

		/*
		 * Don't allow a schedule where the first OUT transfer is
		 * short unless it's the only transfer; retry starting at the
		 * next microframe instead.
		 */
		if (!qh->ep_is_in &&
		    (first_data_bytes != min_t(int, 188, bytecount))) {
			dwc2_sch_dbg(hsotg,
				     "QH=%p avoiding broken 1st xfer (%d, %d)\n",
				     qh, first_data_bytes, bytecount);
			if (qh->schedule_low_speed)
				dwc2_ls_pmap_unschedule(hsotg, qh);
			ls_search_slice = (start_s_uframe + 1) *
				DWC2_SLICES_PER_UFRAME;
			continue;
		}

		/* Start by assuming transfers for the data bytes */
		qh->num_hs_transfers = 1 + DIV_ROUND_UP(other_data_bytes, 188);

		/*
		 * Everything except isochronous OUT needs extra transfers for
		 * the complete splits.
		 */
		if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
			if (rel_uframe == 6)
				qh->num_hs_transfers += 2;
			else
				qh->num_hs_transfers += 3;

			if (qh->ep_is_in) {
				/*
				 * First is the start split; the complete
				 * splits can each return a full packet.
				 */
				first_count = 4;
				middle_count = bytecount;
				end_count = bytecount;
			} else {
				/*
				 * Data goes out with the start splits; the
				 * trailing complete splits are small.
				 */
				first_count = first_data_bytes;
				middle_count = max_t(int, 4, other_data_bytes);
				end_count = 4;
			}
		} else {
			if (qh->ep_is_in) {
				int last;

				/* Account for the start split */
				qh->num_hs_transfers++;

				/* Where the last transfer lands in the frame */
				last = rel_uframe + qh->num_hs_transfers + 1;

				if (last <= 6)
					qh->num_hs_transfers += 2;
				else
					qh->num_hs_transfers += 1;

				if (last >= 6 && rel_uframe == 0)
					qh->num_hs_transfers--;

				/*
				 * Start split is small; data comes back in
				 * up-to-188-byte complete splits.
				 */
				first_count = 4;
				middle_count = min_t(int, 188, bytecount);
				end_count = middle_count;
			} else {
				/* Isochronous OUT: data rides the start splits */
				first_count = first_data_bytes;
				middle_count = min_t(int, 188,
						     other_data_bytes);
				end_count = other_data_bytes % 188;
			}
		}

		/* Assign durations per microframe */
		qh->hs_transfers[0].duration_us = HS_USECS_ISO(first_count);
		for (i = 1; i < qh->num_hs_transfers - 1; i++)
			qh->hs_transfers[i].duration_us =
				HS_USECS_ISO(middle_count);
		if (qh->num_hs_transfers > 1)
			qh->hs_transfers[qh->num_hs_transfers - 1].duration_us =
				HS_USECS_ISO(end_count);

		/*
		 * Assign start times: the start split gets its own microframe
		 * and the remaining transfers follow consecutively.
		 */
		qh->hs_transfers[0].start_schedule_us =
			ssplit_s_uframe * DWC2_HS_PERIODIC_US_PER_UFRAME;
		for (i = 1; i < qh->num_hs_transfers; i++)
			qh->hs_transfers[i].start_schedule_us =
				((second_s_uframe + i - 1) %
				 DWC2_HS_SCHEDULE_UFRAMES) *
				DWC2_HS_PERIODIC_US_PER_UFRAME;

		/* Try to schedule with the hs_transfers filled in above */
		for (i = 0; i < qh->num_hs_transfers; i++) {
			err = dwc2_hs_pmap_schedule(hsotg, qh, true, i);
			if (err)
				break;
		}

		/* If everything was scheduled we're done */
		if (i == qh->num_hs_transfers)
			break;

		/* Back out what we did and retry one microframe later */
		for (; i >= 0; i--)
			dwc2_hs_pmap_unschedule(hsotg, qh, i);

		if (qh->schedule_low_speed)
			dwc2_ls_pmap_unschedule(hsotg, qh);

		ls_search_slice = (start_s_uframe + 1) * DWC2_SLICES_PER_UFRAME;
	}

	if (ls_search_slice >= DWC2_LS_SCHEDULE_SLICES)
		return -ENOSPC;

	return 0;
}
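
/**
 * dwc2_uframe_schedule_hs() - Schedule a QH for a non-split high speed
 * periodic transfer.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller.
 * @qh:    QH for the periodic transfer.
 *
 * Return: 0 on success, a negative error code otherwise.
 */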
static int dwc2_uframe_schedule_hs(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	/* In non-split host and device time are the same */
	WARN_ON(qh->host_us != qh->device_us);
	WARN_ON(qh->host_interval != qh->device_interval);
	WARN_ON(qh->num_hs_transfers != 1);

	/* We'll have one transfer; init start to 0 before calling scheduler */
	qh->hs_transfers[0].start_schedule_us = 0;
	qh->hs_transfers[0].duration_us = qh->host_us;

	return dwc2_hs_pmap_schedule(hsotg, qh, false, 0);
}
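
/**
 * dwc2_uframe_schedule_ls() - Schedule a QH for a periodic transfer when the
 * root port is running at low/full speed (no split transactions needed).
 *
 * @hsotg: The HCD state structure for the DWC OTG controller.
 * @qh:    QH for the periodic transfer.
 *
 * Return: 0 on success, a negative error code otherwise.
 */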
static int dwc2_uframe_schedule_ls(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	/* In non-split host and device time are the same */
	WARN_ON(qh->host_us != qh->device_us);
	WARN_ON(qh->host_interval != qh->device_interval);
	WARN_ON(!qh->schedule_low_speed);

	/* Run on the main low speed schedule */
	return dwc2_ls_pmap_schedule(hsotg, qh, 0);
}
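
/**
 * dwc2_uframe_schedule() - Reserve time in the microframe schedule for a QH,
 * dispatching to the high speed, low/full speed, or split scheduler.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller.
 * @qh:    QH for the periodic transfer.
 *
 * Return: 0 on success, a negative error code otherwise.
 */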
static int dwc2_uframe_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int ret;

	if (qh->dev_speed == USB_SPEED_HIGH)
		ret = dwc2_uframe_schedule_hs(hsotg, qh);
	else if (!qh->do_split)
		ret = dwc2_uframe_schedule_ls(hsotg, qh);
	else
		ret = dwc2_uframe_schedule_split(hsotg, qh);

	if (ret)
		dwc2_sch_dbg(hsotg, "QH=%p Failed to schedule %d\n", qh, ret);
	else
		dwc2_qh_schedule_print(hsotg, qh);

	return ret;
}
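
/**
 * dwc2_uframe_unschedule() - Release all microframe schedule time for a QH.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller.
 * @qh:    QH for the periodic transfer.
 */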
static void dwc2_uframe_unschedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int i;

	for (i = 0; i < qh->num_hs_transfers; i++)
		dwc2_hs_pmap_unschedule(hsotg, qh, i);

	if (qh->schedule_low_speed)
		dwc2_ls_pmap_unschedule(hsotg, qh);

	dwc2_sch_dbg(hsotg, "QH=%p Unscheduled\n", qh);
}
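
/**
 * dwc2_pick_first_frame() - Choose the first frame a newly scheduled periodic
 * QH should become active on, based on the current frame number and the
 * reservation made by the (micro)frame scheduler.
 *
 * Sets qh->next_active_frame and qh->start_active_frame.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller.
 * @qh:    QH for a periodic transfer.
 */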
static void dwc2_pick_first_frame(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	u16 frame_number;
	u16 earliest_frame;
	u16 next_active_frame;
	u16 relative_frame;
	u16 interval;

	/*
	 * Use the real frame number rather than the cached value as of the
	 * last SOF to give ourselves a little extra slop.
	 */
	frame_number = dwc2_hcd_get_frame_number(hsotg);

	/*
	 * Don't start any earlier than the next frame, in case the frame
	 * number ticks while we're doing this math.
	 */
	earliest_frame = dwc2_frame_num_inc(frame_number, 1);
	next_active_frame = earliest_frame;

	/* Get the "no microframe scheduler" case out of the way... */
	if (!hsotg->params.uframe_sched) {
		if (qh->do_split)
			/* Splits park on the last microframe of a frame */
			next_active_frame |= 0x7;
		goto exit;
	}

	if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
		/*
		 * High speed or split: derive the start from the first high
		 * speed transfer's reserved time.
		 */
		WARN_ON(qh->num_hs_transfers < 1);

		relative_frame = qh->hs_transfers[0].start_schedule_us /
				 DWC2_HS_PERIODIC_US_PER_UFRAME;

		/* Adjust interval the same way the scheduler did */
		interval = gcd(qh->host_interval, DWC2_HS_SCHEDULE_UFRAMES);

	} else {
		/*
		 * Low or full speed directly on dwc2: derive the start from
		 * the low speed schedule slice we reserved.
		 */
		relative_frame = qh->ls_start_schedule_slice /
				 DWC2_LS_PERIODIC_SLICES_PER_FRAME;
		interval = gcd(qh->host_interval, DWC2_LS_SCHEDULE_FRAMES);
	}

	/* Scheduler messed up if frame is past interval */
	WARN_ON(relative_frame >= interval);

	/*
	 * After the gcd() above, interval evenly divides the schedule length,
	 * so it's safe to align down to the start of the current interval...
	 */
	next_active_frame = (next_active_frame / interval) * interval;

	/* ...and then move to the relative offset we were scheduled at */
	next_active_frame = dwc2_frame_num_inc(next_active_frame,
					       relative_frame);

	/*
	 * We actually need 1 frame before, since next_active_frame is the
	 * frame number we'll be put on the ready list and we won't be on the
	 * bus until 1 frame later.
	 */
	next_active_frame = dwc2_frame_num_dec(next_active_frame, 1);

	/* Make sure we never pick a frame in the past */
	while (dwc2_frame_num_gt(earliest_frame, next_active_frame))
		next_active_frame = dwc2_frame_num_inc(next_active_frame,
						       interval);

exit:
	qh->next_active_frame = next_active_frame;
	qh->start_active_frame = next_active_frame;

	dwc2_sch_vdbg(hsotg, "QH=%p First fn=%04x nxt=%04x\n",
		      qh, frame_number, qh->next_active_frame);
}
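
/**
 * dwc2_do_reserve() - Make a periodic bandwidth / host channel reservation
 * for a QH and pick its first active frame.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller.
 * @qh:    QH for the periodic transfer.
 *
 * Return: 0 on success, a negative error code otherwise.
 */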
static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int status;

	if (hsotg->params.uframe_sched) {
		status = dwc2_uframe_schedule(hsotg, qh);
	} else {
		status = dwc2_periodic_channel_available(hsotg);
		if (status) {
			dev_info(hsotg->dev,
				 "%s: No host channel available for periodic transfer\n",
				 __func__);
			return status;
		}

		status = dwc2_check_periodic_bandwidth(hsotg, qh);
	}

	if (status) {
		dev_dbg(hsotg->dev,
			"%s: Insufficient periodic bandwidth for periodic transfer\n",
			__func__);
		return status;
	}

	if (!hsotg->params.uframe_sched)
		/* Reserve periodic channel */
		hsotg->periodic_channels++;

	/* Update claimed usecs per (micro)frame */
	hsotg->periodic_usecs += qh->host_us;

	dwc2_pick_first_frame(hsotg, qh);

	return 0;
}
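
/**
 * dwc2_do_unreserve() - Actually release the periodic reservation of a QH.
 *
 * Called with the HCD lock held, either from the delayed unreserve timer or
 * when the QH is freed.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller.
 * @qh:    QH for the periodic transfer.
 */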
static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	assert_spin_locked(&hsotg->lock);

	WARN_ON(!qh->unreserve_pending);

	/* No more unreserve pending; we're doing it now */
	qh->unreserve_pending = false;

	if (WARN_ON(!list_empty(&qh->qh_list_entry)))
		list_del_init(&qh->qh_list_entry);

	/* Update claimed usecs per (micro)frame */
	hsotg->periodic_usecs -= qh->host_us;

	if (hsotg->params.uframe_sched) {
		dwc2_uframe_unschedule(hsotg, qh);
	} else {
		/* Release periodic channel reservation */
		hsotg->periodic_channels--;
	}
}
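
/**
 * dwc2_unreserve_timer_fn() - Timer callback that releases a QH's periodic
 * reservation a little while after it stopped being used, so that back to
 * back URBs can reuse the same reservation.
 *
 * @t: Pointer to the QH's unreserve_timer.
 */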
static void dwc2_unreserve_timer_fn(struct timer_list *t)
{
	struct dwc2_qh *qh = from_timer(qh, t, unreserve_timer);
	struct dwc2_hsotg *hsotg = qh->hsotg;
	unsigned long flags;

	/*
	 * Spin trying for the lock, but bail out if the timer was re-armed in
	 * the meantime (a new reservation came in and the cancel didn't take
	 * effect because we had already started executing); the next
	 * expiration will deal with it.
	 */
	while (!spin_trylock_irqsave(&hsotg->lock, flags)) {
		if (timer_pending(&qh->unreserve_timer))
			return;
	}

	/*
	 * The unreserve may already have been done or canceled by the time
	 * we get the lock; unreserve_pending can only be checked under the
	 * lock, so check it now.
	 */
	if (qh->unreserve_pending)
		dwc2_do_unreserve(hsotg, qh);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}
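
/**
 * dwc2_check_max_xfer_size() - Check that the max transfer size allowed in a
 * host channel is large enough for this QH's maximum packet size.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller.
 * @qh:    QH for a periodic endpoint.
 *
 * Return: 0 if the transfer fits, -ENOSPC otherwise.
 */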
static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh)
{
	u32 max_xfer_size;
	u32 max_channel_xfer_size;
	int status = 0;

	max_xfer_size = qh->maxp * qh->maxp_mult;
	max_channel_xfer_size = hsotg->params.max_transfer_size;

	if (max_xfer_size > max_channel_xfer_size) {
		dev_err(hsotg->dev,
			"%s: Periodic xfer length %d > max xfer length for channel %d\n",
			__func__, max_xfer_size, max_channel_xfer_size);
		status = -ENOSPC;
	}

	return status;
}
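
/**
 * dwc2_schedule_periodic() - Schedule a periodic QH, reusing an existing
 * reservation if the delayed unreserve hasn't fired yet, and put the QH on
 * the proper periodic list.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller.
 * @qh:    QH for the periodic transfer.
 *
 * Return: 0 on success, a negative error code otherwise.
 */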
static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int status;

	status = dwc2_check_max_xfer_size(hsotg, qh);
	if (status) {
		dev_dbg(hsotg->dev,
			"%s: Channel max transfer size too small for periodic transfer\n",
			__func__);
		return status;
	}

	/* Cancel pending unreserve; if canceled OK, unreserve was pending */
	if (del_timer(&qh->unreserve_timer))
		WARN_ON(!qh->unreserve_pending);

	/*
	 * Only need to reserve if there's no unreserve pending, since a
	 * pending unreserve means the old reservation is still valid.  The
	 * unreserve might still be pending even if we didn't cancel it if
	 * dwc2_unreserve_timer_fn() already started; that case is handled in
	 * the timer function itself.
	 */
	if (!qh->unreserve_pending) {
		status = dwc2_do_reserve(hsotg, qh);
		if (status)
			return status;
	} else {
		/*
		 * It might have been a while, so make sure the cached
		 * next_active_frame hasn't fallen into the past.
		 */
		if (dwc2_frame_num_le(qh->next_active_frame,
				      hsotg->frame_number))
			dwc2_pick_first_frame(hsotg, qh);
	}

	qh->unreserve_pending = 0;

	if (hsotg->params.dma_desc_enable)
		/* Don't rely on SOF and start in ready schedule */
		list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
	else
		/* Always start in inactive schedule */
		list_add_tail(&qh->qh_list_entry,
			      &hsotg->periodic_sched_inactive);

	return 0;
}
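
/**
 * dwc2_deschedule_periodic() - Remove a periodic QH from the schedule and arm
 * the delayed unreserve timer.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller.
 * @qh:    QH for the periodic transfer.
 */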
static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
				     struct dwc2_qh *qh)
{
	bool did_modify;

	assert_spin_locked(&hsotg->lock);

	/*
	 * Schedule the unreserve to happen in a little bit.  The timer should
	 * never already be pending here because dwc2_schedule_periodic()
	 * cancels it whenever the reservation is reused (hence the WARN_ON).
	 *
	 * The "+ 1" guarantees that at least one full jiffy elapses even if
	 * the jiffies counter ticks right after we read it.
	 */
	did_modify = mod_timer(&qh->unreserve_timer,
			       jiffies + DWC2_UNRESERVE_DELAY + 1);
	WARN_ON(did_modify);
	qh->unreserve_pending = 1;

	list_del_init(&qh->qh_list_entry);
}
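
/**
 * dwc2_wait_timer_fn() - hrtimer callback that moves a waiting non-periodic
 * QH back to the inactive list and kicks transaction processing so it gets
 * retried.
 *
 * @t: Pointer to the QH's wait_timer.
 */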
static enum hrtimer_restart dwc2_wait_timer_fn(struct hrtimer *t)
{
	struct dwc2_qh *qh = container_of(t, struct dwc2_qh, wait_timer);
	struct dwc2_hsotg *hsotg = qh->hsotg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	/*
	 * wait_timer_cancel is set when the QH is unlinked; if it's set then
	 * this timer raced with the unlink and should do nothing.
	 */
	if (!qh->wait_timer_cancel) {
		enum dwc2_transaction_type tr_type;

		qh->want_wait = false;

		list_move(&qh->qh_list_entry,
			  &hsotg->non_periodic_sched_inactive);

		tr_type = dwc2_hcd_select_transactions(hsotg);
		if (tr_type != DWC2_TRANSACTION_NONE)
			dwc2_hcd_queue_transactions(hsotg, tr_type);
	}

	spin_unlock_irqrestore(&hsotg->lock, flags);
	return HRTIMER_NORESTART;
}
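
/**
 * dwc2_qh_init() - Initialize a QH structure from an URB: transfer type,
 * direction, max packet size, split handling and, for periodic endpoints,
 * the bus times and intervals used by the scheduler.
 *
 * @hsotg:     The HCD state structure for the DWC OTG controller.
 * @qh:        The QH to init.
 * @urb:       Holds the information about the device/endpoint needed to
 *             initialize the QH.
 * @mem_flags: Flags for allocating memory.
 */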
static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			 struct dwc2_hcd_urb *urb, gfp_t mem_flags)
{
	int dev_speed = dwc2_host_get_speed(hsotg, urb->priv);
	u8 ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
	bool ep_is_in = !!dwc2_hcd_is_pipe_in(&urb->pipe_info);
	bool ep_is_isoc = (ep_type == USB_ENDPOINT_XFER_ISOC);
	bool ep_is_int = (ep_type == USB_ENDPOINT_XFER_INT);
	u32 hprt = dwc2_readl(hsotg, HPRT0);
	u32 prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
	bool do_split = (prtspd == HPRT0_SPD_HIGH_SPEED &&
			 dev_speed != USB_SPEED_HIGH);
	int maxp = dwc2_hcd_get_maxp(&urb->pipe_info);
	int maxp_mult = dwc2_hcd_get_maxp_mult(&urb->pipe_info);
	int bytecount = maxp_mult * maxp;
	char *speed, *type;

	/* Initialize QH */
	qh->hsotg = hsotg;
	timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
	hrtimer_init(&qh->wait_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	qh->wait_timer.function = &dwc2_wait_timer_fn;
	qh->ep_type = ep_type;
	qh->ep_is_in = ep_is_in;

	qh->data_toggle = DWC2_HC_PID_DATA0;
	qh->maxp = maxp;
	qh->maxp_mult = maxp_mult;
	INIT_LIST_HEAD(&qh->qtd_list);
	INIT_LIST_HEAD(&qh->qh_list_entry);

	qh->do_split = do_split;
	qh->dev_speed = dev_speed;

	if (ep_is_int || ep_is_isoc) {
		/* Compute scheduling parameters once and cache them */
		int host_speed = do_split ? USB_SPEED_HIGH : dev_speed;
		struct dwc2_tt *dwc_tt = dwc2_host_get_tt_info(hsotg, urb->priv,
							       mem_flags,
							       &qh->ttport);
		int device_ns;

		qh->dwc_tt = dwc_tt;

		qh->host_us = NS_TO_US(usb_calc_bus_time(host_speed, ep_is_in,
							 ep_is_isoc, bytecount));
		device_ns = usb_calc_bus_time(dev_speed, ep_is_in,
					      ep_is_isoc, bytecount);

		if (do_split && dwc_tt)
			device_ns += dwc_tt->usb_tt->think_time;
		qh->device_us = NS_TO_US(device_ns);

		qh->device_interval = urb->interval;
		qh->host_interval = urb->interval * (do_split ? 8 : 1);

		/*
		 * Schedule on the low/full speed map if the port is running
		 * at low/full speed or if there's a TT in the path.
		 */
		qh->schedule_low_speed = prtspd != HPRT0_SPD_HIGH_SPEED ||
					 dwc_tt;

		if (do_split) {
			/* Won't know the number of transfers until scheduled */
			qh->num_hs_transfers = -1;
		} else if (dev_speed == USB_SPEED_HIGH) {
			qh->num_hs_transfers = 1;
		} else {
			qh->num_hs_transfers = 0;
		}

		/* Scheduling happens later, when there is something to do */
	}

	switch (dev_speed) {
	case USB_SPEED_LOW:
		speed = "low";
		break;
	case USB_SPEED_FULL:
		speed = "full";
		break;
	case USB_SPEED_HIGH:
		speed = "high";
		break;
	default:
		speed = "?";
		break;
	}

	switch (qh->ep_type) {
	case USB_ENDPOINT_XFER_ISOC:
		type = "isochronous";
		break;
	case USB_ENDPOINT_XFER_INT:
		type = "interrupt";
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		type = "control";
		break;
	case USB_ENDPOINT_XFER_BULK:
		type = "bulk";
		break;
	default:
		type = "?";
		break;
	}

	dwc2_sch_dbg(hsotg, "QH=%p Init %s, %s speed, %d bytes:\n", qh, type,
		     speed, bytecount);
	dwc2_sch_dbg(hsotg, "QH=%p ...addr=%d, ep=%d, %s\n", qh,
		     dwc2_hcd_get_dev_addr(&urb->pipe_info),
		     dwc2_hcd_get_ep_num(&urb->pipe_info),
		     ep_is_in ? "IN" : "OUT");
	if (ep_is_int || ep_is_isoc) {
		dwc2_sch_dbg(hsotg,
			     "QH=%p ...duration: host=%d us, device=%d us\n",
			     qh, qh->host_us, qh->device_us);
		dwc2_sch_dbg(hsotg, "QH=%p ...interval: host=%d, device=%d\n",
			     qh, qh->host_interval, qh->device_interval);
		if (qh->schedule_low_speed)
			dwc2_sch_dbg(hsotg, "QH=%p ...low speed schedule=%p\n",
				     qh, dwc2_get_ls_map(hsotg, qh));
	}
}
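
/**
 * dwc2_hcd_qh_create() - Allocate and initialize a QH, including descriptor
 * list memory when descriptor DMA is enabled.
 *
 * @hsotg:     The HCD state structure for the DWC OTG controller.
 * @urb:       Holds the information about the device/endpoint needed to
 *             initialize the QH.
 * @mem_flags: Flags for allocating memory.
 *
 * Return: Pointer to the newly allocated QH, or NULL on error.
 */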
struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
				   struct dwc2_hcd_urb *urb,
				   gfp_t mem_flags)
{
	struct dwc2_qh *qh;

	if (!urb->priv)
		return NULL;

	/* Allocate memory */
	qh = kzalloc(sizeof(*qh), mem_flags);
	if (!qh)
		return NULL;

	dwc2_qh_init(hsotg, qh, urb, mem_flags);

	if (hsotg->params.dma_desc_enable &&
	    dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
		dwc2_hcd_qh_free(hsotg, qh);
		return NULL;
	}

	return qh;
}
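
/**
 * dwc2_hcd_qh_free() - Free a QH, releasing any pending reservation and
 * canceling its timers first.
 *
 * @hsotg: HCD instance.
 * @qh:    The QH to free.
 */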
void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	/* Make sure any unreserve work is finished */
	if (del_timer_sync(&qh->unreserve_timer)) {
		unsigned long flags;

		spin_lock_irqsave(&hsotg->lock, flags);
		dwc2_do_unreserve(hsotg, qh);
		spin_unlock_irqrestore(&hsotg->lock, flags);
	}

	/*
	 * We don't hold the lock here, so it's safe to wait for the wait
	 * timer to finish.  The unlink path sets wait_timer_cancel so the
	 * callback won't do anything, but we want it done before the memory
	 * is freed.
	 */
	hrtimer_cancel(&qh->wait_timer);

	dwc2_host_put_tt_info(hsotg, qh->dwc_tt);

	if (qh->desc_list)
		dwc2_hcd_qh_free_ddma(hsotg, qh);
	else if (hsotg->unaligned_cache && qh->dw_align_buf)
		kmem_cache_free(hsotg->unaligned_cache, qh->dw_align_buf);

	kfree(qh);
}
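
/**
 * dwc2_hcd_qh_add() - Add a QH to either the non-periodic or periodic
 * schedule if it is not already in one.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller.
 * @qh:    The QH to add.
 *
 * Return: 0 on success, a negative error code otherwise.
 */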
int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int status;
	u32 intr_mask;
	ktime_t delay;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (!list_empty(&qh->qh_list_entry))
		/* QH already in a schedule */
		return 0;

	/* Add the new QH to the appropriate schedule */
	if (dwc2_qh_is_non_per(qh)) {
		/* Schedule right away */
		qh->start_active_frame = hsotg->frame_number;
		qh->next_active_frame = qh->start_active_frame;

		if (qh->want_wait) {
			list_add_tail(&qh->qh_list_entry,
				      &hsotg->non_periodic_sched_waiting);
			qh->wait_timer_cancel = false;
			delay = ktime_set(0, DWC2_RETRY_WAIT_DELAY);
			hrtimer_start(&qh->wait_timer, delay, HRTIMER_MODE_REL);
		} else {
			list_add_tail(&qh->qh_list_entry,
				      &hsotg->non_periodic_sched_inactive);
		}
		return 0;
	}

	status = dwc2_schedule_periodic(hsotg, qh);
	if (status)
		return status;
	if (!hsotg->periodic_qh_count) {
		intr_mask = dwc2_readl(hsotg, GINTMSK);
		intr_mask |= GINTSTS_SOF;
		dwc2_writel(hsotg, intr_mask, GINTMSK);
	}
	hsotg->periodic_qh_count++;

	return 0;
}
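
/**
 * dwc2_hcd_qh_unlink() - Remove a QH from either the non-periodic or periodic
 * schedule.  Memory is not freed.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller.
 * @qh:    The QH to remove from the schedule.
 */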
void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	u32 intr_mask;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* If the wait_timer is pending, this will stop it from acting */
	qh->wait_timer_cancel = true;

	if (list_empty(&qh->qh_list_entry))
		/* QH is not in a schedule */
		return;

	if (dwc2_qh_is_non_per(qh)) {
		if (hsotg->non_periodic_qh_ptr == &qh->qh_list_entry)
			hsotg->non_periodic_qh_ptr =
					hsotg->non_periodic_qh_ptr->next;
		list_del_init(&qh->qh_list_entry);
		return;
	}

	dwc2_deschedule_periodic(hsotg, qh);
	hsotg->periodic_qh_count--;
	if (!hsotg->periodic_qh_count &&
	    !hsotg->params.dma_desc_enable) {
		intr_mask = dwc2_readl(hsotg, GINTMSK);
		intr_mask &= ~GINTSTS_SOF;
		dwc2_writel(hsotg, intr_mask, GINTMSK);
	}
}
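
/**
 * dwc2_next_for_periodic_split() - Set next_active_frame for the next stage
 * of a periodic split transfer and report how many frames were missed, if
 * any.
 *
 * @hsotg:        The HCD state structure for the DWC OTG controller.
 * @qh:           QH for the periodic transfer.
 * @frame_number: The current frame number.
 *
 * Return: The number of frames missed (0 if none).
 */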
static int dwc2_next_for_periodic_split(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh, u16 frame_number)
{
	u16 old_frame = qh->next_active_frame;
	u16 prev_frame_number = dwc2_frame_num_dec(frame_number, 1);
	int missed = 0;
	u16 incr;

	/*
	 * See dwc2_uframe_schedule_split() for how splits are laid out.
	 * Normally advance one microframe at a time, but skip an extra one
	 * right after the start split (except for isochronous OUT, where
	 * data keeps going out in consecutive microframes).
	 */
	if (old_frame == qh->start_active_frame &&
	    !(qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in))
		incr = 2;
	else
		incr = 1;

	qh->next_active_frame = dwc2_frame_num_inc(old_frame, incr);

	/*
	 * It's OK for frame_number to be 1 frame past next_active_frame:
	 * next_active_frame is supposed to be 1 frame _before_ when we want
	 * to be scheduled, so being 1 frame past just means "schedule ASAP".
	 * Being more than one frame past is a genuine miss.
	 */
	if (dwc2_frame_num_gt(prev_frame_number, qh->next_active_frame)) {
		/* We missed; schedule ASAP and report how far off we are */
		missed = dwc2_frame_num_dec(prev_frame_number,
					    qh->next_active_frame);
		qh->next_active_frame = frame_number;
	}

	return missed;
}
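
/**
 * dwc2_next_periodic_start() - Advance start_active_frame (and
 * next_active_frame) by one interval for the next instance of a periodic
 * transfer, skipping ahead by whole intervals if it is already too late.
 *
 * @hsotg:        The HCD state structure for the DWC OTG controller.
 * @qh:           QH for the periodic transfer.
 * @frame_number: The current frame number.
 *
 * Return: The number of frames missed (0 if none).
 */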
static int dwc2_next_periodic_start(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 frame_number)
{
	int missed = 0;
	u16 interval = qh->host_interval;
	u16 prev_frame_number = dwc2_frame_num_dec(frame_number, 1);

	qh->start_active_frame = dwc2_frame_num_inc(qh->start_active_frame,
						    interval);

	/*
	 * The miss detection below doesn't work well with very large
	 * intervals since the frame counter only goes to 0x3fff, and it's
	 * very unlikely that we actually missed in that case anyway, so just
	 * skip it.
	 */
	if (interval >= 0x1000)
		goto exit;

	/* Test for misses, which is when it's too late to schedule */
	if (qh->start_active_frame == qh->next_active_frame ||
	    dwc2_frame_num_gt(prev_frame_number, qh->start_active_frame)) {
		u16 ideal_start = qh->start_active_frame;
		int periods_in_map;

		/*
		 * Adjust interval as per gcd with the schedule length; see
		 * pmap_schedule() for more details.
		 */
		if (qh->do_split || qh->dev_speed == USB_SPEED_HIGH)
			periods_in_map = DWC2_HS_SCHEDULE_UFRAMES;
		else
			periods_in_map = DWC2_LS_SCHEDULE_FRAMES;
		interval = gcd(interval, periods_in_map);

		do {
			qh->start_active_frame = dwc2_frame_num_inc(
				qh->start_active_frame, interval);
		} while (dwc2_frame_num_gt(prev_frame_number,
					   qh->start_active_frame));

		missed = dwc2_frame_num_dec(qh->start_active_frame,
					    ideal_start);
	}

exit:
	qh->next_active_frame = qh->start_active_frame;

	return missed;
}
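
/**
 * dwc2_hcd_qh_deactivate() - Deactivate a QH after a transfer completes: a
 * non-periodic QH is re-added if work remains; for a periodic QH the next
 * active frame is computed and the QH is moved to the ready or inactive list.
 *
 * @hsotg:                     The HCD state structure for the DWC OTG
 *                             controller.
 * @qh:                        The QH to deactivate.
 * @sched_next_periodic_split: True if this is a periodic split and the next
 *                             stage of the same split should be scheduled.
 */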
void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			    int sched_next_periodic_split)
{
	u16 old_frame = qh->next_active_frame;
	u16 frame_number;
	int missed;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (dwc2_qh_is_non_per(qh)) {
		dwc2_hcd_qh_unlink(hsotg, qh);
		if (!list_empty(&qh->qtd_list))
			/* Add back to inactive/waiting non-periodic schedule */
			dwc2_hcd_qh_add(hsotg, qh);
		return;
	}

	/*
	 * Use the real frame number rather than the cached value as of the
	 * last SOF just to get a little closer to reality.  That means we
	 * don't actually know whether the SOF interrupt for this frame has
	 * been handled yet.
	 */
	frame_number = dwc2_hcd_get_frame_number(hsotg);

	if (sched_next_periodic_split)
		missed = dwc2_next_for_periodic_split(hsotg, qh, frame_number);
	else
		missed = dwc2_next_periodic_start(hsotg, qh, frame_number);

	dwc2_sch_vdbg(hsotg,
		      "QH=%p next(%d) fn=%04x, sch=%04x=>%04x (%+d) miss=%d %s\n",
		      qh, sched_next_periodic_split, frame_number, old_frame,
		      qh->next_active_frame,
		      dwc2_frame_num_dec(qh->next_active_frame, old_frame),
		      missed, missed ? "MISS" : "");

	if (list_empty(&qh->qtd_list)) {
		dwc2_hcd_qh_unlink(hsotg, qh);
		return;
	}

	/*
	 * Move the QH to the appropriate periodic list: ready if its next
	 * start is already due, inactive otherwise.
	 */
	if (dwc2_frame_num_le(qh->next_active_frame, hsotg->frame_number))
		list_move_tail(&qh->qh_list_entry,
			       &hsotg->periodic_sched_ready);
	else
		list_move_tail(&qh->qh_list_entry,
			       &hsotg->periodic_sched_inactive);
}
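
/**
 * dwc2_hcd_qtd_init() - Initialize a QTD from an URB.
 *
 * @qtd: The QTD to initialize.
 * @urb: The associated URB.
 */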
void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
{
	qtd->urb = urb;
	if (dwc2_hcd_get_pipe_type(&urb->pipe_info) ==
			USB_ENDPOINT_XFER_CONTROL) {
		/*
		 * The only time the QTD data toggle is used is on the data
		 * phase of control transfers.  This phase always starts with
		 * DATA1.
		 */
		qtd->data_toggle = DWC2_HC_PID_DATA1;
		qtd->control_phase = DWC2_CONTROL_SETUP;
	}

	/* Start split */
	qtd->complete_split = 0;
	qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
	qtd->isoc_split_offset = 0;
	qtd->in_process = 0;

	/* Store the qtd pointer in the urb to reference the QTD */
	urb->qtd = qtd;
}
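
/**
 * dwc2_hcd_qtd_add() - Add a QTD to the QTD list of a QH, adding the QH to
 * the appropriate schedule first if necessary.
 *
 * @hsotg: The DWC HCD structure.
 * @qtd:   The QTD to add.
 * @qh:    Queue head to add qtd to.
 *
 * Return: 0 if successful, negative error code otherwise.
 */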
int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
		     struct dwc2_qh *qh)
{
	int retval;

	if (unlikely(!qh)) {
		dev_err(hsotg->dev, "%s: Invalid QH\n", __func__);
		retval = -EINVAL;
		goto fail;
	}

	retval = dwc2_hcd_qh_add(hsotg, qh);
	if (retval)
		goto fail;

	qtd->qh = qh;
	list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list);

	return 0;
fail:
	return retval;
}