// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>

#include "tunnel.h"
#include "tb.h"
/* PCIe adapters use always HopID of 8 for both directions */
#define TB_PCI_HOPID			8

#define TB_PCI_PATH_DOWN		0
#define TB_PCI_PATH_UP			1

/* USB3 adapters use always HopID of 8 for both directions */
#define TB_USB3_HOPID			8

#define TB_USB3_PATH_DOWN		0
#define TB_USB3_PATH_UP			1

/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID		8
#define TB_DP_AUX_RX_HOPID		8
#define TB_DP_VIDEO_HOPID		9

#define TB_DP_VIDEO_PATH_OUT		0
#define TB_DP_AUX_PATH_OUT		1
#define TB_DP_AUX_PATH_IN		2

/* Minimum number of credits needed for PCIe path */
#define TB_MIN_PCIE_CREDITS		6U
/*
 * Number of credits we try to allocate for each DMA path if not limited
 * by the host router.
 */
#define TB_DMA_CREDITS			14U
/* Minimum number of credits for DMA path */
#define TB_MIN_DMA_CREDITS		1U

static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };

#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
        do {                                                            \
                struct tb_tunnel *__tunnel = (tunnel);                  \
                level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt,   \
                      tb_route(__tunnel->src_port->sw),                 \
                      __tunnel->src_port->port,                         \
                      tb_route(__tunnel->dst_port->sw),                 \
                      __tunnel->dst_port->port,                         \
                      tb_tunnel_names[__tunnel->type],                  \
                      ## arg);                                          \
        } while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
        __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
        __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
        __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
        __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)

static inline unsigned int tb_usable_credits(const struct tb_port *port)
{
        return port->total_credits - port->ctl_credits;
}
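
/**
 * tb_available_credits() - Available credits for PCIe and DMA
 * @port: Lane adapter to check
 * @max_dp_streams: If not %NULL, returns here the maximum number of
 *		    simultaneous DP streams that can go through @port
 */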
static unsigned int tb_available_credits(const struct tb_port *port,
                                         size_t *max_dp_streams)
{
        const struct tb_switch *sw = port->sw;
        int credits, usb3, pcie, spare;
        size_t ndp;

        usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
        pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;

        if (tb_acpi_is_xdomain_allowed()) {
                spare = min_not_zero(sw->max_dma_credits, TB_DMA_CREDITS);
                /* Add some credits for potential second DMA tunnel */
                spare += TB_MIN_DMA_CREDITS;
        } else {
                spare = 0;
        }

        credits = tb_usable_credits(port);
        if (tb_acpi_may_tunnel_dp()) {
                /*
                 * Maximum number of DP streams possible through the
                 * lane adapter.
                 */
                ndp = (credits - (usb3 + pcie + spare)) /
                      (sw->min_dp_aux_credits + sw->min_dp_main_credits);
        } else {
                ndp = 0;
        }
        credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
        credits -= usb3;

        if (max_dp_streams)
                *max_dp_streams = ndp;

        return credits > 0 ? credits : 0;
}

static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
                                         enum tb_tunnel_type type)
{
        struct tb_tunnel *tunnel;

        tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
        if (!tunnel)
                return NULL;

        tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
        if (!tunnel->paths) {
                tb_tunnel_free(tunnel);
                return NULL;
        }

        INIT_LIST_HEAD(&tunnel->list);
        tunnel->tb = tb;
        tunnel->npaths = npaths;
        tunnel->type = type;

        return tunnel;
}

static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
        int res;

        res = tb_pci_port_enable(tunnel->src_port, activate);
        if (res)
                return res;

        if (tb_port_is_pcie_up(tunnel->dst_port))
                return tb_pci_port_enable(tunnel->dst_port, activate);

        return 0;
}

static int tb_pci_init_credits(struct tb_path_hop *hop)
{
        struct tb_port *port = hop->in_port;
        struct tb_switch *sw = port->sw;
        unsigned int credits;

        if (tb_port_use_credit_allocation(port)) {
                unsigned int available;

                available = tb_available_credits(port, NULL);
                credits = min(sw->max_pcie_credits, available);

                if (credits < TB_MIN_PCIE_CREDITS)
                        return -ENOSPC;

                credits = max(TB_MIN_PCIE_CREDITS, credits);
        } else {
                /* Legacy routers use a fixed buffer allocation */
                if (tb_port_is_null(port))
                        credits = port->bonded ? 32 : 16;
                else
                        credits = 7;
        }

        hop->initial_credits = credits;
        return 0;
}

static int tb_pci_init_path(struct tb_path *path)
{
        struct tb_path_hop *hop;

        path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 3;
        path->weight = 1;
        path->drop_packages = 0;

        tb_path_for_each_hop(path, hop) {
                int ret;

                ret = tb_pci_init_credits(hop);
                if (ret)
                        return ret;
        }

        return 0;
}
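
/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */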
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
{
        struct tb_tunnel *tunnel;
        struct tb_path *path;

        if (!tb_pci_port_is_enabled(down))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
        if (!tunnel)
                return NULL;

        tunnel->activate = tb_pci_activate;
        tunnel->src_port = down;

        /*
         * Discover both paths even if they are not complete. We will
         * clean them up by calling tb_tunnel_deactivate() below in that
         * case.
         */
        path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
                                &tunnel->dst_port, "PCIe Up");
        if (!path) {
                /* Just disable the downstream port */
                tb_pci_port_enable(down, false);
                goto err_free;
        }
        tunnel->paths[TB_PCI_PATH_UP] = path;
        if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
                goto err_free;

        path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
                                "PCIe Down");
        if (!path)
                goto err_deactivate;
        tunnel->paths[TB_PCI_PATH_DOWN] = path;
        if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
                goto err_deactivate;

        /* Validate that the tunnel is complete */
        if (!tb_port_is_pcie_up(tunnel->dst_port)) {
                tb_port_warn(tunnel->dst_port,
                             "path does not end on a PCIe adapter, cleaning up\n");
                goto err_deactivate;
        }

        if (down != tunnel->src_port) {
                tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
                goto err_deactivate;
        }

        if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
                tb_tunnel_warn(tunnel,
                               "tunnel is not fully activated, cleaning up\n");
                goto err_deactivate;
        }

        tb_tunnel_dbg(tunnel, "discovered\n");
        return tunnel;

err_deactivate:
        tb_tunnel_deactivate(tunnel);
err_free:
        tb_tunnel_free(tunnel);

        return NULL;
}
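
/**
 * tb_tunnel_alloc_pci() - allocate a PCIe tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCIe tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 * TB_TYPE_PCIE_DOWN.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */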
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
                                      struct tb_port *down)
{
        struct tb_tunnel *tunnel;
        struct tb_path *path;

        tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
        if (!tunnel)
                return NULL;

        tunnel->activate = tb_pci_activate;
        tunnel->src_port = down;
        tunnel->dst_port = up;

        path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
                             "PCIe Down");
        if (!path)
                goto err_free;
        tunnel->paths[TB_PCI_PATH_DOWN] = path;
        if (tb_pci_init_path(path))
                goto err_free;

        path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
                             "PCIe Up");
        if (!path)
                goto err_free;
        tunnel->paths[TB_PCI_PATH_UP] = path;
        if (tb_pci_init_path(path))
                goto err_free;

        return tunnel;

err_free:
        tb_tunnel_free(tunnel);
        return NULL;
}
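
/*
 * Typical caller flow (an illustrative sketch only; the real user is
 * the connection manager and the variable names here are made up):
 *
 *	tunnel = tb_tunnel_alloc_pci(tb, up, down);
 *	if (!tunnel)
 *		return -ENOMEM;
 *	if (tb_tunnel_activate(tunnel)) {
 *		tb_tunnel_free(tunnel);
 *		return -EIO;
 *	}
 *	list_add_tail(&tunnel->list, &tunnel_list);
 */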

static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
        /* Titan Ridge DP adapters need the same handling as USB4 */
        return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}

static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
{
        int timeout = 10;
        u32 val;
        int ret;

        /* Both ends need to support this */
        if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
                return 0;

        ret = tb_port_read(out, &val, TB_CFG_PORT,
                           out->cap_adap + DP_STATUS_CTRL, 1);
        if (ret)
                return ret;

        val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;

        ret = tb_port_write(out, &val, TB_CFG_PORT,
                            out->cap_adap + DP_STATUS_CTRL, 1);
        if (ret)
                return ret;

        do {
                ret = tb_port_read(out, &val, TB_CFG_PORT,
                                   out->cap_adap + DP_STATUS_CTRL, 1);
                if (ret)
                        return ret;
                if (!(val & DP_STATUS_CTRL_CMHS))
                        return 0;
                usleep_range(10, 100);
        } while (timeout--);

        return -ETIMEDOUT;
}

static inline u32 tb_dp_cap_get_rate(u32 val)
{
        u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;

        switch (rate) {
        case DP_COMMON_CAP_RATE_RBR:
                return 1620;
        case DP_COMMON_CAP_RATE_HBR:
                return 2700;
        case DP_COMMON_CAP_RATE_HBR2:
                return 5400;
        case DP_COMMON_CAP_RATE_HBR3:
                return 8100;
        default:
                return 0;
        }
}

static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
        val &= ~DP_COMMON_CAP_RATE_MASK;
        switch (rate) {
        default:
                WARN(1, "invalid rate %u passed, defaulting to 1620 Mb/s\n",
                     rate);
                fallthrough;
        case 1620:
                val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
                break;
        case 2700:
                val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
                break;
        case 5400:
                val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
                break;
        case 8100:
                val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
                break;
        }
        return val;
}

static inline u32 tb_dp_cap_get_lanes(u32 val)
{
        u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;

        switch (lanes) {
        case DP_COMMON_CAP_1_LANE:
                return 1;
        case DP_COMMON_CAP_2_LANES:
                return 2;
        case DP_COMMON_CAP_4_LANES:
                return 4;
        default:
                return 0;
        }
}

static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
{
        val &= ~DP_COMMON_CAP_LANES_MASK;
        switch (lanes) {
        default:
                WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
                     lanes);
                fallthrough;
        case 1:
                val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
                break;
        case 2:
                val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
                break;
        case 4:
                val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
                break;
        }
        return val;
}

static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
        /* Tunneling removes the DP 8b/10b encoding */
        return rate * lanes * 8 / 10;
}
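
/*
 * Worked example (derived from the formula above, not from any spec
 * table): HBR2 at 5400 Mb/s over two lanes carries
 * 5400 * 2 * 8 / 10 = 8640 Mb/s of actual video payload.
 */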

static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
                                  u32 out_rate, u32 out_lanes, u32 *new_rate,
                                  u32 *new_lanes)
{
        static const u32 dp_bw[][2] = {
                /* Mb/s, lanes */
                { 8100, 4 }, /* 25920 Mb/s */
                { 5400, 4 }, /* 17280 Mb/s */
                { 8100, 2 }, /* 12960 Mb/s */
                { 2700, 4 }, /* 8640 Mb/s */
                { 5400, 2 }, /* 8640 Mb/s */
                { 8100, 1 }, /* 6480 Mb/s */
                { 1620, 4 }, /* 5184 Mb/s */
                { 5400, 1 }, /* 4320 Mb/s */
                { 2700, 2 }, /* 4320 Mb/s */
                { 1620, 2 }, /* 2592 Mb/s */
                { 2700, 1 }, /* 2160 Mb/s */
                { 1620, 1 }, /* 1296 Mb/s */
        };
        unsigned int i;

        /*
         * Find a combination that can fit into max_bw and does not
         * exceed the maximum rate and lanes supported by the DP OUT and
         * DP IN adapters.
         */
        for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
                if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
                        continue;

                if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
                        continue;

                if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
                        *new_rate = dp_bw[i][0];
                        *new_lanes = dp_bw[i][1];
                        return 0;
                }
        }

        return -ENOSR;
}

static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
        u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
        struct tb_port *out = tunnel->dst_port;
        struct tb_port *in = tunnel->src_port;
        int ret, max_bw;

        /*
         * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
         * newer generation hardware.
         */
        if (in->sw->generation < 2 || out->sw->generation < 2)
                return 0;

        /*
         * Perform connection manager handshake between IN and OUT ports
         * before capabilities exchange can take place.
         */
        ret = tb_dp_cm_handshake(in, out);
        if (ret)
                return ret;

        /* Read both DP_LOCAL_CAP registers */
        ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
                           in->cap_adap + DP_LOCAL_CAP, 1);
        if (ret)
                return ret;

        ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
                           out->cap_adap + DP_LOCAL_CAP, 1);
        if (ret)
                return ret;

        /* Write IN local caps to OUT remote caps */
        ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
                            out->cap_adap + DP_REMOTE_CAP, 1);
        if (ret)
                return ret;

        in_rate = tb_dp_cap_get_rate(in_dp_cap);
        in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
        tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
                    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

        /*
         * If the tunnel bandwidth is limited (max_bw is set) then see
         * if we need to reduce bandwidth to fit there.
         */
        out_rate = tb_dp_cap_get_rate(out_dp_cap);
        out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
        bw = tb_dp_bandwidth(out_rate, out_lanes);
        tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
                    out_rate, out_lanes, bw);

        if (in->sw->config.depth < out->sw->config.depth)
                max_bw = tunnel->max_down;
        else
                max_bw = tunnel->max_up;

        if (max_bw && bw > max_bw) {
                u32 new_rate, new_lanes, new_bw;

                ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
                                             out_rate, out_lanes, &new_rate,
                                             &new_lanes);
                if (ret) {
                        tb_port_info(out, "not enough bandwidth for DP tunnel\n");
                        return ret;
                }

                new_bw = tb_dp_bandwidth(new_rate, new_lanes);
                tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
                            new_rate, new_lanes, new_bw);

                /*
                 * Set new rate and number of lanes before writing it to
                 * the IN port remote caps.
                 */
                out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
                out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
        }

        return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
                             in->cap_adap + DP_REMOTE_CAP, 1);
}

static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
        int ret;

        if (active) {
                struct tb_path **paths;
                int last;

                paths = tunnel->paths;
                last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

                tb_dp_port_set_hops(tunnel->src_port,
                        paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
                        paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
                        paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

                tb_dp_port_set_hops(tunnel->dst_port,
                        paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
                        paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
                        paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
        } else {
                tb_dp_port_hpd_clear(tunnel->src_port);
                tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
                if (tb_port_is_dpout(tunnel->dst_port))
                        tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
        }

        ret = tb_dp_port_enable(tunnel->src_port, active);
        if (ret)
                return ret;

        if (tb_port_is_dpout(tunnel->dst_port))
                return tb_dp_port_enable(tunnel->dst_port, active);

        return 0;
}

static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
                                    int *consumed_down)
{
        struct tb_port *in = tunnel->src_port;
        const struct tb_switch *sw = in->sw;
        u32 val, rate = 0, lanes = 0;
        int ret;

        if (tb_dp_is_usb4(sw)) {
                int timeout = 20;

                /*
                 * Wait for DPRX done. Normally it should be already set
                 * for active tunnel.
                 */
                do {
                        ret = tb_port_read(in, &val, TB_CFG_PORT,
                                           in->cap_adap + DP_COMMON_CAP, 1);
                        if (ret)
                                return ret;

                        if (val & DP_COMMON_CAP_DPRX_DONE) {
                                rate = tb_dp_cap_get_rate(val);
                                lanes = tb_dp_cap_get_lanes(val);
                                break;
                        }
                        msleep(250);
                } while (timeout--);

                if (!timeout)
                        return -ETIMEDOUT;
        } else if (sw->generation >= 2) {
                /*
                 * Read from the copied remote cap so that we take into
                 * account if capabilities were reduced during exchange.
                 */
                ret = tb_port_read(in, &val, TB_CFG_PORT,
                                   in->cap_adap + DP_REMOTE_CAP, 1);
                if (ret)
                        return ret;

                rate = tb_dp_cap_get_rate(val);
                lanes = tb_dp_cap_get_lanes(val);
        } else {
                /* No bandwidth management for legacy devices */
                *consumed_up = 0;
                *consumed_down = 0;
                return 0;
        }

        if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
                *consumed_up = 0;
                *consumed_down = tb_dp_bandwidth(rate, lanes);
        } else {
                *consumed_up = tb_dp_bandwidth(rate, lanes);
                *consumed_down = 0;
        }

        return 0;
}

static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
{
        struct tb_port *port = hop->in_port;
        struct tb_switch *sw = port->sw;

        if (tb_port_use_credit_allocation(port))
                hop->initial_credits = sw->min_dp_aux_credits;
        else
                hop->initial_credits = 1;
}

static void tb_dp_init_aux_path(struct tb_path *path)
{
        struct tb_path_hop *hop;

        path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 2;
        path->weight = 1;

        tb_path_for_each_hop(path, hop)
                tb_dp_init_aux_credits(hop);
}

static int tb_dp_init_video_credits(struct tb_path_hop *hop)
{
        struct tb_port *port = hop->in_port;
        struct tb_switch *sw = port->sw;

        if (tb_port_use_credit_allocation(port)) {
                unsigned int nfc_credits;
                size_t max_dp_streams;

                tb_available_credits(port, &max_dp_streams);

                /*
                 * Read the number of currently allocated NFC credits
                 * from the lane adapter. Since we only use them for DP
                 * tunneling we can use that to figure out how many DP
                 * streams already go through the lane adapter.
                 */
                nfc_credits = port->config.nfc_credits &
                                ADP_CS_4_NFC_BUFFERS_MASK;
                if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
                        return -ENOSPC;

                hop->nfc_credits = sw->min_dp_main_credits;
        } else {
                hop->nfc_credits = min(port->total_credits - 2, 12U);
        }

        return 0;
}

static int tb_dp_init_video_path(struct tb_path *path)
{
        struct tb_path_hop *hop;

        path->egress_fc_enable = TB_PATH_NONE;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_fc_enable = TB_PATH_NONE;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 1;
        path->weight = 1;

        tb_path_for_each_hop(path, hop) {
                int ret;

                ret = tb_dp_init_video_credits(hop);
                if (ret)
                        return ret;
        }

        return 0;
}
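
/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back. Returns the discovered tunnel or %NULL if there was no
 * tunnel.
 */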
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
{
        struct tb_tunnel *tunnel;
        struct tb_port *port;
        struct tb_path *path;

        if (!tb_dp_port_is_enabled(in))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
        if (!tunnel)
                return NULL;

        tunnel->init = tb_dp_xchg_caps;
        tunnel->activate = tb_dp_activate;
        tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
        tunnel->src_port = in;

        path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
                                &tunnel->dst_port, "Video");
        if (!path) {
                /* Just disable the DP IN port */
                tb_dp_port_enable(in, false);
                goto err_free;
        }
        tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
        if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT]))
                goto err_free;

        path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
        if (!path)
                goto err_deactivate;
        tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
        tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);

        path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
                                &port, "AUX RX");
        if (!path)
                goto err_deactivate;
        tunnel->paths[TB_DP_AUX_PATH_IN] = path;
        tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);

        /* Validate that the tunnel is complete */
        if (!tb_port_is_dpout(tunnel->dst_port)) {
                tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
                goto err_deactivate;
        }

        if (!tb_dp_port_is_enabled(tunnel->dst_port))
                goto err_deactivate;

        if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
                goto err_deactivate;

        if (port != tunnel->src_port) {
                tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
                goto err_deactivate;
        }

        tb_tunnel_dbg(tunnel, "discovered\n");
        return tunnel;

err_deactivate:
        tb_tunnel_deactivate(tunnel);
err_free:
        tb_tunnel_free(tunnel);

        return NULL;
}
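
/**
 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
 *	    if not limited)
 * @max_down: Maximum available downstream bandwidth for the DP tunnel
 *	      (%0 if not limited)
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */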
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
                                     struct tb_port *out, int max_up,
                                     int max_down)
{
        struct tb_tunnel *tunnel;
        struct tb_path **paths;
        struct tb_path *path;

        if (WARN_ON(!in->cap_adap || !out->cap_adap))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
        if (!tunnel)
                return NULL;

        tunnel->init = tb_dp_xchg_caps;
        tunnel->activate = tb_dp_activate;
        tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
        tunnel->src_port = in;
        tunnel->dst_port = out;
        tunnel->max_up = max_up;
        tunnel->max_down = max_down;

        paths = tunnel->paths;

        path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
                             1, "Video");
        if (!path)
                goto err_free;
        tb_dp_init_video_path(path);
        paths[TB_DP_VIDEO_PATH_OUT] = path;

        path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
                             TB_DP_AUX_TX_HOPID, 1, "AUX TX");
        if (!path)
                goto err_free;
        tb_dp_init_aux_path(path);
        paths[TB_DP_AUX_PATH_OUT] = path;

        path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
                             TB_DP_AUX_RX_HOPID, 1, "AUX RX");
        if (!path)
                goto err_free;
        tb_dp_init_aux_path(path);
        paths[TB_DP_AUX_PATH_IN] = path;

        return tunnel;

err_free:
        tb_tunnel_free(tunnel);
        return NULL;
}

static unsigned int tb_dma_available_credits(const struct tb_port *port)
{
        const struct tb_switch *sw = port->sw;
        int credits;

        credits = tb_available_credits(port, NULL);
        if (tb_acpi_may_tunnel_pcie())
                credits -= sw->max_pcie_credits;
        credits -= port->dma_credits;

        return credits > 0 ? credits : 0;
}

static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
{
        struct tb_port *port = hop->in_port;

        if (tb_port_use_credit_allocation(port)) {
                unsigned int available = tb_dma_available_credits(port);

                /*
                 * Need to have at least TB_MIN_DMA_CREDITS, otherwise
                 * DMA path cannot be established.
                 */
                if (available < TB_MIN_DMA_CREDITS)
                        return -ENOSPC;

                while (credits > available)
                        credits--;

                tb_port_dbg(port, "reserving %u credits for DMA path\n",
                            credits);

                port->dma_credits += credits;
        } else {
                if (tb_port_is_null(port))
                        credits = port->bonded ? 14 : 6;
                else
                        credits = min(port->total_credits, credits);
        }

        hop->initial_credits = credits;
        return 0;
}

/* Path from lane adapter to NHI */
static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
{
        struct tb_path_hop *hop;
        unsigned int i, tmp;

        path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 5;
        path->weight = 1;
        path->clear_fc = true;

        /*
         * First lane adapter is the one connected to the remote host.
         * We don't tunnel other traffic over this link so can use all
         * the credits (except the ones reserved for control traffic).
         */
        hop = &path->hops[0];
        tmp = min(tb_usable_credits(hop->in_port), credits);
        hop->initial_credits = tmp;
        hop->in_port->dma_credits += tmp;

        for (i = 1; i < path->path_length; i++) {
                int ret;

                ret = tb_dma_reserve_credits(&path->hops[i], credits);
                if (ret)
                        return ret;
        }

        return 0;
}

/* Path from NHI to lane adapter */
static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
{
        struct tb_path_hop *hop;

        path->egress_fc_enable = TB_PATH_ALL;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 5;
        path->weight = 1;
        path->clear_fc = true;

        tb_path_for_each_hop(path, hop) {
                int ret;

                ret = tb_dma_reserve_credits(hop, credits);
                if (ret)
                        return ret;
        }

        return 0;
}

static void tb_dma_release_credits(struct tb_path_hop *hop)
{
        struct tb_port *port = hop->in_port;

        if (tb_port_use_credit_allocation(port)) {
                port->dma_credits -= hop->initial_credits;

                tb_port_dbg(port, "released %u DMA path credits\n",
                            hop->initial_credits);
        }
}

static void tb_dma_deinit_path(struct tb_path *path)
{
        struct tb_path_hop *hop;

        tb_path_for_each_hop(path, hop)
                tb_dma_release_credits(hop);
}

static void tb_dma_deinit(struct tb_tunnel *tunnel)
{
        int i;

        for (i = 0; i < tunnel->npaths; i++) {
                if (!tunnel->paths[i])
                        continue;
                tb_dma_deinit_path(tunnel->paths[i]);
        }
}
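
/**
 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
 * @tb: Pointer to the domain structure
 * @nhi: Host controller port
 * @dst: Destination null port which the other domain is connected to
 * @transmit_path: HopID used for transmitting packets
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain. Set to %-1 if TX path is not needed.
 * @receive_path: HopID used for receiving packets
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain. Set to %-1 if RX path is not needed.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */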
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
                                      struct tb_port *dst, int transmit_path,
                                      int transmit_ring, int receive_path,
                                      int receive_ring)
{
        struct tb_tunnel *tunnel;
        size_t npaths = 0, i = 0;
        struct tb_path *path;
        int credits;

        if (receive_ring > 0)
                npaths++;
        if (transmit_ring > 0)
                npaths++;

        if (WARN_ON(!npaths))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
        if (!tunnel)
                return NULL;

        tunnel->src_port = nhi;
        tunnel->dst_port = dst;
        tunnel->deinit = tb_dma_deinit;

        credits = min_not_zero(TB_DMA_CREDITS, nhi->sw->max_dma_credits);

        if (receive_ring > 0) {
                path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
                                     "DMA RX");
                if (!path)
                        goto err_free;
                tunnel->paths[i++] = path;
                if (tb_dma_init_rx_path(path, credits)) {
                        tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
                        goto err_free;
                }
        }

        if (transmit_ring > 0) {
                path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
                                     "DMA TX");
                if (!path)
                        goto err_free;
                tunnel->paths[i++] = path;
                if (tb_dma_init_tx_path(path, credits)) {
                        tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
                        goto err_free;
                }
        }

        return tunnel;

err_free:
        tb_tunnel_free(tunnel);
        return NULL;
}
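
/*
 * Example XDomain service flow (a sketch only; the ring and HopID
 * numbers below are illustrative, the real values come from the
 * service protocol negotiation):
 *
 *	tunnel = tb_tunnel_alloc_dma(tb, nhi, dst, 8, 1, 8, 1);
 *	if (tunnel && tb_tunnel_activate(tunnel))
 *		tb_tunnel_free(tunnel);
 */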
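
/**
 * tb_tunnel_match_dma() - Match DMA tunnel
 * @tunnel: Tunnel to match
 * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain. Pass %-1 to ignore.
 * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain. Pass %-1 to ignore.
 *
 * This function can be used to match specific DMA tunnel, if there are
 * multiple DMA tunnels going through the same XDomain connection.
 * Returns true if there is match and false otherwise.
 */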
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
                         int transmit_ring, int receive_path, int receive_ring)
{
        const struct tb_path *tx_path = NULL, *rx_path = NULL;
        int i;

        if (!receive_ring || !transmit_ring)
                return false;

        for (i = 0; i < tunnel->npaths; i++) {
                const struct tb_path *path = tunnel->paths[i];

                if (!path)
                        continue;

                if (tb_port_is_nhi(path->hops[0].in_port))
                        tx_path = path;
                else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
                        rx_path = path;
        }

        if (transmit_ring > 0 || transmit_path > 0) {
                if (!tx_path)
                        return false;
                if (transmit_ring > 0 &&
                    (tx_path->hops[0].in_hop_index != transmit_ring))
                        return false;
                if (transmit_path > 0 &&
                    (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
                        return false;
        }

        if (receive_ring > 0 || receive_path > 0) {
                if (!rx_path)
                        return false;
                if (receive_path > 0 &&
                    (rx_path->hops[0].in_hop_index != receive_path))
                        return false;
                if (receive_ring > 0 &&
                    (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
                        return false;
        }

        return true;
}

static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
{
        int ret, up_max_rate, down_max_rate;

        ret = usb4_usb3_port_max_link_rate(up);
        if (ret < 0)
                return ret;
        up_max_rate = ret;

        ret = usb4_usb3_port_max_link_rate(down);
        if (ret < 0)
                return ret;
        down_max_rate = ret;

        return min(up_max_rate, down_max_rate);
}

static int tb_usb3_init(struct tb_tunnel *tunnel)
{
        tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
                      tunnel->allocated_up, tunnel->allocated_down);

        return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
                                                 &tunnel->allocated_up,
                                                 &tunnel->allocated_down);
}

static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
{
        int res;

        res = tb_usb3_port_enable(tunnel->src_port, activate);
        if (res)
                return res;

        if (tb_port_is_usb3_up(tunnel->dst_port))
                return tb_usb3_port_enable(tunnel->dst_port, activate);

        return 0;
}

static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
                                      int *consumed_up, int *consumed_down)
{
        int pcie_enabled = tb_acpi_may_tunnel_pcie();

        /*
         * PCIe tunneling, if enabled, affects the USB3 bandwidth so
         * take that into account here.
         */
        *consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3;
        *consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3;
        return 0;
}

static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
        int ret;

        ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
                                               &tunnel->allocated_up,
                                               &tunnel->allocated_down);
        if (ret)
                return ret;

        tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
                      tunnel->allocated_up, tunnel->allocated_down);
        return 0;
}

static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
                                                int *available_up,
                                                int *available_down)
{
        int ret, max_rate, allocate_up, allocate_down;

        ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
        if (ret < 0) {
                tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
                return;
        } else if (!ret) {
                /* Use maximum link rate if the link valid is not set */
                ret = usb4_usb3_port_max_link_rate(tunnel->src_port);
                if (ret < 0) {
                        tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
                        return;
                }
        }

        /*
         * 90% of the max rate can be allocated for isochronous
         * transfers.
         */
        max_rate = ret * 90 / 100;

        /* No need to reclaim if already at maximum */
        if (tunnel->allocated_up >= max_rate &&
            tunnel->allocated_down >= max_rate)
                return;

        /* Don't go lower than what is already allocated */
        allocate_up = min(max_rate, *available_up);
        if (allocate_up < tunnel->allocated_up)
                allocate_up = tunnel->allocated_up;

        allocate_down = min(max_rate, *available_down);
        if (allocate_down < tunnel->allocated_down)
                allocate_down = tunnel->allocated_down;

        /* If no changes no need to do more */
        if (allocate_up == tunnel->allocated_up &&
            allocate_down == tunnel->allocated_down)
                return;

        ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
                                                &allocate_down);
        if (ret) {
                tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
                return;
        }

        tunnel->allocated_up = allocate_up;
        *available_up -= tunnel->allocated_up;

        tunnel->allocated_down = allocate_down;
        *available_down -= tunnel->allocated_down;

        tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
                      tunnel->allocated_up, tunnel->allocated_down);
}

static void tb_usb3_init_credits(struct tb_path_hop *hop)
{
        struct tb_port *port = hop->in_port;
        struct tb_switch *sw = port->sw;
        unsigned int credits;

        if (tb_port_use_credit_allocation(port)) {
                credits = sw->max_usb3_credits;
        } else {
                if (tb_port_is_null(port))
                        credits = port->bonded ? 32 : 16;
                else
                        credits = 7;
        }

        hop->initial_credits = credits;
}

static void tb_usb3_init_path(struct tb_path *path)
{
        struct tb_path_hop *hop;

        path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 3;
        path->weight = 3;
        path->drop_packages = 0;

        tb_path_for_each_hop(path, hop)
                tb_usb3_init_credits(hop);
}
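
/**
 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
 * @tb: Pointer to the domain structure
 * @down: USB3 downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the USB3 upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */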
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
{
        struct tb_tunnel *tunnel;
        struct tb_path *path;

        if (!tb_usb3_port_is_enabled(down))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
        if (!tunnel)
                return NULL;

        tunnel->activate = tb_usb3_activate;
        tunnel->src_port = down;

        /*
         * Discover both paths even if they are not complete. We will
         * clean them up by calling tb_tunnel_deactivate() below in that
         * case.
         */
        path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
                                &tunnel->dst_port, "USB3 Down");
        if (!path) {
                /* Just disable the downstream port */
                tb_usb3_port_enable(down, false);
                goto err_free;
        }
        tunnel->paths[TB_USB3_PATH_DOWN] = path;
        tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);

        path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
                                "USB3 Up");
        if (!path)
                goto err_deactivate;
        tunnel->paths[TB_USB3_PATH_UP] = path;
        tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);

        /* Validate that the tunnel is complete */
        if (!tb_port_is_usb3_up(tunnel->dst_port)) {
                tb_port_warn(tunnel->dst_port,
                             "path does not end on a USB3 adapter, cleaning up\n");
                goto err_deactivate;
        }

        if (down != tunnel->src_port) {
                tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
                goto err_deactivate;
        }

        if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
                tb_tunnel_warn(tunnel,
                               "tunnel is not fully activated, cleaning up\n");
                goto err_deactivate;
        }

        if (!tb_route(down->sw)) {
                int ret;

                /*
                 * Read the initial bandwidth allocation for the first
                 * hop tunnel.
                 */
                ret = usb4_usb3_port_allocated_bandwidth(down,
                        &tunnel->allocated_up, &tunnel->allocated_down);
                if (ret)
                        goto err_deactivate;

                tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
                              tunnel->allocated_up, tunnel->allocated_down);

                tunnel->init = tb_usb3_init;
                tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
                tunnel->release_unused_bandwidth =
                        tb_usb3_release_unused_bandwidth;
                tunnel->reclaim_available_bandwidth =
                        tb_usb3_reclaim_available_bandwidth;
        }

        tb_tunnel_dbg(tunnel, "discovered\n");
        return tunnel;

err_deactivate:
        tb_tunnel_deactivate(tunnel);
err_free:
        tb_tunnel_free(tunnel);

        return NULL;
}
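
/**
 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
 * @tb: Pointer to the domain structure
 * @up: USB3 upstream adapter port
 * @down: USB3 downstream adapter port
 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
 *	    if not limited)
 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
 *	      (%0 if not limited)
 *
 * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
 * @TB_TYPE_USB3_DOWN.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */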
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
                                       struct tb_port *down, int max_up,
                                       int max_down)
{
        struct tb_tunnel *tunnel;
        struct tb_path *path;
        int max_rate = 0;

        /*
         * Check that we have enough bandwidth available for the
         * USB3 tunnel.
         */
        if (max_up > 0 || max_down > 0) {
                max_rate = tb_usb3_max_link_rate(down, up);
                if (max_rate < 0)
                        return NULL;

                /* Only 90% can be allocated for USB3 isochronous transfers */
                max_rate = max_rate * 90 / 100;
                tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
                            max_rate);

                if (max_rate > max_up || max_rate > max_down) {
                        tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
                        return NULL;
                }
        }

        tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
        if (!tunnel)
                return NULL;

        tunnel->activate = tb_usb3_activate;
        tunnel->src_port = down;
        tunnel->dst_port = up;
        tunnel->max_up = max_up;
        tunnel->max_down = max_down;

        path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
                             "USB3 Down");
        if (!path) {
                tb_tunnel_free(tunnel);
                return NULL;
        }
        tb_usb3_init_path(path);
        tunnel->paths[TB_USB3_PATH_DOWN] = path;

        path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
                             "USB3 Up");
        if (!path) {
                tb_tunnel_free(tunnel);
                return NULL;
        }
        tb_usb3_init_path(path);
        tunnel->paths[TB_USB3_PATH_UP] = path;

        if (!tb_route(down->sw)) {
                tunnel->allocated_up = max_rate;
                tunnel->allocated_down = max_rate;

                tunnel->init = tb_usb3_init;
                tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
                tunnel->release_unused_bandwidth =
                        tb_usb3_release_unused_bandwidth;
                tunnel->reclaim_available_bandwidth =
                        tb_usb3_reclaim_available_bandwidth;
        }

        return tunnel;
}
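
/**
 * tb_tunnel_free() - free a tunnel
 * @tunnel: Tunnel to be freed
 *
 * Frees a tunnel. The tunnel does not need to be deactivated.
 */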
void tb_tunnel_free(struct tb_tunnel *tunnel)
{
        int i;

        if (!tunnel)
                return;

        if (tunnel->deinit)
                tunnel->deinit(tunnel);

        for (i = 0; i < tunnel->npaths; i++) {
                if (tunnel->paths[i])
                        tb_path_free(tunnel->paths[i]);
        }

        kfree(tunnel->paths);
        kfree(tunnel);
}
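
/**
 * tb_tunnel_is_invalid - check whether an activated path is still valid
 * @tunnel: Tunnel to check
 */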
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
        int i;

        for (i = 0; i < tunnel->npaths; i++) {
                WARN_ON(!tunnel->paths[i]->activated);
                if (tb_path_is_invalid(tunnel->paths[i]))
                        return true;
        }

        return false;
}
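
/**
 * tb_tunnel_restart() - activate a tunnel after a hardware reset
 * @tunnel: Tunnel to restart
 *
 * Return: 0 on success and negative errno in case of failure
 */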
int tb_tunnel_restart(struct tb_tunnel *tunnel)
{
        int res, i;

        tb_tunnel_dbg(tunnel, "activating\n");

        /*
         * Make sure all paths are properly disabled before enabling
         * them again.
         */
        for (i = 0; i < tunnel->npaths; i++) {
                if (tunnel->paths[i]->activated) {
                        tb_path_deactivate(tunnel->paths[i]);
                        tunnel->paths[i]->activated = false;
                }
        }

        if (tunnel->init) {
                res = tunnel->init(tunnel);
                if (res)
                        return res;
        }

        for (i = 0; i < tunnel->npaths; i++) {
                res = tb_path_activate(tunnel->paths[i]);
                if (res)
                        goto err;
        }

        if (tunnel->activate) {
                res = tunnel->activate(tunnel, true);
                if (res)
                        goto err;
        }

        return 0;

err:
        tb_tunnel_warn(tunnel, "activation failed\n");
        tb_tunnel_deactivate(tunnel);
        return res;
}
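
/**
 * tb_tunnel_activate() - activate a tunnel
 * @tunnel: Tunnel to activate
 *
 * Return: Returns 0 on success or an error code on failure.
 */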
int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
        int i;

        for (i = 0; i < tunnel->npaths; i++) {
                if (tunnel->paths[i]->activated) {
                        tb_tunnel_WARN(tunnel,
                                       "trying to activate an already activated tunnel\n");
                        return -EINVAL;
                }
        }

        return tb_tunnel_restart(tunnel);
}
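
/**
 * tb_tunnel_deactivate() - deactivate a tunnel
 * @tunnel: Tunnel to deactivate
 */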
void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
{
        int i;

        tb_tunnel_dbg(tunnel, "deactivating\n");

        if (tunnel->activate)
                tunnel->activate(tunnel, false);

        for (i = 0; i < tunnel->npaths; i++) {
                if (tunnel->paths[i] && tunnel->paths[i]->activated)
                        tb_path_deactivate(tunnel->paths[i]);
        }
}
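
/**
 * tb_tunnel_port_on_path() - Does the tunnel go through port
 * @tunnel: Tunnel to check
 * @port: Port to check
 *
 * Returns true if @tunnel goes through @port (direction does not matter),
 * false otherwise.
 */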
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
                            const struct tb_port *port)
{
        int i;

        for (i = 0; i < tunnel->npaths; i++) {
                if (!tunnel->paths[i])
                        continue;

                if (tb_path_port_on_path(tunnel->paths[i], port))
                        return true;
        }

        return false;
}

static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
{
        int i;

        for (i = 0; i < tunnel->npaths; i++) {
                if (!tunnel->paths[i])
                        return false;
                if (!tunnel->paths[i]->activated)
                        return false;
        }

        return true;
}
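
/**
 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
 * @tunnel: Tunnel to check
 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
 *		 Can be %NULL.
 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
 *		   Can be %NULL.
 *
 * Stores the amount of isochronous bandwidth @tunnel consumes in
 * @consumed_up and @consumed_down. In case of success returns %0,
 * negative errno otherwise.
 */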
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
                                 int *consumed_down)
{
        int up_bw = 0, down_bw = 0;

        if (!tb_tunnel_is_active(tunnel))
                goto out;

        if (tunnel->consumed_bandwidth) {
                int ret;

                ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
                if (ret)
                        return ret;

                tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
                              down_bw);
        }

out:
        if (consumed_up)
                *consumed_up = up_bw;
        if (consumed_down)
                *consumed_down = down_bw;

        return 0;
}
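
/*
 * Sketch of how a connection manager might account bandwidth with the
 * helper above (illustrative only; tunnel_list is a made-up name):
 *
 *	list_for_each_entry(tunnel, &tunnel_list, list) {
 *		if (tb_tunnel_consumed_bandwidth(tunnel, &up, &down))
 *			continue;
 *		total_up += up;
 *		total_down += down;
 *	}
 */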
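
/**
 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
 * @tunnel: Tunnel whose unused bandwidth to release
 *
 * If tunnel supports dynamic bandwidth management (USB3 tunnels at the
 * moment) this function makes it release all the unused bandwidth.
 *
 * Returns %0 in case of success and negative errno otherwise.
 */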
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
        if (!tb_tunnel_is_active(tunnel))
                return 0;

        if (tunnel->release_unused_bandwidth) {
                int ret;

                ret = tunnel->release_unused_bandwidth(tunnel);
                if (ret)
                        return ret;
        }

        return 0;
}
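
/**
 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
 * @tunnel: Tunnel reclaiming available bandwidth
 * @available_up: Available upstream bandwidth (in Mb/s)
 * @available_down: Available downstream bandwidth (in Mb/s)
 *
 * Reclaims bandwidth from @available_up and @available_down and updates
 * the tunnel. Can fail if the tunnel needs more bandwidth than what is
 * available.
 */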
void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
                                           int *available_up,
                                           int *available_down)
{
        if (!tb_tunnel_is_active(tunnel))
                return;

        if (tunnel->reclaim_available_bandwidth)
                tunnel->reclaim_available_bandwidth(tunnel, available_up,
                                                    available_down);
}
1782