linux/drivers/thunderbolt/tunnel.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>

#include "tunnel.h"
#include "tb.h"

/* PCIe adapters always use HopID 8 for both directions */
#define TB_PCI_HOPID                    8

#define TB_PCI_PATH_DOWN                0
#define TB_PCI_PATH_UP                  1

/* USB3 adapters always use HopID 8 for both directions */
#define TB_USB3_HOPID                   8

#define TB_USB3_PATH_DOWN               0
#define TB_USB3_PATH_UP                 1

/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID              8
#define TB_DP_AUX_RX_HOPID              8
#define TB_DP_VIDEO_HOPID               9

#define TB_DP_VIDEO_PATH_OUT            0
#define TB_DP_AUX_PATH_OUT              1
#define TB_DP_AUX_PATH_IN               2

/* Minimum number of credits needed for the PCIe path */
#define TB_MIN_PCIE_CREDITS             6U
/*
 * Number of credits we try to allocate for each DMA path if not limited
 * by the host router baMaxHI.
 */
#define TB_DMA_CREDITS                  14U
/* Minimum number of credits for a DMA path */
#define TB_MIN_DMA_CREDITS              1U

static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };

#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
        do {                                                            \
                struct tb_tunnel *__tunnel = (tunnel);                  \
                level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt,   \
                      tb_route(__tunnel->src_port->sw),                 \
                      __tunnel->src_port->port,                         \
                      tb_route(__tunnel->dst_port->sw),                 \
                      __tunnel->dst_port->port,                         \
                      tb_tunnel_names[__tunnel->type],                  \
                      ## arg);                                          \
        } while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
        __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
        __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
        __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
        __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)

static inline unsigned int tb_usable_credits(const struct tb_port *port)
{
        return port->total_credits - port->ctl_credits;
}
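
/*
 * For illustration (hypothetical values): a lane adapter reporting
 * total_credits = 60 with ctl_credits = 2 reserved for control traffic
 * leaves tb_usable_credits() == 58 buffers for tunneled protocols.
 */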

/**
 * tb_available_credits() - Available credits for PCIe and DMA
 * @port: Lane adapter to check
 * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
 *                  streams possible through this lane adapter
 */
static unsigned int tb_available_credits(const struct tb_port *port,
                                         size_t *max_dp_streams)
{
        const struct tb_switch *sw = port->sw;
        int credits, usb3, pcie, spare;
        size_t ndp;

        usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
        pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;

        if (tb_acpi_is_xdomain_allowed()) {
                spare = min_not_zero(sw->max_dma_credits, TB_DMA_CREDITS);
                /* Add some credits for potential second DMA tunnel */
                spare += TB_MIN_DMA_CREDITS;
        } else {
                spare = 0;
        }

        credits = tb_usable_credits(port);
        if (tb_acpi_may_tunnel_dp()) {
                /*
                 * Maximum number of DP streams possible through the
                 * lane adapter.
                 */
                ndp = (credits - (usb3 + pcie + spare)) /
                      (sw->min_dp_aux_credits + sw->min_dp_main_credits);
        } else {
                ndp = 0;
        }
        credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
        credits -= usb3;

        if (max_dp_streams)
                *max_dp_streams = ndp;

        return credits > 0 ? credits : 0;
}
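
/*
 * Worked example with hypothetical per-router values: if
 * tb_usable_credits() is 120, max_usb3_credits = 32, max_pcie_credits = 6
 * and the DMA spare is 14 + 1 = 15, then 120 - (32 + 6 + 15) = 67
 * credits are left for DP. With min_dp_aux_credits = 1 and
 * min_dp_main_credits = 18 each stream needs 19 credits, so
 * ndp = 67 / 19 = 3 streams, and the function returns
 * 120 - 3 * 19 - 32 = 31 credits for PCIe/DMA use.
 */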

static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
                                         enum tb_tunnel_type type)
{
        struct tb_tunnel *tunnel;

        tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
        if (!tunnel)
                return NULL;

        tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
        if (!tunnel->paths) {
                tb_tunnel_free(tunnel);
                return NULL;
        }

        INIT_LIST_HEAD(&tunnel->list);
        tunnel->tb = tb;
        tunnel->npaths = npaths;
        tunnel->type = type;

        return tunnel;
}

static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
        int res;

        res = tb_pci_port_enable(tunnel->src_port, activate);
        if (res)
                return res;

        if (tb_port_is_pcie_up(tunnel->dst_port))
                return tb_pci_port_enable(tunnel->dst_port, activate);

        return 0;
}

static int tb_pci_init_credits(struct tb_path_hop *hop)
{
        struct tb_port *port = hop->in_port;
        struct tb_switch *sw = port->sw;
        unsigned int credits;

        if (tb_port_use_credit_allocation(port)) {
                unsigned int available;

                available = tb_available_credits(port, NULL);
                credits = min(sw->max_pcie_credits, available);

                if (credits < TB_MIN_PCIE_CREDITS)
                        return -ENOSPC;

                credits = max(TB_MIN_PCIE_CREDITS, credits);
        } else {
                if (tb_port_is_null(port))
                        credits = port->bonded ? 32 : 16;
                else
                        credits = 7;
        }

        hop->initial_credits = credits;
        return 0;
}
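
/*
 * For illustration: on a legacy (non-USB4) router the fixed values
 * above apply, so a bonded lane adapter gets 32 initial credits and an
 * unbonded one 16, while non-lane adapters get 7. On routers with
 * per-protocol credit allocation the budget comes from
 * tb_available_credits(), clamped to max_pcie_credits and rejected
 * below TB_MIN_PCIE_CREDITS (6).
 */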

static int tb_pci_init_path(struct tb_path *path)
{
        struct tb_path_hop *hop;

        path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 3;
        path->weight = 1;
        path->drop_packages = 0;

        tb_path_for_each_hop(path, hop) {
                int ret;

                ret = tb_pci_init_credits(hop);
                if (ret)
                        return ret;
        }

        return 0;
}

/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
{
        struct tb_tunnel *tunnel;
        struct tb_path *path;

        if (!tb_pci_port_is_enabled(down))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
        if (!tunnel)
                return NULL;

        tunnel->activate = tb_pci_activate;
        tunnel->src_port = down;

        /*
         * Discover both paths even if they are not complete. We will
         * clean them up by calling tb_tunnel_deactivate() below in that
         * case.
         */
        path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
                                &tunnel->dst_port, "PCIe Up");
        if (!path) {
                /* Just disable the downstream port */
                tb_pci_port_enable(down, false);
                goto err_free;
        }
        tunnel->paths[TB_PCI_PATH_UP] = path;
        if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
                goto err_free;

        path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
                                "PCIe Down");
        if (!path)
                goto err_deactivate;
        tunnel->paths[TB_PCI_PATH_DOWN] = path;
        if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
                goto err_deactivate;

        /* Validate that the tunnel is complete */
        if (!tb_port_is_pcie_up(tunnel->dst_port)) {
                tb_port_warn(tunnel->dst_port,
                             "path does not end on a PCIe adapter, cleaning up\n");
                goto err_deactivate;
        }

        if (down != tunnel->src_port) {
                tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
                goto err_deactivate;
        }

        if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
                tb_tunnel_warn(tunnel,
                               "tunnel is not fully activated, cleaning up\n");
                goto err_deactivate;
        }

        tb_tunnel_dbg(tunnel, "discovered\n");
        return tunnel;

err_deactivate:
        tb_tunnel_deactivate(tunnel);
err_free:
        tb_tunnel_free(tunnel);

        return NULL;
}

/**
 * tb_tunnel_alloc_pci() - allocate a PCIe tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCIe tunnel. The ports must be of type %TB_TYPE_PCIE_UP
 * and %TB_TYPE_PCIE_DOWN.
 *
 * Return: A tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
                                      struct tb_port *down)
{
        struct tb_tunnel *tunnel;
        struct tb_path *path;

        tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
        if (!tunnel)
                return NULL;

        tunnel->activate = tb_pci_activate;
        tunnel->src_port = down;
        tunnel->dst_port = up;

        path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
                             "PCIe Down");
        if (!path)
                goto err_free;
        tunnel->paths[TB_PCI_PATH_DOWN] = path;
        if (tb_pci_init_path(path))
                goto err_free;

        path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
                             "PCIe Up");
        if (!path)
                goto err_free;
        tunnel->paths[TB_PCI_PATH_UP] = path;
        if (tb_pci_init_path(path))
                goto err_free;

        return tunnel;

err_free:
        tb_tunnel_free(tunnel);
        return NULL;
}

static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
        /* Titan Ridge DP adapters need the same treatment as USB4 */
        return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}

static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
{
        int timeout = 10;
        u32 val;
        int ret;

        /* Both ends need to support this */
        if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
                return 0;

        ret = tb_port_read(out, &val, TB_CFG_PORT,
                           out->cap_adap + DP_STATUS_CTRL, 1);
        if (ret)
                return ret;

        val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;

        ret = tb_port_write(out, &val, TB_CFG_PORT,
                            out->cap_adap + DP_STATUS_CTRL, 1);
        if (ret)
                return ret;

        /* Poll until the DP OUT adapter clears the handshake bit */
        do {
                ret = tb_port_read(out, &val, TB_CFG_PORT,
                                   out->cap_adap + DP_STATUS_CTRL, 1);
                if (ret)
                        return ret;
                if (!(val & DP_STATUS_CTRL_CMHS))
                        return 0;
                usleep_range(10, 100);
        } while (timeout--);

        return -ETIMEDOUT;
}

static inline u32 tb_dp_cap_get_rate(u32 val)
{
        u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;

        switch (rate) {
        case DP_COMMON_CAP_RATE_RBR:
                return 1620;
        case DP_COMMON_CAP_RATE_HBR:
                return 2700;
        case DP_COMMON_CAP_RATE_HBR2:
                return 5400;
        case DP_COMMON_CAP_RATE_HBR3:
                return 8100;
        default:
                return 0;
        }
}

static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
        val &= ~DP_COMMON_CAP_RATE_MASK;
        switch (rate) {
        default:
                WARN(1, "invalid rate %u passed, defaulting to 1620 Mb/s\n", rate);
                fallthrough;
        case 1620:
                val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
                break;
        case 2700:
                val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
                break;
        case 5400:
                val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
                break;
        case 8100:
                val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
                break;
        }
        return val;
}

static inline u32 tb_dp_cap_get_lanes(u32 val)
{
        u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;

        switch (lanes) {
        case DP_COMMON_CAP_1_LANE:
                return 1;
        case DP_COMMON_CAP_2_LANES:
                return 2;
        case DP_COMMON_CAP_4_LANES:
                return 4;
        default:
                return 0;
        }
}

static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
{
        val &= ~DP_COMMON_CAP_LANES_MASK;
        switch (lanes) {
        default:
                WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
                     lanes);
                fallthrough;
        case 1:
                val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
                break;
        case 2:
                val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
                break;
        case 4:
                val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
                break;
        }
        return val;
}

static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
        /* Tunneling removes the DP 8b/10b encoding overhead */
        return rate * lanes * 8 / 10;
}
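
/*
 * Worked example: HBR2 x4 is 5400 Mb/s * 4 lanes = 21600 Mb/s on the
 * wire, but without the 8b/10b overhead the tunneled payload is
 * 21600 * 8 / 10 = 17280 Mb/s, matching the dp_bw[] table below.
 */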

static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
                                  u32 out_rate, u32 out_lanes, u32 *new_rate,
                                  u32 *new_lanes)
{
        static const u32 dp_bw[][2] = {
                /* Mb/s, lanes */
                { 8100, 4 }, /* 25920 Mb/s */
                { 5400, 4 }, /* 17280 Mb/s */
                { 8100, 2 }, /* 12960 Mb/s */
                { 2700, 4 }, /* 8640 Mb/s */
                { 5400, 2 }, /* 8640 Mb/s */
                { 8100, 1 }, /* 6480 Mb/s */
                { 1620, 4 }, /* 5184 Mb/s */
                { 5400, 1 }, /* 4320 Mb/s */
                { 2700, 2 }, /* 4320 Mb/s */
                { 1620, 2 }, /* 2592 Mb/s */
                { 2700, 1 }, /* 2160 Mb/s */
                { 1620, 1 }, /* 1296 Mb/s */
        };
        unsigned int i;

        /*
         * Find a combination that can fit into max_bw and does not
         * exceed the maximum rate and lanes supported by the DP OUT and
         * DP IN adapters.
         */
        for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
                if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
                        continue;

                if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
                        continue;

                if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
                        *new_rate = dp_bw[i][0];
                        *new_lanes = dp_bw[i][1];
                        return 0;
                }
        }

        return -ENOSR;
}
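
/*
 * For illustration: with max_bw = 16000 Mb/s, a DP IN capable of
 * HBR3 x4 and a DP OUT capable of HBR2 x4, the entries above HBR2 are
 * skipped on the out_rate check, { 5400, 4 } is passed over because
 * 17280 > 16000, and the first fit is { 2700, 4 } = 8640 Mb/s.
 */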

static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
        u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
        struct tb_port *out = tunnel->dst_port;
        struct tb_port *in = tunnel->src_port;
        int ret, max_bw;

        /*
         * Copying the DP_LOCAL_CAP register to the DP_REMOTE_CAP
         * register is only supported on second generation and newer
         * hardware.
         */
        if (in->sw->generation < 2 || out->sw->generation < 2)
                return 0;

        /*
         * Perform connection manager handshake between IN and OUT ports
         * before capabilities exchange can take place.
         */
        ret = tb_dp_cm_handshake(in, out);
        if (ret)
                return ret;

        /* Read both DP_LOCAL_CAP registers */
        ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
                           in->cap_adap + DP_LOCAL_CAP, 1);
        if (ret)
                return ret;

        ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
                           out->cap_adap + DP_LOCAL_CAP, 1);
        if (ret)
                return ret;

        /* Write IN local caps to OUT remote caps */
        ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
                            out->cap_adap + DP_REMOTE_CAP, 1);
        if (ret)
                return ret;

        in_rate = tb_dp_cap_get_rate(in_dp_cap);
        in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
        tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
                    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

        /*
         * If the tunnel bandwidth is limited (max_bw is set) then see
         * if we need to reduce bandwidth to fit there.
         */
        out_rate = tb_dp_cap_get_rate(out_dp_cap);
        out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
        bw = tb_dp_bandwidth(out_rate, out_lanes);
        tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
                    out_rate, out_lanes, bw);

        if (in->sw->config.depth < out->sw->config.depth)
                max_bw = tunnel->max_down;
        else
                max_bw = tunnel->max_up;

        if (max_bw && bw > max_bw) {
                u32 new_rate, new_lanes, new_bw;

                ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
                                             out_rate, out_lanes, &new_rate,
                                             &new_lanes);
                if (ret) {
                        tb_port_info(out, "not enough bandwidth for DP tunnel\n");
                        return ret;
                }

                new_bw = tb_dp_bandwidth(new_rate, new_lanes);
                tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
                            new_rate, new_lanes, new_bw);

                /*
                 * Set new rate and number of lanes before writing it to
                 * the IN port remote caps.
                 */
                out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
                out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
        }

        return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
                             in->cap_adap + DP_REMOTE_CAP, 1);
}

static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
        int ret;

        if (active) {
                struct tb_path **paths;
                int last;

                paths = tunnel->paths;
                last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

                tb_dp_port_set_hops(tunnel->src_port,
                        paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
                        paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
                        paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

                tb_dp_port_set_hops(tunnel->dst_port,
                        paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
                        paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
                        paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
        } else {
                tb_dp_port_hpd_clear(tunnel->src_port);
                tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
                if (tb_port_is_dpout(tunnel->dst_port))
                        tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
        }

        ret = tb_dp_port_enable(tunnel->src_port, active);
        if (ret)
                return ret;

        if (tb_port_is_dpout(tunnel->dst_port))
                return tb_dp_port_enable(tunnel->dst_port, active);

        return 0;
}

static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
                                    int *consumed_down)
{
        struct tb_port *in = tunnel->src_port;
        const struct tb_switch *sw = in->sw;
        u32 val, rate = 0, lanes = 0;
        int ret;

        if (tb_dp_is_usb4(sw)) {
                int timeout = 20;

                /*
                 * Wait for DPRX done. Normally it should be already set
                 * for an active tunnel.
                 */
                do {
                        ret = tb_port_read(in, &val, TB_CFG_PORT,
                                           in->cap_adap + DP_COMMON_CAP, 1);
                        if (ret)
                                return ret;

                        if (val & DP_COMMON_CAP_DPRX_DONE) {
                                rate = tb_dp_cap_get_rate(val);
                                lanes = tb_dp_cap_get_lanes(val);
                                break;
                        }
                        msleep(250);
                } while (timeout--);

                /*
                 * The post-decrement leaves timeout at -1 if the loop
                 * expired without DPRX done being set.
                 */
                if (timeout < 0)
                        return -ETIMEDOUT;
        } else if (sw->generation >= 2) {
                /*
                 * Read from the copied remote cap so that we take into
                 * account if capabilities were reduced during exchange.
                 */
                ret = tb_port_read(in, &val, TB_CFG_PORT,
                                   in->cap_adap + DP_REMOTE_CAP, 1);
                if (ret)
                        return ret;

                rate = tb_dp_cap_get_rate(val);
                lanes = tb_dp_cap_get_lanes(val);
        } else {
                /* No bandwidth management for legacy devices */
                *consumed_up = 0;
                *consumed_down = 0;
                return 0;
        }

        if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
                *consumed_up = 0;
                *consumed_down = tb_dp_bandwidth(rate, lanes);
        } else {
                *consumed_up = tb_dp_bandwidth(rate, lanes);
                *consumed_down = 0;
        }

        return 0;
}
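
/*
 * For illustration: a DP IN adapter on the host router (depth 0)
 * feeding a DP OUT on an attached device (depth 1) consumes bandwidth
 * only in the downstream direction, so an HBR2 x4 stream reports
 * consumed_up = 0 and consumed_down = 17280 Mb/s.
 */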

static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
{
        struct tb_port *port = hop->in_port;
        struct tb_switch *sw = port->sw;

        if (tb_port_use_credit_allocation(port))
                hop->initial_credits = sw->min_dp_aux_credits;
        else
                hop->initial_credits = 1;
}

static void tb_dp_init_aux_path(struct tb_path *path)
{
        struct tb_path_hop *hop;

        path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 2;
        path->weight = 1;

        tb_path_for_each_hop(path, hop)
                tb_dp_init_aux_credits(hop);
}

static int tb_dp_init_video_credits(struct tb_path_hop *hop)
{
        struct tb_port *port = hop->in_port;
        struct tb_switch *sw = port->sw;

        if (tb_port_use_credit_allocation(port)) {
                unsigned int nfc_credits;
                size_t max_dp_streams;

                tb_available_credits(port, &max_dp_streams);
                /*
                 * Read the number of currently allocated NFC credits
                 * from the lane adapter. Since we only use them for DP
                 * tunneling we can use that to figure out how many DP
                 * tunnels already go through the lane adapter.
                 */
                nfc_credits = port->config.nfc_credits &
                                ADP_CS_4_NFC_BUFFERS_MASK;
                if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
                        return -ENOSPC;

                hop->nfc_credits = sw->min_dp_main_credits;
        } else {
                hop->nfc_credits = min(port->total_credits - 2, 12U);
        }

        return 0;
}
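
/*
 * For illustration (hypothetical values): with min_dp_main_credits = 18
 * and 54 NFC credits already allocated on the lane adapter, three DP
 * tunnels already pass through it; if tb_available_credits() reports
 * max_dp_streams = 2, the check 54 / 18 = 3 > 2 rejects the new video
 * path with -ENOSPC.
 */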

static int tb_dp_init_video_path(struct tb_path *path)
{
        struct tb_path_hop *hop;

        path->egress_fc_enable = TB_PATH_NONE;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_fc_enable = TB_PATH_NONE;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 1;
        path->weight = 1;

        tb_path_for_each_hop(path, hop) {
                int ret;

                ret = tb_dp_init_video_credits(hop);
                if (ret)
                        return ret;
        }

        return 0;
}

/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back.
 *
 * Return: The discovered tunnel or %NULL if there was no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
{
        struct tb_tunnel *tunnel;
        struct tb_port *port;
        struct tb_path *path;

        if (!tb_dp_port_is_enabled(in))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
        if (!tunnel)
                return NULL;

        tunnel->init = tb_dp_xchg_caps;
        tunnel->activate = tb_dp_activate;
        tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
        tunnel->src_port = in;

        path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
                                &tunnel->dst_port, "Video");
        if (!path) {
                /* Just disable the DP IN port */
                tb_dp_port_enable(in, false);
                goto err_free;
        }
        tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
        if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT]))
                goto err_free;

        path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
        if (!path)
                goto err_deactivate;
        tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
        tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);

        path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
                                &port, "AUX RX");
        if (!path)
                goto err_deactivate;
        tunnel->paths[TB_DP_AUX_PATH_IN] = path;
        tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);

        /* Validate that the tunnel is complete */
        if (!tb_port_is_dpout(tunnel->dst_port)) {
                tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
                goto err_deactivate;
        }

        if (!tb_dp_port_is_enabled(tunnel->dst_port))
                goto err_deactivate;

        if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
                goto err_deactivate;

        if (port != tunnel->src_port) {
                tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
                goto err_deactivate;
        }

        tb_tunnel_dbg(tunnel, "discovered\n");
        return tunnel;

err_deactivate:
        tb_tunnel_deactivate(tunnel);
err_free:
        tb_tunnel_free(tunnel);

        return NULL;
}

/**
 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
 *          if not limited)
 * @max_down: Maximum available downstream bandwidth for the DP tunnel
 *            (%0 if not limited)
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
 *
 * Return: A tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
                                     struct tb_port *out, int max_up,
                                     int max_down)
{
        struct tb_tunnel *tunnel;
        struct tb_path **paths;
        struct tb_path *path;

        if (WARN_ON(!in->cap_adap || !out->cap_adap))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
        if (!tunnel)
                return NULL;

        tunnel->init = tb_dp_xchg_caps;
        tunnel->activate = tb_dp_activate;
        tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
        tunnel->src_port = in;
        tunnel->dst_port = out;
        tunnel->max_up = max_up;
        tunnel->max_down = max_down;

        paths = tunnel->paths;

        path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
                             1, "Video");
        if (!path)
                goto err_free;
        tb_dp_init_video_path(path);
        paths[TB_DP_VIDEO_PATH_OUT] = path;

        path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
                             TB_DP_AUX_TX_HOPID, 1, "AUX TX");
        if (!path)
                goto err_free;
        tb_dp_init_aux_path(path);
        paths[TB_DP_AUX_PATH_OUT] = path;

        path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
                             TB_DP_AUX_RX_HOPID, 1, "AUX RX");
        if (!path)
                goto err_free;
        tb_dp_init_aux_path(path);
        paths[TB_DP_AUX_PATH_IN] = path;

        return tunnel;

err_free:
        tb_tunnel_free(tunnel);
        return NULL;
}

static unsigned int tb_dma_available_credits(const struct tb_port *port)
{
        const struct tb_switch *sw = port->sw;
        int credits;

        credits = tb_available_credits(port, NULL);
        if (tb_acpi_may_tunnel_pcie())
                credits -= sw->max_pcie_credits;
        credits -= port->dma_credits;

        return credits > 0 ? credits : 0;
}
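
/*
 * For illustration (hypothetical values): if tb_available_credits()
 * returns 31, PCIe tunneling is allowed with max_pcie_credits = 6, and
 * 14 credits are already reserved for an existing DMA path, then
 * 31 - 6 - 14 = 11 credits remain available for a new DMA path.
 */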

static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
{
        struct tb_port *port = hop->in_port;

        if (tb_port_use_credit_allocation(port)) {
                unsigned int available = tb_dma_available_credits(port);

                /*
                 * Need to have at least TB_MIN_DMA_CREDITS, otherwise
                 * DMA path cannot be established.
                 */
                if (available < TB_MIN_DMA_CREDITS)
                        return -ENOSPC;

                /* Never take more than what is currently available */
                credits = min(credits, available);

                tb_port_dbg(port, "reserving %u credits for DMA path\n",
                            credits);

                port->dma_credits += credits;
        } else {
                if (tb_port_is_null(port))
                        credits = port->bonded ? 14 : 6;
                else
                        credits = min(port->total_credits, credits);
        }

        hop->initial_credits = credits;
        return 0;
}
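
/*
 * For illustration: asking for TB_DMA_CREDITS (14) when only 11 are
 * available clamps the reservation to 11; on a legacy router the fixed
 * values apply instead (14 for a bonded lane adapter, 6 for an
 * unbonded one).
 */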

/* Path from lane adapter to NHI */
static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
{
        struct tb_path_hop *hop;
        unsigned int i, tmp;

        path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 5;
        path->weight = 1;
        path->clear_fc = true;

        /*
         * First lane adapter is the one connected to the remote host.
         * We don't tunnel other traffic over this link so can use all
         * the credits (except the ones reserved for control traffic).
         */
        hop = &path->hops[0];
        tmp = min(tb_usable_credits(hop->in_port), credits);
        hop->initial_credits = tmp;
        hop->in_port->dma_credits += tmp;

        for (i = 1; i < path->path_length; i++) {
                int ret;

                ret = tb_dma_reserve_credits(&path->hops[i], credits);
                if (ret)
                        return ret;
        }

        return 0;
}

/* Path from NHI to lane adapter */
static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
{
        struct tb_path_hop *hop;

        path->egress_fc_enable = TB_PATH_ALL;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 5;
        path->weight = 1;
        path->clear_fc = true;

        tb_path_for_each_hop(path, hop) {
                int ret;

                ret = tb_dma_reserve_credits(hop, credits);
                if (ret)
                        return ret;
        }

        return 0;
}

static void tb_dma_release_credits(struct tb_path_hop *hop)
{
        struct tb_port *port = hop->in_port;

        if (tb_port_use_credit_allocation(port)) {
                port->dma_credits -= hop->initial_credits;

                tb_port_dbg(port, "released %u DMA path credits\n",
                            hop->initial_credits);
        }
}

static void tb_dma_deinit_path(struct tb_path *path)
{
        struct tb_path_hop *hop;

        tb_path_for_each_hop(path, hop)
                tb_dma_release_credits(hop);
}

static void tb_dma_deinit(struct tb_tunnel *tunnel)
{
        int i;

        for (i = 0; i < tunnel->npaths; i++) {
                if (!tunnel->paths[i])
                        continue;
                tb_dma_deinit_path(tunnel->paths[i]);
        }
}

/**
 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
 * @tb: Pointer to the domain structure
 * @nhi: Host controller port
 * @dst: Destination null port which the other domain is connected to
 * @transmit_path: HopID used for transmitting packets
 * @transmit_ring: NHI ring number used to send packets towards the
 *                 other domain. Set to %-1 if TX path is not needed.
 * @receive_path: HopID used for receiving packets
 * @receive_ring: NHI ring number used to receive packets from the
 *                other domain. Set to %-1 if RX path is not needed.
 *
 * Return: A tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
                                      struct tb_port *dst, int transmit_path,
                                      int transmit_ring, int receive_path,
                                      int receive_ring)
{
        struct tb_tunnel *tunnel;
        size_t npaths = 0, i = 0;
        struct tb_path *path;
        int credits;

        if (receive_ring > 0)
                npaths++;
        if (transmit_ring > 0)
                npaths++;

        if (WARN_ON(!npaths))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
        if (!tunnel)
                return NULL;

        tunnel->src_port = nhi;
        tunnel->dst_port = dst;
        tunnel->deinit = tb_dma_deinit;

        credits = min_not_zero(TB_DMA_CREDITS, nhi->sw->max_dma_credits);

        if (receive_ring > 0) {
                path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
                                     "DMA RX");
                if (!path)
                        goto err_free;
                tunnel->paths[i++] = path;
                if (tb_dma_init_rx_path(path, credits)) {
                        tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
                        goto err_free;
                }
        }

        if (transmit_ring > 0) {
                path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
                                     "DMA TX");
                if (!path)
                        goto err_free;
                tunnel->paths[i++] = path;
                if (tb_dma_init_tx_path(path, credits)) {
                        tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
                        goto err_free;
                }
        }

        return tunnel;

err_free:
        tb_tunnel_free(tunnel);
        return NULL;
}

/**
 * tb_tunnel_match_dma() - Match DMA tunnel
 * @tunnel: Tunnel to match
 * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
 * @transmit_ring: NHI ring number used to send packets towards the
 *                 other domain. Pass %-1 to ignore.
 * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
 * @receive_ring: NHI ring number used to receive packets from the
 *                other domain. Pass %-1 to ignore.
 *
 * This function can be used to match a specific DMA tunnel, if there
 * are multiple DMA tunnels going through the same XDomain connection.
 * Returns true if there is a match and false otherwise.
 */
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
                         int transmit_ring, int receive_path, int receive_ring)
{
        const struct tb_path *tx_path = NULL, *rx_path = NULL;
        int i;

        if (!receive_ring || !transmit_ring)
                return false;

        for (i = 0; i < tunnel->npaths; i++) {
                const struct tb_path *path = tunnel->paths[i];

                if (!path)
                        continue;

                if (tb_port_is_nhi(path->hops[0].in_port))
                        tx_path = path;
                else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
                        rx_path = path;
        }

        if (transmit_ring > 0 || transmit_path > 0) {
                if (!tx_path)
                        return false;
                if (transmit_ring > 0 &&
                    (tx_path->hops[0].in_hop_index != transmit_ring))
                        return false;
                if (transmit_path > 0 &&
                    (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
                        return false;
        }

        if (receive_ring > 0 || receive_path > 0) {
                if (!rx_path)
                        return false;
                if (receive_path > 0 &&
                    (rx_path->hops[0].in_hop_index != receive_path))
                        return false;
                if (receive_ring > 0 &&
                    (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
                        return false;
        }

        return true;
}
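
/*
 * Usage sketch (hypothetical caller): to find the tunnel using NHI TX
 * ring 1 while ignoring the HopIDs and the RX side, a connection
 * manager holding a list of tunnels could do:
 *
 *      list_for_each_entry(tunnel, &tunnels, list)
 *              if (tb_tunnel_match_dma(tunnel, -1, 1, -1, -1))
 *                      break;
 */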

static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
{
        int ret, up_max_rate, down_max_rate;

        ret = usb4_usb3_port_max_link_rate(up);
        if (ret < 0)
                return ret;
        up_max_rate = ret;

        ret = usb4_usb3_port_max_link_rate(down);
        if (ret < 0)
                return ret;
        down_max_rate = ret;

        return min(up_max_rate, down_max_rate);
}

static int tb_usb3_init(struct tb_tunnel *tunnel)
{
        tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
                      tunnel->allocated_up, tunnel->allocated_down);

        return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
                                                 &tunnel->allocated_up,
                                                 &tunnel->allocated_down);
}

static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
{
        int res;

        res = tb_usb3_port_enable(tunnel->src_port, activate);
        if (res)
                return res;

        if (tb_port_is_usb3_up(tunnel->dst_port))
                return tb_usb3_port_enable(tunnel->dst_port, activate);

        return 0;
}

static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
                int *consumed_up, int *consumed_down)
{
        int pcie_enabled = tb_acpi_may_tunnel_pcie();

        /*
         * PCIe tunneling, if enabled, affects the USB3 bandwidth so
         * take it into account here.
         */
        *consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3;
        *consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3;
        return 0;
}
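
/*
 * Worked example: with 900 Mb/s allocated upstream, the tunnel reports
 * 900 * 3 / 3 = 900 Mb/s consumed when PCIe tunneling is disabled and
 * 900 * 4 / 3 = 1200 Mb/s when it is enabled.
 */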

static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
        int ret;

        ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
                                               &tunnel->allocated_up,
                                               &tunnel->allocated_down);
        if (ret)
                return ret;

        tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
                      tunnel->allocated_up, tunnel->allocated_down);
        return 0;
}

static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
                                                int *available_up,
                                                int *available_down)
{
        int ret, max_rate, allocate_up, allocate_down;

        ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
        if (ret < 0) {
                tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
                return;
        } else if (!ret) {
                /* Use the maximum link rate if the link valid bit is not set */
                ret = usb4_usb3_port_max_link_rate(tunnel->src_port);
                if (ret < 0) {
                        tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
                        return;
                }
        }

        /*
         * 90% of the max rate can be allocated for isochronous
         * transfers.
         */
        max_rate = ret * 90 / 100;

        /* No need to reclaim if already at maximum */
        if (tunnel->allocated_up >= max_rate &&
            tunnel->allocated_down >= max_rate)
                return;

        /* Don't go lower than what is already allocated */
        allocate_up = min(max_rate, *available_up);
        if (allocate_up < tunnel->allocated_up)
                allocate_up = tunnel->allocated_up;

        allocate_down = min(max_rate, *available_down);
        if (allocate_down < tunnel->allocated_down)
                allocate_down = tunnel->allocated_down;

        /* If nothing changed there is no need to do more */
        if (allocate_up == tunnel->allocated_up &&
            allocate_down == tunnel->allocated_down)
                return;

        ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
                                                &allocate_down);
        if (ret) {
                tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
                return;
        }

        tunnel->allocated_up = allocate_up;
        *available_up -= tunnel->allocated_up;

        tunnel->allocated_down = allocate_down;
        *available_down -= tunnel->allocated_down;

        tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
                      tunnel->allocated_up, tunnel->allocated_down);
}
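
/*
 * Worked example: on a 10 Gb/s USB3 link max_rate is
 * 10000 * 90 / 100 = 9000 Mb/s. If 3000 Mb/s is currently allocated
 * downstream and *available_down is 4000 Mb/s, the new allocation is
 * min(9000, 4000) = 4000 Mb/s (never below the current 3000), and
 * *available_down drops to 0.
 */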

static void tb_usb3_init_credits(struct tb_path_hop *hop)
{
        struct tb_port *port = hop->in_port;
        struct tb_switch *sw = port->sw;
        unsigned int credits;

        if (tb_port_use_credit_allocation(port)) {
                credits = sw->max_usb3_credits;
        } else {
                if (tb_port_is_null(port))
                        credits = port->bonded ? 32 : 16;
                else
                        credits = 7;
        }

        hop->initial_credits = credits;
}

static void tb_usb3_init_path(struct tb_path *path)
{
        struct tb_path_hop *hop;

        path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 3;
        path->weight = 3;
        path->drop_packages = 0;

        tb_path_for_each_hop(path, hop)
                tb_usb3_init_credits(hop);
}

/**
 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
 * @tb: Pointer to the domain structure
 * @down: USB3 downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the USB3 upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
{
        struct tb_tunnel *tunnel;
        struct tb_path *path;

        if (!tb_usb3_port_is_enabled(down))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
        if (!tunnel)
                return NULL;

        tunnel->activate = tb_usb3_activate;
        tunnel->src_port = down;

        /*
         * Discover both paths even if they are not complete. We will
         * clean them up by calling tb_tunnel_deactivate() below in that
         * case.
         */
        path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
                                &tunnel->dst_port, "USB3 Down");
        if (!path) {
                /* Just disable the downstream port */
                tb_usb3_port_enable(down, false);
                goto err_free;
        }
        tunnel->paths[TB_USB3_PATH_DOWN] = path;
        tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);

        path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
                                "USB3 Up");
        if (!path)
                goto err_deactivate;
        tunnel->paths[TB_USB3_PATH_UP] = path;
        tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);

        /* Validate that the tunnel is complete */
        if (!tb_port_is_usb3_up(tunnel->dst_port)) {
                tb_port_warn(tunnel->dst_port,
                             "path does not end on a USB3 adapter, cleaning up\n");
                goto err_deactivate;
        }

        if (down != tunnel->src_port) {
                tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
                goto err_deactivate;
        }

        if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
                tb_tunnel_warn(tunnel,
                               "tunnel is not fully activated, cleaning up\n");
                goto err_deactivate;
        }

        if (!tb_route(down->sw)) {
                int ret;

                /*
                 * Read the initial bandwidth allocation for the first
                 * hop tunnel.
                 */
                ret = usb4_usb3_port_allocated_bandwidth(down,
                        &tunnel->allocated_up, &tunnel->allocated_down);
                if (ret)
                        goto err_deactivate;

                tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
                              tunnel->allocated_up, tunnel->allocated_down);

                tunnel->init = tb_usb3_init;
                tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
                tunnel->release_unused_bandwidth =
                        tb_usb3_release_unused_bandwidth;
                tunnel->reclaim_available_bandwidth =
                        tb_usb3_reclaim_available_bandwidth;
        }

        tb_tunnel_dbg(tunnel, "discovered\n");
        return tunnel;

err_deactivate:
        tb_tunnel_deactivate(tunnel);
err_free:
        tb_tunnel_free(tunnel);

        return NULL;
}

/**
 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
 * @tb: Pointer to the domain structure
 * @up: USB3 upstream adapter port
 * @down: USB3 downstream adapter port
 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
 *          if not limited).
 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
 *            (%0 if not limited).
 *
 * Allocate a USB3 tunnel. The ports must be of type %TB_TYPE_USB3_UP and
 * %TB_TYPE_USB3_DOWN.
 *
 * Return: A tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
                                       struct tb_port *down, int max_up,
                                       int max_down)
{
        struct tb_tunnel *tunnel;
        struct tb_path *path;
        int max_rate = 0;

        /*
         * Check that we have enough bandwidth available for the new
         * USB3 tunnel.
         */
        if (max_up > 0 || max_down > 0) {
                max_rate = tb_usb3_max_link_rate(down, up);
                if (max_rate < 0)
                        return NULL;

                /* Only 90% can be allocated for USB3 isochronous transfers */
                max_rate = max_rate * 90 / 100;
                tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
                            max_rate);

                if (max_rate > max_up || max_rate > max_down) {
                        tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
                        return NULL;
                }
        }

        tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
        if (!tunnel)
                return NULL;

        tunnel->activate = tb_usb3_activate;
        tunnel->src_port = down;
        tunnel->dst_port = up;
        tunnel->max_up = max_up;
        tunnel->max_down = max_down;

        path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
                             "USB3 Down");
        if (!path) {
                tb_tunnel_free(tunnel);
                return NULL;
        }
        tb_usb3_init_path(path);
        tunnel->paths[TB_USB3_PATH_DOWN] = path;

        path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
                             "USB3 Up");
        if (!path) {
                tb_tunnel_free(tunnel);
                return NULL;
        }
        tb_usb3_init_path(path);
        tunnel->paths[TB_USB3_PATH_UP] = path;

        if (!tb_route(down->sw)) {
                tunnel->allocated_up = max_rate;
                tunnel->allocated_down = max_rate;

                tunnel->init = tb_usb3_init;
                tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
                tunnel->release_unused_bandwidth =
                        tb_usb3_release_unused_bandwidth;
                tunnel->reclaim_available_bandwidth =
                        tb_usb3_reclaim_available_bandwidth;
        }

        return tunnel;
}
1525
1526/**
1527 * tb_tunnel_free() - free a tunnel
1528 * @tunnel: Tunnel to be freed
1529 *
1530 * Frees a tunnel. The tunnel does not need to be deactivated.
1531 */
1532void tb_tunnel_free(struct tb_tunnel *tunnel)
1533{
1534        int i;
1535
1536        if (!tunnel)
1537                return;
1538
1539        if (tunnel->deinit)
1540                tunnel->deinit(tunnel);
1541
1542        for (i = 0; i < tunnel->npaths; i++) {
1543                if (tunnel->paths[i])
1544                        tb_path_free(tunnel->paths[i]);
1545        }
1546
1547        kfree(tunnel->paths);
1548        kfree(tunnel);
1549}
1550
1551/**
1552 * tb_tunnel_is_invalid() - check whether an activated tunnel is still valid
1553 * @tunnel: Tunnel to check
1554 */
1555bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
1556{
1557        int i;
1558
1559        for (i = 0; i < tunnel->npaths; i++) {
1560                WARN_ON(!tunnel->paths[i]->activated);
1561                if (tb_path_is_invalid(tunnel->paths[i]))
1562                        return true;
1563        }
1564
1565        return false;
1566}
1567
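/*
 * Illustrative sketch (not part of the driver): typical use of
 * tb_tunnel_is_invalid() in hotplug handling. The caller is assumed to
 * keep its activated tunnels on @tunnel_list (tb.c does this in its
 * struct tb_cm); tunnels whose paths are no longer valid are torn down.
 */
static void __maybe_unused tb_drop_invalid_tunnels(struct list_head *tunnel_list)
{
        struct tb_tunnel *tunnel, *n;

        list_for_each_entry_safe(tunnel, n, tunnel_list, list) {
                if (tb_tunnel_is_invalid(tunnel)) {
                        tb_tunnel_deactivate(tunnel);
                        list_del(&tunnel->list);
                        tb_tunnel_free(tunnel);
                }
        }
}
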
1568/**
1569 * tb_tunnel_restart() - activate a tunnel after a hardware reset
1570 * @tunnel: Tunnel to restart
1571 *
1572 * Return: %0 on success and negative errno in case of failure
1573 */
1574int tb_tunnel_restart(struct tb_tunnel *tunnel)
1575{
1576        int res, i;
1577
1578        tb_tunnel_dbg(tunnel, "activating\n");
1579
1580        /*
1581         * Make sure all paths are properly disabled before enabling
1582         * them again.
1583         */
1584        for (i = 0; i < tunnel->npaths; i++) {
1585                if (tunnel->paths[i]->activated) {
1586                        tb_path_deactivate(tunnel->paths[i]);
1587                        tunnel->paths[i]->activated = false;
1588                }
1589        }
1590
1591        if (tunnel->init) {
1592                res = tunnel->init(tunnel);
1593                if (res)
1594                        return res;
1595        }
1596
1597        for (i = 0; i < tunnel->npaths; i++) {
1598                res = tb_path_activate(tunnel->paths[i]);
1599                if (res)
1600                        goto err;
1601        }
1602
1603        if (tunnel->activate) {
1604                res = tunnel->activate(tunnel, true);
1605                if (res)
1606                        goto err;
1607        }
1608
1609        return 0;
1610
1611err:
1612        tb_tunnel_warn(tunnel, "activation failed\n");
1613        tb_tunnel_deactivate(tunnel);
1614        return res;
1615}
1616
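/*
 * Illustrative sketch (not part of the driver): restarting all tunnels
 * after the domain comes back from a hardware reset. @tunnel_list is
 * assumed to be maintained by the caller; tunnels that cannot be
 * restarted are freed (tb_tunnel_restart() already deactivates them on
 * failure).
 */
static void __maybe_unused tb_restart_all_tunnels(struct list_head *tunnel_list)
{
        struct tb_tunnel *tunnel, *n;

        list_for_each_entry_safe(tunnel, n, tunnel_list, list) {
                if (tb_tunnel_restart(tunnel)) {
                        list_del(&tunnel->list);
                        tb_tunnel_free(tunnel);
                }
        }
}
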
1617/**
1618 * tb_tunnel_activate() - activate a tunnel
1619 * @tunnel: Tunnel to activate
1620 *
1621 * Return: %0 on success or negative errno on failure.
1622 */
1623int tb_tunnel_activate(struct tb_tunnel *tunnel)
1624{
1625        int i;
1626
1627        for (i = 0; i < tunnel->npaths; i++) {
1628                if (tunnel->paths[i]->activated) {
1629                        tb_tunnel_WARN(tunnel,
1630                                       "trying to activate an already activated tunnel\n");
1631                        return -EINVAL;
1632                }
1633        }
1634
1635        return tb_tunnel_restart(tunnel);
1636}
1637
1638/**
1639 * tb_tunnel_deactivate() - deactivate a tunnel
1640 * @tunnel: Tunnel to deactivate
1641 */
1642void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
1643{
1644        int i;
1645
1646        tb_tunnel_dbg(tunnel, "deactivating\n");
1647
1648        if (tunnel->activate)
1649                tunnel->activate(tunnel, false);
1650
1651        for (i = 0; i < tunnel->npaths; i++) {
1652                if (tunnel->paths[i] && tunnel->paths[i]->activated)
1653                        tb_path_deactivate(tunnel->paths[i]);
1654        }
1655}
1656
1657/**
1658 * tb_tunnel_port_on_path() - Does the tunnel go through port
1659 * @tunnel: Tunnel to check
1660 * @port: Port to check
1661 *
1662 * Return: %true if @tunnel goes through @port (direction does not matter),
1663 * %false otherwise.
1664 */
1665bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
1666                            const struct tb_port *port)
1667{
1668        int i;
1669
1670        for (i = 0; i < tunnel->npaths; i++) {
1671                if (!tunnel->paths[i])
1672                        continue;
1673
1674                if (tb_path_port_on_path(tunnel->paths[i], port))
1675                        return true;
1676        }
1677
1678        return false;
1679}
1680
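/*
 * Illustrative sketch (not part of the driver): using
 * tb_tunnel_port_on_path() to find a tunnel that runs through @port,
 * for example when the port was just unplugged. @tunnel_list is an
 * assumption made for the example.
 */
static __maybe_unused struct tb_tunnel *
tb_find_tunnel_through_port(struct list_head *tunnel_list,
                            const struct tb_port *port)
{
        struct tb_tunnel *tunnel;

        list_for_each_entry(tunnel, tunnel_list, list) {
                if (tb_tunnel_port_on_path(tunnel, port))
                        return tunnel;
        }

        return NULL;
}
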
1681static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
1682{
1683        int i;
1684
1685        for (i = 0; i < tunnel->npaths; i++) {
1686                if (!tunnel->paths[i])
1687                        return false;
1688                if (!tunnel->paths[i]->activated)
1689                        return false;
1690        }
1691
1692        return true;
1693}
1694
1695/**
1696 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
1697 * @tunnel: Tunnel to check
1698 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
1699 *               Can be %NULL.
1700 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
1701 *                 Can be %NULL.
1702 *
1703 * Stores the amount of isochronous bandwidth @tunnel consumes in
1704 * @consumed_up and @consumed_down.
1705 * Return: %0 in case of success, negative errno otherwise.
1706 */
1707int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
1708                                 int *consumed_down)
1709{
1710        int up_bw = 0, down_bw = 0;
1711
1712        if (!tb_tunnel_is_active(tunnel))
1713                goto out;
1714
1715        if (tunnel->consumed_bandwidth) {
1716                int ret;
1717
1718                ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
1719                if (ret)
1720                        return ret;
1721
1722                tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
1723                              down_bw);
1724        }
1725
1726out:
1727        if (consumed_up)
1728                *consumed_up = up_bw;
1729        if (consumed_down)
1730                *consumed_down = down_bw;
1731
1732        return 0;
1733}
1734
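/*
 * Illustrative sketch (not part of the driver): accounting the total
 * isochronous bandwidth consumed by all tunnels on @tunnel_list, e.g.
 * as input for deciding whether a new tunnel still fits. The list is
 * an assumption made for the example.
 */
static int __maybe_unused tb_total_consumed_bandwidth(struct list_head *tunnel_list,
                                                      int *total_up,
                                                      int *total_down)
{
        struct tb_tunnel *tunnel;

        *total_up = *total_down = 0;

        list_for_each_entry(tunnel, tunnel_list, list) {
                int up, down, ret;

                ret = tb_tunnel_consumed_bandwidth(tunnel, &up, &down);
                if (ret)
                        return ret;

                *total_up += up;
                *total_down += down;
        }

        return 0;
}
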
1735/**
1736 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
1737 * @tunnel: Tunnel whose unused bandwidth to release
1738 *
1739 * If the tunnel supports dynamic bandwidth management (USB3 tunnels at
1740 * the moment), this function makes it release all of its unused bandwidth.
1741 *
1742 * Return: %0 in case of success and negative errno otherwise.
1743 */
1744int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
1745{
1746        if (!tb_tunnel_is_active(tunnel))
1747                return 0;
1748
1749        if (tunnel->release_unused_bandwidth) {
1750                int ret;
1751
1752                ret = tunnel->release_unused_bandwidth(tunnel);
1753                if (ret)
1754                        return ret;
1755        }
1756
1757        return 0;
1758}
1759
1760/**
1761 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
1762 * @tunnel: Tunnel reclaiming available bandwidth
1763 * @available_up: Available upstream bandwidth (in Mb/s)
1764 * @available_down: Available downstream bandwidth (in Mb/s)
1765 *
1766 * Reclaims bandwidth from @available_up and @available_down and updates
1767 * the variables accordingly (i.e. decreases both by the amount the
1768 * tunnel reclaimed). If nothing was reclaimed the values are kept
1769 * as is.
1770 */
1771void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
1772                                           int *available_up,
1773                                           int *available_down)
1774{
1775        if (!tb_tunnel_is_active(tunnel))
1776                return;
1777
1778        if (tunnel->reclaim_available_bandwidth)
1779                tunnel->reclaim_available_bandwidth(tunnel, available_up,
1780                                                    available_down);
1781}
1782
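/*
 * Illustrative sketch (not part of the driver): the release/reclaim
 * sequence a connection manager could run around a new bandwidth
 * allocation. First every tunnel gives up the bandwidth it does not
 * actually use, then whatever is still left in @available_up and
 * @available_down is handed back to the tunnels. @tunnel_list is an
 * assumption made for the example.
 */
static void __maybe_unused tb_rebalance_bandwidth(struct list_head *tunnel_list,
                                                  int *available_up,
                                                  int *available_down)
{
        struct tb_tunnel *tunnel;

        list_for_each_entry(tunnel, tunnel_list, list)
                tb_tunnel_release_unused_bandwidth(tunnel);

        /* ... the caller would consume bandwidth for new tunnels here ... */

        list_for_each_entry(tunnel, tunnel_list, list)
                tb_tunnel_reclaim_available_bandwidth(tunnel, available_up,
                                                      available_down);
}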