linux/drivers/thunderbolt/tunnel.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/list.h>

#include "tunnel.h"
#include "tb.h"

/* PCIe adapters always use HopID 8 for both directions */
#define TB_PCI_HOPID                    8

#define TB_PCI_PATH_DOWN                0
#define TB_PCI_PATH_UP                  1

/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID              8
#define TB_DP_AUX_RX_HOPID              8
#define TB_DP_VIDEO_HOPID               9

#define TB_DP_VIDEO_PATH_OUT            0
#define TB_DP_AUX_PATH_OUT              1
#define TB_DP_AUX_PATH_IN               2

#define TB_DMA_PATH_OUT                 0
#define TB_DMA_PATH_IN                  1

static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA" };

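/*
 * Prints the tunnel endpoints (route string and adapter port on both
 * ends) and the tunnel type in front of the caller supplied message.
 */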
#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
        do {                                                            \
                struct tb_tunnel *__tunnel = (tunnel);                  \
                level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt,   \
                      tb_route(__tunnel->src_port->sw),                 \
                      __tunnel->src_port->port,                         \
                      tb_route(__tunnel->dst_port->sw),                 \
                      __tunnel->dst_port->port,                         \
                      tb_tunnel_names[__tunnel->type],                  \
                      ## arg);                                          \
        } while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
        __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
        __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
        __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
        __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)

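/*
 * Allocates a tunnel of the given type with room for @npaths paths.
 * The caller is expected to fill in the paths and the endpoint ports.
 */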
static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
                                         enum tb_tunnel_type type)
{
        struct tb_tunnel *tunnel;

        tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
        if (!tunnel)
                return NULL;

        tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
        if (!tunnel->paths) {
                tb_tunnel_free(tunnel);
                return NULL;
        }

        INIT_LIST_HEAD(&tunnel->list);
        tunnel->tb = tb;
        tunnel->npaths = npaths;
        tunnel->type = type;

        return tunnel;
}

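/*
 * Enables or disables the PCIe adapters on both ends of the tunnel.
 * The destination port is only touched when it really is a PCIe
 * upstream adapter (a discovered path may end somewhere else).
 */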
static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
        int res;

        res = tb_pci_port_enable(tunnel->src_port, activate);
        if (res)
                return res;

        if (tb_port_is_pcie_up(tunnel->dst_port))
                return tb_pci_port_enable(tunnel->dst_port, activate);

        return 0;
}

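/* Sets the PCIe specific path parameters (flow control, priority, credits) */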
static void tb_pci_init_path(struct tb_path *path)
{
        path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 3;
        path->weight = 1;
        path->drop_packages = 0;
        path->nfc_credits = 0;
        path->hops[0].initial_credits = 7;
        path->hops[1].initial_credits = 16;
}

/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back.
 *
 * Return: The discovered tunnel or %NULL if there was no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
{
        struct tb_tunnel *tunnel;
        struct tb_path *path;

        if (!tb_pci_port_is_enabled(down))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
        if (!tunnel)
                return NULL;

        tunnel->activate = tb_pci_activate;
        tunnel->src_port = down;

        /*
         * Discover both paths even if they are not complete. We will
         * clean them up by calling tb_tunnel_deactivate() below in that
         * case.
         */
        path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
                                &tunnel->dst_port, "PCIe Up");
        if (!path) {
                /* Just disable the downstream port */
                tb_pci_port_enable(down, false);
                goto err_free;
        }
        tunnel->paths[TB_PCI_PATH_UP] = path;
        tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]);

        path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
                                "PCIe Down");
        if (!path)
                goto err_deactivate;
        tunnel->paths[TB_PCI_PATH_DOWN] = path;
        tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]);

        /* Validate that the tunnel is complete */
        if (!tb_port_is_pcie_up(tunnel->dst_port)) {
                tb_port_warn(tunnel->dst_port,
                             "path does not end on a PCIe adapter, cleaning up\n");
                goto err_deactivate;
        }

        if (down != tunnel->src_port) {
                tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
                goto err_deactivate;
        }

        if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
                tb_tunnel_warn(tunnel,
                               "tunnel is not fully activated, cleaning up\n");
                goto err_deactivate;
        }

        tb_tunnel_dbg(tunnel, "discovered\n");
        return tunnel;

err_deactivate:
        tb_tunnel_deactivate(tunnel);
err_free:
        tb_tunnel_free(tunnel);

        return NULL;
}

/**
 * tb_tunnel_alloc_pci() - Allocate a PCIe tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCIe tunnel. The ports must be of type %TB_TYPE_PCIE_UP
 * and %TB_TYPE_PCIE_DOWN.
 *
 * Return: A tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
                                      struct tb_port *down)
{
        struct tb_tunnel *tunnel;
        struct tb_path *path;

        tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
        if (!tunnel)
                return NULL;

        tunnel->activate = tb_pci_activate;
        tunnel->src_port = down;
        tunnel->dst_port = up;

        path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
                             "PCIe Down");
        if (!path) {
                tb_tunnel_free(tunnel);
                return NULL;
        }
        tb_pci_init_path(path);
        tunnel->paths[TB_PCI_PATH_DOWN] = path;

        path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
                             "PCIe Up");
        if (!path) {
                tb_tunnel_free(tunnel);
                return NULL;
        }
        tb_pci_init_path(path);
        tunnel->paths[TB_PCI_PATH_UP] = path;

        return tunnel;
}

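/*
 * Exchanges the DP_LOCAL_CAP registers of the two adapters by writing
 * each into the DP_REMOTE_CAP register of the other end. Skipped when
 * either end is first generation hardware.
 */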
static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
        struct tb_port *out = tunnel->dst_port;
        struct tb_port *in = tunnel->src_port;
        u32 in_dp_cap, out_dp_cap;
        int ret;

        /*
         * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
         * newer generation hardware.
         */
        if (in->sw->generation < 2 || out->sw->generation < 2)
                return 0;

        /* Read both DP_LOCAL_CAP registers */
        ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
                           in->cap_adap + TB_DP_LOCAL_CAP, 1);
        if (ret)
                return ret;

        ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
                           out->cap_adap + TB_DP_LOCAL_CAP, 1);
        if (ret)
                return ret;

        /* Write IN local caps to OUT remote caps */
        ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
                            out->cap_adap + TB_DP_REMOTE_CAP, 1);
        if (ret)
                return ret;

        return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
                             in->cap_adap + TB_DP_REMOTE_CAP, 1);
}

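/*
 * On activation programs the video and AUX HopIDs into the DP adapters
 * on both ends of the tunnel, on deactivation clears the HopIDs and
 * the hot plug status. Finally enables or disables the adapters.
 */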
static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
        int ret;

        if (active) {
                struct tb_path **paths;
                int last;

                paths = tunnel->paths;
                last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

                tb_dp_port_set_hops(tunnel->src_port,
                        paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
                        paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
                        paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

                tb_dp_port_set_hops(tunnel->dst_port,
                        paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
                        paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
                        paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
        } else {
                tb_dp_port_hpd_clear(tunnel->src_port);
                tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
                if (tb_port_is_dpout(tunnel->dst_port))
                        tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
        }

        ret = tb_dp_port_enable(tunnel->src_port, active);
        if (ret)
                return ret;

        if (tb_port_is_dpout(tunnel->dst_port))
                return tb_dp_port_enable(tunnel->dst_port, active);

        return 0;
}

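/*
 * AUX carries only low bandwidth sideband traffic so a single initial
 * credit per hop is enough.
 */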
static void tb_dp_init_aux_path(struct tb_path *path)
{
        int i;

        path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 2;
        path->weight = 1;

        for (i = 0; i < path->path_length; i++)
                path->hops[i].initial_credits = 1;
}

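/*
 * The video path is not flow controlled. When discovering an existing
 * tunnel the non-flow control credits programmed by the boot firmware
 * are kept, otherwise most of the port buffers go to video and a
 * couple of credits are left for the AUX paths.
 */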
static void tb_dp_init_video_path(struct tb_path *path, bool discover)
{
        u32 nfc_credits = path->hops[0].in_port->config.nfc_credits;

        path->egress_fc_enable = TB_PATH_NONE;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_fc_enable = TB_PATH_NONE;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 1;
        path->weight = 1;

        if (discover) {
                path->nfc_credits = nfc_credits & TB_PORT_NFC_CREDITS_MASK;
        } else {
                u32 max_credits;

                max_credits = (nfc_credits & TB_PORT_MAX_CREDITS_MASK) >>
                        TB_PORT_MAX_CREDITS_SHIFT;
                /* Leave some credits for AUX path */
                path->nfc_credits = min(max_credits - 2, 12U);
        }
}

/**
 * tb_tunnel_discover_dp() - Discover existing DisplayPort tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back.
 *
 * Return: The discovered tunnel or %NULL if there was no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
{
        struct tb_tunnel *tunnel;
        struct tb_port *port;
        struct tb_path *path;

        if (!tb_dp_port_is_enabled(in))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
        if (!tunnel)
                return NULL;

        tunnel->init = tb_dp_xchg_caps;
        tunnel->activate = tb_dp_activate;
        tunnel->src_port = in;

        path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
                                &tunnel->dst_port, "Video");
        if (!path) {
                /* Just disable the DP IN port */
                tb_dp_port_enable(in, false);
                goto err_free;
        }
        tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
        tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], true);

        path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
        if (!path)
                goto err_deactivate;
        tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
        tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);

        path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
                                &port, "AUX RX");
        if (!path)
                goto err_deactivate;
        tunnel->paths[TB_DP_AUX_PATH_IN] = path;
        tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);

        /* Validate that the tunnel is complete */
        if (!tb_port_is_dpout(tunnel->dst_port)) {
                tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
                goto err_deactivate;
        }

        if (!tb_dp_port_is_enabled(tunnel->dst_port))
                goto err_deactivate;

        if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
                goto err_deactivate;

        if (port != tunnel->src_port) {
                tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
                goto err_deactivate;
        }

        tb_tunnel_dbg(tunnel, "discovered\n");
        return tunnel;

err_deactivate:
        tb_tunnel_deactivate(tunnel);
err_free:
        tb_tunnel_free(tunnel);

        return NULL;
}

/**
 * tb_tunnel_alloc_dp() - Allocate a DisplayPort tunnel
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * DisplayPort traffic.
 *
 * Return: A tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
                                     struct tb_port *out)
{
        struct tb_tunnel *tunnel;
        struct tb_path **paths;
        struct tb_path *path;

        if (WARN_ON(!in->cap_adap || !out->cap_adap))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
        if (!tunnel)
                return NULL;

        tunnel->init = tb_dp_xchg_caps;
        tunnel->activate = tb_dp_activate;
        tunnel->src_port = in;
        tunnel->dst_port = out;

        paths = tunnel->paths;

        path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
                             1, "Video");
        if (!path)
                goto err_free;
        tb_dp_init_video_path(path, false);
        paths[TB_DP_VIDEO_PATH_OUT] = path;

        path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
                             TB_DP_AUX_TX_HOPID, 1, "AUX TX");
        if (!path)
                goto err_free;
        tb_dp_init_aux_path(path);
        paths[TB_DP_AUX_PATH_OUT] = path;

        path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
                             TB_DP_AUX_RX_HOPID, 1, "AUX RX");
        if (!path)
                goto err_free;
        tb_dp_init_aux_path(path);
        paths[TB_DP_AUX_PATH_IN] = path;

        return tunnel;

err_free:
        tb_tunnel_free(tunnel);
        return NULL;
}

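/* Returns the number of credits to use for the DMA paths (at most 13) */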
static u32 tb_dma_credits(struct tb_port *nhi)
{
        u32 max_credits;

        max_credits = (nhi->config.nfc_credits & TB_PORT_MAX_CREDITS_MASK) >>
                TB_PORT_MAX_CREDITS_SHIFT;
        return min(max_credits, 13U);
}

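/*
 * Activation sets the initial credits of the NHI port, deactivation
 * drops them back to zero.
 */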
static int tb_dma_activate(struct tb_tunnel *tunnel, bool active)
{
        struct tb_port *nhi = tunnel->src_port;
        u32 credits;

        credits = active ? tb_dma_credits(nhi) : 0;
        return tb_port_set_initial_credits(nhi, credits);
}

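/*
 * Sets the DMA specific path parameters. Flow control and shared
 * buffering differ between the RX and TX directions so the caller
 * passes them in (@efc and @isb).
 */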
static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
                             unsigned int efc, u32 credits)
{
        int i;

        path->egress_fc_enable = efc;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_shared_buffer = isb;
        path->priority = 5;
        path->weight = 1;
        path->clear_fc = true;

        for (i = 0; i < path->path_length; i++)
                path->hops[i].initial_credits = credits;
}

/**
 * tb_tunnel_alloc_dma() - Allocate a DMA tunnel
 * @tb: Pointer to the domain structure
 * @nhi: Host controller port
 * @dst: Destination null port which the other domain is connected to
 * @transmit_ring: NHI ring number used to send packets towards the
 *                 other domain
 * @transmit_path: HopID used for transmitting packets
 * @receive_ring: NHI ring number used to receive packets from the
 *                other domain
 * @receive_path: HopID used for receiving packets
 *
 * Return: A tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
                                      struct tb_port *dst, int transmit_ring,
                                      int transmit_path, int receive_ring,
                                      int receive_path)
{
        struct tb_tunnel *tunnel;
        struct tb_path *path;
        u32 credits;

        tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_DMA);
        if (!tunnel)
                return NULL;

        tunnel->activate = tb_dma_activate;
        tunnel->src_port = nhi;
        tunnel->dst_port = dst;

        credits = tb_dma_credits(nhi);

        path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, "DMA RX");
        if (!path) {
                tb_tunnel_free(tunnel);
                return NULL;
        }
        tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL,
                         credits);
        tunnel->paths[TB_DMA_PATH_IN] = path;

        path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, "DMA TX");
        if (!path) {
                tb_tunnel_free(tunnel);
                return NULL;
        }
        tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits);
        tunnel->paths[TB_DMA_PATH_OUT] = path;

        return tunnel;
}

/**
 * tb_tunnel_free() - Free a tunnel
 * @tunnel: Tunnel to be freed
 *
 * Frees a tunnel. The tunnel does not need to be deactivated.
 */
void tb_tunnel_free(struct tb_tunnel *tunnel)
{
        int i;

        if (!tunnel)
                return;

        for (i = 0; i < tunnel->npaths; i++) {
                if (tunnel->paths[i])
                        tb_path_free(tunnel->paths[i]);
        }

        kfree(tunnel->paths);
        kfree(tunnel);
}

/**
 * tb_tunnel_is_invalid() - Check whether an activated tunnel is still valid
 * @tunnel: Tunnel to check
 *
 * Return: %true if any path in the tunnel is no longer valid.
 */
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
        int i;

        for (i = 0; i < tunnel->npaths; i++) {
                WARN_ON(!tunnel->paths[i]->activated);
                if (tb_path_is_invalid(tunnel->paths[i]))
                        return true;
        }

        return false;
}

/**
 * tb_tunnel_restart() - Activate a tunnel after a hardware reset
 * @tunnel: Tunnel to restart
 *
 * Return: 0 on success and negative errno in case of failure.
 */
int tb_tunnel_restart(struct tb_tunnel *tunnel)
{
        int res, i;

        tb_tunnel_dbg(tunnel, "activating\n");

        /*
         * Make sure all paths are properly disabled before enabling
         * them again.
         */
        for (i = 0; i < tunnel->npaths; i++) {
                if (tunnel->paths[i]->activated) {
                        tb_path_deactivate(tunnel->paths[i]);
                        tunnel->paths[i]->activated = false;
                }
        }

        if (tunnel->init) {
                res = tunnel->init(tunnel);
                if (res)
                        return res;
        }

        for (i = 0; i < tunnel->npaths; i++) {
                res = tb_path_activate(tunnel->paths[i]);
                if (res)
                        goto err;
        }

        if (tunnel->activate) {
                res = tunnel->activate(tunnel, true);
                if (res)
                        goto err;
        }

        return 0;

err:
        tb_tunnel_warn(tunnel, "activation failed\n");
        tb_tunnel_deactivate(tunnel);
        return res;
}

/**
 * tb_tunnel_activate() - Activate a tunnel
 * @tunnel: Tunnel to activate
 *
 * Return: 0 on success or a negative error code on failure.
 */
int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
        int i;

        for (i = 0; i < tunnel->npaths; i++) {
                if (tunnel->paths[i]->activated) {
                        tb_tunnel_WARN(tunnel,
                                       "trying to activate an already activated tunnel\n");
                        return -EINVAL;
                }
        }

        return tb_tunnel_restart(tunnel);
}

/**
 * tb_tunnel_deactivate() - Deactivate a tunnel
 * @tunnel: Tunnel to deactivate
 */
void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
{
        int i;

        tb_tunnel_dbg(tunnel, "deactivating\n");

        if (tunnel->activate)
                tunnel->activate(tunnel, false);

        for (i = 0; i < tunnel->npaths; i++) {
                if (tunnel->paths[i] && tunnel->paths[i]->activated)
                        tb_path_deactivate(tunnel->paths[i]);
        }
}