linux/drivers/thunderbolt/switch.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Thunderbolt driver - switch/port utility functions
   4 *
   5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
   6 * Copyright (C) 2018, Intel Corporation
   7 */
   8
   9#include <linux/delay.h>
  10#include <linux/idr.h>
  11#include <linux/nvmem-provider.h>
  12#include <linux/pm_runtime.h>
  13#include <linux/sched/signal.h>
  14#include <linux/sizes.h>
  15#include <linux/slab.h>
  16
  17#include "tb.h"
  18
  19/* Switch NVM support */
  20
  21#define NVM_CSS                 0x10
  22
  23struct nvm_auth_status {
  24        struct list_head list;
  25        uuid_t uuid;
  26        u32 status;
  27};
  28
  29/*
   30 * Hold NVM authentication failure status per switch. This information
  31 * needs to stay around even when the switch gets power cycled so we
  32 * keep it separately.
  33 */
  34static LIST_HEAD(nvm_auth_status_cache);
  35static DEFINE_MUTEX(nvm_auth_status_lock);
  36
  37static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
  38{
  39        struct nvm_auth_status *st;
  40
  41        list_for_each_entry(st, &nvm_auth_status_cache, list) {
  42                if (uuid_equal(&st->uuid, sw->uuid))
  43                        return st;
  44        }
  45
  46        return NULL;
  47}
  48
  49static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
  50{
  51        struct nvm_auth_status *st;
  52
  53        mutex_lock(&nvm_auth_status_lock);
  54        st = __nvm_get_auth_status(sw);
  55        mutex_unlock(&nvm_auth_status_lock);
  56
  57        *status = st ? st->status : 0;
  58}
  59
  60static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
  61{
  62        struct nvm_auth_status *st;
  63
  64        if (WARN_ON(!sw->uuid))
  65                return;
  66
  67        mutex_lock(&nvm_auth_status_lock);
  68        st = __nvm_get_auth_status(sw);
  69
  70        if (!st) {
  71                st = kzalloc(sizeof(*st), GFP_KERNEL);
  72                if (!st)
  73                        goto unlock;
  74
  75                memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
  76                INIT_LIST_HEAD(&st->list);
  77                list_add_tail(&st->list, &nvm_auth_status_cache);
  78        }
  79
  80        st->status = status;
  81unlock:
  82        mutex_unlock(&nvm_auth_status_lock);
  83}
  84
  85static void nvm_clear_auth_status(const struct tb_switch *sw)
  86{
  87        struct nvm_auth_status *st;
  88
  89        mutex_lock(&nvm_auth_status_lock);
  90        st = __nvm_get_auth_status(sw);
  91        if (st) {
  92                list_del(&st->list);
  93                kfree(st);
  94        }
  95        mutex_unlock(&nvm_auth_status_lock);
  96}
  97
  98static int nvm_validate_and_write(struct tb_switch *sw)
  99{
 100        unsigned int image_size, hdr_size;
 101        const u8 *buf = sw->nvm->buf;
 102        u16 ds_size;
 103        int ret;
 104
 105        if (!buf)
 106                return -EINVAL;
 107
 108        image_size = sw->nvm->buf_data_size;
 109        if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
 110                return -EINVAL;
 111
 112        /*
 113         * FARB pointer must point inside the image and must at least
 114         * contain parts of the digital section we will be reading here.
 115         */
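             /*
              * A sketch of the image layout as this code reads it (our
              * interpretation of the checks below, not taken from a spec):
              *
              *   offset 0:       FARB header; low 24 bits of the first
              *                   dword give hdr_size
              *   offset NVM_CSS: CSS headers, written out separately on
              *                   pre-gen3 hardware
              *   hdr_size:       digital section, 4k aligned; its first 16
              *                   bits are the section size and the device
              *                   ID is at hdr_size + NVM_DEVID
              */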
 116        hdr_size = (*(u32 *)buf) & 0xffffff;
 117        if (hdr_size + NVM_DEVID + 2 >= image_size)
 118                return -EINVAL;
 119
 120        /* Digital section start should be aligned to 4k page */
 121        if (!IS_ALIGNED(hdr_size, SZ_4K))
 122                return -EINVAL;
 123
 124        /*
 125         * Read digital section size and check that it also fits inside
 126         * the image.
 127         */
 128        ds_size = *(u16 *)(buf + hdr_size);
 129        if (ds_size >= image_size)
 130                return -EINVAL;
 131
 132        if (!sw->safe_mode) {
 133                u16 device_id;
 134
 135                /*
 136                 * Make sure the device ID in the image matches the one
 137                 * we read from the switch config space.
 138                 */
 139                device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
 140                if (device_id != sw->config.device_id)
 141                        return -EINVAL;
 142
 143                if (sw->generation < 3) {
 144                        /* Write CSS headers first */
 145                        ret = dma_port_flash_write(sw->dma_port,
 146                                DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
 147                                DMA_PORT_CSS_MAX_SIZE);
 148                        if (ret)
 149                                return ret;
 150                }
 151
 152                /* Skip headers in the image */
 153                buf += hdr_size;
 154                image_size -= hdr_size;
 155        }
 156
 157        if (tb_switch_is_usb4(sw))
 158                ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
 159        else
 160                ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
 161        if (!ret)
 162                sw->nvm->flushed = true;
 163        return ret;
 164}
 165
 166static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
 167{
 168        int ret = 0;
 169
 170        /*
 171         * Root switch NVM upgrade requires that we disconnect the
 172         * existing paths first (in case it is not in safe mode
 173         * already).
 174         */
 175        if (!sw->safe_mode) {
 176                u32 status;
 177
 178                ret = tb_domain_disconnect_all_paths(sw->tb);
 179                if (ret)
 180                        return ret;
 181                /*
 182                 * The host controller goes away pretty soon after this if
  183         * everything goes well, so getting a timeout is expected.
 184                 */
 185                ret = dma_port_flash_update_auth(sw->dma_port);
 186                if (!ret || ret == -ETIMEDOUT)
 187                        return 0;
 188
 189                /*
 190                 * Any error from update auth operation requires power
 191                 * cycling of the host router.
 192                 */
 193                tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
 194                if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
 195                        nvm_set_auth_status(sw, status);
 196        }
 197
 198        /*
  199         * We can get out of safe mode by just power cycling the
  200         * switch.
 201         */
 202        dma_port_power_cycle(sw->dma_port);
 203        return ret;
 204}
 205
 206static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
 207{
 208        int ret, retries = 10;
 209
 210        ret = dma_port_flash_update_auth(sw->dma_port);
 211        switch (ret) {
 212        case 0:
 213        case -ETIMEDOUT:
 214        case -EACCES:
 215        case -EINVAL:
 216                /* Power cycle is required */
 217                break;
 218        default:
 219                return ret;
 220        }
 221
 222        /*
  223         * Poll here for the authentication status. It takes some time
  224         * for the device to respond (we get a timeout for a while). Once
  225         * we get a response the device needs to be power cycled in order
  226         * for the new NVM to be taken into use.
 227         */
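             /* With 10 retries and 500 ms sleeps we poll for roughly 5 seconds */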
 228        do {
 229                u32 status;
 230
 231                ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
 232                if (ret < 0 && ret != -ETIMEDOUT)
 233                        return ret;
 234                if (ret > 0) {
 235                        if (status) {
 236                                tb_sw_warn(sw, "failed to authenticate NVM\n");
 237                                nvm_set_auth_status(sw, status);
 238                        }
 239
 240                        tb_sw_info(sw, "power cycling the switch now\n");
 241                        dma_port_power_cycle(sw->dma_port);
 242                        return 0;
 243                }
 244
 245                msleep(500);
 246        } while (--retries);
 247
 248        return -ETIMEDOUT;
 249}
 250
 251static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
 252{
 253        struct pci_dev *root_port;
 254
 255        /*
 256         * During host router NVM upgrade we should not allow root port to
 257         * go into D3cold because some root ports cannot trigger PME
  258         * themselves. To be on the safe side keep the root port in D0 during
 259         * the whole upgrade process.
 260         */
 261        root_port = pcie_find_root_port(sw->tb->nhi->pdev);
 262        if (root_port)
 263                pm_runtime_get_noresume(&root_port->dev);
 264}
 265
 266static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
 267{
 268        struct pci_dev *root_port;
 269
 270        root_port = pcie_find_root_port(sw->tb->nhi->pdev);
 271        if (root_port)
 272                pm_runtime_put(&root_port->dev);
 273}
 274
 275static inline bool nvm_readable(struct tb_switch *sw)
 276{
 277        if (tb_switch_is_usb4(sw)) {
 278                /*
  279                 * USB4 devices must support NVM operations but they are
  280                 * optional for hosts. Therefore we query the NVM sector
  281                 * size here and if the query succeeds assume NVM
 282                 * operations are implemented.
 283                 */
 284                return usb4_switch_nvm_sector_size(sw) > 0;
 285        }
 286
 287        /* Thunderbolt 2 and 3 devices support NVM through DMA port */
 288        return !!sw->dma_port;
 289}
 290
 291static inline bool nvm_upgradeable(struct tb_switch *sw)
 292{
 293        if (sw->no_nvm_upgrade)
 294                return false;
 295        return nvm_readable(sw);
 296}
 297
 298static inline int nvm_read(struct tb_switch *sw, unsigned int address,
 299                           void *buf, size_t size)
 300{
 301        if (tb_switch_is_usb4(sw))
 302                return usb4_switch_nvm_read(sw, address, buf, size);
 303        return dma_port_flash_read(sw->dma_port, address, buf, size);
 304}
 305
 306static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
 307{
 308        int ret;
 309
 310        if (tb_switch_is_usb4(sw)) {
 311                if (auth_only) {
 312                        ret = usb4_switch_nvm_set_offset(sw, 0);
 313                        if (ret)
 314                                return ret;
 315                }
 316                sw->nvm->authenticating = true;
 317                return usb4_switch_nvm_authenticate(sw);
 318        } else if (auth_only) {
 319                return -EOPNOTSUPP;
 320        }
 321
 322        sw->nvm->authenticating = true;
 323        if (!tb_route(sw)) {
 324                nvm_authenticate_start_dma_port(sw);
 325                ret = nvm_authenticate_host_dma_port(sw);
 326        } else {
 327                ret = nvm_authenticate_device_dma_port(sw);
 328        }
 329
 330        return ret;
 331}
 332
 333static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
 334                              size_t bytes)
 335{
 336        struct tb_nvm *nvm = priv;
 337        struct tb_switch *sw = tb_to_switch(nvm->dev);
 338        int ret;
 339
 340        pm_runtime_get_sync(&sw->dev);
 341
 342        if (!mutex_trylock(&sw->tb->lock)) {
 343                ret = restart_syscall();
 344                goto out;
 345        }
 346
 347        ret = nvm_read(sw, offset, val, bytes);
 348        mutex_unlock(&sw->tb->lock);
 349
 350out:
 351        pm_runtime_mark_last_busy(&sw->dev);
 352        pm_runtime_put_autosuspend(&sw->dev);
 353
 354        return ret;
 355}
 356
 357static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
 358                               size_t bytes)
 359{
 360        struct tb_nvm *nvm = priv;
 361        struct tb_switch *sw = tb_to_switch(nvm->dev);
 362        int ret;
 363
 364        if (!mutex_trylock(&sw->tb->lock))
 365                return restart_syscall();
 366
 367        /*
 368         * Since writing the NVM image might require some special steps,
 369         * for example when CSS headers are written, we cache the image
 370         * locally here and handle the special cases when the user asks
 371         * us to authenticate the image.
 372         */
 373        ret = tb_nvm_write_buf(nvm, offset, val, bytes);
 374        mutex_unlock(&sw->tb->lock);
 375
 376        return ret;
 377}
 378
 379static int tb_switch_nvm_add(struct tb_switch *sw)
 380{
 381        struct tb_nvm *nvm;
 382        u32 val;
 383        int ret;
 384
 385        if (!nvm_readable(sw))
 386                return 0;
 387
 388        /*
  389         * The NVM format of non-Intel hardware is not known, so we
  390         * currently restrict NVM upgrades to Intel hardware. We may
  391         * relax this in the future when we learn about other NVM formats.
 392         */
 393        if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
 394            sw->config.vendor_id != 0x8087) {
 395                dev_info(&sw->dev,
 396                         "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
 397                         sw->config.vendor_id);
 398                return 0;
 399        }
 400
 401        nvm = tb_nvm_alloc(&sw->dev);
 402        if (IS_ERR(nvm))
 403                return PTR_ERR(nvm);
 404
 405        /*
 406         * If the switch is in safe-mode the only accessible portion of
 407         * the NVM is the non-active one where userspace is expected to
 408         * write new functional NVM.
 409         */
 410        if (!sw->safe_mode) {
 411                u32 nvm_size, hdr_size;
 412
 413                ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
 414                if (ret)
 415                        goto err_nvm;
 416
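                     /*
                      * Our reading of the arithmetic below: the low three
                      * bits of NVM_FLASH_SIZE encode the flash size as a
                      * power of two ((SZ_1M << (val & 7)) / 8, i.e.
                      * 128 KiB .. 16 MiB) and, after the header, the flash
                      * is split into two equal halves, active and
                      * non-active.
                      */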
 417                hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
 418                nvm_size = (SZ_1M << (val & 7)) / 8;
 419                nvm_size = (nvm_size - hdr_size) / 2;
 420
 421                ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
 422                if (ret)
 423                        goto err_nvm;
 424
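                     /*
                      * NVM_VERSION layout as used here (our reading): byte 2
                      * holds the major and byte 1 the minor version; the
                      * assignments below drop the higher bits.
                      */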
 425                nvm->major = val >> 16;
 426                nvm->minor = val >> 8;
 427
 428                ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
 429                if (ret)
 430                        goto err_nvm;
 431        }
 432
 433        if (!sw->no_nvm_upgrade) {
 434                ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
 435                                            tb_switch_nvm_write);
 436                if (ret)
 437                        goto err_nvm;
 438        }
 439
 440        sw->nvm = nvm;
 441        return 0;
 442
 443err_nvm:
 444        tb_nvm_free(nvm);
 445        return ret;
 446}
 447
 448static void tb_switch_nvm_remove(struct tb_switch *sw)
 449{
 450        struct tb_nvm *nvm;
 451
 452        nvm = sw->nvm;
 453        sw->nvm = NULL;
 454
 455        if (!nvm)
 456                return;
 457
 458        /* Remove authentication status in case the switch is unplugged */
 459        if (!nvm->authenticating)
 460                nvm_clear_auth_status(sw);
 461
 462        tb_nvm_free(nvm);
 463}
 464
 465/* port utility functions */
 466
 467static const char *tb_port_type(const struct tb_regs_port_header *port)
 468{
 469        switch (port->type >> 16) {
 470        case 0:
 471                switch ((u8) port->type) {
 472                case 0:
 473                        return "Inactive";
 474                case 1:
 475                        return "Port";
 476                case 2:
 477                        return "NHI";
 478                default:
 479                        return "unknown";
 480                }
 481        case 0x2:
 482                return "Ethernet";
 483        case 0x8:
 484                return "SATA";
 485        case 0xe:
 486                return "DP/HDMI";
 487        case 0x10:
 488                return "PCIe";
 489        case 0x20:
 490                return "USB";
 491        default:
 492                return "unknown";
 493        }
 494}
 495
 496static void tb_dump_port(struct tb *tb, const struct tb_port *port)
 497{
 498        const struct tb_regs_port_header *regs = &port->config;
 499
 500        tb_dbg(tb,
 501               " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
 502               regs->port_number, regs->vendor_id, regs->device_id,
 503               regs->revision, regs->thunderbolt_version, tb_port_type(regs),
 504               regs->type);
 505        tb_dbg(tb, "  Max hop id (in/out): %d/%d\n",
 506               regs->max_in_hop_id, regs->max_out_hop_id);
 507        tb_dbg(tb, "  Max counters: %d\n", regs->max_counters);
 508        tb_dbg(tb, "  NFC Credits: %#x\n", regs->nfc_credits);
 509        tb_dbg(tb, "  Credits (total/control): %u/%u\n", port->total_credits,
 510               port->ctl_credits);
 511}
 512
 513/**
 514 * tb_port_state() - get connectedness state of a port
 515 * @port: the port to check
 516 *
 517 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 518 *
 519 * Return: Returns an enum tb_port_state on success or an error code on failure.
 520 */
 521int tb_port_state(struct tb_port *port)
 522{
 523        struct tb_cap_phy phy;
 524        int res;
 525        if (port->cap_phy == 0) {
 526                tb_port_WARN(port, "does not have a PHY\n");
 527                return -EINVAL;
 528        }
 529        res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
 530        if (res)
 531                return res;
 532        return phy.state;
 533}
 534
 535/**
 536 * tb_wait_for_port() - wait for a port to become ready
  537 * @port: Port to wait for
 538 * @wait_if_unplugged: Wait also when port is unplugged
 539 *
 540 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 541 * wait_if_unplugged is set then we also wait if the port is in state
 542 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 543 * switch resume). Otherwise we only wait if a device is registered but the link
 544 * has not yet been established.
 545 *
 546 * Return: Returns an error code on failure. Returns 0 if the port is not
 547 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 548 * if the port is connected and in state TB_PORT_UP.
 549 */
 550int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
 551{
 552        int retries = 10;
 553        int state;
 554        if (!port->cap_phy) {
 555                tb_port_WARN(port, "does not have PHY\n");
 556                return -EINVAL;
 557        }
 558        if (tb_is_upstream_port(port)) {
 559                tb_port_WARN(port, "is the upstream port\n");
 560                return -EINVAL;
 561        }
 562
 563        while (retries--) {
 564                state = tb_port_state(port);
 565                if (state < 0)
 566                        return state;
 567                if (state == TB_PORT_DISABLED) {
 568                        tb_port_dbg(port, "is disabled (state: 0)\n");
 569                        return 0;
 570                }
 571                if (state == TB_PORT_UNPLUGGED) {
 572                        if (wait_if_unplugged) {
 573                                /* used during resume */
 574                                tb_port_dbg(port,
 575                                            "is unplugged (state: 7), retrying...\n");
 576                                msleep(100);
 577                                continue;
 578                        }
 579                        tb_port_dbg(port, "is unplugged (state: 7)\n");
 580                        return 0;
 581                }
 582                if (state == TB_PORT_UP) {
 583                        tb_port_dbg(port, "is connected, link is up (state: 2)\n");
 584                        return 1;
 585                }
 586
 587                /*
 588                 * After plug-in the state is TB_PORT_CONNECTING. Give it some
 589                 * time.
 590                 */
 591                tb_port_dbg(port,
 592                            "is connected, link is not up (state: %d), retrying...\n",
 593                            state);
 594                msleep(100);
 595        }
 596        tb_port_warn(port,
 597                     "failed to reach state TB_PORT_UP. Ignoring port...\n");
 598        return 0;
 599}
 600
 601/**
 602 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 603 * @port: Port to add/remove NFC credits
 604 * @credits: Credits to add/remove
 605 *
 606 * Change the number of NFC credits allocated to @port by @credits. To remove
 607 * NFC credits pass a negative amount of credits.
 608 *
 609 * Return: Returns 0 on success or an error code on failure.
 610 */
 611int tb_port_add_nfc_credits(struct tb_port *port, int credits)
 612{
 613        u32 nfc_credits;
 614
 615        if (credits == 0 || port->sw->is_unplugged)
 616                return 0;
 617
 618        /*
 619         * USB4 restricts programming NFC buffers to lane adapters only
 620         * so skip other ports.
 621         */
 622        if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
 623                return 0;
 624
 625        nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
 626        nfc_credits += credits;
 627
  628        tb_port_dbg(port, "adding %d NFC credits to %lu\n", credits,
 629                    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);
 630
 631        port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
 632        port->config.nfc_credits |= nfc_credits;
 633
 634        return tb_port_write(port, &port->config.nfc_credits,
 635                             TB_CFG_PORT, ADP_CS_4, 1);
 636}
 637
 638/**
 639 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 640 * @port: Port whose counters to clear
 641 * @counter: Counter index to clear
 642 *
 643 * Return: Returns 0 on success or an error code on failure.
 644 */
 645int tb_port_clear_counter(struct tb_port *port, int counter)
 646{
 647        u32 zero[3] = { 0, 0, 0 };
 648        tb_port_dbg(port, "clearing counter %d\n", counter);
 649        return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
 650}
 651
 652/**
 653 * tb_port_unlock() - Unlock downstream port
 654 * @port: Port to unlock
 655 *
  656 * Needed for USB4 but can be called for any CIO/USB4 port. Makes the
  657 * downstream router accessible to the CM.
 658 */
 659int tb_port_unlock(struct tb_port *port)
 660{
 661        if (tb_switch_is_icm(port->sw))
 662                return 0;
 663        if (!tb_port_is_null(port))
 664                return -EINVAL;
 665        if (tb_switch_is_usb4(port->sw))
 666                return usb4_port_unlock(port);
 667        return 0;
 668}
 669
 670static int __tb_port_enable(struct tb_port *port, bool enable)
 671{
 672        int ret;
 673        u32 phy;
 674
 675        if (!tb_port_is_null(port))
 676                return -EINVAL;
 677
 678        ret = tb_port_read(port, &phy, TB_CFG_PORT,
 679                           port->cap_phy + LANE_ADP_CS_1, 1);
 680        if (ret)
 681                return ret;
 682
 683        if (enable)
 684                phy &= ~LANE_ADP_CS_1_LD;
 685        else
 686                phy |= LANE_ADP_CS_1_LD;
 687
 688        return tb_port_write(port, &phy, TB_CFG_PORT,
 689                             port->cap_phy + LANE_ADP_CS_1, 1);
 690}
 691
 692/**
 693 * tb_port_enable() - Enable lane adapter
 694 * @port: Port to enable (can be %NULL)
 695 *
  696 * This is used to enable lane 0 and 1 adapters.
 697 */
 698int tb_port_enable(struct tb_port *port)
 699{
 700        return __tb_port_enable(port, true);
 701}
 702
 703/**
 704 * tb_port_disable() - Disable lane adapter
 705 * @port: Port to disable (can be %NULL)
 706 *
  707 * This is used to disable lane 0 and 1 adapters.
 708 */
 709int tb_port_disable(struct tb_port *port)
 710{
 711        return __tb_port_enable(port, false);
 712}
 713
 714/*
 715 * tb_init_port() - initialize a port
 716 *
 717 * This is a helper method for tb_switch_alloc. Does not check or initialize
 718 * any downstream switches.
 719 *
 720 * Return: Returns 0 on success or an error code on failure.
 721 */
 722static int tb_init_port(struct tb_port *port)
 723{
 724        int res;
 725        int cap;
 726
 727        INIT_LIST_HEAD(&port->list);
 728
 729        /* Control adapter does not have configuration space */
 730        if (!port->port)
 731                return 0;
 732
 733        res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
 734        if (res) {
 735                if (res == -ENODEV) {
 736                        tb_dbg(port->sw->tb, " Port %d: not implemented\n",
 737                               port->port);
 738                        port->disabled = true;
 739                        return 0;
 740                }
 741                return res;
 742        }
 743
 744        /* Port 0 is the switch itself and has no PHY. */
 745        if (port->config.type == TB_TYPE_PORT) {
 746                cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
 747
 748                if (cap > 0)
 749                        port->cap_phy = cap;
 750                else
 751                        tb_port_WARN(port, "non switch port without a PHY\n");
 752
 753                cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
 754                if (cap > 0)
 755                        port->cap_usb4 = cap;
 756
 757                /*
  758                 * For USB4 ports the buffers allocated for the control path
  759                 * can be read from the path config space. For legacy
  760                 * devices we use a hard-coded value.
 761                 */
 762                if (tb_switch_is_usb4(port->sw)) {
 763                        struct tb_regs_hop hop;
 764
 765                        if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
 766                                port->ctl_credits = hop.initial_credits;
 767                }
 768                if (!port->ctl_credits)
 769                        port->ctl_credits = 2;
 770
 771        } else {
 772                cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
 773                if (cap > 0)
 774                        port->cap_adap = cap;
 775        }
 776
 777        port->total_credits =
 778                (port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
 779                ADP_CS_4_TOTAL_BUFFERS_SHIFT;
 780
 781        tb_dump_port(port->sw->tb, port);
 782        return 0;
 783}
 784
 785static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
 786                               int max_hopid)
 787{
 788        int port_max_hopid;
 789        struct ida *ida;
 790
 791        if (in) {
 792                port_max_hopid = port->config.max_in_hop_id;
 793                ida = &port->in_hopids;
 794        } else {
 795                port_max_hopid = port->config.max_out_hop_id;
 796                ida = &port->out_hopids;
 797        }
 798
 799        /*
  800         * The NHI can use HopIDs 1-max. For other adapters HopIDs 0-7
  801         * are reserved.
 802         */
 803        if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
 804                min_hopid = TB_PATH_MIN_HOPID;
 805
 806        if (max_hopid < 0 || max_hopid > port_max_hopid)
 807                max_hopid = port_max_hopid;
 808
 809        return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
 810}
 811
 812/**
 813 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 814 * @port: Port to allocate HopID for
 815 * @min_hopid: Minimum acceptable input HopID
 816 * @max_hopid: Maximum acceptable input HopID
 817 *
 818 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 819 * case of error.
 820 */
 821int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
 822{
 823        return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
 824}
 825
 826/**
 827 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 828 * @port: Port to allocate HopID for
 829 * @min_hopid: Minimum acceptable output HopID
 830 * @max_hopid: Maximum acceptable output HopID
 831 *
 832 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 833 * case of error.
 834 */
 835int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
 836{
 837        return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
 838}
 839
 840/**
 841 * tb_port_release_in_hopid() - Release allocated input HopID from port
 842 * @port: Port whose HopID to release
 843 * @hopid: HopID to release
 844 */
 845void tb_port_release_in_hopid(struct tb_port *port, int hopid)
 846{
 847        ida_simple_remove(&port->in_hopids, hopid);
 848}
 849
 850/**
 851 * tb_port_release_out_hopid() - Release allocated output HopID from port
 852 * @port: Port whose HopID to release
 853 * @hopid: HopID to release
 854 */
 855void tb_port_release_out_hopid(struct tb_port *port, int hopid)
 856{
 857        ida_simple_remove(&port->out_hopids, hopid);
 858}
 859
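     /*
      * A route string uses one byte per hop, so a router at depth N is
      * identified by the low N bytes of its route (our reading of the
      * mask arithmetic below): @sw is below @parent when the two routes
      * agree on the parent's prefix.
      */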
 860static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
 861                                          const struct tb_switch *sw)
 862{
 863        u64 mask = (1ULL << parent->config.depth * 8) - 1;
 864        return (tb_route(parent) & mask) == (tb_route(sw) & mask);
 865}
 866
 867/**
 868 * tb_next_port_on_path() - Return next port for given port on a path
 869 * @start: Start port of the walk
 870 * @end: End port of the walk
 871 * @prev: Previous port (%NULL if this is the first)
 872 *
 873 * This function can be used to walk from one port to another if they
  874 * are connected through zero or more switches. If @prev is a dual
  875 * link port, the function follows that link and returns the other end
  876 * of that same link.
 877 *
 878 * If the @end port has been reached, return %NULL.
 879 *
 880 * Domain tb->lock must be held when this function is called.
 881 */
 882struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
 883                                     struct tb_port *prev)
 884{
 885        struct tb_port *next;
 886
 887        if (!prev)
 888                return start;
 889
 890        if (prev->sw == end->sw) {
 891                if (prev == end)
 892                        return NULL;
 893                return end;
 894        }
 895
 896        if (tb_switch_is_reachable(prev->sw, end->sw)) {
 897                next = tb_port_at(tb_route(end->sw), prev->sw);
 898                /* Walk down the topology if next == prev */
 899                if (prev->remote &&
 900                    (next == prev || next->dual_link_port == prev))
 901                        next = prev->remote;
 902        } else {
 903                if (tb_is_upstream_port(prev)) {
 904                        next = prev->remote;
 905                } else {
 906                        next = tb_upstream_port(prev->sw);
 907                        /*
 908                         * Keep the same link if prev and next are both
 909                         * dual link ports.
 910                         */
 911                        if (next->dual_link_port &&
 912                            next->link_nr != prev->link_nr) {
 913                                next = next->dual_link_port;
 914                        }
 915                }
 916        }
 917
 918        return next != prev ? next : NULL;
 919}
 920
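     /*
      * Typical use is a simple walk loop (an illustrative sketch; tb.h
      * also provides the tb_for_each_port_on_path() macro for this;
      * process() below stands in for whatever the caller does per port):
      *
      *	struct tb_port *p = NULL;
      *
      *	while ((p = tb_next_port_on_path(src, dst, p)))
      *		process(p);
      */
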
 921/**
 922 * tb_port_get_link_speed() - Get current link speed
 923 * @port: Port to check (USB4 or CIO)
 924 *
 925 * Returns link speed in Gb/s or negative errno in case of failure.
 926 */
 927int tb_port_get_link_speed(struct tb_port *port)
 928{
 929        u32 val, speed;
 930        int ret;
 931
 932        if (!port->cap_phy)
 933                return -EINVAL;
 934
 935        ret = tb_port_read(port, &val, TB_CFG_PORT,
 936                           port->cap_phy + LANE_ADP_CS_1, 1);
 937        if (ret)
 938                return ret;
 939
 940        speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
 941                LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
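             /* Gen 3 lanes run at 20 Gb/s; everything else reports 10 Gb/s */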
 942        return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
 943}
 944
 945/**
 946 * tb_port_get_link_width() - Get current link width
 947 * @port: Port to check (USB4 or CIO)
 948 *
 949 * Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane)
 950 * or negative errno in case of failure.
 951 */
 952int tb_port_get_link_width(struct tb_port *port)
 953{
 954        u32 val;
 955        int ret;
 956
 957        if (!port->cap_phy)
 958                return -EINVAL;
 959
 960        ret = tb_port_read(port, &val, TB_CFG_PORT,
 961                           port->cap_phy + LANE_ADP_CS_1, 1);
 962        if (ret)
 963                return ret;
 964
 965        return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
 966                LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
 967}
 968
 969static bool tb_port_is_width_supported(struct tb_port *port, int width)
 970{
 971        u32 phy, widths;
 972        int ret;
 973
 974        if (!port->cap_phy)
 975                return false;
 976
 977        ret = tb_port_read(port, &phy, TB_CFG_PORT,
 978                           port->cap_phy + LANE_ADP_CS_0, 1);
 979        if (ret)
 980                return false;
 981
 982        widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
 983                LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
 984
 985        return !!(widths & width);
 986}
 987
 988static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
 989{
 990        u32 val;
 991        int ret;
 992
 993        if (!port->cap_phy)
 994                return -EINVAL;
 995
 996        ret = tb_port_read(port, &val, TB_CFG_PORT,
 997                           port->cap_phy + LANE_ADP_CS_1, 1);
 998        if (ret)
 999                return ret;
1000
1001        val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
1002        switch (width) {
1003        case 1:
1004                val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
1005                        LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
1006                break;
1007        case 2:
1008                val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
1009                        LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
1010                break;
1011        default:
1012                return -EINVAL;
1013        }
1014
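             /* LB is the lane bonding bit in LANE_ADP_CS_1 (our reading) */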
1015        val |= LANE_ADP_CS_1_LB;
1016
1017        return tb_port_write(port, &val, TB_CFG_PORT,
1018                             port->cap_phy + LANE_ADP_CS_1, 1);
1019}
1020
1021/**
1022 * tb_port_lane_bonding_enable() - Enable bonding on port
1023 * @port: port to enable
1024 *
1025 * Enable bonding by setting the link width of the port and the other
1026 * port in case of dual link port. Does not wait for the link to
1027 * actually reach the bonded state so caller needs to call
1028 * tb_port_wait_for_link_width() before enabling any paths through the
1029 * link to make sure the link is in expected state.
1030 *
1031 * Return: %0 in case of success and negative errno in case of error
1032 */
1033int tb_port_lane_bonding_enable(struct tb_port *port)
1034{
1035        int ret;
1036
1037        /*
 1038         * Enable lane bonding for both links if not already enabled by,
 1039         * for example, the boot firmware.
1040         */
1041        ret = tb_port_get_link_width(port);
1042        if (ret == 1) {
1043                ret = tb_port_set_link_width(port, 2);
1044                if (ret)
1045                        return ret;
1046        }
1047
1048        ret = tb_port_get_link_width(port->dual_link_port);
1049        if (ret == 1) {
1050                ret = tb_port_set_link_width(port->dual_link_port, 2);
1051                if (ret) {
1052                        tb_port_set_link_width(port, 1);
1053                        return ret;
1054                }
1055        }
1056
1057        port->bonded = true;
1058        port->dual_link_port->bonded = true;
1059
1060        return 0;
1061}
1062
1063/**
1064 * tb_port_lane_bonding_disable() - Disable bonding on port
1065 * @port: port to disable
1066 *
1067 * Disable bonding by setting the link width of the port and the
1068 * other port in case of dual link port.
1069 *
1070 */
1071void tb_port_lane_bonding_disable(struct tb_port *port)
1072{
1073        port->dual_link_port->bonded = false;
1074        port->bonded = false;
1075
1076        tb_port_set_link_width(port->dual_link_port, 1);
1077        tb_port_set_link_width(port, 1);
1078}
1079
1080/**
1081 * tb_port_wait_for_link_width() - Wait until link reaches specific width
1082 * @port: Port to wait for
1083 * @width: Expected link width (%1 or %2)
1084 * @timeout_msec: Timeout in ms how long to wait
1085 *
1086 * Should be used after both ends of the link have been bonded (or
1087 * bonding has been disabled) to wait until the link actually reaches
1088 * the expected state. Returns %-ETIMEDOUT if the @width was not reached
1089 * within the given timeout, %0 if it did.
1090 */
1091int tb_port_wait_for_link_width(struct tb_port *port, int width,
1092                                int timeout_msec)
1093{
1094        ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
1095        int ret;
1096
1097        do {
1098                ret = tb_port_get_link_width(port);
1099                if (ret < 0)
1100                        return ret;
1101                else if (ret == width)
1102                        return 0;
1103
1104                usleep_range(1000, 2000);
1105        } while (ktime_before(ktime_get(), timeout));
1106
1107        return -ETIMEDOUT;
1108}
1109
1110static int tb_port_do_update_credits(struct tb_port *port)
1111{
1112        u32 nfc_credits;
1113        int ret;
1114
1115        ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
1116        if (ret)
1117                return ret;
1118
1119        if (nfc_credits != port->config.nfc_credits) {
1120                u32 total;
1121
1122                total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
1123                        ADP_CS_4_TOTAL_BUFFERS_SHIFT;
1124
1125                tb_port_dbg(port, "total credits changed %u -> %u\n",
1126                            port->total_credits, total);
1127
1128                port->config.nfc_credits = nfc_credits;
1129                port->total_credits = total;
1130        }
1131
1132        return 0;
1133}
1134
1135/**
1136 * tb_port_update_credits() - Re-read port total credits
1137 * @port: Port to update
1138 *
1139 * After the link is bonded (or bonding was disabled) the port total
1140 * credits may change, so this function needs to be called to re-read
 1141 * the credits. Also updates the second lane adapter.
1142 */
1143int tb_port_update_credits(struct tb_port *port)
1144{
1145        int ret;
1146
1147        ret = tb_port_do_update_credits(port);
1148        if (ret)
1149                return ret;
1150        return tb_port_do_update_credits(port->dual_link_port);
1151}
1152
1153static int tb_port_start_lane_initialization(struct tb_port *port)
1154{
1155        int ret;
1156
1157        if (tb_switch_is_usb4(port->sw))
1158                return 0;
1159
1160        ret = tb_lc_start_lane_initialization(port);
1161        return ret == -EINVAL ? 0 : ret;
1162}
1163
1164/*
1165 * Returns true if the port had something (router, XDomain) connected
1166 * before suspend.
1167 */
1168static bool tb_port_resume(struct tb_port *port)
1169{
1170        bool has_remote = tb_port_has_remote(port);
1171
1172        if (port->usb4) {
1173                usb4_port_device_resume(port->usb4);
1174        } else if (!has_remote) {
1175                /*
1176                 * For disconnected downstream lane adapters start lane
1177                 * initialization now so we detect future connects.
1178                 *
 1179                 * For XDomain start the lane initialization now so the
1180                 * link gets re-established.
1181                 *
1182                 * This is only needed for non-USB4 ports.
1183                 */
1184                if (!tb_is_upstream_port(port) || port->xdomain)
1185                        tb_port_start_lane_initialization(port);
1186        }
1187
1188        return has_remote || port->xdomain;
1189}
1190
1191/**
1192 * tb_port_is_enabled() - Is the adapter port enabled
1193 * @port: Port to check
1194 */
1195bool tb_port_is_enabled(struct tb_port *port)
1196{
1197        switch (port->config.type) {
1198        case TB_TYPE_PCIE_UP:
1199        case TB_TYPE_PCIE_DOWN:
1200                return tb_pci_port_is_enabled(port);
1201
1202        case TB_TYPE_DP_HDMI_IN:
1203        case TB_TYPE_DP_HDMI_OUT:
1204                return tb_dp_port_is_enabled(port);
1205
1206        case TB_TYPE_USB3_UP:
1207        case TB_TYPE_USB3_DOWN:
1208                return tb_usb3_port_is_enabled(port);
1209
1210        default:
1211                return false;
1212        }
1213}
1214
1215/**
1216 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
1217 * @port: USB3 adapter port to check
1218 */
1219bool tb_usb3_port_is_enabled(struct tb_port *port)
1220{
1221        u32 data;
1222
1223        if (tb_port_read(port, &data, TB_CFG_PORT,
1224                         port->cap_adap + ADP_USB3_CS_0, 1))
1225                return false;
1226
1227        return !!(data & ADP_USB3_CS_0_PE);
1228}
1229
1230/**
1231 * tb_usb3_port_enable() - Enable USB3 adapter port
1232 * @port: USB3 adapter port to enable
1233 * @enable: Enable/disable the USB3 adapter
1234 */
1235int tb_usb3_port_enable(struct tb_port *port, bool enable)
1236{
1237        u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
1238                          : ADP_USB3_CS_0_V;
1239
1240        if (!port->cap_adap)
1241                return -ENXIO;
1242        return tb_port_write(port, &word, TB_CFG_PORT,
1243                             port->cap_adap + ADP_USB3_CS_0, 1);
1244}
1245
1246/**
1247 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
1248 * @port: PCIe port to check
1249 */
1250bool tb_pci_port_is_enabled(struct tb_port *port)
1251{
1252        u32 data;
1253
1254        if (tb_port_read(port, &data, TB_CFG_PORT,
1255                         port->cap_adap + ADP_PCIE_CS_0, 1))
1256                return false;
1257
1258        return !!(data & ADP_PCIE_CS_0_PE);
1259}
1260
1261/**
1262 * tb_pci_port_enable() - Enable PCIe adapter port
1263 * @port: PCIe port to enable
1264 * @enable: Enable/disable the PCIe adapter
1265 */
1266int tb_pci_port_enable(struct tb_port *port, bool enable)
1267{
1268        u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
1269        if (!port->cap_adap)
1270                return -ENXIO;
1271        return tb_port_write(port, &word, TB_CFG_PORT,
1272                             port->cap_adap + ADP_PCIE_CS_0, 1);
1273}
1274
1275/**
1276 * tb_dp_port_hpd_is_active() - Is HPD already active
1277 * @port: DP out port to check
1278 *
 1279 * Checks if the DP OUT adapter port has the HPD bit already set.
1280 */
1281int tb_dp_port_hpd_is_active(struct tb_port *port)
1282{
1283        u32 data;
1284        int ret;
1285
1286        ret = tb_port_read(port, &data, TB_CFG_PORT,
1287                           port->cap_adap + ADP_DP_CS_2, 1);
1288        if (ret)
1289                return ret;
1290
1291        return !!(data & ADP_DP_CS_2_HDP);
1292}
1293
1294/**
1295 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
1296 * @port: Port to clear HPD
1297 *
 1298 * If the DP IN port has HPD set, this function can be used to clear it.
1299 */
1300int tb_dp_port_hpd_clear(struct tb_port *port)
1301{
1302        u32 data;
1303        int ret;
1304
1305        ret = tb_port_read(port, &data, TB_CFG_PORT,
1306                           port->cap_adap + ADP_DP_CS_3, 1);
1307        if (ret)
1308                return ret;
1309
1310        data |= ADP_DP_CS_3_HDPC;
1311        return tb_port_write(port, &data, TB_CFG_PORT,
1312                             port->cap_adap + ADP_DP_CS_3, 1);
1313}
1314
1315/**
1316 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
1317 * @port: DP IN/OUT port to set hops
1318 * @video: Video Hop ID
1319 * @aux_tx: AUX TX Hop ID
1320 * @aux_rx: AUX RX Hop ID
1321 *
1322 * Programs specified Hop IDs for DP IN/OUT port.
1323 */
1324int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
1325                        unsigned int aux_tx, unsigned int aux_rx)
1326{
1327        u32 data[2];
1328        int ret;
1329
1330        ret = tb_port_read(port, data, TB_CFG_PORT,
1331                           port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1332        if (ret)
1333                return ret;
1334
1335        data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
1336        data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
 1337        data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
1338
1339        data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
1340                ADP_DP_CS_0_VIDEO_HOPID_MASK;
1341        data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
1342        data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
1343                ADP_DP_CS_1_AUX_RX_HOPID_MASK;
1344
1345        return tb_port_write(port, data, TB_CFG_PORT,
1346                             port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1347}
1348
1349/**
1350 * tb_dp_port_is_enabled() - Is DP adapter port enabled
1351 * @port: DP adapter port to check
1352 */
1353bool tb_dp_port_is_enabled(struct tb_port *port)
1354{
1355        u32 data[2];
1356
1357        if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
1358                         ARRAY_SIZE(data)))
1359                return false;
1360
1361        return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
1362}
1363
1364/**
1365 * tb_dp_port_enable() - Enables/disables DP paths of a port
1366 * @port: DP IN/OUT port
1367 * @enable: Enable/disable DP path
1368 *
1369 * Once Hop IDs are programmed DP paths can be enabled or disabled by
1370 * calling this function.
1371 */
1372int tb_dp_port_enable(struct tb_port *port, bool enable)
1373{
1374        u32 data[2];
1375        int ret;
1376
1377        ret = tb_port_read(port, data, TB_CFG_PORT,
1378                          port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1379        if (ret)
1380                return ret;
1381
1382        if (enable)
1383                data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
1384        else
1385                data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);
1386
1387        return tb_port_write(port, data, TB_CFG_PORT,
1388                             port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1389}
1390
1391/* switch utility functions */
1392
1393static const char *tb_switch_generation_name(const struct tb_switch *sw)
1394{
1395        switch (sw->generation) {
1396        case 1:
1397                return "Thunderbolt 1";
1398        case 2:
1399                return "Thunderbolt 2";
1400        case 3:
1401                return "Thunderbolt 3";
1402        case 4:
1403                return "USB4";
1404        default:
1405                return "Unknown";
1406        }
1407}
1408
1409static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
1410{
1411        const struct tb_regs_switch_header *regs = &sw->config;
1412
1413        tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
1414               tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
1415               regs->revision, regs->thunderbolt_version);
1416        tb_dbg(tb, "  Max Port Number: %d\n", regs->max_port_number);
1417        tb_dbg(tb, "  Config:\n");
1418        tb_dbg(tb,
1419                "   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
1420               regs->upstream_port_number, regs->depth,
1421               (((u64) regs->route_hi) << 32) | regs->route_lo,
1422               regs->enabled, regs->plug_events_delay);
1423        tb_dbg(tb, "   unknown1: %#x unknown4: %#x\n",
1424               regs->__unknown1, regs->__unknown4);
1425}
1426
1427/**
1428 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
1429 * @sw: Switch to reset
1430 *
1431 * Return: Returns 0 on success or an error code on failure.
1432 */
1433int tb_switch_reset(struct tb_switch *sw)
1434{
1435        struct tb_cfg_result res;
1436
1437        if (sw->generation > 1)
1438                return 0;
1439
1440        tb_sw_dbg(sw, "resetting switch\n");
1441
1442        res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
1443                              TB_CFG_SWITCH, 2, 2);
1444        if (res.err)
1445                return res.err;
1446        res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
1447        if (res.err > 0)
1448                return -EIO;
1449        return res.err;
1450}
1451
1452/*
1453 * tb_plug_events_active() - enable/disable plug events on a switch
1454 *
1455 * Also configures a sane plug_events_delay of 255ms.
1456 *
1457 * Return: Returns 0 on success or an error code on failure.
1458 */
1459static int tb_plug_events_active(struct tb_switch *sw, bool active)
1460{
1461        u32 data;
1462        int res;
1463
1464        if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
1465                return 0;
1466
1467        sw->config.plug_events_delay = 0xff;
1468        res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
1469        if (res)
1470                return res;
1471
1472        res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
1473        if (res)
1474                return res;
1475
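             /*
              * Our reading of the magic constants below: bits 2-6 of the
              * plug events register select plug event behaviour. The
              * active path clears them (and sets bit 2 on everything
              * except Light Ridge, Eagle Ridge and Port Ridge) while the
              * inactive path sets all of 0x7c.
              */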
1476        if (active) {
1477                data = data & 0xFFFFFF83;
1478                switch (sw->config.device_id) {
1479                case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1480                case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
1481                case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
1482                        break;
1483                default:
1484                        data |= 4;
1485                }
1486        } else {
1487                data = data | 0x7c;
1488        }
1489        return tb_sw_write(sw, &data, TB_CFG_SWITCH,
1490                           sw->cap_plug_events + 1, 1);
1491}
1492
1493static ssize_t authorized_show(struct device *dev,
1494                               struct device_attribute *attr,
1495                               char *buf)
1496{
1497        struct tb_switch *sw = tb_to_switch(dev);
1498
1499        return sprintf(buf, "%u\n", sw->authorized);
1500}
1501
1502static int disapprove_switch(struct device *dev, void *not_used)
1503{
1504        char *envp[] = { "AUTHORIZED=0", NULL };
1505        struct tb_switch *sw;
1506
1507        sw = tb_to_switch(dev);
1508        if (sw && sw->authorized) {
1509                int ret;
1510
1511                /* First children */
1512                ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
1513                if (ret)
1514                        return ret;
1515
1516                ret = tb_domain_disapprove_switch(sw->tb, sw);
1517                if (ret)
1518                        return ret;
1519
1520                sw->authorized = 0;
1521                kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
1522        }
1523
1524        return 0;
1525}
1526
1527static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
1528{
1529        char envp_string[13];
1530        int ret = -EINVAL;
1531        char *envp[] = { envp_string, NULL };
1532
1533        if (!mutex_trylock(&sw->tb->lock))
1534                return restart_syscall();
1535
1536        if (!!sw->authorized == !!val)
1537                goto unlock;
1538
1539        switch (val) {
1540        /* Disapprove switch */
1541        case 0:
1542                if (tb_route(sw)) {
1543                        ret = disapprove_switch(&sw->dev, NULL);
1544                        goto unlock;
1545                }
1546                break;
1547
1548        /* Approve switch */
1549        case 1:
1550                if (sw->key)
1551                        ret = tb_domain_approve_switch_key(sw->tb, sw);
1552                else
1553                        ret = tb_domain_approve_switch(sw->tb, sw);
1554                break;
1555
1556        /* Challenge switch */
1557        case 2:
1558                if (sw->key)
1559                        ret = tb_domain_challenge_switch_key(sw->tb, sw);
1560                break;
1561
1562        default:
1563                break;
1564        }
1565
1566        if (!ret) {
1567                sw->authorized = val;
1568                /*
1569                 * Notify status change to the userspace, informing the new
1570                 * value of /sys/bus/thunderbolt/devices/.../authorized.
1571                 */
1572                sprintf(envp_string, "AUTHORIZED=%u", sw->authorized);
1573                kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
1574        }
1575
1576unlock:
1577        mutex_unlock(&sw->tb->lock);
1578        return ret;
1579}
1580
1581static ssize_t authorized_store(struct device *dev,
1582                                struct device_attribute *attr,
1583                                const char *buf, size_t count)
1584{
1585        struct tb_switch *sw = tb_to_switch(dev);
1586        unsigned int val;
1587        ssize_t ret;
1588
1589        ret = kstrtouint(buf, 0, &val);
1590        if (ret)
1591                return ret;
1592        if (val > 2)
1593                return -EINVAL;
1594
1595        pm_runtime_get_sync(&sw->dev);
1596        ret = tb_switch_set_authorized(sw, val);
1597        pm_runtime_mark_last_busy(&sw->dev);
1598        pm_runtime_put_autosuspend(&sw->dev);
1599
1600        return ret ? ret : count;
1601}
1602static DEVICE_ATTR_RW(authorized);
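
     /*
      * Userspace usage sketch (Documentation/admin-guide/thunderbolt.rst
      * documents the ABI): writing 1 approves the switch, 2 challenges it
      * with the stored key and 0 (on a non-root switch) de-authorizes its
      * whole sub-tree, e.g.:
      *
      *	# echo 1 > /sys/bus/thunderbolt/devices/0-1/authorized
      */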
1603
1604static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
1605                         char *buf)
1606{
1607        struct tb_switch *sw = tb_to_switch(dev);
1608
1609        return sprintf(buf, "%u\n", sw->boot);
1610}
1611static DEVICE_ATTR_RO(boot);
1612
1613static ssize_t device_show(struct device *dev, struct device_attribute *attr,
1614                           char *buf)
1615{
1616        struct tb_switch *sw = tb_to_switch(dev);
1617
1618        return sprintf(buf, "%#x\n", sw->device);
1619}
1620static DEVICE_ATTR_RO(device);
1621
1622static ssize_t
1623device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1624{
1625        struct tb_switch *sw = tb_to_switch(dev);
1626
1627        return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
1628}
1629static DEVICE_ATTR_RO(device_name);
1630
1631static ssize_t
1632generation_show(struct device *dev, struct device_attribute *attr, char *buf)
1633{
1634        struct tb_switch *sw = tb_to_switch(dev);
1635
1636        return sprintf(buf, "%u\n", sw->generation);
1637}
1638static DEVICE_ATTR_RO(generation);
1639
1640static ssize_t key_show(struct device *dev, struct device_attribute *attr,
1641                        char *buf)
1642{
1643        struct tb_switch *sw = tb_to_switch(dev);
1644        ssize_t ret;
1645
1646        if (!mutex_trylock(&sw->tb->lock))
1647                return restart_syscall();
1648
1649        if (sw->key)
1650                ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
1651        else
1652                ret = sprintf(buf, "\n");
1653
1654        mutex_unlock(&sw->tb->lock);
1655        return ret;
1656}
1657
1658static ssize_t key_store(struct device *dev, struct device_attribute *attr,
1659                         const char *buf, size_t count)
1660{
1661        struct tb_switch *sw = tb_to_switch(dev);
1662        u8 key[TB_SWITCH_KEY_SIZE];
1663        ssize_t ret = count;
1664        bool clear = false;
1665
1666        if (!strcmp(buf, "\n"))
1667                clear = true;
1668        else if (hex2bin(key, buf, sizeof(key)))
1669                return -EINVAL;
1670
1671        if (!mutex_trylock(&sw->tb->lock))
1672                return restart_syscall();
1673
1674        if (sw->authorized) {
1675                ret = -EBUSY;
1676        } else {
1677                kfree(sw->key);
1678                if (clear) {
1679                        sw->key = NULL;
1680                } else {
1681                        sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
1682                        if (!sw->key)
1683                                ret = -ENOMEM;
1684                }
1685        }
1686
1687        mutex_unlock(&sw->tb->lock);
1688        return ret;
1689}
1690static DEVICE_ATTR(key, 0600, key_show, key_store);
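
/*
 * Illustrative sketch (not part of the driver) of the round trip the two
 * callbacks above implement: key_show() prints the key with "%*phN"
 * (plain hex, no separators) and key_store() parses exactly that
 * representation back with hex2bin(), so the key travels through sysfs
 * as 2 * TB_SWITCH_KEY_SIZE hexadecimal characters (or a bare newline
 * to clear it).
 */
#if 0 /* example only */
static void tb_example_key_round_trip(void)
{
	u8 key[TB_SWITCH_KEY_SIZE] = {};
	char buf[2 * TB_SWITCH_KEY_SIZE + 2];

	/* What key_show() emits ... */
	sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, key);
	/* ... is exactly what key_store() accepts back */
	WARN_ON(hex2bin(key, buf, sizeof(key)));
}
#endif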
1691
1692static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
1693                          char *buf)
1694{
1695        struct tb_switch *sw = tb_to_switch(dev);
1696
1697        return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
1698}
1699
1700/*
1701 * Currently all lanes must run at the same speed but we expose both
1702 * directions to allow for possible asymmetric links in the future.
1703 */
1704static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
1705static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
1706
1707static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
1708                          char *buf)
1709{
1710        struct tb_switch *sw = tb_to_switch(dev);
1711
1712        return sprintf(buf, "%u\n", sw->link_width);
1713}
1714
1715/*
1716 * Currently the link has the same number of lanes in both directions
1717 * (1 or 2) but we expose them separately to allow future asymmetric links.
1718 */
1719static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
1720static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
1721
1722static ssize_t nvm_authenticate_show(struct device *dev,
1723        struct device_attribute *attr, char *buf)
1724{
1725        struct tb_switch *sw = tb_to_switch(dev);
1726        u32 status;
1727
1728        nvm_get_auth_status(sw, &status);
1729        return sprintf(buf, "%#x\n", status);
1730}
1731
1732static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
1733                                      bool disconnect)
1734{
1735        struct tb_switch *sw = tb_to_switch(dev);
1736        int val, ret;
1737
1738        pm_runtime_get_sync(&sw->dev);
1739
1740        if (!mutex_trylock(&sw->tb->lock)) {
1741                ret = restart_syscall();
1742                goto exit_rpm;
1743        }
1744
1745        /* Bail out if the NVMem devices are not added yet */
1746        if (!sw->nvm) {
1747                ret = -EAGAIN;
1748                goto exit_unlock;
1749        }
1750
1751        ret = kstrtoint(buf, 10, &val);
1752        if (ret)
1753                goto exit_unlock;
1754
1755        /* Always clear the authentication status */
1756        nvm_clear_auth_status(sw);
1757
1758        if (val > 0) {
1759                if (val == AUTHENTICATE_ONLY) {
1760                        if (disconnect)
1761                                ret = -EINVAL;
1762                        else
1763                                ret = nvm_authenticate(sw, true);
1764                } else {
1765                        if (!sw->nvm->flushed) {
1766                                if (!sw->nvm->buf) {
1767                                        ret = -EINVAL;
1768                                        goto exit_unlock;
1769                                }
1770
1771                                ret = nvm_validate_and_write(sw);
1772                                if (ret || val == WRITE_ONLY)
1773                                        goto exit_unlock;
1774                        }
1775                        if (val == WRITE_AND_AUTHENTICATE) {
1776                                if (disconnect)
1777                                        ret = tb_lc_force_power(sw);
1778                                else
1779                                        ret = nvm_authenticate(sw, false);
1780                        }
1781                }
1782        }
1783
1784exit_unlock:
1785        mutex_unlock(&sw->tb->lock);
1786exit_rpm:
1787        pm_runtime_mark_last_busy(&sw->dev);
1788        pm_runtime_put_autosuspend(&sw->dev);
1789
1790        return ret;
1791}
1792
1793static ssize_t nvm_authenticate_store(struct device *dev,
1794        struct device_attribute *attr, const char *buf, size_t count)
1795{
1796        int ret = nvm_authenticate_sysfs(dev, buf, false);
1797        if (ret)
1798                return ret;
1799        return count;
1800}
1801static DEVICE_ATTR_RW(nvm_authenticate);
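
/*
 * Illustrative sketch (not part of the driver): the NVM upgrade flow as
 * userspace drives it through the attribute above (device name and exact
 * numeric values are assumptions; the constants are defined earlier in
 * this file):
 *
 *  1. Write the new image to .../nvm_non_active<id>/nvmem.
 *  2. Write the numeric value of WRITE_AND_AUTHENTICATE to
 *     nvm_authenticate to flash and authenticate in one step, WRITE_ONLY
 *     to only flash the image, or AUTHENTICATE_ONLY to authenticate an
 *     already flushed image.
 *  3. On failure the reason can be read back from nvm_authenticate,
 *     which reports the status cached by nvm_set_auth_status().
 */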
1802
1803static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
1804        struct device_attribute *attr, char *buf)
1805{
1806        return nvm_authenticate_show(dev, attr, buf);
1807}
1808
1809static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
1810        struct device_attribute *attr, const char *buf, size_t count)
1811{
1812        int ret;
1813
1814        ret = nvm_authenticate_sysfs(dev, buf, true);
1815        return ret ? ret : count;
1816}
1817static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);
1818
1819static ssize_t nvm_version_show(struct device *dev,
1820                                struct device_attribute *attr, char *buf)
1821{
1822        struct tb_switch *sw = tb_to_switch(dev);
1823        int ret;
1824
1825        if (!mutex_trylock(&sw->tb->lock))
1826                return restart_syscall();
1827
1828        if (sw->safe_mode)
1829                ret = -ENODATA;
1830        else if (!sw->nvm)
1831                ret = -EAGAIN;
1832        else
1833                ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
1834
1835        mutex_unlock(&sw->tb->lock);
1836
1837        return ret;
1838}
1839static DEVICE_ATTR_RO(nvm_version);
1840
1841static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
1842                           char *buf)
1843{
1844        struct tb_switch *sw = tb_to_switch(dev);
1845
1846        return sprintf(buf, "%#x\n", sw->vendor);
1847}
1848static DEVICE_ATTR_RO(vendor);
1849
1850static ssize_t
1851vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1852{
1853        struct tb_switch *sw = tb_to_switch(dev);
1854
1855        return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
1856}
1857static DEVICE_ATTR_RO(vendor_name);
1858
1859static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
1860                              char *buf)
1861{
1862        struct tb_switch *sw = tb_to_switch(dev);
1863
1864        return sprintf(buf, "%pUb\n", sw->uuid);
1865}
1866static DEVICE_ATTR_RO(unique_id);
1867
1868static struct attribute *switch_attrs[] = {
1869        &dev_attr_authorized.attr,
1870        &dev_attr_boot.attr,
1871        &dev_attr_device.attr,
1872        &dev_attr_device_name.attr,
1873        &dev_attr_generation.attr,
1874        &dev_attr_key.attr,
1875        &dev_attr_nvm_authenticate.attr,
1876        &dev_attr_nvm_authenticate_on_disconnect.attr,
1877        &dev_attr_nvm_version.attr,
1878        &dev_attr_rx_speed.attr,
1879        &dev_attr_rx_lanes.attr,
1880        &dev_attr_tx_speed.attr,
1881        &dev_attr_tx_lanes.attr,
1882        &dev_attr_vendor.attr,
1883        &dev_attr_vendor_name.attr,
1884        &dev_attr_unique_id.attr,
1885        NULL,
1886};
1887
1888static umode_t switch_attr_is_visible(struct kobject *kobj,
1889                                      struct attribute *attr, int n)
1890{
1891        struct device *dev = kobj_to_dev(kobj);
1892        struct tb_switch *sw = tb_to_switch(dev);
1893
1894        if (attr == &dev_attr_authorized.attr) {
1895                if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
1896                    sw->tb->security_level == TB_SECURITY_DPONLY)
1897                        return 0;
1898        } else if (attr == &dev_attr_device.attr) {
1899                if (!sw->device)
1900                        return 0;
1901        } else if (attr == &dev_attr_device_name.attr) {
1902                if (!sw->device_name)
1903                        return 0;
1904        } else if (attr == &dev_attr_vendor.attr)  {
1905                if (!sw->vendor)
1906                        return 0;
1907        } else if (attr == &dev_attr_vendor_name.attr)  {
1908                if (!sw->vendor_name)
1909                        return 0;
1910        } else if (attr == &dev_attr_key.attr) {
1911                if (tb_route(sw) &&
1912                    sw->tb->security_level == TB_SECURITY_SECURE &&
1913                    sw->security_level == TB_SECURITY_SECURE)
1914                        return attr->mode;
1915                return 0;
1916        } else if (attr == &dev_attr_rx_speed.attr ||
1917                   attr == &dev_attr_rx_lanes.attr ||
1918                   attr == &dev_attr_tx_speed.attr ||
1919                   attr == &dev_attr_tx_lanes.attr) {
1920                if (tb_route(sw))
1921                        return attr->mode;
1922                return 0;
1923        } else if (attr == &dev_attr_nvm_authenticate.attr) {
1924                if (nvm_upgradeable(sw))
1925                        return attr->mode;
1926                return 0;
1927        } else if (attr == &dev_attr_nvm_version.attr) {
1928                if (nvm_readable(sw))
1929                        return attr->mode;
1930                return 0;
1931        } else if (attr == &dev_attr_boot.attr) {
1932                if (tb_route(sw))
1933                        return attr->mode;
1934                return 0;
1935        } else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
1936                if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
1937                        return attr->mode;
1938                return 0;
1939        }
1940
1941        return sw->safe_mode ? 0 : attr->mode;
1942}
1943
1944static const struct attribute_group switch_group = {
1945        .is_visible = switch_attr_is_visible,
1946        .attrs = switch_attrs,
1947};
1948
1949static const struct attribute_group *switch_groups[] = {
1950        &switch_group,
1951        NULL,
1952};
1953
1954static void tb_switch_release(struct device *dev)
1955{
1956        struct tb_switch *sw = tb_to_switch(dev);
1957        struct tb_port *port;
1958
1959        dma_port_free(sw->dma_port);
1960
1961        tb_switch_for_each_port(sw, port) {
1962                ida_destroy(&port->in_hopids);
1963                ida_destroy(&port->out_hopids);
1964        }
1965
1966        kfree(sw->uuid);
1967        kfree(sw->device_name);
1968        kfree(sw->vendor_name);
1969        kfree(sw->ports);
1970        kfree(sw->drom);
1971        kfree(sw->key);
1972        kfree(sw);
1973}
1974
1975static int tb_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
1976{
1977        struct tb_switch *sw = tb_to_switch(dev);
1978        const char *type;
1979
1980        if (sw->config.thunderbolt_version == USB4_VERSION_1_0) {
1981                if (add_uevent_var(env, "USB4_VERSION=1.0"))
1982                        return -ENOMEM;
1983        }
1984
1985        if (!tb_route(sw)) {
1986                type = "host";
1987        } else {
1988                const struct tb_port *port;
1989                bool hub = false;
1990
1991                /* Device is a hub if it has any downstream ports */
1992                tb_switch_for_each_port(sw, port) {
1993                        if (!port->disabled && !tb_is_upstream_port(port) &&
1994                             tb_port_is_null(port)) {
1995                                hub = true;
1996                                break;
1997                        }
1998                }
1999
2000                type = hub ? "hub" : "device";
2001        }
2002
2003        if (add_uevent_var(env, "USB4_TYPE=%s", type))
2004                return -ENOMEM;
2005        return 0;
2006}
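
/*
 * Illustrative sketch (not part of the driver): the variables added in
 * tb_switch_uevent() end up in the device uevent file, so userspace can
 * classify a router without driver-specific knowledge. The path below is
 * hypothetical.
 */
#if 0 /* example only */
#include <stdio.h>
#include <string.h>

static void tb_example_print_type(void)
{
	FILE *f = fopen("/sys/bus/thunderbolt/devices/0-1/uevent", "r");
	char line[128];

	if (!f)
		return;
	/* Look for USB4_TYPE=host|hub|device */
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "USB4_TYPE=", 10))
			fputs(line + 10, stdout);
	}
	fclose(f);
}
#endif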
2007
2008/*
2009 * Currently we only need to provide the callbacks. Everything else is
2010 * handled in the connection manager.
2011 */
2012static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
2013{
2014        struct tb_switch *sw = tb_to_switch(dev);
2015        const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
2016
2017        if (cm_ops->runtime_suspend_switch)
2018                return cm_ops->runtime_suspend_switch(sw);
2019
2020        return 0;
2021}
2022
2023static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
2024{
2025        struct tb_switch *sw = tb_to_switch(dev);
2026        const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
2027
2028        if (cm_ops->runtime_resume_switch)
2029                return cm_ops->runtime_resume_switch(sw);
2030        return 0;
2031}
2032
2033static const struct dev_pm_ops tb_switch_pm_ops = {
2034        SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
2035                           NULL)
2036};
2037
2038struct device_type tb_switch_type = {
2039        .name = "thunderbolt_device",
2040        .release = tb_switch_release,
2041        .uevent = tb_switch_uevent,
2042        .pm = &tb_switch_pm_ops,
2043};
2044
2045static int tb_switch_get_generation(struct tb_switch *sw)
2046{
2047        switch (sw->config.device_id) {
2048        case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
2049        case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
2050        case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
2051        case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
2052        case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
2053        case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
2054        case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
2055        case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
2056                return 1;
2057
2058        case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
2059        case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
2060        case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
2061                return 2;
2062
2063        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
2064        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
2065        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
2066        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
2067        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
2068        case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
2069        case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
2070        case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
2071        case PCI_DEVICE_ID_INTEL_ICL_NHI0:
2072        case PCI_DEVICE_ID_INTEL_ICL_NHI1:
2073                return 3;
2074
2075        default:
2076                if (tb_switch_is_usb4(sw))
2077                        return 4;
2078
2079                /*
2080                 * For unknown switches assume generation 1 to be on
2081                 * the safe side.
2082                 */
2083                tb_sw_warn(sw, "unsupported switch device id %#x\n",
2084                           sw->config.device_id);
2085                return 1;
2086        }
2087}
2088
2089static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
2090{
2091        int max_depth;
2092
2093        if (tb_switch_is_usb4(sw) ||
2094            (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
2095                max_depth = USB4_SWITCH_MAX_DEPTH;
2096        else
2097                max_depth = TB_SWITCH_MAX_DEPTH;
2098
2099        return depth > max_depth;
2100}
2101
2102/**
2103 * tb_switch_alloc() - allocate a switch
2104 * @tb: Pointer to the owning domain
2105 * @parent: Parent device for this switch
2106 * @route: Route string for this switch
2107 *
2108 * Allocates and initializes a switch. Will not upload configuration to
2109 * the switch. For that you need to call tb_switch_configure()
2110 * separately. The returned switch should be released by calling
2111 * tb_switch_put().
2112 *
2113 * Return: Pointer to the allocated switch or ERR_PTR() in case of
2114 * failure.
2115 */
2116struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
2117                                  u64 route)
2118{
2119        struct tb_switch *sw;
2120        int upstream_port;
2121        int i, ret, depth;
2122
2123        /* Unlock the downstream port so we can access the switch below */
2124        if (route) {
2125                struct tb_switch *parent_sw = tb_to_switch(parent);
2126                struct tb_port *down;
2127
2128                down = tb_port_at(route, parent_sw);
2129                tb_port_unlock(down);
2130        }
2131
2132        depth = tb_route_length(route);
2133
2134        upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
2135        if (upstream_port < 0)
2136                return ERR_PTR(upstream_port);
2137
2138        sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2139        if (!sw)
2140                return ERR_PTR(-ENOMEM);
2141
2142        sw->tb = tb;
2143        ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
2144        if (ret)
2145                goto err_free_sw_ports;
2146
2147        sw->generation = tb_switch_get_generation(sw);
2148
2149        tb_dbg(tb, "current switch config:\n");
2150        tb_dump_switch(tb, sw);
2151
2152        /* configure switch */
2153        sw->config.upstream_port_number = upstream_port;
2154        sw->config.depth = depth;
2155        sw->config.route_hi = upper_32_bits(route);
2156        sw->config.route_lo = lower_32_bits(route);
2157        sw->config.enabled = 0;
2158
2159        /* Make sure we do not exceed the maximum topology limit */
2160        if (tb_switch_exceeds_max_depth(sw, depth)) {
2161                ret = -EADDRNOTAVAIL;
2162                goto err_free_sw_ports;
2163        }
2164
2165        /* initialize ports */
2166        sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
2167                                GFP_KERNEL);
2168        if (!sw->ports) {
2169                ret = -ENOMEM;
2170                goto err_free_sw_ports;
2171        }
2172
2173        for (i = 0; i <= sw->config.max_port_number; i++) {
2174                /* minimum setup for tb_find_cap and tb_drom_read to work */
2175                sw->ports[i].sw = sw;
2176                sw->ports[i].port = i;
2177
2178                /* Control port does not need HopID allocation */
2179                if (i) {
2180                        ida_init(&sw->ports[i].in_hopids);
2181                        ida_init(&sw->ports[i].out_hopids);
2182                }
2183        }
2184
2185        ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
2186        if (ret > 0)
2187                sw->cap_plug_events = ret;
2188
2189        ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
2190        if (ret > 0)
2191                sw->cap_lc = ret;
2192
2193        /* Root switch is always authorized */
2194        if (!route)
2195                sw->authorized = true;
2196
2197        device_initialize(&sw->dev);
2198        sw->dev.parent = parent;
2199        sw->dev.bus = &tb_bus_type;
2200        sw->dev.type = &tb_switch_type;
2201        sw->dev.groups = switch_groups;
2202        dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2203
2204        return sw;
2205
2206err_free_sw_ports:
2207        kfree(sw->ports);
2208        kfree(sw);
2209
2210        return ERR_PTR(ret);
2211}
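
/*
 * Illustrative sketch (not part of the driver): how a connection manager
 * typically pairs tb_switch_alloc() with tb_switch_configure() and
 * tb_switch_add(), following the kernel-doc above. The function and its
 * parameters are hypothetical.
 */
#if 0 /* example only */
static int tb_example_scan(struct tb *tb, struct tb_switch *parent_sw,
			   u64 route)
{
	struct tb_switch *sw;

	sw = tb_switch_alloc(tb, &parent_sw->dev, route);
	if (IS_ERR(sw))
		return PTR_ERR(sw);

	if (tb_switch_configure(sw) || tb_switch_add(sw)) {
		/* Drops the reference taken at allocation */
		tb_switch_put(sw);
		return -EIO;
	}
	return 0;
}
#endif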
2212
2213/**
2214 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
2215 * @tb: Pointer to the owning domain
2216 * @parent: Parent device for this switch
2217 * @route: Route string for this switch
2218 *
2219 * This creates a switch in safe mode. This means the switch pretty much
2220 * lacks all capabilities except the DMA configuration port until it is
2221 * flashed with valid NVM firmware.
2222 *
2223 * The returned switch must be released by calling tb_switch_put().
2224 *
2225 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
2226 */
2227struct tb_switch *
2228tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
2229{
2230        struct tb_switch *sw;
2231
2232        sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2233        if (!sw)
2234                return ERR_PTR(-ENOMEM);
2235
2236        sw->tb = tb;
2237        sw->config.depth = tb_route_length(route);
2238        sw->config.route_hi = upper_32_bits(route);
2239        sw->config.route_lo = lower_32_bits(route);
2240        sw->safe_mode = true;
2241
2242        device_initialize(&sw->dev);
2243        sw->dev.parent = parent;
2244        sw->dev.bus = &tb_bus_type;
2245        sw->dev.type = &tb_switch_type;
2246        sw->dev.groups = switch_groups;
2247        dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2248
2249        return sw;
2250}
2251
2252/**
2253 * tb_switch_configure() - Uploads configuration to the switch
2254 * @sw: Switch to configure
2255 *
2256 * Call this function before the switch is added to the system. It will
2257 * upload the configuration to the switch and make it available for the
2258 * connection manager to use. Can be called for the switch again after
2259 * resume from low power states to re-initialize it.
2260 *
2261 * Return: %0 in case of success and negative errno in case of failure
2262 */
2263int tb_switch_configure(struct tb_switch *sw)
2264{
2265        struct tb *tb = sw->tb;
2266        u64 route;
2267        int ret;
2268
2269        route = tb_route(sw);
2270
2271        tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
2272               sw->config.enabled ? "restoring" : "initializing", route,
2273               tb_route_length(route), sw->config.upstream_port_number);
2274
2275        sw->config.enabled = 1;
2276
2277        if (tb_switch_is_usb4(sw)) {
2278                /*
2279                 * For USB4 devices, we need to program the CM version
2280                 * accordingly so that it knows to expose all the
2281                 * additional capabilities.
2282                 */
2283                sw->config.cmuv = USB4_VERSION_1_0;
2284
2285                /* Enumerate the switch */
2286                ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2287                                  ROUTER_CS_1, 4);
2288                if (ret)
2289                        return ret;
2290
2291                ret = usb4_switch_setup(sw);
2292        } else {
2293                if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
2294                        tb_sw_warn(sw, "unknown switch vendor id %#x\n",
2295                                   sw->config.vendor_id);
2296
2297                if (!sw->cap_plug_events) {
2298                        tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
2299                        return -ENODEV;
2300                }
2301
2302                /* Enumerate the switch */
2303                ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2304                                  ROUTER_CS_1, 3);
2305        }
2306        if (ret)
2307                return ret;
2308
2309        return tb_plug_events_active(sw, true);
2310}
2311
2312static int tb_switch_set_uuid(struct tb_switch *sw)
2313{
2314        bool uid = false;
2315        u32 uuid[4];
2316        int ret;
2317
2318        if (sw->uuid)
2319                return 0;
2320
2321        if (tb_switch_is_usb4(sw)) {
2322                ret = usb4_switch_read_uid(sw, &sw->uid);
2323                if (ret)
2324                        return ret;
2325                uid = true;
2326        } else {
2327                /*
2328                 * The newer controllers include a fused UUID as part of
2329                 * the link controller specific registers.
2330                 */
2331                ret = tb_lc_read_uuid(sw, uuid);
2332                if (ret) {
2333                        if (ret != -EINVAL)
2334                                return ret;
2335                        uid = true;
2336                }
2337        }
2338
2339        if (uid) {
2340                /*
2341                 * ICM generates the UUID based on the UID and fills the
2342                 * upper two words with ones. This does not strictly follow
2343                 * the UUID format but we want to be compatible with it so
2344                 * we do the same here.
2345                 */
2346                uuid[0] = sw->uid & 0xffffffff;
2347                uuid[1] = (sw->uid >> 32) & 0xffffffff;
2348                uuid[2] = 0xffffffff;
2349                uuid[3] = 0xffffffff;
2350        }
2351
2352        sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
2353        if (!sw->uuid)
2354                return -ENOMEM;
2355        return 0;
2356}
2357
2358static int tb_switch_add_dma_port(struct tb_switch *sw)
2359{
2360        u32 status;
2361        int ret;
2362
2363        switch (sw->generation) {
2364        case 2:
2365                /* Only root switch can be upgraded */
2366                if (tb_route(sw))
2367                        return 0;
2368
2369                fallthrough;
2370        case 3:
2371        case 4:
2372                ret = tb_switch_set_uuid(sw);
2373                if (ret)
2374                        return ret;
2375                break;
2376
2377        default:
2378                /*
2379                 * DMA port is the only thing available when the switch
2380                 * is in safe mode.
2381                 */
2382                if (!sw->safe_mode)
2383                        return 0;
2384                break;
2385        }
2386
2387        if (sw->no_nvm_upgrade)
2388                return 0;
2389
2390        if (tb_switch_is_usb4(sw)) {
2391                ret = usb4_switch_nvm_authenticate_status(sw, &status);
2392                if (ret)
2393                        return ret;
2394
2395                if (status) {
2396                        tb_sw_info(sw, "switch flash authentication failed\n");
2397                        nvm_set_auth_status(sw, status);
2398                }
2399
2400                return 0;
2401        }
2402
2403        /* Root switch DMA port requires running firmware */
2404        if (!tb_route(sw) && !tb_switch_is_icm(sw))
2405                return 0;
2406
2407        sw->dma_port = dma_port_alloc(sw);
2408        if (!sw->dma_port)
2409                return 0;
2410
2411        /*
2412         * If there is a status already set then authentication failed
2413         * when dma_port_flash_update_auth() returned. Power cycling is
2414         * not needed (it was already done) so the only thing we do here
2415         * is unblock runtime PM of the root port.
2416         */
2417        nvm_get_auth_status(sw, &status);
2418        if (status) {
2419                if (!tb_route(sw))
2420                        nvm_authenticate_complete_dma_port(sw);
2421                return 0;
2422        }
2423
2424        /*
2425         * Check the status of the previous flash authentication. If
2426         * there is one we need to power cycle the switch in any case
2427         * to make it functional again.
2428         */
2429        ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
2430        if (ret <= 0)
2431                return ret;
2432
2433        /* Now we can allow the root port to suspend again */
2434        if (!tb_route(sw))
2435                nvm_authenticate_complete_dma_port(sw);
2436
2437        if (status) {
2438                tb_sw_info(sw, "switch flash authentication failed\n");
2439                nvm_set_auth_status(sw, status);
2440        }
2441
2442        tb_sw_info(sw, "power cycling the switch now\n");
2443        dma_port_power_cycle(sw->dma_port);
2444
2445        /*
2446         * We return an error here, which causes adding the switch to
2447         * fail. It should appear back after the power cycle is complete.
2448         */
2449        return -ESHUTDOWN;
2450}
2451
2452static void tb_switch_default_link_ports(struct tb_switch *sw)
2453{
2454        int i;
2455
2456        for (i = 1; i <= sw->config.max_port_number; i++) {
2457                struct tb_port *port = &sw->ports[i];
2458                struct tb_port *subordinate;
2459
2460                if (!tb_port_is_null(port))
2461                        continue;
2462
2463                /* Check for the subordinate port */
2464                if (i == sw->config.max_port_number ||
2465                    !tb_port_is_null(&sw->ports[i + 1]))
2466                        continue;
2467
2468                /* Link them unless this was already done (by the DROM) */
2469                subordinate = &sw->ports[i + 1];
2470                if (!port->dual_link_port && !subordinate->dual_link_port) {
2471                        port->link_nr = 0;
2472                        port->dual_link_port = subordinate;
2473                        subordinate->link_nr = 1;
2474                        subordinate->dual_link_port = port;
2475
2476                        tb_sw_dbg(sw, "linked ports %d <-> %d\n",
2477                                  port->port, subordinate->port);
2478                }
2479        }
2480}
2481
2482static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
2483{
2484        const struct tb_port *up = tb_upstream_port(sw);
2485
2486        if (!up->dual_link_port || !up->dual_link_port->remote)
2487                return false;
2488
2489        if (tb_switch_is_usb4(sw))
2490                return usb4_switch_lane_bonding_possible(sw);
2491        return tb_lc_lane_bonding_possible(sw);
2492}
2493
2494static int tb_switch_update_link_attributes(struct tb_switch *sw)
2495{
2496        struct tb_port *up;
2497        bool change = false;
2498        int ret;
2499
2500        if (!tb_route(sw) || tb_switch_is_icm(sw))
2501                return 0;
2502
2503        up = tb_upstream_port(sw);
2504
2505        ret = tb_port_get_link_speed(up);
2506        if (ret < 0)
2507                return ret;
2508        if (sw->link_speed != ret)
2509                change = true;
2510        sw->link_speed = ret;
2511
2512        ret = tb_port_get_link_width(up);
2513        if (ret < 0)
2514                return ret;
2515        if (sw->link_width != ret)
2516                change = true;
2517        sw->link_width = ret;
2518
2519        /* Notify userspace that there is a possible link attribute change */
2520        if (device_is_registered(&sw->dev) && change)
2521                kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
2522
2523        return 0;
2524}
2525
2526/**
2527 * tb_switch_lane_bonding_enable() - Enable lane bonding
2528 * @sw: Switch to enable lane bonding
2529 *
2530 * The connection manager can call this function to enable lane bonding
2531 * of a switch. If conditions are correct and both switches support the
2532 * feature, lanes are bonded. It is safe to call this for any switch.
2533 */
2534int tb_switch_lane_bonding_enable(struct tb_switch *sw)
2535{
2536        struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2537        struct tb_port *up, *down;
2538        u64 route = tb_route(sw);
2539        int ret;
2540
2541        if (!route)
2542                return 0;
2543
2544        if (!tb_switch_lane_bonding_possible(sw))
2545                return 0;
2546
2547        up = tb_upstream_port(sw);
2548        down = tb_port_at(route, parent);
2549
2550        if (!tb_port_is_width_supported(up, 2) ||
2551            !tb_port_is_width_supported(down, 2))
2552                return 0;
2553
2554        ret = tb_port_lane_bonding_enable(up);
2555        if (ret) {
2556                tb_port_warn(up, "failed to enable lane bonding\n");
2557                return ret;
2558        }
2559
2560        ret = tb_port_lane_bonding_enable(down);
2561        if (ret) {
2562                tb_port_warn(down, "failed to enable lane bonding\n");
2563                tb_port_lane_bonding_disable(up);
2564                return ret;
2565        }
2566
2567        ret = tb_port_wait_for_link_width(down, 2, 100);
2568        if (ret) {
2569                tb_port_warn(down, "timeout enabling lane bonding\n");
2570                return ret;
2571        }
2572
2573        tb_port_update_credits(down);
2574        tb_port_update_credits(up);
2575        tb_switch_update_link_attributes(sw);
2576
2577        tb_sw_dbg(sw, "lane bonding enabled\n");
2578        return ret;
2579}
2580
2581/**
2582 * tb_switch_lane_bonding_disable() - Disable lane bonding
2583 * @sw: Switch whose lane bonding to disable
2584 *
2585 * Disables lane bonding between @sw and its parent. This can be called
2586 * even if lanes were not bonded originally.
2587 */
2588void tb_switch_lane_bonding_disable(struct tb_switch *sw)
2589{
2590        struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2591        struct tb_port *up, *down;
2592
2593        if (!tb_route(sw))
2594                return;
2595
2596        up = tb_upstream_port(sw);
2597        if (!up->bonded)
2598                return;
2599
2600        down = tb_port_at(tb_route(sw), parent);
2601
2602        tb_port_lane_bonding_disable(up);
2603        tb_port_lane_bonding_disable(down);
2604
2605        /*
2606         * It is fine if we get other errors as the router might have
2607         * been unplugged.
2608         */
2609        if (tb_port_wait_for_link_width(down, 1, 100) == -ETIMEDOUT)
2610                tb_sw_warn(sw, "timeout disabling lane bonding\n");
2611
2612        tb_port_update_credits(down);
2613        tb_port_update_credits(up);
2614        tb_switch_update_link_attributes(sw);
2615
2616        tb_sw_dbg(sw, "lane bonding disabled\n");
2617}
2618
2619/**
2620 * tb_switch_configure_link() - Set link configured
2621 * @sw: Switch whose link is configured
2622 *
2623 * Sets the link upstream from @sw configured (from both ends) so that
2624 * it will not be disconnected when the domain exits sleep. Can be
2625 * called for any switch.
2626 *
2627 * It is recommended that this is called after lane bonding is enabled.
2628 *
2629 * Returns %0 on success and negative errno in case of error.
2630 */
2631int tb_switch_configure_link(struct tb_switch *sw)
2632{
2633        struct tb_port *up, *down;
2634        int ret;
2635
2636        if (!tb_route(sw) || tb_switch_is_icm(sw))
2637                return 0;
2638
2639        up = tb_upstream_port(sw);
2640        if (tb_switch_is_usb4(up->sw))
2641                ret = usb4_port_configure(up);
2642        else
2643                ret = tb_lc_configure_port(up);
2644        if (ret)
2645                return ret;
2646
2647        down = up->remote;
2648        if (tb_switch_is_usb4(down->sw))
2649                return usb4_port_configure(down);
2650        return tb_lc_configure_port(down);
2651}
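
/*
 * Illustrative sketch (not part of the driver): per the recommendation
 * in the kernel-doc above, a connection manager enables lane bonding
 * first and marks the link configured afterwards. Both calls are safe
 * for any switch; non-applicable cases return early.
 */
#if 0 /* example only */
static void tb_example_bring_up_link(struct tb_switch *sw)
{
	if (tb_switch_lane_bonding_enable(sw))
		tb_sw_warn(sw, "failed to enable lane bonding\n");
	if (tb_switch_configure_link(sw))
		tb_sw_warn(sw, "failed to configure link\n");
}
#endif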
2652
2653/**
2654 * tb_switch_unconfigure_link() - Unconfigure link
2655 * @sw: Switch whose link is unconfigured
2656 *
2657 * Sets the link unconfigured so that @sw will be disconnected if the
2658 * domain exits sleep.
2659 */
2660void tb_switch_unconfigure_link(struct tb_switch *sw)
2661{
2662        struct tb_port *up, *down;
2663
2664        if (sw->is_unplugged)
2665                return;
2666        if (!tb_route(sw) || tb_switch_is_icm(sw))
2667                return;
2668
2669        up = tb_upstream_port(sw);
2670        if (tb_switch_is_usb4(up->sw))
2671                usb4_port_unconfigure(up);
2672        else
2673                tb_lc_unconfigure_port(up);
2674
2675        down = up->remote;
2676        if (tb_switch_is_usb4(down->sw))
2677                usb4_port_unconfigure(down);
2678        else
2679                tb_lc_unconfigure_port(down);
2680}
2681
2682static void tb_switch_credits_init(struct tb_switch *sw)
2683{
2684        if (tb_switch_is_icm(sw))
2685                return;
2686        if (!tb_switch_is_usb4(sw))
2687                return;
2688        if (usb4_switch_credits_init(sw))
2689                tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
2690}
2691
2692/**
2693 * tb_switch_add() - Add a switch to the domain
2694 * @sw: Switch to add
2695 *
2696 * This is the last step in adding a switch to the domain. It reads
2697 * identification information from the DROM and initializes the ports
2698 * so that they can be used to connect other switches. The switch will
2699 * be exposed to userspace when this function successfully returns. To
2700 * remove and release the switch, call tb_switch_remove().
2701 *
2702 * Return: %0 in case of success and negative errno in case of failure
2703 */
2704int tb_switch_add(struct tb_switch *sw)
2705{
2706        int i, ret;
2707
2708        /*
2709         * Initialize the DMA control port now, before we read the DROM.
2710         * Recent host controllers have a more complete DROM in NVM that
2711         * includes vendor and model identification strings which we then
2712         * expose to userspace. The NVM can be accessed through the DMA
2713         * configuration based mailbox.
2714         */
2715        ret = tb_switch_add_dma_port(sw);
2716        if (ret) {
2717                dev_err(&sw->dev, "failed to add DMA port\n");
2718                return ret;
2719        }
2720
2721        if (!sw->safe_mode) {
2722                tb_switch_credits_init(sw);
2723
2724                /* read drom */
2725                ret = tb_drom_read(sw);
2726                if (ret) {
2727                        dev_err(&sw->dev, "reading DROM failed\n");
2728                        return ret;
2729                }
2730                tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
2731
2732                tb_check_quirks(sw);
2733
2734                ret = tb_switch_set_uuid(sw);
2735                if (ret) {
2736                        dev_err(&sw->dev, "failed to set UUID\n");
2737                        return ret;
2738                }
2739
2740                for (i = 0; i <= sw->config.max_port_number; i++) {
2741                        if (sw->ports[i].disabled) {
2742                                tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
2743                                continue;
2744                        }
2745                        ret = tb_init_port(&sw->ports[i]);
2746                        if (ret) {
2747                                dev_err(&sw->dev, "failed to initialize port %d\n", i);
2748                                return ret;
2749                        }
2750                }
2751
2752                tb_switch_default_link_ports(sw);
2753
2754                ret = tb_switch_update_link_attributes(sw);
2755                if (ret)
2756                        return ret;
2757
2758                ret = tb_switch_tmu_init(sw);
2759                if (ret)
2760                        return ret;
2761        }
2762
2763        ret = device_add(&sw->dev);
2764        if (ret) {
2765                dev_err(&sw->dev, "failed to add device: %d\n", ret);
2766                return ret;
2767        }
2768
2769        if (tb_route(sw)) {
2770                dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
2771                         sw->vendor, sw->device);
2772                if (sw->vendor_name && sw->device_name)
2773                        dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
2774                                 sw->device_name);
2775        }
2776
2777        ret = usb4_switch_add_ports(sw);
2778        if (ret) {
2779                dev_err(&sw->dev, "failed to add USB4 ports\n");
2780                goto err_del;
2781        }
2782
2783        ret = tb_switch_nvm_add(sw);
2784        if (ret) {
2785                dev_err(&sw->dev, "failed to add NVM devices\n");
2786                goto err_ports;
2787        }
2788
2789        /*
2790         * Thunderbolt routers do not generate wakeups themselves but
2791         * they forward wakeups from the tunneled protocols, so enable
2792         * wakeup here.
2793         */
2794        device_init_wakeup(&sw->dev, true);
2795
2796        pm_runtime_set_active(&sw->dev);
2797        if (sw->rpm) {
2798                pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
2799                pm_runtime_use_autosuspend(&sw->dev);
2800                pm_runtime_mark_last_busy(&sw->dev);
2801                pm_runtime_enable(&sw->dev);
2802                pm_request_autosuspend(&sw->dev);
2803        }
2804
2805        tb_switch_debugfs_init(sw);
2806        return 0;
2807
2808err_ports:
2809        usb4_switch_remove_ports(sw);
2810err_del:
2811        device_del(&sw->dev);
2812
2813        return ret;
2814}
2815
2816/**
2817 * tb_switch_remove() - Remove and release a switch
2818 * @sw: Switch to remove
2819 *
2820 * This will remove the switch from the domain and release it after the
2821 * last reference to it drops to zero. If there are switches connected
2822 * below this switch, they will be removed as well.
2823 */
2824void tb_switch_remove(struct tb_switch *sw)
2825{
2826        struct tb_port *port;
2827
2828        tb_switch_debugfs_remove(sw);
2829
2830        if (sw->rpm) {
2831                pm_runtime_get_sync(&sw->dev);
2832                pm_runtime_disable(&sw->dev);
2833        }
2834
2835        /* port 0 is the switch itself and never has a remote */
2836        tb_switch_for_each_port(sw, port) {
2837                if (tb_port_has_remote(port)) {
2838                        tb_switch_remove(port->remote->sw);
2839                        port->remote = NULL;
2840                } else if (port->xdomain) {
2841                        tb_xdomain_remove(port->xdomain);
2842                        port->xdomain = NULL;
2843                }
2844
2845                /* Remove any downstream retimers */
2846                tb_retimer_remove_all(port);
2847        }
2848
2849        if (!sw->is_unplugged)
2850                tb_plug_events_active(sw, false);
2851
2852        tb_switch_nvm_remove(sw);
2853        usb4_switch_remove_ports(sw);
2854
2855        if (tb_route(sw))
2856                dev_info(&sw->dev, "device disconnected\n");
2857        device_unregister(&sw->dev);
2858}
2859
2860/**
2861 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
2862 * @sw: Router to mark unplugged
2863 */
2864void tb_sw_set_unplugged(struct tb_switch *sw)
2865{
2866        struct tb_port *port;
2867
2868        if (sw == sw->tb->root_switch) {
2869                tb_sw_WARN(sw, "cannot unplug root switch\n");
2870                return;
2871        }
2872        if (sw->is_unplugged) {
2873                tb_sw_WARN(sw, "is_unplugged already set\n");
2874                return;
2875        }
2876        sw->is_unplugged = true;
2877        tb_switch_for_each_port(sw, port) {
2878                if (tb_port_has_remote(port))
2879                        tb_sw_set_unplugged(port->remote->sw);
2880                else if (port->xdomain)
2881                        port->xdomain->is_unplugged = true;
2882        }
2883}
2884
2885static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
2886{
2887        if (flags)
2888                tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
2889        else
2890                tb_sw_dbg(sw, "disabling wakeup\n");
2891
2892        if (tb_switch_is_usb4(sw))
2893                return usb4_switch_set_wake(sw, flags);
2894        return tb_lc_set_wake(sw, flags);
2895}
2896
2897int tb_switch_resume(struct tb_switch *sw)
2898{
2899        struct tb_port *port;
2900        int err;
2901
2902        tb_sw_dbg(sw, "resuming switch\n");
2903
2904        /*
2905         * Check the UID of the connected switches except for the root
2906         * switch, which we assume cannot be removed.
2907         */
2908        if (tb_route(sw)) {
2909                u64 uid;
2910
2911                /*
2912                 * Check first that we can still read the switch config
2913                 * space. It may be that there is now another domain
2914                 * connected.
2915                 */
2916                err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
2917                if (err < 0) {
2918                        tb_sw_info(sw, "switch not present anymore\n");
2919                        return err;
2920                }
2921
2922                if (tb_switch_is_usb4(sw))
2923                        err = usb4_switch_read_uid(sw, &uid);
2924                else
2925                        err = tb_drom_read_uid_only(sw, &uid);
2926                if (err) {
2927                        tb_sw_warn(sw, "uid read failed\n");
2928                        return err;
2929                }
2930                if (sw->uid != uid) {
2931                        tb_sw_info(sw,
2932                                "changed while suspended (uid %#llx -> %#llx)\n",
2933                                sw->uid, uid);
2934                        return -ENODEV;
2935                }
2936        }
2937
2938        err = tb_switch_configure(sw);
2939        if (err)
2940                return err;
2941
2942        /* Disable wakes */
2943        tb_switch_set_wake(sw, 0);
2944
2945        err = tb_switch_tmu_init(sw);
2946        if (err)
2947                return err;
2948
2949        /* check for surviving downstream switches */
2950        tb_switch_for_each_port(sw, port) {
2951                if (!tb_port_is_null(port))
2952                        continue;
2953
2954                if (!tb_port_resume(port))
2955                        continue;
2956
2957                if (tb_wait_for_port(port, true) <= 0) {
2958                        tb_port_warn(port,
2959                                     "lost during suspend, disconnecting\n");
2960                        if (tb_port_has_remote(port))
2961                                tb_sw_set_unplugged(port->remote->sw);
2962                        else if (port->xdomain)
2963                                port->xdomain->is_unplugged = true;
2964                } else {
2965                        /*
2966                         * Always unlock the port so the downstream
2967                         * switch/domain is accessible.
2968                         */
2969                        if (tb_port_unlock(port))
2970                                tb_port_warn(port, "failed to unlock port\n");
2971                        if (port->remote && tb_switch_resume(port->remote->sw)) {
2972                                tb_port_warn(port,
2973                                             "lost during suspend, disconnecting\n");
2974                                tb_sw_set_unplugged(port->remote->sw);
2975                        }
2976                }
2977        }
2978        return 0;
2979}
2980
2981/**
2982 * tb_switch_suspend() - Put a switch to sleep
2983 * @sw: Switch to suspend
2984 * @runtime: Is this runtime suspend or system sleep
2985 *
2986 * Suspends the router and all its children. Enables wakes according to
2987 * the value of @runtime and then sets the sleep bit for the router. If
2988 * @sw is the host router, the domain is ready to go to sleep once this
2989 * function returns.
2990 */
2991void tb_switch_suspend(struct tb_switch *sw, bool runtime)
2992{
2993        unsigned int flags = 0;
2994        struct tb_port *port;
2995        int err;
2996
2997        tb_sw_dbg(sw, "suspending switch\n");
2998
2999        err = tb_plug_events_active(sw, false);
3000        if (err)
3001                return;
3002
3003        tb_switch_for_each_port(sw, port) {
3004                if (tb_port_has_remote(port))
3005                        tb_switch_suspend(port->remote->sw, runtime);
3006        }
3007
3008        if (runtime) {
3009                /* Trigger wake when something is plugged in/out */
3010                flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
3011                flags |= TB_WAKE_ON_USB4;
3012                flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
3013        } else if (device_may_wakeup(&sw->dev)) {
3014                flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
3015        }
3016
3017        tb_switch_set_wake(sw, flags);
3018
3019        if (tb_switch_is_usb4(sw))
3020                usb4_switch_set_sleep(sw);
3021        else
3022                tb_lc_set_sleep(sw);
3023}
3024
3025/**
3026 * tb_switch_query_dp_resource() - Query availability of DP resource
3027 * @sw: Switch whose DP resource is queried
3028 * @in: DP IN port
3029 *
3030 * Queries availability of a DP resource for DP tunneling using switch
3031 * specific means. Returns %true if the resource is available.
3032 */
3033bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
3034{
3035        if (tb_switch_is_usb4(sw))
3036                return usb4_switch_query_dp_resource(sw, in);
3037        return tb_lc_dp_sink_query(sw, in);
3038}
3039
3040/**
3041 * tb_switch_alloc_dp_resource() - Allocate available DP resource
3042 * @sw: Switch whose DP resource is allocated
3043 * @in: DP IN port
3044 *
3045 * Allocates a DP resource for DP tunneling. The resource must be
3046 * available for this to succeed (see tb_switch_query_dp_resource()).
3047 * Returns %0 on success and negative errno otherwise.
3048 */
3049int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
3050{
3051        if (tb_switch_is_usb4(sw))
3052                return usb4_switch_alloc_dp_resource(sw, in);
3053        return tb_lc_dp_sink_alloc(sw, in);
3054}
3055
3056/**
3057 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
3058 * @sw: Switch whose DP resource is de-allocated
3059 * @in: DP IN port
3060 *
3061 * De-allocates a DP resource that was previously allocated for DP
3062 * tunneling.
3063 */
3064void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
3065{
3066        int ret;
3067
3068        if (tb_switch_is_usb4(sw))
3069                ret = usb4_switch_dealloc_dp_resource(sw, in);
3070        else
3071                ret = tb_lc_dp_sink_dealloc(sw, in);
3072
3073        if (ret)
3074                tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
3075                           in->port);
3076}
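
/*
 * Illustrative sketch (not part of the driver): the expected lifecycle
 * of a DP resource around DP tunnel setup and teardown, combining the
 * three helpers above. The function is hypothetical.
 */
#if 0 /* example only */
static int tb_example_dp_tunnel(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (!tb_switch_query_dp_resource(sw, in))
		return -EBUSY;

	ret = tb_switch_alloc_dp_resource(sw, in);
	if (ret)
		return ret;

	/* ... establish the DP tunnel; when it is torn down ... */

	tb_switch_dealloc_dp_resource(sw, in);
	return 0;
}
#endif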
3077
3078struct tb_sw_lookup {
3079        struct tb *tb;
3080        u8 link;
3081        u8 depth;
3082        const uuid_t *uuid;
3083        u64 route;
3084};
3085
3086static int tb_switch_match(struct device *dev, const void *data)
3087{
3088        struct tb_switch *sw = tb_to_switch(dev);
3089        const struct tb_sw_lookup *lookup = data;
3090
3091        if (!sw)
3092                return 0;
3093        if (sw->tb != lookup->tb)
3094                return 0;
3095
3096        if (lookup->uuid)
3097                return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
3098
3099        if (lookup->route) {
3100                return sw->config.route_lo == lower_32_bits(lookup->route) &&
3101                       sw->config.route_hi == upper_32_bits(lookup->route);
3102        }
3103
3104        /* Root switch is matched only by depth */
3105        if (!lookup->depth)
3106                return !sw->depth;
3107
3108        return sw->link == lookup->link && sw->depth == lookup->depth;
3109}
3110
3111/**
3112 * tb_switch_find_by_link_depth() - Find switch by link and depth
3113 * @tb: Domain the switch belongs to
3114 * @link: Link number the switch is connected to
3115 * @depth: Depth of the switch in the link
3116 *
3117 * Returned switch has reference count increased so the caller needs to
3118 * call tb_switch_put() when done with the switch.
3119 */
3120struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
3121{
3122        struct tb_sw_lookup lookup;
3123        struct device *dev;
3124
3125        memset(&lookup, 0, sizeof(lookup));
3126        lookup.tb = tb;
3127        lookup.link = link;
3128        lookup.depth = depth;
3129
3130        dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3131        if (dev)
3132                return tb_to_switch(dev);
3133
3134        return NULL;
3135}
3136
3137/**
3138 * tb_switch_find_by_uuid() - Find switch by UUID
3139 * @tb: Domain the switch belongs to
3140 * @uuid: UUID to look for
3141 *
3142 * Returned switch has reference count increased so the caller needs to
3143 * call tb_switch_put() when done with the switch.
3144 */
3145struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
3146{
3147        struct tb_sw_lookup lookup;
3148        struct device *dev;
3149
3150        memset(&lookup, 0, sizeof(lookup));
3151        lookup.tb = tb;
3152        lookup.uuid = uuid;
3153
3154        dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3155        if (dev)
3156                return tb_to_switch(dev);
3157
3158        return NULL;
3159}
3160
3161/**
3162 * tb_switch_find_by_route() - Find switch by route string
3163 * @tb: Domain the switch belongs to
3164 * @route: Route string to look for
3165 *
3166 * Returned switch has reference count increased so the caller needs to
3167 * call tb_switch_put() when done with the switch.
3168 */
3169struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
3170{
3171        struct tb_sw_lookup lookup;
3172        struct device *dev;
3173
3174        if (!route)
3175                return tb_switch_get(tb->root_switch);
3176
3177        memset(&lookup, 0, sizeof(lookup));
3178        lookup.tb = tb;
3179        lookup.route = route;
3180
3181        dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3182        if (dev)
3183                return tb_to_switch(dev);
3184
3185        return NULL;
3186}
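
/*
 * Illustrative sketch (not part of the driver): as the kernel-doc above
 * notes, all three lookup helpers return a referenced switch that the
 * caller must release with tb_switch_put(). The function is hypothetical.
 */
#if 0 /* example only */
static void tb_example_lookup(struct tb *tb, u64 route)
{
	struct tb_switch *sw = tb_switch_find_by_route(tb, route);

	if (sw) {
		/* ... use sw ... */
		tb_switch_put(sw);
	}
}
#endif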
3187
3188/**
3189 * tb_switch_find_port() - return the first port of @type on @sw or NULL
3190 * @sw: Switch to find the port from
3191 * @type: Port type to look for
3192 */
3193struct tb_port *tb_switch_find_port(struct tb_switch *sw,
3194                                    enum tb_port_type type)
3195{
3196        struct tb_port *port;
3197
3198        tb_switch_for_each_port(sw, port) {
3199                if (port->config.type == type)
3200                        return port;
3201        }
3202
3203        return NULL;
3204}
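
/*
 * Illustrative sketch (not part of the driver): finding the first port
 * of a given type, here assuming the TB_TYPE_PCIE_DOWN adapter type used
 * when a PCIe tunnel is established. The function is hypothetical.
 */
#if 0 /* example only */
static void tb_example_find_pcie_down(struct tb_switch *sw)
{
	struct tb_port *down = tb_switch_find_port(sw, TB_TYPE_PCIE_DOWN);

	if (down)
		tb_port_dbg(down, "first PCIe downstream adapter\n");
}
#endif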
3205