   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Thunderbolt driver - switch/port utility functions
   4 *
   5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
   6 * Copyright (C) 2018, Intel Corporation
   7 */
   8
   9#include <linux/delay.h>
  10#include <linux/idr.h>
  11#include <linux/nvmem-provider.h>
  12#include <linux/pm_runtime.h>
  13#include <linux/sched/signal.h>
  14#include <linux/sizes.h>
  15#include <linux/slab.h>
  16
  17#include "tb.h"
  18
  19/* Switch NVM support */
  20
  21#define NVM_CSS                 0x10
  22
  23struct nvm_auth_status {
  24        struct list_head list;
  25        uuid_t uuid;
  26        u32 status;
  27};
  28
  29/*
   30 * Hold NVM authentication failure status per switch. This information
  31 * needs to stay around even when the switch gets power cycled so we
  32 * keep it separately.
  33 */
  34static LIST_HEAD(nvm_auth_status_cache);
  35static DEFINE_MUTEX(nvm_auth_status_lock);
  36
  37static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
  38{
  39        struct nvm_auth_status *st;
  40
  41        list_for_each_entry(st, &nvm_auth_status_cache, list) {
  42                if (uuid_equal(&st->uuid, sw->uuid))
  43                        return st;
  44        }
  45
  46        return NULL;
  47}
  48
  49static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
  50{
  51        struct nvm_auth_status *st;
  52
  53        mutex_lock(&nvm_auth_status_lock);
  54        st = __nvm_get_auth_status(sw);
  55        mutex_unlock(&nvm_auth_status_lock);
  56
  57        *status = st ? st->status : 0;
  58}
  59
  60static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
  61{
  62        struct nvm_auth_status *st;
  63
  64        if (WARN_ON(!sw->uuid))
  65                return;
  66
  67        mutex_lock(&nvm_auth_status_lock);
  68        st = __nvm_get_auth_status(sw);
  69
  70        if (!st) {
  71                st = kzalloc(sizeof(*st), GFP_KERNEL);
  72                if (!st)
  73                        goto unlock;
  74
  75                memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
  76                INIT_LIST_HEAD(&st->list);
  77                list_add_tail(&st->list, &nvm_auth_status_cache);
  78        }
  79
  80        st->status = status;
  81unlock:
  82        mutex_unlock(&nvm_auth_status_lock);
  83}
  84
  85static void nvm_clear_auth_status(const struct tb_switch *sw)
  86{
  87        struct nvm_auth_status *st;
  88
  89        mutex_lock(&nvm_auth_status_lock);
  90        st = __nvm_get_auth_status(sw);
  91        if (st) {
  92                list_del(&st->list);
  93                kfree(st);
  94        }
  95        mutex_unlock(&nvm_auth_status_lock);
  96}
  97
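     /*
      * Example (illustrative sketch, not part of the driver): the three
      * helpers above form a small UUID-keyed cache API. The
      * authentication flows below use it roughly like this, clearing any
      * stale status first and recording a new failure afterwards:
      *
      *    u32 status;
      *
      *    nvm_clear_auth_status(sw);
      *    if (authenticate(sw, &status) < 0 && status)
      *            nvm_set_auth_status(sw, status);
      *
      * where authenticate() stands in for the real flows below and
      * nvm_authenticate_show() later reads the value back with
      * nvm_get_auth_status().
      */
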
  98static int nvm_validate_and_write(struct tb_switch *sw)
  99{
 100        unsigned int image_size, hdr_size;
 101        const u8 *buf = sw->nvm->buf;
 102        u16 ds_size;
 103        int ret;
 104
 105        if (!buf)
 106                return -EINVAL;
 107
 108        image_size = sw->nvm->buf_data_size;
 109        if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
 110                return -EINVAL;
 111
 112        /*
 113         * FARB pointer must point inside the image and must at least
 114         * contain parts of the digital section we will be reading here.
 115         */
 116        hdr_size = (*(u32 *)buf) & 0xffffff;
 117        if (hdr_size + NVM_DEVID + 2 >= image_size)
 118                return -EINVAL;
 119
 120        /* Digital section start should be aligned to 4k page */
 121        if (!IS_ALIGNED(hdr_size, SZ_4K))
 122                return -EINVAL;
 123
 124        /*
 125         * Read digital section size and check that it also fits inside
 126         * the image.
 127         */
 128        ds_size = *(u16 *)(buf + hdr_size);
 129        if (ds_size >= image_size)
 130                return -EINVAL;
 131
 132        if (!sw->safe_mode) {
 133                u16 device_id;
 134
 135                /*
 136                 * Make sure the device ID in the image matches the one
 137                 * we read from the switch config space.
 138                 */
 139                device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
 140                if (device_id != sw->config.device_id)
 141                        return -EINVAL;
 142
 143                if (sw->generation < 3) {
 144                        /* Write CSS headers first */
 145                        ret = dma_port_flash_write(sw->dma_port,
 146                                DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
 147                                DMA_PORT_CSS_MAX_SIZE);
 148                        if (ret)
 149                                return ret;
 150                }
 151
 152                /* Skip headers in the image */
 153                buf += hdr_size;
 154                image_size -= hdr_size;
 155        }
 156
 157        if (tb_switch_is_usb4(sw))
 158                ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
 159        else
 160                ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
 161        if (!ret)
 162                sw->nvm->flushed = true;
 163        return ret;
 164}
 165
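     /*
      * Worked example of the layout checks above (values assumed for
      * illustration): if the first dword of an image reads 0x00004000,
      * hdr_size is 0x4000 (16 KiB, which is 4 KiB aligned). The digital
      * section size is then the 16-bit word at offset 0x4000 and the
      * device ID compared against the config space sits at offset
      * 0x4000 + NVM_DEVID.
      */
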
 166static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
 167{
 168        int ret = 0;
 169
 170        /*
 171         * Root switch NVM upgrade requires that we disconnect the
 172         * existing paths first (in case it is not in safe mode
 173         * already).
 174         */
 175        if (!sw->safe_mode) {
 176                u32 status;
 177
 178                ret = tb_domain_disconnect_all_paths(sw->tb);
 179                if (ret)
 180                        return ret;
 181                /*
 182                 * The host controller goes away pretty soon after this if
  183                 * everything goes well, so a timeout is expected.
 184                 */
 185                ret = dma_port_flash_update_auth(sw->dma_port);
 186                if (!ret || ret == -ETIMEDOUT)
 187                        return 0;
 188
 189                /*
 190                 * Any error from update auth operation requires power
 191                 * cycling of the host router.
 192                 */
 193                tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
 194                if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
 195                        nvm_set_auth_status(sw, status);
 196        }
 197
 198        /*
 199         * From safe mode we can get out by just power cycling the
 200         * switch.
 201         */
 202        dma_port_power_cycle(sw->dma_port);
 203        return ret;
 204}
 205
 206static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
 207{
 208        int ret, retries = 10;
 209
 210        ret = dma_port_flash_update_auth(sw->dma_port);
 211        switch (ret) {
 212        case 0:
 213        case -ETIMEDOUT:
 214        case -EACCES:
 215        case -EINVAL:
 216                /* Power cycle is required */
 217                break;
 218        default:
 219                return ret;
 220        }
 221
 222        /*
 223         * Poll here for the authentication status. It takes some time
  224         * for the device to respond (we get a timeout for a while). Once
  225         * we get a response the device needs to be power cycled in order
  226         * for the new NVM to be taken into use.
 227         */
 228        do {
 229                u32 status;
 230
 231                ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
 232                if (ret < 0 && ret != -ETIMEDOUT)
 233                        return ret;
 234                if (ret > 0) {
 235                        if (status) {
 236                                tb_sw_warn(sw, "failed to authenticate NVM\n");
 237                                nvm_set_auth_status(sw, status);
 238                        }
 239
 240                        tb_sw_info(sw, "power cycling the switch now\n");
 241                        dma_port_power_cycle(sw->dma_port);
 242                        return 0;
 243                }
 244
 245                msleep(500);
 246        } while (--retries);
 247
 248        return -ETIMEDOUT;
 249}
 250
 251static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
 252{
 253        struct pci_dev *root_port;
 254
 255        /*
  256         * During host router NVM upgrade we should not allow the root port to
  257         * go into D3cold because some root ports cannot trigger PME
  258         * themselves. To be on the safe side keep the root port in D0 during
 259         * the whole upgrade process.
 260         */
 261        root_port = pcie_find_root_port(sw->tb->nhi->pdev);
 262        if (root_port)
 263                pm_runtime_get_noresume(&root_port->dev);
 264}
 265
 266static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
 267{
 268        struct pci_dev *root_port;
 269
 270        root_port = pcie_find_root_port(sw->tb->nhi->pdev);
 271        if (root_port)
 272                pm_runtime_put(&root_port->dev);
 273}
 274
 275static inline bool nvm_readable(struct tb_switch *sw)
 276{
 277        if (tb_switch_is_usb4(sw)) {
 278                /*
 279                 * USB4 devices must support NVM operations but it is
 280                 * optional for hosts. Therefore we query the NVM sector
 281                 * size here and if it is supported assume NVM
  282                  * size here and, if it is supported, assume NVM
 283                 */
 284                return usb4_switch_nvm_sector_size(sw) > 0;
 285        }
 286
 287        /* Thunderbolt 2 and 3 devices support NVM through DMA port */
 288        return !!sw->dma_port;
 289}
 290
 291static inline bool nvm_upgradeable(struct tb_switch *sw)
 292{
 293        if (sw->no_nvm_upgrade)
 294                return false;
 295        return nvm_readable(sw);
 296}
 297
 298static inline int nvm_read(struct tb_switch *sw, unsigned int address,
 299                           void *buf, size_t size)
 300{
 301        if (tb_switch_is_usb4(sw))
 302                return usb4_switch_nvm_read(sw, address, buf, size);
 303        return dma_port_flash_read(sw->dma_port, address, buf, size);
 304}
 305
 306static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
 307{
 308        int ret;
 309
 310        if (tb_switch_is_usb4(sw)) {
 311                if (auth_only) {
 312                        ret = usb4_switch_nvm_set_offset(sw, 0);
 313                        if (ret)
 314                                return ret;
 315                }
 316                sw->nvm->authenticating = true;
 317                return usb4_switch_nvm_authenticate(sw);
 318        } else if (auth_only) {
 319                return -EOPNOTSUPP;
 320        }
 321
 322        sw->nvm->authenticating = true;
 323        if (!tb_route(sw)) {
 324                nvm_authenticate_start_dma_port(sw);
 325                ret = nvm_authenticate_host_dma_port(sw);
 326        } else {
 327                ret = nvm_authenticate_device_dma_port(sw);
 328        }
 329
 330        return ret;
 331}
 332
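     /*
      * Example (sketch): the sysfs code later in this file drives a full
      * upgrade roughly as
      *
      *    ret = nvm_validate_and_write(sw);
      *    if (!ret)
      *            ret = nvm_authenticate(sw, false);
      *
      * whereas nvm_authenticate(sw, true) skips flashing and only
      * re-authenticates the already written image (USB4 routers only).
      */
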
 333static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
 334                              size_t bytes)
 335{
 336        struct tb_nvm *nvm = priv;
 337        struct tb_switch *sw = tb_to_switch(nvm->dev);
 338        int ret;
 339
 340        pm_runtime_get_sync(&sw->dev);
 341
 342        if (!mutex_trylock(&sw->tb->lock)) {
 343                ret = restart_syscall();
 344                goto out;
 345        }
 346
 347        ret = nvm_read(sw, offset, val, bytes);
 348        mutex_unlock(&sw->tb->lock);
 349
 350out:
 351        pm_runtime_mark_last_busy(&sw->dev);
 352        pm_runtime_put_autosuspend(&sw->dev);
 353
 354        return ret;
 355}
 356
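     /*
      * Note on the locking pattern above (also used in the write
      * callback below and in several sysfs callbacks): mutex_trylock() +
      * restart_syscall() makes a contended domain lock restart the
      * syscall instead of blocking here with the runtime PM reference
      * held.
      */
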
 357static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
 358                               size_t bytes)
 359{
 360        struct tb_nvm *nvm = priv;
 361        struct tb_switch *sw = tb_to_switch(nvm->dev);
 362        int ret;
 363
 364        if (!mutex_trylock(&sw->tb->lock))
 365                return restart_syscall();
 366
 367        /*
 368         * Since writing the NVM image might require some special steps,
 369         * for example when CSS headers are written, we cache the image
 370         * locally here and handle the special cases when the user asks
 371         * us to authenticate the image.
 372         */
 373        ret = tb_nvm_write_buf(nvm, offset, val, bytes);
 374        mutex_unlock(&sw->tb->lock);
 375
 376        return ret;
 377}
 378
 379static int tb_switch_nvm_add(struct tb_switch *sw)
 380{
 381        struct tb_nvm *nvm;
 382        u32 val;
 383        int ret;
 384
 385        if (!nvm_readable(sw))
 386                return 0;
 387
 388        /*
 389         * The NVM format of non-Intel hardware is not known so
  390         * we currently restrict NVM upgrade to Intel hardware. We may
 391         * relax this in the future when we learn other NVM formats.
 392         */
 393        if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
 394            sw->config.vendor_id != 0x8087) {
 395                dev_info(&sw->dev,
 396                         "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
 397                         sw->config.vendor_id);
 398                return 0;
 399        }
 400
 401        nvm = tb_nvm_alloc(&sw->dev);
 402        if (IS_ERR(nvm))
 403                return PTR_ERR(nvm);
 404
 405        /*
 406         * If the switch is in safe-mode the only accessible portion of
 407         * the NVM is the non-active one where userspace is expected to
 408         * write new functional NVM.
 409         */
 410        if (!sw->safe_mode) {
 411                u32 nvm_size, hdr_size;
 412
 413                ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
 414                if (ret)
 415                        goto err_nvm;
 416
 417                hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
 418                nvm_size = (SZ_1M << (val & 7)) / 8;
 419                nvm_size = (nvm_size - hdr_size) / 2;
 420
 421                ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
 422                if (ret)
 423                        goto err_nvm;
 424
 425                nvm->major = val >> 16;
 426                nvm->minor = val >> 8;
 427
 428                ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
 429                if (ret)
 430                        goto err_nvm;
 431        }
 432
 433        if (!sw->no_nvm_upgrade) {
 434                ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
 435                                            tb_switch_nvm_write);
 436                if (ret)
 437                        goto err_nvm;
 438        }
 439
 440        sw->nvm = nvm;
 441        return 0;
 442
 443err_nvm:
 444        tb_nvm_free(nvm);
 445        return ret;
 446}
 447
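     /*
      * Worked example of the size math above (field value assumed): if
      * the flash size field reads 2, the raw NVM size is
      * (SZ_1M << 2) / 8 = 512 KiB. With the generation 3+ header of
      * SZ_16K that leaves (512 KiB - 16 KiB) / 2 = 248 KiB for each of
      * the active and non-active portions.
      */
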
 448static void tb_switch_nvm_remove(struct tb_switch *sw)
 449{
 450        struct tb_nvm *nvm;
 451
 452        nvm = sw->nvm;
 453        sw->nvm = NULL;
 454
 455        if (!nvm)
 456                return;
 457
 458        /* Remove authentication status in case the switch is unplugged */
 459        if (!nvm->authenticating)
 460                nvm_clear_auth_status(sw);
 461
 462        tb_nvm_free(nvm);
 463}
 464
 465/* port utility functions */
 466
 467static const char *tb_port_type(const struct tb_regs_port_header *port)
 468{
 469        switch (port->type >> 16) {
 470        case 0:
 471                switch ((u8) port->type) {
 472                case 0:
 473                        return "Inactive";
 474                case 1:
 475                        return "Port";
 476                case 2:
 477                        return "NHI";
 478                default:
 479                        return "unknown";
 480                }
 481        case 0x2:
 482                return "Ethernet";
 483        case 0x8:
 484                return "SATA";
 485        case 0xe:
 486                return "DP/HDMI";
 487        case 0x10:
 488                return "PCIe";
 489        case 0x20:
 490                return "USB";
 491        default:
 492                return "unknown";
 493        }
 494}
 495
 496static void tb_dump_port(struct tb *tb, const struct tb_port *port)
 497{
 498        const struct tb_regs_port_header *regs = &port->config;
 499
 500        tb_dbg(tb,
 501               " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
 502               regs->port_number, regs->vendor_id, regs->device_id,
 503               regs->revision, regs->thunderbolt_version, tb_port_type(regs),
 504               regs->type);
 505        tb_dbg(tb, "  Max hop id (in/out): %d/%d\n",
 506               regs->max_in_hop_id, regs->max_out_hop_id);
 507        tb_dbg(tb, "  Max counters: %d\n", regs->max_counters);
 508        tb_dbg(tb, "  NFC Credits: %#x\n", regs->nfc_credits);
 509        tb_dbg(tb, "  Credits (total/control): %u/%u\n", port->total_credits,
 510               port->ctl_credits);
 511}
 512
 513/**
 514 * tb_port_state() - get connectedness state of a port
 515 * @port: the port to check
 516 *
 517 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 518 *
 519 * Return: Returns an enum tb_port_state on success or an error code on failure.
 520 */
 521int tb_port_state(struct tb_port *port)
 522{
 523        struct tb_cap_phy phy;
 524        int res;
 525        if (port->cap_phy == 0) {
 526                tb_port_WARN(port, "does not have a PHY\n");
 527                return -EINVAL;
 528        }
 529        res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
 530        if (res)
 531                return res;
 532        return phy.state;
 533}
 534
 535/**
 536 * tb_wait_for_port() - wait for a port to become ready
 537 * @port: Port to wait
 538 * @wait_if_unplugged: Wait also when port is unplugged
 539 *
 540 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 541 * wait_if_unplugged is set then we also wait if the port is in state
 542 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 543 * switch resume). Otherwise we only wait if a device is registered but the link
 544 * has not yet been established.
 545 *
 546 * Return: Returns an error code on failure. Returns 0 if the port is not
 547 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 548 * if the port is connected and in state TB_PORT_UP.
 549 */
 550int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
 551{
 552        int retries = 10;
 553        int state;
 554        if (!port->cap_phy) {
 555                tb_port_WARN(port, "does not have PHY\n");
 556                return -EINVAL;
 557        }
 558        if (tb_is_upstream_port(port)) {
 559                tb_port_WARN(port, "is the upstream port\n");
 560                return -EINVAL;
 561        }
 562
 563        while (retries--) {
 564                state = tb_port_state(port);
 565                if (state < 0)
 566                        return state;
 567                if (state == TB_PORT_DISABLED) {
 568                        tb_port_dbg(port, "is disabled (state: 0)\n");
 569                        return 0;
 570                }
 571                if (state == TB_PORT_UNPLUGGED) {
 572                        if (wait_if_unplugged) {
 573                                /* used during resume */
 574                                tb_port_dbg(port,
 575                                            "is unplugged (state: 7), retrying...\n");
 576                                msleep(100);
 577                                continue;
 578                        }
 579                        tb_port_dbg(port, "is unplugged (state: 7)\n");
 580                        return 0;
 581                }
 582                if (state == TB_PORT_UP) {
 583                        tb_port_dbg(port, "is connected, link is up (state: 2)\n");
 584                        return 1;
 585                }
 586
 587                /*
 588                 * After plug-in the state is TB_PORT_CONNECTING. Give it some
 589                 * time.
 590                 */
 591                tb_port_dbg(port,
 592                            "is connected, link is not up (state: %d), retrying...\n",
 593                            state);
 594                msleep(100);
 595        }
 596        tb_port_warn(port,
 597                     "failed to reach state TB_PORT_UP. Ignoring port...\n");
 598        return 0;
 599}
 600
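     /*
      * Example (sketch): a topology scan would typically gate on the
      * return value, treating 0 as "nothing usable connected":
      *
      *    ret = tb_wait_for_port(port, false);
      *    if (ret <= 0)
      *            return ret;
      *
      * and only probe what is behind the port when 1 was returned.
      */
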
 601/**
 602 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 603 * @port: Port to add/remove NFC credits
 604 * @credits: Credits to add/remove
 605 *
 606 * Change the number of NFC credits allocated to @port by @credits. To remove
 607 * NFC credits pass a negative amount of credits.
 608 *
 609 * Return: Returns 0 on success or an error code on failure.
 610 */
 611int tb_port_add_nfc_credits(struct tb_port *port, int credits)
 612{
 613        u32 nfc_credits;
 614
 615        if (credits == 0 || port->sw->is_unplugged)
 616                return 0;
 617
 618        /*
 619         * USB4 restricts programming NFC buffers to lane adapters only
 620         * so skip other ports.
 621         */
 622        if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
 623                return 0;
 624
 625        nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
 626        nfc_credits += credits;
 627
  628        tb_port_dbg(port, "adding %d NFC credits to %lu\n", credits,
 629                    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);
 630
 631        port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
 632        port->config.nfc_credits |= nfc_credits;
 633
 634        return tb_port_write(port, &port->config.nfc_credits,
 635                             TB_CFG_PORT, ADP_CS_4, 1);
 636}
 637
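     /*
      * Example (sketch): granting a port two extra NFC credits for a
      * path and handing them back on teardown by passing a negative
      * amount:
      *
      *    ret = tb_port_add_nfc_credits(port, 2);
      *    ...
      *    ret = tb_port_add_nfc_credits(port, -2);
      */
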
 638/**
 639 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 640 * @port: Port whose counters to clear
 641 * @counter: Counter index to clear
 642 *
 643 * Return: Returns 0 on success or an error code on failure.
 644 */
 645int tb_port_clear_counter(struct tb_port *port, int counter)
 646{
 647        u32 zero[3] = { 0, 0, 0 };
 648        tb_port_dbg(port, "clearing counter %d\n", counter);
 649        return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
 650}
 651
 652/**
 653 * tb_port_unlock() - Unlock downstream port
 654 * @port: Port to unlock
 655 *
 656 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
 657 * downstream router accessible for CM.
 658 */
 659int tb_port_unlock(struct tb_port *port)
 660{
 661        if (tb_switch_is_icm(port->sw))
 662                return 0;
 663        if (!tb_port_is_null(port))
 664                return -EINVAL;
 665        if (tb_switch_is_usb4(port->sw))
 666                return usb4_port_unlock(port);
 667        return 0;
 668}
 669
 670static int __tb_port_enable(struct tb_port *port, bool enable)
 671{
 672        int ret;
 673        u32 phy;
 674
 675        if (!tb_port_is_null(port))
 676                return -EINVAL;
 677
 678        ret = tb_port_read(port, &phy, TB_CFG_PORT,
 679                           port->cap_phy + LANE_ADP_CS_1, 1);
 680        if (ret)
 681                return ret;
 682
 683        if (enable)
 684                phy &= ~LANE_ADP_CS_1_LD;
 685        else
 686                phy |= LANE_ADP_CS_1_LD;
 687
 688        return tb_port_write(port, &phy, TB_CFG_PORT,
 689                             port->cap_phy + LANE_ADP_CS_1, 1);
 690}
 691
 692/**
 693 * tb_port_enable() - Enable lane adapter
 694 * @port: Port to enable (can be %NULL)
 695 *
  696 * This is used for lane 0 and 1 adapters to enable them.
 697 */
 698int tb_port_enable(struct tb_port *port)
 699{
 700        return __tb_port_enable(port, true);
 701}
 702
 703/**
 704 * tb_port_disable() - Disable lane adapter
 705 * @port: Port to disable (can be %NULL)
 706 *
  707 * This is used for lane 0 and 1 adapters to disable them.
 708 */
 709int tb_port_disable(struct tb_port *port)
 710{
 711        return __tb_port_enable(port, false);
 712}
 713
 714/*
 715 * tb_init_port() - initialize a port
 716 *
 717 * This is a helper method for tb_switch_alloc. Does not check or initialize
 718 * any downstream switches.
 719 *
 720 * Return: Returns 0 on success or an error code on failure.
 721 */
 722static int tb_init_port(struct tb_port *port)
 723{
 724        int res;
 725        int cap;
 726
 727        res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
 728        if (res) {
 729                if (res == -ENODEV) {
 730                        tb_dbg(port->sw->tb, " Port %d: not implemented\n",
 731                               port->port);
 732                        port->disabled = true;
 733                        return 0;
 734                }
 735                return res;
 736        }
 737
 738        /* Port 0 is the switch itself and has no PHY. */
 739        if (port->config.type == TB_TYPE_PORT && port->port != 0) {
 740                cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
 741
 742                if (cap > 0)
 743                        port->cap_phy = cap;
 744                else
 745                        tb_port_WARN(port, "non switch port without a PHY\n");
 746
 747                cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
 748                if (cap > 0)
 749                        port->cap_usb4 = cap;
 750
 751                /*
  752                 * For USB4 ports the buffers allocated for the control path
  753                 * can be read from the path config space. For legacy
  754                 * devices we use a hard-coded value.
 755                 */
 756                if (tb_switch_is_usb4(port->sw)) {
 757                        struct tb_regs_hop hop;
 758
 759                        if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
 760                                port->ctl_credits = hop.initial_credits;
 761                }
 762                if (!port->ctl_credits)
 763                        port->ctl_credits = 2;
 764
 765        } else if (port->port != 0) {
 766                cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
 767                if (cap > 0)
 768                        port->cap_adap = cap;
 769        }
 770
 771        port->total_credits =
 772                (port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
 773                ADP_CS_4_TOTAL_BUFFERS_SHIFT;
 774
 775        tb_dump_port(port->sw->tb, port);
 776
 777        INIT_LIST_HEAD(&port->list);
 778        return 0;
  780}
 781
 782static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
 783                               int max_hopid)
 784{
 785        int port_max_hopid;
 786        struct ida *ida;
 787
 788        if (in) {
 789                port_max_hopid = port->config.max_in_hop_id;
 790                ida = &port->in_hopids;
 791        } else {
 792                port_max_hopid = port->config.max_out_hop_id;
 793                ida = &port->out_hopids;
 794        }
 795
 796        /*
  797         * NHI can use HopIDs 1-max. For other adapters HopIDs 0-7 are
 798         * reserved.
 799         */
 800        if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
 801                min_hopid = TB_PATH_MIN_HOPID;
 802
 803        if (max_hopid < 0 || max_hopid > port_max_hopid)
 804                max_hopid = port_max_hopid;
 805
 806        return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
 807}
 808
 809/**
 810 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 811 * @port: Port to allocate HopID for
 812 * @min_hopid: Minimum acceptable input HopID
 813 * @max_hopid: Maximum acceptable input HopID
 814 *
 815 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 816 * case of error.
 817 */
 818int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
 819{
 820        return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
 821}
 822
 823/**
 824 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 825 * @port: Port to allocate HopID for
 826 * @min_hopid: Minimum acceptable output HopID
 827 * @max_hopid: Maximum acceptable output HopID
 828 *
 829 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 830 * case of error.
 831 */
 832int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
 833{
 834        return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
 835}
 836
 837/**
 838 * tb_port_release_in_hopid() - Release allocated input HopID from port
 839 * @port: Port whose HopID to release
 840 * @hopid: HopID to release
 841 */
 842void tb_port_release_in_hopid(struct tb_port *port, int hopid)
 843{
 844        ida_simple_remove(&port->in_hopids, hopid);
 845}
 846
 847/**
 848 * tb_port_release_out_hopid() - Release allocated output HopID from port
 849 * @port: Port whose HopID to release
 850 * @hopid: HopID to release
 851 */
 852void tb_port_release_out_hopid(struct tb_port *port, int hopid)
 853{
 854        ida_simple_remove(&port->out_hopids, hopid);
 855}
 856
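     /*
      * Example (sketch): a path hop reserves an input HopID and releases
      * it again on teardown. A negative @max_hopid means "use the port
      * maximum" (see tb_port_alloc_hopid() above):
      *
      *    hopid = tb_port_alloc_in_hopid(port, TB_PATH_MIN_HOPID, -1);
      *    if (hopid < 0)
      *            return hopid;
      *    ...
      *    tb_port_release_in_hopid(port, hopid);
      */
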
 857static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
 858                                          const struct tb_switch *sw)
 859{
 860        u64 mask = (1ULL << parent->config.depth * 8) - 1;
 861        return (tb_route(parent) & mask) == (tb_route(sw) & mask);
 862}
 863
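     /*
      * Worked example for the mask above: the route string carries one
      * byte per hop, so a parent at depth 1 gives
      * mask = (1ULL << 8) - 1 = 0xff and the routers compare as
      * reachable when the first hops of their route strings match.
      */
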
 864/**
 865 * tb_next_port_on_path() - Return next port for given port on a path
 866 * @start: Start port of the walk
 867 * @end: End port of the walk
 868 * @prev: Previous port (%NULL if this is the first)
 869 *
 870 * This function can be used to walk from one port to another if they
  871 * are connected through zero or more switches. If @prev is a dual
  872 * link port, the function follows that link and returns the other end of
 873 * that same link.
 874 *
 875 * If the @end port has been reached, return %NULL.
 876 *
 877 * Domain tb->lock must be held when this function is called.
 878 */
 879struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
 880                                     struct tb_port *prev)
 881{
 882        struct tb_port *next;
 883
 884        if (!prev)
 885                return start;
 886
 887        if (prev->sw == end->sw) {
 888                if (prev == end)
 889                        return NULL;
 890                return end;
 891        }
 892
 893        if (tb_switch_is_reachable(prev->sw, end->sw)) {
 894                next = tb_port_at(tb_route(end->sw), prev->sw);
 895                /* Walk down the topology if next == prev */
 896                if (prev->remote &&
 897                    (next == prev || next->dual_link_port == prev))
 898                        next = prev->remote;
 899        } else {
 900                if (tb_is_upstream_port(prev)) {
 901                        next = prev->remote;
 902                } else {
 903                        next = tb_upstream_port(prev->sw);
 904                        /*
 905                         * Keep the same link if prev and next are both
 906                         * dual link ports.
 907                         */
 908                        if (next->dual_link_port &&
 909                            next->link_nr != prev->link_nr) {
 910                                next = next->dual_link_port;
 911                        }
 912                }
 913        }
 914
 915        return next != prev ? next : NULL;
 916}
 917
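     /*
      * Example (sketch): the usual iteration idiom, starting with a
      * %NULL previous port and stopping when the walk returns %NULL:
      *
      *    struct tb_port *p = NULL;
      *
      *    while ((p = tb_next_port_on_path(src, dst, p)) != NULL)
      *            process(p);
      *
      * where process() is a hypothetical per-port action.
      */
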
 918/**
 919 * tb_port_get_link_speed() - Get current link speed
 920 * @port: Port to check (USB4 or CIO)
 921 *
 922 * Returns link speed in Gb/s or negative errno in case of failure.
 923 */
 924int tb_port_get_link_speed(struct tb_port *port)
 925{
 926        u32 val, speed;
 927        int ret;
 928
 929        if (!port->cap_phy)
 930                return -EINVAL;
 931
 932        ret = tb_port_read(port, &val, TB_CFG_PORT,
 933                           port->cap_phy + LANE_ADP_CS_1, 1);
 934        if (ret)
 935                return ret;
 936
 937        speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
 938                LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
 939        return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
 940}
 941
 942/**
 943 * tb_port_get_link_width() - Get current link width
 944 * @port: Port to check (USB4 or CIO)
 945 *
 946 * Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane)
 947 * or negative errno in case of failure.
 948 */
 949int tb_port_get_link_width(struct tb_port *port)
 950{
 951        u32 val;
 952        int ret;
 953
 954        if (!port->cap_phy)
 955                return -EINVAL;
 956
 957        ret = tb_port_read(port, &val, TB_CFG_PORT,
 958                           port->cap_phy + LANE_ADP_CS_1, 1);
 959        if (ret)
 960                return ret;
 961
 962        return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
 963                LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
 964}
 965
 966static bool tb_port_is_width_supported(struct tb_port *port, int width)
 967{
 968        u32 phy, widths;
 969        int ret;
 970
 971        if (!port->cap_phy)
 972                return false;
 973
 974        ret = tb_port_read(port, &phy, TB_CFG_PORT,
 975                           port->cap_phy + LANE_ADP_CS_0, 1);
 976        if (ret)
 977                return false;
 978
 979        widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
 980                LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
 981
 982        return !!(widths & width);
 983}
 984
 985static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
 986{
 987        u32 val;
 988        int ret;
 989
 990        if (!port->cap_phy)
 991                return -EINVAL;
 992
 993        ret = tb_port_read(port, &val, TB_CFG_PORT,
 994                           port->cap_phy + LANE_ADP_CS_1, 1);
 995        if (ret)
 996                return ret;
 997
 998        val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
 999        switch (width) {
1000        case 1:
1001                val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
1002                        LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
1003                break;
1004        case 2:
1005                val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
1006                        LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
1007                break;
1008        default:
1009                return -EINVAL;
1010        }
1011
1012        val |= LANE_ADP_CS_1_LB;
1013
1014        return tb_port_write(port, &val, TB_CFG_PORT,
1015                             port->cap_phy + LANE_ADP_CS_1, 1);
1016}
1017
1018/**
1019 * tb_port_lane_bonding_enable() - Enable bonding on port
1020 * @port: port to enable
1021 *
1022 * Enable bonding by setting the link width of the port and the other
1023 * port in case of dual link port. Does not wait for the link to
1024 * actually reach the bonded state so caller needs to call
1025 * tb_port_wait_for_link_width() before enabling any paths through the
1026 * link to make sure the link is in expected state.
1027 *
1028 * Return: %0 in case of success and negative errno in case of error
1029 */
1030int tb_port_lane_bonding_enable(struct tb_port *port)
1031{
1032        int ret;
1033
1034        /*
 1035         * Enable lane bonding for both links if not already enabled by,
 1036         * for example, the boot firmware.
1037         */
1038        ret = tb_port_get_link_width(port);
1039        if (ret == 1) {
1040                ret = tb_port_set_link_width(port, 2);
1041                if (ret)
1042                        return ret;
1043        }
1044
1045        ret = tb_port_get_link_width(port->dual_link_port);
1046        if (ret == 1) {
1047                ret = tb_port_set_link_width(port->dual_link_port, 2);
1048                if (ret) {
1049                        tb_port_set_link_width(port, 1);
1050                        return ret;
1051                }
1052        }
1053
1054        port->bonded = true;
1055        port->dual_link_port->bonded = true;
1056
1057        return 0;
1058}
1059
1060/**
1061 * tb_port_lane_bonding_disable() - Disable bonding on port
1062 * @port: port to disable
1063 *
1064 * Disable bonding by setting the link width of the port and the
1065 * other port in case of dual link port.
 1067 */
1068void tb_port_lane_bonding_disable(struct tb_port *port)
1069{
1070        port->dual_link_port->bonded = false;
1071        port->bonded = false;
1072
1073        tb_port_set_link_width(port->dual_link_port, 1);
1074        tb_port_set_link_width(port, 1);
1075}
1076
1077/**
1078 * tb_port_wait_for_link_width() - Wait until link reaches specific width
1079 * @port: Port to wait for
1080 * @width: Expected link width (%1 or %2)
1081 * @timeout_msec: Timeout in ms how long to wait
1082 *
1083 * Should be used after both ends of the link have been bonded (or
1084 * bonding has been disabled) to wait until the link actually reaches
1085 * the expected state. Returns %-ETIMEDOUT if the @width was not reached
1086 * within the given timeout, %0 if it did.
1087 */
1088int tb_port_wait_for_link_width(struct tb_port *port, int width,
1089                                int timeout_msec)
1090{
1091        ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
1092        int ret;
1093
1094        do {
1095                ret = tb_port_get_link_width(port);
1096                if (ret < 0)
1097                        return ret;
1098                else if (ret == width)
1099                        return 0;
1100
1101                usleep_range(1000, 2000);
1102        } while (ktime_before(ktime_get(), timeout));
1103
1104        return -ETIMEDOUT;
1105}
1106
1107static int tb_port_do_update_credits(struct tb_port *port)
1108{
1109        u32 nfc_credits;
1110        int ret;
1111
1112        ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
1113        if (ret)
1114                return ret;
1115
1116        if (nfc_credits != port->config.nfc_credits) {
1117                u32 total;
1118
1119                total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
1120                        ADP_CS_4_TOTAL_BUFFERS_SHIFT;
1121
1122                tb_port_dbg(port, "total credits changed %u -> %u\n",
1123                            port->total_credits, total);
1124
1125                port->config.nfc_credits = nfc_credits;
1126                port->total_credits = total;
1127        }
1128
1129        return 0;
1130}
1131
1132/**
1133 * tb_port_update_credits() - Re-read port total credits
1134 * @port: Port to update
1135 *
1136 * After the link is bonded (or bonding was disabled) the port total
1137 * credits may change, so this function needs to be called to re-read
 1138         * the credits. Also updates the second lane adapter.
1139 */
1140int tb_port_update_credits(struct tb_port *port)
1141{
1142        int ret;
1143
1144        ret = tb_port_do_update_credits(port);
1145        if (ret)
1146                return ret;
1147        return tb_port_do_update_credits(port->dual_link_port);
1148}
1149
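     /*
      * Example (sketch): the full bonding sequence the kernel-docs above
      * describe, chained together (the 100 ms timeout is an assumption
      * for illustration):
      *
      *    ret = tb_port_lane_bonding_enable(up);
      *    if (!ret)
      *            ret = tb_port_wait_for_link_width(up, 2, 100);
      *    if (!ret)
      *            ret = tb_port_update_credits(up);
      *    if (ret)
      *            tb_port_lane_bonding_disable(up);
      */
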
1150static int tb_port_start_lane_initialization(struct tb_port *port)
1151{
1152        int ret;
1153
1154        if (tb_switch_is_usb4(port->sw))
1155                return 0;
1156
1157        ret = tb_lc_start_lane_initialization(port);
1158        return ret == -EINVAL ? 0 : ret;
1159}
1160
1161/*
1162 * Returns true if the port had something (router, XDomain) connected
1163 * before suspend.
1164 */
1165static bool tb_port_resume(struct tb_port *port)
1166{
1167        bool has_remote = tb_port_has_remote(port);
1168
1169        if (port->usb4) {
1170                usb4_port_device_resume(port->usb4);
1171        } else if (!has_remote) {
1172                /*
1173                 * For disconnected downstream lane adapters start lane
1174                 * initialization now so we detect future connects.
1175                 *
 1176                 * For XDomain start the lane initialization now so the
1177                 * link gets re-established.
1178                 *
1179                 * This is only needed for non-USB4 ports.
1180                 */
1181                if (!tb_is_upstream_port(port) || port->xdomain)
1182                        tb_port_start_lane_initialization(port);
1183        }
1184
1185        return has_remote || port->xdomain;
1186}
1187
1188/**
1189 * tb_port_is_enabled() - Is the adapter port enabled
1190 * @port: Port to check
1191 */
1192bool tb_port_is_enabled(struct tb_port *port)
1193{
1194        switch (port->config.type) {
1195        case TB_TYPE_PCIE_UP:
1196        case TB_TYPE_PCIE_DOWN:
1197                return tb_pci_port_is_enabled(port);
1198
1199        case TB_TYPE_DP_HDMI_IN:
1200        case TB_TYPE_DP_HDMI_OUT:
1201                return tb_dp_port_is_enabled(port);
1202
1203        case TB_TYPE_USB3_UP:
1204        case TB_TYPE_USB3_DOWN:
1205                return tb_usb3_port_is_enabled(port);
1206
1207        default:
1208                return false;
1209        }
1210}
1211
1212/**
1213 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
1214 * @port: USB3 adapter port to check
1215 */
1216bool tb_usb3_port_is_enabled(struct tb_port *port)
1217{
1218        u32 data;
1219
1220        if (tb_port_read(port, &data, TB_CFG_PORT,
1221                         port->cap_adap + ADP_USB3_CS_0, 1))
1222                return false;
1223
1224        return !!(data & ADP_USB3_CS_0_PE);
1225}
1226
1227/**
1228 * tb_usb3_port_enable() - Enable USB3 adapter port
1229 * @port: USB3 adapter port to enable
1230 * @enable: Enable/disable the USB3 adapter
1231 */
1232int tb_usb3_port_enable(struct tb_port *port, bool enable)
1233{
1234        u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
1235                          : ADP_USB3_CS_0_V;
1236
1237        if (!port->cap_adap)
1238                return -ENXIO;
1239        return tb_port_write(port, &word, TB_CFG_PORT,
1240                             port->cap_adap + ADP_USB3_CS_0, 1);
1241}
1242
1243/**
1244 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
1245 * @port: PCIe port to check
1246 */
1247bool tb_pci_port_is_enabled(struct tb_port *port)
1248{
1249        u32 data;
1250
1251        if (tb_port_read(port, &data, TB_CFG_PORT,
1252                         port->cap_adap + ADP_PCIE_CS_0, 1))
1253                return false;
1254
1255        return !!(data & ADP_PCIE_CS_0_PE);
1256}
1257
1258/**
1259 * tb_pci_port_enable() - Enable PCIe adapter port
1260 * @port: PCIe port to enable
1261 * @enable: Enable/disable the PCIe adapter
1262 */
1263int tb_pci_port_enable(struct tb_port *port, bool enable)
1264{
1265        u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
1266        if (!port->cap_adap)
1267                return -ENXIO;
1268        return tb_port_write(port, &word, TB_CFG_PORT,
1269                             port->cap_adap + ADP_PCIE_CS_0, 1);
1270}
1271
1272/**
1273 * tb_dp_port_hpd_is_active() - Is HPD already active
1274 * @port: DP out port to check
1275 *
 1276 * Checks if the DP OUT adapter port has the HDP bit already set.
1277 */
1278int tb_dp_port_hpd_is_active(struct tb_port *port)
1279{
1280        u32 data;
1281        int ret;
1282
1283        ret = tb_port_read(port, &data, TB_CFG_PORT,
1284                           port->cap_adap + ADP_DP_CS_2, 1);
1285        if (ret)
1286                return ret;
1287
1288        return !!(data & ADP_DP_CS_2_HDP);
1289}
1290
1291/**
1292 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
1293 * @port: Port to clear HPD
1294 *
1295 * If the DP IN port has HDP set, this function can be used to clear it.
1296 */
1297int tb_dp_port_hpd_clear(struct tb_port *port)
1298{
1299        u32 data;
1300        int ret;
1301
1302        ret = tb_port_read(port, &data, TB_CFG_PORT,
1303                           port->cap_adap + ADP_DP_CS_3, 1);
1304        if (ret)
1305                return ret;
1306
1307        data |= ADP_DP_CS_3_HDPC;
1308        return tb_port_write(port, &data, TB_CFG_PORT,
1309                             port->cap_adap + ADP_DP_CS_3, 1);
1310}
1311
1312/**
1313 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
1314 * @port: DP IN/OUT port to set hops
1315 * @video: Video Hop ID
1316 * @aux_tx: AUX TX Hop ID
1317 * @aux_rx: AUX RX Hop ID
1318 *
1319 * Programs specified Hop IDs for DP IN/OUT port.
1320 */
1321int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
1322                        unsigned int aux_tx, unsigned int aux_rx)
1323{
1324        u32 data[2];
1325        int ret;
1326
1327        ret = tb_port_read(port, data, TB_CFG_PORT,
1328                           port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1329        if (ret)
1330                return ret;
1331
1332        data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
 1333        data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
1334        data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
1335
1336        data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
1337                ADP_DP_CS_0_VIDEO_HOPID_MASK;
1338        data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
1339        data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
1340                ADP_DP_CS_1_AUX_RX_HOPID_MASK;
1341
1342        return tb_port_write(port, data, TB_CFG_PORT,
1343                             port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1344}
1345
1346/**
1347 * tb_dp_port_is_enabled() - Is DP adapter port enabled
1348 * @port: DP adapter port to check
1349 */
1350bool tb_dp_port_is_enabled(struct tb_port *port)
1351{
1352        u32 data[2];
1353
1354        if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
1355                         ARRAY_SIZE(data)))
1356                return false;
1357
1358        return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
1359}
1360
1361/**
1362 * tb_dp_port_enable() - Enables/disables DP paths of a port
1363 * @port: DP IN/OUT port
1364 * @enable: Enable/disable DP path
1365 *
1366 * Once Hop IDs are programmed DP paths can be enabled or disabled by
1367 * calling this function.
1368 */
1369int tb_dp_port_enable(struct tb_port *port, bool enable)
1370{
1371        u32 data[2];
1372        int ret;
1373
1374        ret = tb_port_read(port, data, TB_CFG_PORT,
1375                          port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1376        if (ret)
1377                return ret;
1378
1379        if (enable)
1380                data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
1381        else
1382                data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);
1383
1384        return tb_port_write(port, data, TB_CFG_PORT,
1385                             port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1386}
1387
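     /*
      * Example (sketch): bringing up a DP path in the order the
      * kernel-doc above requires, HopIDs first, then enable:
      *
      *    ret = tb_dp_port_set_hops(port, video, aux_tx, aux_rx);
      *    if (!ret)
      *            ret = tb_dp_port_enable(port, true);
      */
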
1388/* switch utility functions */
1389
1390static const char *tb_switch_generation_name(const struct tb_switch *sw)
1391{
1392        switch (sw->generation) {
1393        case 1:
1394                return "Thunderbolt 1";
1395        case 2:
1396                return "Thunderbolt 2";
1397        case 3:
1398                return "Thunderbolt 3";
1399        case 4:
1400                return "USB4";
1401        default:
1402                return "Unknown";
1403        }
1404}
1405
1406static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
1407{
1408        const struct tb_regs_switch_header *regs = &sw->config;
1409
1410        tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
1411               tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
1412               regs->revision, regs->thunderbolt_version);
1413        tb_dbg(tb, "  Max Port Number: %d\n", regs->max_port_number);
1414        tb_dbg(tb, "  Config:\n");
1415        tb_dbg(tb,
1416                "   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
1417               regs->upstream_port_number, regs->depth,
1418               (((u64) regs->route_hi) << 32) | regs->route_lo,
1419               regs->enabled, regs->plug_events_delay);
1420        tb_dbg(tb, "   unknown1: %#x unknown4: %#x\n",
1421               regs->__unknown1, regs->__unknown4);
1422}
1423
1424/**
1425 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
1426 * @sw: Switch to reset
1427 *
1428 * Return: Returns 0 on success or an error code on failure.
1429 */
1430int tb_switch_reset(struct tb_switch *sw)
1431{
1432        struct tb_cfg_result res;
1433
1434        if (sw->generation > 1)
1435                return 0;
1436
1437        tb_sw_dbg(sw, "resetting switch\n");
1438
1439        res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
1440                              TB_CFG_SWITCH, 2, 2);
1441        if (res.err)
1442                return res.err;
1443        res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
1444        if (res.err > 0)
1445                return -EIO;
1446        return res.err;
1447}
1448
1449/*
1450 * tb_plug_events_active() - enable/disable plug events on a switch
1451 *
1452 * Also configures a sane plug_events_delay of 255ms.
1453 *
1454 * Return: Returns 0 on success or an error code on failure.
1455 */
1456static int tb_plug_events_active(struct tb_switch *sw, bool active)
1457{
1458        u32 data;
1459        int res;
1460
1461        if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
1462                return 0;
1463
1464        sw->config.plug_events_delay = 0xff;
1465        res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
1466        if (res)
1467                return res;
1468
1469        res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
1470        if (res)
1471                return res;
1472
1473        if (active) {
1474                data = data & 0xFFFFFF83;
1475                switch (sw->config.device_id) {
1476                case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1477                case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
1478                case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
1479                        break;
1480                default:
1481                        data |= 4;
1482                }
1483        } else {
1484                data = data | 0x7c;
1485        }
1486        return tb_sw_write(sw, &data, TB_CFG_SWITCH,
1487                           sw->cap_plug_events + 1, 1);
1488}
1489
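     /*
      * Worked bit math for tb_plug_events_active() above: 0x7c is bits
      * 2-6 and 0xFFFFFF83 is its complement, so activating first clears
      * bits 2-6 (and then sets bit 2 via data |= 4 on hardware that
      * needs it) while deactivating sets all of bits 2-6.
      */
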
1490static ssize_t authorized_show(struct device *dev,
1491                               struct device_attribute *attr,
1492                               char *buf)
1493{
1494        struct tb_switch *sw = tb_to_switch(dev);
1495
1496        return sprintf(buf, "%u\n", sw->authorized);
1497}
1498
1499static int disapprove_switch(struct device *dev, void *not_used)
1500{
1501        struct tb_switch *sw;
1502
1503        sw = tb_to_switch(dev);
1504        if (sw && sw->authorized) {
1505                int ret;
1506
1507                /* First children */
1508                ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
1509                if (ret)
1510                        return ret;
1511
1512                ret = tb_domain_disapprove_switch(sw->tb, sw);
1513                if (ret)
1514                        return ret;
1515
1516                sw->authorized = 0;
1517                kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
1518        }
1519
1520        return 0;
1521}
1522
1523static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
1524{
1525        int ret = -EINVAL;
1526
1527        if (!mutex_trylock(&sw->tb->lock))
1528                return restart_syscall();
1529
1530        if (!!sw->authorized == !!val)
1531                goto unlock;
1532
1533        switch (val) {
1534        /* Disapprove switch */
1535        case 0:
1536                if (tb_route(sw)) {
1537                        ret = disapprove_switch(&sw->dev, NULL);
1538                        goto unlock;
1539                }
1540                break;
1541
1542        /* Approve switch */
1543        case 1:
1544                if (sw->key)
1545                        ret = tb_domain_approve_switch_key(sw->tb, sw);
1546                else
1547                        ret = tb_domain_approve_switch(sw->tb, sw);
1548                break;
1549
1550        /* Challenge switch */
1551        case 2:
1552                if (sw->key)
1553                        ret = tb_domain_challenge_switch_key(sw->tb, sw);
1554                break;
1555
1556        default:
1557                break;
1558        }
1559
1560        if (!ret) {
1561                sw->authorized = val;
1562                /* Notify status change to the userspace */
1563                kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
1564        }
1565
1566unlock:
1567        mutex_unlock(&sw->tb->lock);
1568        return ret;
1569}
1570
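     /*
      * Example (sketch): the matching userspace side of this attribute
      * is simply
      *
      *    # echo 1 > /sys/bus/thunderbolt/devices/<device>/authorized
      *
      * where 1 approves the router, 2 approves using a key challenge
      * and 0 deauthorizes it (non-root routers only, see the tb_route()
      * check above).
      */
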
1571static ssize_t authorized_store(struct device *dev,
1572                                struct device_attribute *attr,
1573                                const char *buf, size_t count)
1574{
1575        struct tb_switch *sw = tb_to_switch(dev);
1576        unsigned int val;
1577        ssize_t ret;
1578
1579        ret = kstrtouint(buf, 0, &val);
1580        if (ret)
1581                return ret;
1582        if (val > 2)
1583                return -EINVAL;
1584
1585        pm_runtime_get_sync(&sw->dev);
1586        ret = tb_switch_set_authorized(sw, val);
1587        pm_runtime_mark_last_busy(&sw->dev);
1588        pm_runtime_put_autosuspend(&sw->dev);
1589
1590        return ret ? ret : count;
1591}
1592static DEVICE_ATTR_RW(authorized);
1593
1594static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
1595                         char *buf)
1596{
1597        struct tb_switch *sw = tb_to_switch(dev);
1598
1599        return sprintf(buf, "%u\n", sw->boot);
1600}
1601static DEVICE_ATTR_RO(boot);
1602
1603static ssize_t device_show(struct device *dev, struct device_attribute *attr,
1604                           char *buf)
1605{
1606        struct tb_switch *sw = tb_to_switch(dev);
1607
1608        return sprintf(buf, "%#x\n", sw->device);
1609}
1610static DEVICE_ATTR_RO(device);
1611
1612static ssize_t
1613device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1614{
1615        struct tb_switch *sw = tb_to_switch(dev);
1616
1617        return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
1618}
1619static DEVICE_ATTR_RO(device_name);
1620
1621static ssize_t
1622generation_show(struct device *dev, struct device_attribute *attr, char *buf)
1623{
1624        struct tb_switch *sw = tb_to_switch(dev);
1625
1626        return sprintf(buf, "%u\n", sw->generation);
1627}
1628static DEVICE_ATTR_RO(generation);
1629
1630static ssize_t key_show(struct device *dev, struct device_attribute *attr,
1631                        char *buf)
1632{
1633        struct tb_switch *sw = tb_to_switch(dev);
1634        ssize_t ret;
1635
1636        if (!mutex_trylock(&sw->tb->lock))
1637                return restart_syscall();
1638
1639        if (sw->key)
1640                ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
1641        else
1642                ret = sprintf(buf, "\n");
1643
1644        mutex_unlock(&sw->tb->lock);
1645        return ret;
1646}
1647
1648static ssize_t key_store(struct device *dev, struct device_attribute *attr,
1649                         const char *buf, size_t count)
1650{
1651        struct tb_switch *sw = tb_to_switch(dev);
1652        u8 key[TB_SWITCH_KEY_SIZE];
1653        ssize_t ret = count;
1654        bool clear = false;
1655
1656        if (!strcmp(buf, "\n"))
1657                clear = true;
1658        else if (hex2bin(key, buf, sizeof(key)))
1659                return -EINVAL;
1660
1661        if (!mutex_trylock(&sw->tb->lock))
1662                return restart_syscall();
1663
1664        if (sw->authorized) {
1665                ret = -EBUSY;
1666        } else {
1667                kfree(sw->key);
1668                if (clear) {
1669                        sw->key = NULL;
1670                } else {
1671                        sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
1672                        if (!sw->key)
1673                                ret = -ENOMEM;
1674                }
1675        }
1676
1677        mutex_unlock(&sw->tb->lock);
1678        return ret;
1679}
1680static DEVICE_ATTR(key, 0600, key_show, key_store);
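
    /*
     * Sketch of how the key attribute is used from userspace (device
     * path hypothetical): the key is TB_SWITCH_KEY_SIZE (32) bytes,
     * passed as hex characters, and writing a bare newline clears it:
     *
     *   # openssl rand -hex 32 > /sys/bus/thunderbolt/devices/0-3/key
     *   # echo 2 > /sys/bus/thunderbolt/devices/0-3/authorized
     *
     * The second write triggers secure authorization using the key
     * stored by the first one.
     */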
1681
1682static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
1683                          char *buf)
1684{
1685        struct tb_switch *sw = tb_to_switch(dev);
1686
1687        return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
1688}
1689
1690/*
1691 * Currently all lanes must run at the same speed but we expose both
1692 * directions here to allow for possible asymmetric links in the future.
1693 */
1694static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
1695static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
1696
1697static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
1698                          char *buf)
1699{
1700        struct tb_switch *sw = tb_to_switch(dev);
1701
1702        return sprintf(buf, "%u\n", sw->link_width);
1703}
1704
1705/*
1706 * The link currently has the same number of lanes in both directions
1707 * (1 or 2) but we expose them separately to allow future asymmetric links.
1708 */
1709static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
1710static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
1711
1712static ssize_t nvm_authenticate_show(struct device *dev,
1713        struct device_attribute *attr, char *buf)
1714{
1715        struct tb_switch *sw = tb_to_switch(dev);
1716        u32 status;
1717
1718        nvm_get_auth_status(sw, &status);
1719        return sprintf(buf, "%#x\n", status);
1720}
1721
1722static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
1723                                      bool disconnect)
1724{
1725        struct tb_switch *sw = tb_to_switch(dev);
1726        int val, ret;
1727
1728        pm_runtime_get_sync(&sw->dev);
1729
1730        if (!mutex_trylock(&sw->tb->lock)) {
1731                ret = restart_syscall();
1732                goto exit_rpm;
1733        }
1734
1735        /* If NVMem devices are not yet added */
1736        if (!sw->nvm) {
1737                ret = -EAGAIN;
1738                goto exit_unlock;
1739        }
1740
1741        ret = kstrtoint(buf, 10, &val);
1742        if (ret)
1743                goto exit_unlock;
1744
1745        /* Always clear the authentication status */
1746        nvm_clear_auth_status(sw);
1747
1748        if (val > 0) {
1749                if (val == AUTHENTICATE_ONLY) {
1750                        if (disconnect)
1751                                ret = -EINVAL;
1752                        else
1753                                ret = nvm_authenticate(sw, true);
1754                } else {
1755                        if (!sw->nvm->flushed) {
1756                                if (!sw->nvm->buf) {
1757                                        ret = -EINVAL;
1758                                        goto exit_unlock;
1759                                }
1760
1761                                ret = nvm_validate_and_write(sw);
1762                                if (ret || val == WRITE_ONLY)
1763                                        goto exit_unlock;
1764                        }
1765                        if (val == WRITE_AND_AUTHENTICATE) {
1766                                if (disconnect)
1767                                        ret = tb_lc_force_power(sw);
1768                                else
1769                                        ret = nvm_authenticate(sw, false);
1770                        }
1771                }
1772        }
1773
1774exit_unlock:
1775        mutex_unlock(&sw->tb->lock);
1776exit_rpm:
1777        pm_runtime_mark_last_busy(&sw->dev);
1778        pm_runtime_put_autosuspend(&sw->dev);
1779
1780        return ret;
1781}
1782
1783static ssize_t nvm_authenticate_store(struct device *dev,
1784        struct device_attribute *attr, const char *buf, size_t count)
1785{
1786        int ret = nvm_authenticate_sysfs(dev, buf, false);
1787        if (ret)
1788                return ret;
1789        return count;
1790}
1791static DEVICE_ATTR_RW(nvm_authenticate);
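
    /*
     * Typical NVM upgrade sequence from userspace as a sketch (device
     * and NVMem paths hypothetical). The new image is written to the
     * non-active NVMem device and then this attribute starts the
     * upgrade; per the handling above, WRITE_AND_AUTHENTICATE (1)
     * writes the image and authenticates it, WRITE_ONLY just flushes
     * the image and AUTHENTICATE_ONLY authenticates a previously
     * written one:
     *
     *   # dd if=image.nvm of=/sys/bus/thunderbolt/devices/0-1/nvm_non_active0/nvmem
     *   # echo 1 > /sys/bus/thunderbolt/devices/0-1/nvm_authenticate
     *
     * Reading the attribute back returns the status of the last
     * authentication (0 on success).
     */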
1792
1793static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
1794        struct device_attribute *attr, char *buf)
1795{
1796        return nvm_authenticate_show(dev, attr, buf);
1797}
1798
1799static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
1800        struct device_attribute *attr, const char *buf, size_t count)
1801{
1802        int ret;
1803
1804        ret = nvm_authenticate_sysfs(dev, buf, true);
1805        return ret ? ret : count;
1806}
1807static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);
1808
1809static ssize_t nvm_version_show(struct device *dev,
1810                                struct device_attribute *attr, char *buf)
1811{
1812        struct tb_switch *sw = tb_to_switch(dev);
1813        int ret;
1814
1815        if (!mutex_trylock(&sw->tb->lock))
1816                return restart_syscall();
1817
1818        if (sw->safe_mode)
1819                ret = -ENODATA;
1820        else if (!sw->nvm)
1821                ret = -EAGAIN;
1822        else
1823                ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
1824
1825        mutex_unlock(&sw->tb->lock);
1826
1827        return ret;
1828}
1829static DEVICE_ATTR_RO(nvm_version);
1830
1831static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
1832                           char *buf)
1833{
1834        struct tb_switch *sw = tb_to_switch(dev);
1835
1836        return sprintf(buf, "%#x\n", sw->vendor);
1837}
1838static DEVICE_ATTR_RO(vendor);
1839
1840static ssize_t
1841vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1842{
1843        struct tb_switch *sw = tb_to_switch(dev);
1844
1845        return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
1846}
1847static DEVICE_ATTR_RO(vendor_name);
1848
1849static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
1850                              char *buf)
1851{
1852        struct tb_switch *sw = tb_to_switch(dev);
1853
1854        return sprintf(buf, "%pUb\n", sw->uuid);
1855}
1856static DEVICE_ATTR_RO(unique_id);
1857
1858static struct attribute *switch_attrs[] = {
1859        &dev_attr_authorized.attr,
1860        &dev_attr_boot.attr,
1861        &dev_attr_device.attr,
1862        &dev_attr_device_name.attr,
1863        &dev_attr_generation.attr,
1864        &dev_attr_key.attr,
1865        &dev_attr_nvm_authenticate.attr,
1866        &dev_attr_nvm_authenticate_on_disconnect.attr,
1867        &dev_attr_nvm_version.attr,
1868        &dev_attr_rx_speed.attr,
1869        &dev_attr_rx_lanes.attr,
1870        &dev_attr_tx_speed.attr,
1871        &dev_attr_tx_lanes.attr,
1872        &dev_attr_vendor.attr,
1873        &dev_attr_vendor_name.attr,
1874        &dev_attr_unique_id.attr,
1875        NULL,
1876};
1877
1878static umode_t switch_attr_is_visible(struct kobject *kobj,
1879                                      struct attribute *attr, int n)
1880{
1881        struct device *dev = kobj_to_dev(kobj);
1882        struct tb_switch *sw = tb_to_switch(dev);
1883
1884        if (attr == &dev_attr_authorized.attr) {
1885                if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
1886                    sw->tb->security_level == TB_SECURITY_DPONLY)
1887                        return 0;
1888        } else if (attr == &dev_attr_device.attr) {
1889                if (!sw->device)
1890                        return 0;
1891        } else if (attr == &dev_attr_device_name.attr) {
1892                if (!sw->device_name)
1893                        return 0;
1894        } else if (attr == &dev_attr_vendor.attr) {
1895                if (!sw->vendor)
1896                        return 0;
1897        } else if (attr == &dev_attr_vendor_name.attr) {
1898                if (!sw->vendor_name)
1899                        return 0;
1900        } else if (attr == &dev_attr_key.attr) {
1901                if (tb_route(sw) &&
1902                    sw->tb->security_level == TB_SECURITY_SECURE &&
1903                    sw->security_level == TB_SECURITY_SECURE)
1904                        return attr->mode;
1905                return 0;
1906        } else if (attr == &dev_attr_rx_speed.attr ||
1907                   attr == &dev_attr_rx_lanes.attr ||
1908                   attr == &dev_attr_tx_speed.attr ||
1909                   attr == &dev_attr_tx_lanes.attr) {
1910                if (tb_route(sw))
1911                        return attr->mode;
1912                return 0;
1913        } else if (attr == &dev_attr_nvm_authenticate.attr) {
1914                if (nvm_upgradeable(sw))
1915                        return attr->mode;
1916                return 0;
1917        } else if (attr == &dev_attr_nvm_version.attr) {
1918                if (nvm_readable(sw))
1919                        return attr->mode;
1920                return 0;
1921        } else if (attr == &dev_attr_boot.attr) {
1922                if (tb_route(sw))
1923                        return attr->mode;
1924                return 0;
1925        } else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
1926                if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
1927                        return attr->mode;
1928                return 0;
1929        }
1930
1931        return sw->safe_mode ? 0 : attr->mode;
1932}
1933
1934static const struct attribute_group switch_group = {
1935        .is_visible = switch_attr_is_visible,
1936        .attrs = switch_attrs,
1937};
1938
1939static const struct attribute_group *switch_groups[] = {
1940        &switch_group,
1941        NULL,
1942};
1943
1944static void tb_switch_release(struct device *dev)
1945{
1946        struct tb_switch *sw = tb_to_switch(dev);
1947        struct tb_port *port;
1948
1949        dma_port_free(sw->dma_port);
1950
1951        tb_switch_for_each_port(sw, port) {
1952                ida_destroy(&port->in_hopids);
1953                ida_destroy(&port->out_hopids);
1954        }
1955
1956        kfree(sw->uuid);
1957        kfree(sw->device_name);
1958        kfree(sw->vendor_name);
1959        kfree(sw->ports);
1960        kfree(sw->drom);
1961        kfree(sw->key);
1962        kfree(sw);
1963}
1964
1965static int tb_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
1966{
1967        struct tb_switch *sw = tb_to_switch(dev);
1968        const char *type;
1969
1970        if (sw->config.thunderbolt_version == USB4_VERSION_1_0) {
1971                if (add_uevent_var(env, "USB4_VERSION=1.0"))
1972                        return -ENOMEM;
1973        }
1974
1975        if (!tb_route(sw)) {
1976                type = "host";
1977        } else {
1978                const struct tb_port *port;
1979                bool hub = false;
1980
1981                /* Device is hub if it has any downstream ports */
1982                tb_switch_for_each_port(sw, port) {
1983                        if (!port->disabled && !tb_is_upstream_port(port) &&
1984                             tb_port_is_null(port)) {
1985                                hub = true;
1986                                break;
1987                        }
1988                }
1989
1990                type = hub ? "hub" : "device";
1991        }
1992
1993        if (add_uevent_var(env, "USB4_TYPE=%s", type))
1994                return -ENOMEM;
1995        return 0;
1996}
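
    /*
     * As an illustration, a USB4 hub enumerated by this driver would
     * get a uevent environment along the lines of
     *
     *   USB4_VERSION=1.0
     *   USB4_TYPE=hub
     *
     * which udev rules can match against.
     */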
1997
1998/*
1999 * Currently we only need to provide the callbacks. Everything else is
2000 * handled in the connection manager.
2001 */
2002static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
2003{
2004        struct tb_switch *sw = tb_to_switch(dev);
2005        const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
2006
2007        if (cm_ops->runtime_suspend_switch)
2008                return cm_ops->runtime_suspend_switch(sw);
2009
2010        return 0;
2011}
2012
2013static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
2014{
2015        struct tb_switch *sw = tb_to_switch(dev);
2016        const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
2017
2018        if (cm_ops->runtime_resume_switch)
2019                return cm_ops->runtime_resume_switch(sw);
2020        return 0;
2021}
2022
2023static const struct dev_pm_ops tb_switch_pm_ops = {
2024        SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
2025                           NULL)
2026};
2027
2028struct device_type tb_switch_type = {
2029        .name = "thunderbolt_device",
2030        .release = tb_switch_release,
2031        .uevent = tb_switch_uevent,
2032        .pm = &tb_switch_pm_ops,
2033};
2034
2035static int tb_switch_get_generation(struct tb_switch *sw)
2036{
2037        switch (sw->config.device_id) {
2038        case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
2039        case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
2040        case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
2041        case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
2042        case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
2043        case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
2044        case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
2045        case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
2046                return 1;
2047
2048        case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
2049        case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
2050        case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
2051                return 2;
2052
2053        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
2054        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
2055        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
2056        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
2057        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
2058        case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
2059        case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
2060        case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
2061        case PCI_DEVICE_ID_INTEL_ICL_NHI0:
2062        case PCI_DEVICE_ID_INTEL_ICL_NHI1:
2063                return 3;
2064
2065        default:
2066                if (tb_switch_is_usb4(sw))
2067                        return 4;
2068
2069                /*
2070                 * For unknown switches assume generation to be 1 to be
2071                 * on the safe side.
2072                 */
2073                tb_sw_warn(sw, "unsupported switch device id %#x\n",
2074                           sw->config.device_id);
2075                return 1;
2076        }
2077}
2078
2079static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
2080{
2081        int max_depth;
2082
2083        if (tb_switch_is_usb4(sw) ||
2084            (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
2085                max_depth = USB4_SWITCH_MAX_DEPTH;
2086        else
2087                max_depth = TB_SWITCH_MAX_DEPTH;
2088
2089        return depth > max_depth;
2090}
2091
2092/**
2093 * tb_switch_alloc() - allocate a switch
2094 * @tb: Pointer to the owning domain
2095 * @parent: Parent device for this switch
2096 * @route: Route string for this switch
2097 *
2098 * Allocates and initializes a switch. Will not upload configuration to
2099 * the switch. For that you need to call tb_switch_configure()
2100 * separately. The returned switch should be released by calling
2101 * tb_switch_put().
2102 *
2103 * Return: Pointer to the allocated switch or ERR_PTR() in case of
2104 * failure.
2105 */
2106struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
2107                                  u64 route)
2108{
2109        struct tb_switch *sw;
2110        int upstream_port;
2111        int i, ret, depth;
2112
2113        /* Unlock the downstream port so we can access the switch below */
2114        if (route) {
2115                struct tb_switch *parent_sw = tb_to_switch(parent);
2116                struct tb_port *down;
2117
2118                down = tb_port_at(route, parent_sw);
2119                tb_port_unlock(down);
2120        }
2121
2122        depth = tb_route_length(route);
2123
2124        upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
2125        if (upstream_port < 0)
2126                return ERR_PTR(upstream_port);
2127
2128        sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2129        if (!sw)
2130                return ERR_PTR(-ENOMEM);
2131
2132        sw->tb = tb;
2133        ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
2134        if (ret)
2135                goto err_free_sw_ports;
2136
2137        sw->generation = tb_switch_get_generation(sw);
2138
2139        tb_dbg(tb, "current switch config:\n");
2140        tb_dump_switch(tb, sw);
2141
2142        /* configure switch */
2143        sw->config.upstream_port_number = upstream_port;
2144        sw->config.depth = depth;
2145        sw->config.route_hi = upper_32_bits(route);
2146        sw->config.route_lo = lower_32_bits(route);
2147        sw->config.enabled = 0;
2148
2149        /* Make sure we do not exceed maximum topology limit */
2150        if (tb_switch_exceeds_max_depth(sw, depth)) {
2151                ret = -EADDRNOTAVAIL;
2152                goto err_free_sw_ports;
2153        }
2154
2155        /* initialize ports */
2156        sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
2157                                GFP_KERNEL);
2158        if (!sw->ports) {
2159                ret = -ENOMEM;
2160                goto err_free_sw_ports;
2161        }
2162
2163        for (i = 0; i <= sw->config.max_port_number; i++) {
2164                /* minimum setup for tb_find_cap and tb_drom_read to work */
2165                sw->ports[i].sw = sw;
2166                sw->ports[i].port = i;
2167
2168                /* Control port does not need HopID allocation */
2169                if (i) {
2170                        ida_init(&sw->ports[i].in_hopids);
2171                        ida_init(&sw->ports[i].out_hopids);
2172                }
2173        }
2174
2175        ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
2176        if (ret > 0)
2177                sw->cap_plug_events = ret;
2178
2179        ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
2180        if (ret > 0)
2181                sw->cap_lc = ret;
2182
2183        /* Root switch is always authorized */
2184        if (!route)
2185                sw->authorized = true;
2186
2187        device_initialize(&sw->dev);
2188        sw->dev.parent = parent;
2189        sw->dev.bus = &tb_bus_type;
2190        sw->dev.type = &tb_switch_type;
2191        sw->dev.groups = switch_groups;
2192        dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2193
2194        return sw;
2195
2196err_free_sw_ports:
2197        kfree(sw->ports);
2198        kfree(sw);
2199
2200        return ERR_PTR(ret);
2201}
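
    /*
     * Sketch of how a connection manager typically drives the switch
     * lifecycle using this function (error handling trimmed; see the
     * real callers in tb.c and icm.c):
     *
     *   sw = tb_switch_alloc(tb, &parent_sw->dev, route);
     *   if (IS_ERR(sw))
     *           return PTR_ERR(sw);
     *   if (tb_switch_configure(sw) || tb_switch_add(sw))
     *           tb_switch_put(sw);
     */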
2202
2203/**
2204 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
2205 * @tb: Pointer to the owning domain
2206 * @parent: Parent device for this switch
2207 * @route: Route string for this switch
2208 *
2209 * This creates a switch in safe mode. This means the switch pretty much
2210 * lacks all capabilities except the DMA configuration port, until it is
2211 * flashed with valid NVM firmware.
2212 *
2213 * The returned switch must be released by calling tb_switch_put().
2214 *
2215 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
2216 */
2217struct tb_switch *
2218tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
2219{
2220        struct tb_switch *sw;
2221
2222        sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2223        if (!sw)
2224                return ERR_PTR(-ENOMEM);
2225
2226        sw->tb = tb;
2227        sw->config.depth = tb_route_length(route);
2228        sw->config.route_hi = upper_32_bits(route);
2229        sw->config.route_lo = lower_32_bits(route);
2230        sw->safe_mode = true;
2231
2232        device_initialize(&sw->dev);
2233        sw->dev.parent = parent;
2234        sw->dev.bus = &tb_bus_type;
2235        sw->dev.type = &tb_switch_type;
2236        sw->dev.groups = switch_groups;
2237        dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2238
2239        return sw;
2240}
2241
2242/**
2243 * tb_switch_configure() - Uploads configuration to the switch
2244 * @sw: Switch to configure
2245 *
2246 * Call this function before the switch is added to the system. It will
2247 * upload the configuration to the switch and make it available for the
2248 * connection manager to use. Can be called for the switch again after
2249 * resume from low power states to re-initialize it.
2250 *
2251 * Return: %0 in case of success and negative errno in case of failure
2252 */
2253int tb_switch_configure(struct tb_switch *sw)
2254{
2255        struct tb *tb = sw->tb;
2256        u64 route;
2257        int ret;
2258
2259        route = tb_route(sw);
2260
2261        tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
2262               sw->config.enabled ? "restoring" : "initializing", route,
2263               tb_route_length(route), sw->config.upstream_port_number);
2264
2265        sw->config.enabled = 1;
2266
2267        if (tb_switch_is_usb4(sw)) {
2268                /*
2269                 * For USB4 devices, we need to program the CM version
2270                 * accordingly so that it knows to expose all the
2271                 * additional capabilities.
2272                 */
2273                sw->config.cmuv = USB4_VERSION_1_0;
2274
2275                /* Enumerate the switch */
2276                ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2277                                  ROUTER_CS_1, 4);
2278                if (ret)
2279                        return ret;
2280
2281                ret = usb4_switch_setup(sw);
2282        } else {
2283                if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
2284                        tb_sw_warn(sw, "unknown switch vendor id %#x\n",
2285                                   sw->config.vendor_id);
2286
2287                if (!sw->cap_plug_events) {
2288                        tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS, aborting\n");
2289                        return -ENODEV;
2290                }
2291
2292                /* Enumerate the switch */
2293                ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2294                                  ROUTER_CS_1, 3);
2295        }
2296        if (ret)
2297                return ret;
2298
2299        return tb_plug_events_active(sw, true);
2300}
2301
2302static int tb_switch_set_uuid(struct tb_switch *sw)
2303{
2304        bool uid = false;
2305        u32 uuid[4];
2306        int ret;
2307
2308        if (sw->uuid)
2309                return 0;
2310
2311        if (tb_switch_is_usb4(sw)) {
2312                ret = usb4_switch_read_uid(sw, &sw->uid);
2313                if (ret)
2314                        return ret;
2315                uid = true;
2316        } else {
2317                /*
2318                 * The newer controllers include a fused UUID as part of
2319                 * the link controller specific registers.
2320                 */
2321                ret = tb_lc_read_uuid(sw, uuid);
2322                if (ret) {
2323                        if (ret != -EINVAL)
2324                                return ret;
2325                        uid = true;
2326                }
2327        }
2328
2329        if (uid) {
2330                /*
2331                 * ICM generates the UUID based on the UID and fills the
2332                 * upper two words with ones. This does not strictly follow
2333                 * the UUID format but we want to be compatible with it so
2334                 * we do the same here.
2335                 */
2336                uuid[0] = sw->uid & 0xffffffff;
2337                uuid[1] = (sw->uid >> 32) & 0xffffffff;
2338                uuid[2] = 0xffffffff;
2339                uuid[3] = 0xffffffff;
2340        }
2341
2342        sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
2343        if (!sw->uuid)
2344                return -ENOMEM;
2345        return 0;
2346}
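
    /*
     * Example of the UID fallback above: a router with
     * UID 0x0123456789abcdef and no fused UUID ends up with the raw
     * UUID words { 0x89abcdef, 0x01234567, 0xffffffff, 0xffffffff },
     * matching what the ICM would report for the same router.
     */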
2347
2348static int tb_switch_add_dma_port(struct tb_switch *sw)
2349{
2350        u32 status;
2351        int ret;
2352
2353        switch (sw->generation) {
2354        case 2:
2355                /* Only root switch can be upgraded */
2356                if (tb_route(sw))
2357                        return 0;
2358
2359                fallthrough;
2360        case 3:
2361        case 4:
2362                ret = tb_switch_set_uuid(sw);
2363                if (ret)
2364                        return ret;
2365                break;
2366
2367        default:
2368                /*
2369                 * DMA port is the only thing available when the switch
2370                 * is in safe mode.
2371                 */
2372                if (!sw->safe_mode)
2373                        return 0;
2374                break;
2375        }
2376
2377        if (sw->no_nvm_upgrade)
2378                return 0;
2379
2380        if (tb_switch_is_usb4(sw)) {
2381                ret = usb4_switch_nvm_authenticate_status(sw, &status);
2382                if (ret)
2383                        return ret;
2384
2385                if (status) {
2386                        tb_sw_info(sw, "switch flash authentication failed\n");
2387                        nvm_set_auth_status(sw, status);
2388                }
2389
2390                return 0;
2391        }
2392
2393        /* Root switch DMA port requires running firmware */
2394        if (!tb_route(sw) && !tb_switch_is_icm(sw))
2395                return 0;
2396
2397        sw->dma_port = dma_port_alloc(sw);
2398        if (!sw->dma_port)
2399                return 0;
2400
2401        /*
2402         * If there is a status already set then authentication failed
2403         * when dma_port_flash_update_auth() returned. Power cycling is
2404         * not needed (it was done already) so the only thing we do here
2405         * is to unblock runtime PM of the root port.
2406         */
2407        nvm_get_auth_status(sw, &status);
2408        if (status) {
2409                if (!tb_route(sw))
2410                        nvm_authenticate_complete_dma_port(sw);
2411                return 0;
2412        }
2413
2414        /*
2415         * Check the status of the previous flash authentication. If one
2416         * took place we need to power cycle the switch in any case to
2417         * make it functional again.
2418         */
2419        ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
2420        if (ret <= 0)
2421                return ret;
2422
2423        /* Now we can allow root port to suspend again */
2424        if (!tb_route(sw))
2425                nvm_authenticate_complete_dma_port(sw);
2426
2427        if (status) {
2428                tb_sw_info(sw, "switch flash authentication failed\n");
2429                nvm_set_auth_status(sw, status);
2430        }
2431
2432        tb_sw_info(sw, "power cycling the switch now\n");
2433        dma_port_power_cycle(sw->dma_port);
2434
2435        /*
2436         * We return an error here which causes adding the switch to
2437         * fail. It should appear back after the power cycle is complete.
2438         */
2439        return -ESHUTDOWN;
2440}
2441
2442static void tb_switch_default_link_ports(struct tb_switch *sw)
2443{
2444        int i;
2445
2446        for (i = 1; i <= sw->config.max_port_number; i += 2) {
2447                struct tb_port *port = &sw->ports[i];
2448                struct tb_port *subordinate;
2449
2450                if (!tb_port_is_null(port))
2451                        continue;
2452
2453                /* Check for the subordinate port */
2454                if (i == sw->config.max_port_number ||
2455                    !tb_port_is_null(&sw->ports[i + 1]))
2456                        continue;
2457
2458                /* Link them if not already done (by the DROM) */
2459                subordinate = &sw->ports[i + 1];
2460                if (!port->dual_link_port && !subordinate->dual_link_port) {
2461                        port->link_nr = 0;
2462                        port->dual_link_port = subordinate;
2463                        subordinate->link_nr = 1;
2464                        subordinate->dual_link_port = port;
2465
2466                        tb_sw_dbg(sw, "linked ports %d <-> %d\n",
2467                                  port->port, subordinate->port);
2468                }
2469        }
2470}
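
    /*
     * For example, on a router whose null (lane) adapters are ports
     * 1..4 the loop above produces the default pairings 1 <-> 2 and
     * 3 <-> 4, with link_nr 0 on the odd (primary) lane, unless the
     * DROM already described the dual-link relation.
     */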
2471
2472static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
2473{
2474        const struct tb_port *up = tb_upstream_port(sw);
2475
2476        if (!up->dual_link_port || !up->dual_link_port->remote)
2477                return false;
2478
2479        if (tb_switch_is_usb4(sw))
2480                return usb4_switch_lane_bonding_possible(sw);
2481        return tb_lc_lane_bonding_possible(sw);
2482}
2483
2484static int tb_switch_update_link_attributes(struct tb_switch *sw)
2485{
2486        struct tb_port *up;
2487        bool change = false;
2488        int ret;
2489
2490        if (!tb_route(sw) || tb_switch_is_icm(sw))
2491                return 0;
2492
2493        up = tb_upstream_port(sw);
2494
2495        ret = tb_port_get_link_speed(up);
2496        if (ret < 0)
2497                return ret;
2498        if (sw->link_speed != ret)
2499                change = true;
2500        sw->link_speed = ret;
2501
2502        ret = tb_port_get_link_width(up);
2503        if (ret < 0)
2504                return ret;
2505        if (sw->link_width != ret)
2506                change = true;
2507        sw->link_width = ret;
2508
2509        /* Notify userspace that there is a possible link attribute change */
2510        if (device_is_registered(&sw->dev) && change)
2511                kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
2512
2513        return 0;
2514}
2515
2516/**
2517 * tb_switch_lane_bonding_enable() - Enable lane bonding
2518 * @sw: Switch to enable lane bonding
2519 *
2520 * The connection manager can call this function to enable lane bonding
2521 * of a switch. If conditions are correct and both switches support the
2522 * feature, lanes are bonded. It is safe to call this for any switch.
2523 */
2524int tb_switch_lane_bonding_enable(struct tb_switch *sw)
2525{
2526        struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2527        struct tb_port *up, *down;
2528        u64 route = tb_route(sw);
2529        int ret;
2530
2531        if (!route)
2532                return 0;
2533
2534        if (!tb_switch_lane_bonding_possible(sw))
2535                return 0;
2536
2537        up = tb_upstream_port(sw);
2538        down = tb_port_at(route, parent);
2539
2540        if (!tb_port_is_width_supported(up, 2) ||
2541            !tb_port_is_width_supported(down, 2))
2542                return 0;
2543
2544        ret = tb_port_lane_bonding_enable(up);
2545        if (ret) {
2546                tb_port_warn(up, "failed to enable lane bonding\n");
2547                return ret;
2548        }
2549
2550        ret = tb_port_lane_bonding_enable(down);
2551        if (ret) {
2552                tb_port_warn(down, "failed to enable lane bonding\n");
2553                tb_port_lane_bonding_disable(up);
2554                return ret;
2555        }
2556
2557        ret = tb_port_wait_for_link_width(down, 2, 100);
2558        if (ret) {
2559                tb_port_warn(down, "timeout enabling lane bonding\n");
2560                return ret;
2561        }
2562
2563        tb_port_update_credits(down);
2564        tb_port_update_credits(up);
2565        tb_switch_update_link_attributes(sw);
2566
2567        tb_sw_dbg(sw, "lane bonding enabled\n");
2568        return ret;
2569}
2570
2571/**
2572 * tb_switch_lane_bonding_disable() - Disable lane bonding
2573 * @sw: Switch whose lane bonding to disable
2574 *
2575 * Disables lane bonding between @sw and its parent. This can be called
2576 * even if lanes were not bonded originally.
2577 */
2578void tb_switch_lane_bonding_disable(struct tb_switch *sw)
2579{
2580        struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2581        struct tb_port *up, *down;
2582
2583        if (!tb_route(sw))
2584                return;
2585
2586        up = tb_upstream_port(sw);
2587        if (!up->bonded)
2588                return;
2589
2590        down = tb_port_at(tb_route(sw), parent);
2591
2592        tb_port_lane_bonding_disable(up);
2593        tb_port_lane_bonding_disable(down);
2594
2595        /*
2596         * It is fine if we get other errors as the router might have
2597         * been unplugged.
2598         */
2599        if (tb_port_wait_for_link_width(down, 1, 100) == -ETIMEDOUT)
2600                tb_sw_warn(sw, "timeout disabling lane bonding\n");
2601
2602        tb_port_update_credits(down);
2603        tb_port_update_credits(up);
2604        tb_switch_update_link_attributes(sw);
2605
2606        tb_sw_dbg(sw, "lane bonding disabled\n");
2607}
2608
2609/**
2610 * tb_switch_configure_link() - Set link configured
2611 * @sw: Switch whose link is configured
2612 *
2613 * Sets the link upstream from @sw configured (from both ends) so that
2614 * it will not be disconnected when the domain exits sleep. Can be
2615 * called for any switch.
2616 *
2617 * It is recommended that this is called after lane bonding is enabled.
2618 *
2619 * Returns %0 on success and negative errno in case of error.
2620 */
2621int tb_switch_configure_link(struct tb_switch *sw)
2622{
2623        struct tb_port *up, *down;
2624        int ret;
2625
2626        if (!tb_route(sw) || tb_switch_is_icm(sw))
2627                return 0;
2628
2629        up = tb_upstream_port(sw);
2630        if (tb_switch_is_usb4(up->sw))
2631                ret = usb4_port_configure(up);
2632        else
2633                ret = tb_lc_configure_port(up);
2634        if (ret)
2635                return ret;
2636
2637        down = up->remote;
2638        if (tb_switch_is_usb4(down->sw))
2639                return usb4_port_configure(down);
2640        return tb_lc_configure_port(down);
2641}
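
    /*
     * Sketch of the order in which a connection manager typically uses
     * the two helpers when a new router is discovered (see
     * tb_scan_port() for the real sequence):
     *
     *   if (tb_switch_lane_bonding_enable(sw))
     *           tb_sw_warn(sw, "failed to enable lane bonding\n");
     *   tb_switch_configure_link(sw);
     */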
2642
2643/**
2644 * tb_switch_unconfigure_link() - Unconfigure link
2645 * @sw: Switch whose link is unconfigured
2646 *
2647 * Sets the link unconfigured so the @sw will be disconnected if the
2648 * domain exits sleep.
2649 */
2650void tb_switch_unconfigure_link(struct tb_switch *sw)
2651{
2652        struct tb_port *up, *down;
2653
2654        if (sw->is_unplugged)
2655                return;
2656        if (!tb_route(sw) || tb_switch_is_icm(sw))
2657                return;
2658
2659        up = tb_upstream_port(sw);
2660        if (tb_switch_is_usb4(up->sw))
2661                usb4_port_unconfigure(up);
2662        else
2663                tb_lc_unconfigure_port(up);
2664
2665        down = up->remote;
2666        if (tb_switch_is_usb4(down->sw))
2667                usb4_port_unconfigure(down);
2668        else
2669                tb_lc_unconfigure_port(down);
2670}
2671
2672static void tb_switch_credits_init(struct tb_switch *sw)
2673{
2674        if (tb_switch_is_icm(sw))
2675                return;
2676        if (!tb_switch_is_usb4(sw))
2677                return;
2678        if (usb4_switch_credits_init(sw))
2679                tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
2680}
2681
2682/**
2683 * tb_switch_add() - Add a switch to the domain
2684 * @sw: Switch to add
2685 *
2686 * This is the last step in adding a switch to the domain. It will read
2687 * identification information from the DROM and initialize ports so that
2688 * they can be used to connect other switches. The switch will be
2689 * exposed to userspace when this function successfully returns. To
2690 * remove and release the switch, call tb_switch_remove().
2691 *
2692 * Return: %0 in case of success and negative errno in case of failure
2693 */
2694int tb_switch_add(struct tb_switch *sw)
2695{
2696        int i, ret;
2697
2698        /*
2699         * Initialize the DMA control port now before we read the DROM.
2700         * Recent host controllers have a more complete DROM in NVM that
2701         * includes vendor and model identification strings which we then
2702         * expose to userspace. The NVM can be accessed through the DMA
2703         * configuration based mailbox.
2704         */
2705        ret = tb_switch_add_dma_port(sw);
2706        if (ret) {
2707                dev_err(&sw->dev, "failed to add DMA port\n");
2708                return ret;
2709        }
2710
2711        if (!sw->safe_mode) {
2712                tb_switch_credits_init(sw);
2713
2714                /* read drom */
2715                ret = tb_drom_read(sw);
2716                if (ret) {
2717                        dev_err(&sw->dev, "reading DROM failed\n");
2718                        return ret;
2719                }
2720                tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
2721
2722                tb_check_quirks(sw);
2723
2724                ret = tb_switch_set_uuid(sw);
2725                if (ret) {
2726                        dev_err(&sw->dev, "failed to set UUID\n");
2727                        return ret;
2728                }
2729
2730                for (i = 0; i <= sw->config.max_port_number; i++) {
2731                        if (sw->ports[i].disabled) {
2732                                tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
2733                                continue;
2734                        }
2735                        ret = tb_init_port(&sw->ports[i]);
2736                        if (ret) {
2737                                dev_err(&sw->dev, "failed to initialize port %d\n", i);
2738                                return ret;
2739                        }
2740                }
2741
2742                tb_switch_default_link_ports(sw);
2743
2744                ret = tb_switch_update_link_attributes(sw);
2745                if (ret)
2746                        return ret;
2747
2748                ret = tb_switch_tmu_init(sw);
2749                if (ret)
2750                        return ret;
2751        }
2752
2753        ret = device_add(&sw->dev);
2754        if (ret) {
2755                dev_err(&sw->dev, "failed to add device: %d\n", ret);
2756                return ret;
2757        }
2758
2759        if (tb_route(sw)) {
2760                dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
2761                         sw->vendor, sw->device);
2762                if (sw->vendor_name && sw->device_name)
2763                        dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
2764                                 sw->device_name);
2765        }
2766
2767        ret = usb4_switch_add_ports(sw);
2768        if (ret) {
2769                dev_err(&sw->dev, "failed to add USB4 ports\n");
2770                goto err_del;
2771        }
2772
2773        ret = tb_switch_nvm_add(sw);
2774        if (ret) {
2775                dev_err(&sw->dev, "failed to add NVM devices\n");
2776                goto err_ports;
2777        }
2778
2779        /*
2780         * Thunderbolt routers do not generate wakeups themselves but
2781         * they forward wakeups from tunneled protocols, so enable
2782         * wakeup here.
2783         */
2784        device_init_wakeup(&sw->dev, true);
2785
2786        pm_runtime_set_active(&sw->dev);
2787        if (sw->rpm) {
2788                pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
2789                pm_runtime_use_autosuspend(&sw->dev);
2790                pm_runtime_mark_last_busy(&sw->dev);
2791                pm_runtime_enable(&sw->dev);
2792                pm_request_autosuspend(&sw->dev);
2793        }
2794
2795        tb_switch_debugfs_init(sw);
2796        return 0;
2797
2798err_ports:
2799        usb4_switch_remove_ports(sw);
2800err_del:
2801        device_del(&sw->dev);
2802
2803        return ret;
2804}
2805
2806/**
2807 * tb_switch_remove() - Remove and release a switch
2808 * @sw: Switch to remove
2809 *
2810 * This will remove the switch from the domain and release it once its
2811 * reference count drops to zero. If there are switches connected below
2812 * this switch, they will be removed as well.
2813 */
2814void tb_switch_remove(struct tb_switch *sw)
2815{
2816        struct tb_port *port;
2817
2818        tb_switch_debugfs_remove(sw);
2819
2820        if (sw->rpm) {
2821                pm_runtime_get_sync(&sw->dev);
2822                pm_runtime_disable(&sw->dev);
2823        }
2824
2825        /* port 0 is the switch itself and never has a remote */
2826        tb_switch_for_each_port(sw, port) {
2827                if (tb_port_has_remote(port)) {
2828                        tb_switch_remove(port->remote->sw);
2829                        port->remote = NULL;
2830                } else if (port->xdomain) {
2831                        tb_xdomain_remove(port->xdomain);
2832                        port->xdomain = NULL;
2833                }
2834
2835                /* Remove any downstream retimers */
2836                tb_retimer_remove_all(port);
2837        }
2838
2839        if (!sw->is_unplugged)
2840                tb_plug_events_active(sw, false);
2841
2842        tb_switch_nvm_remove(sw);
2843        usb4_switch_remove_ports(sw);
2844
2845        if (tb_route(sw))
2846                dev_info(&sw->dev, "device disconnected\n");
2847        device_unregister(&sw->dev);
2848}
2849
2850/**
2851 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
2852 * @sw: Router to mark unplugged
2853 */
2854void tb_sw_set_unplugged(struct tb_switch *sw)
2855{
2856        struct tb_port *port;
2857
2858        if (sw == sw->tb->root_switch) {
2859                tb_sw_WARN(sw, "cannot unplug root switch\n");
2860                return;
2861        }
2862        if (sw->is_unplugged) {
2863                tb_sw_WARN(sw, "is_unplugged already set\n");
2864                return;
2865        }
2866        sw->is_unplugged = true;
2867        tb_switch_for_each_port(sw, port) {
2868                if (tb_port_has_remote(port))
2869                        tb_sw_set_unplugged(port->remote->sw);
2870                else if (port->xdomain)
2871                        port->xdomain->is_unplugged = true;
2872        }
2873}
2874
2875static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
2876{
2877        if (flags)
2878                tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
2879        else
2880                tb_sw_dbg(sw, "disabling wakeup\n");
2881
2882        if (tb_switch_is_usb4(sw))
2883                return usb4_switch_set_wake(sw, flags);
2884        return tb_lc_set_wake(sw, flags);
2885}
2886
2887int tb_switch_resume(struct tb_switch *sw)
2888{
2889        struct tb_port *port;
2890        int err;
2891
2892        tb_sw_dbg(sw, "resuming switch\n");
2893
2894        /*
2895         * Check the UID of the connected switches except for the root
2896         * switch, which we assume cannot be removed.
2897         */
2898        if (tb_route(sw)) {
2899                u64 uid;
2900
2901                /*
2902                 * Check first that we can still read the switch config
2903                 * space. It may be that there is now another domain
2904                 * connected.
2905                 */
2906                err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
2907                if (err < 0) {
2908                        tb_sw_info(sw, "switch not present anymore\n");
2909                        return err;
2910                }
2911
2912                if (tb_switch_is_usb4(sw))
2913                        err = usb4_switch_read_uid(sw, &uid);
2914                else
2915                        err = tb_drom_read_uid_only(sw, &uid);
2916                if (err) {
2917                        tb_sw_warn(sw, "uid read failed\n");
2918                        return err;
2919                }
2920                if (sw->uid != uid) {
2921                        tb_sw_info(sw,
2922                                "changed while suspended (uid %#llx -> %#llx)\n",
2923                                sw->uid, uid);
2924                        return -ENODEV;
2925                }
2926        }
2927
2928        err = tb_switch_configure(sw);
2929        if (err)
2930                return err;
2931
2932        /* Disable wakes */
2933        tb_switch_set_wake(sw, 0);
2934
2935        err = tb_switch_tmu_init(sw);
2936        if (err)
2937                return err;
2938
2939        /* check for surviving downstream switches */
2940        tb_switch_for_each_port(sw, port) {
2941                if (!tb_port_is_null(port))
2942                        continue;
2943
2944                if (!tb_port_resume(port))
2945                        continue;
2946
2947                if (tb_wait_for_port(port, true) <= 0) {
2948                        tb_port_warn(port,
2949                                     "lost during suspend, disconnecting\n");
2950                        if (tb_port_has_remote(port))
2951                                tb_sw_set_unplugged(port->remote->sw);
2952                        else if (port->xdomain)
2953                                port->xdomain->is_unplugged = true;
2954                } else {
2955                        /*
2956                         * Always unlock the port so the downstream
2957                         * switch/domain is accessible.
2958                         */
2959                        if (tb_port_unlock(port))
2960                                tb_port_warn(port, "failed to unlock port\n");
2961                        if (port->remote && tb_switch_resume(port->remote->sw)) {
2962                                tb_port_warn(port,
2963                                             "lost during suspend, disconnecting\n");
2964                                tb_sw_set_unplugged(port->remote->sw);
2965                        }
2966                }
2967        }
2968        return 0;
2969}
2970
2971/**
2972 * tb_switch_suspend() - Put a switch to sleep
2973 * @sw: Switch to suspend
2974 * @runtime: Is this runtime suspend or system sleep
2975 *
2976 * Suspends the router and all its children. Enables wakes according to
2977 * the value of @runtime and then sets the sleep bit for the router. If
2978 * @sw is the host router the domain is ready to go to sleep once this
2979 * function returns.
2980 */
2981void tb_switch_suspend(struct tb_switch *sw, bool runtime)
2982{
2983        unsigned int flags = 0;
2984        struct tb_port *port;
2985        int err;
2986
2987        tb_sw_dbg(sw, "suspending switch\n");
2988
2989        err = tb_plug_events_active(sw, false);
2990        if (err)
2991                return;
2992
2993        tb_switch_for_each_port(sw, port) {
2994                if (tb_port_has_remote(port))
2995                        tb_switch_suspend(port->remote->sw, runtime);
2996        }
2997
2998        if (runtime) {
2999                /* Trigger wake when something is plugged in/out */
3000                flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
3001                flags |= TB_WAKE_ON_USB4;
3002                flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
3003        } else if (device_may_wakeup(&sw->dev)) {
3004                flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
3005        }
3006
3007        tb_switch_set_wake(sw, flags);
3008
3009        if (tb_switch_is_usb4(sw))
3010                usb4_switch_set_sleep(sw);
3011        else
3012                tb_lc_set_sleep(sw);
3013}
3014
3015/**
3016 * tb_switch_query_dp_resource() - Query availability of DP resource
3017 * @sw: Switch whose DP resource is queried
3018 * @in: DP IN port
3019 *
3020 * Queries availability of a DP resource for DP tunneling using switch
3021 * specific means. Returns %true if the resource is available.
3022 */
3023bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
3024{
3025        if (tb_switch_is_usb4(sw))
3026                return usb4_switch_query_dp_resource(sw, in);
3027        return tb_lc_dp_sink_query(sw, in);
3028}
3029
3030/**
3031 * tb_switch_alloc_dp_resource() - Allocate available DP resource
3032 * @sw: Switch whose DP resource is allocated
3033 * @in: DP IN port
3034 *
3035 * Allocates a DP resource for DP tunneling. The resource must be
3036 * available for this to succeed (see tb_switch_query_dp_resource()).
3037 * Returns %0 on success and negative errno otherwise.
3038 */
3039int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
3040{
3041        if (tb_switch_is_usb4(sw))
3042                return usb4_switch_alloc_dp_resource(sw, in);
3043        return tb_lc_dp_sink_alloc(sw, in);
3044}
3045
3046/**
3047 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
3048 * @sw: Switch whose DP resource is de-allocated
3049 * @in: DP IN port
3050 *
3051 * De-allocates a DP resource that was previously allocated for DP
3052 * tunneling.
3053 */
3054void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
3055{
3056        int ret;
3057
3058        if (tb_switch_is_usb4(sw))
3059                ret = usb4_switch_dealloc_dp_resource(sw, in);
3060        else
3061                ret = tb_lc_dp_sink_dealloc(sw, in);
3062
3063        if (ret)
3064                tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
3065                           in->port);
3066}
3067
3068struct tb_sw_lookup {
3069        struct tb *tb;
3070        u8 link;
3071        u8 depth;
3072        const uuid_t *uuid;
3073        u64 route;
3074};
3075
3076static int tb_switch_match(struct device *dev, const void *data)
3077{
3078        struct tb_switch *sw = tb_to_switch(dev);
3079        const struct tb_sw_lookup *lookup = data;
3080
3081        if (!sw)
3082                return 0;
3083        if (sw->tb != lookup->tb)
3084                return 0;
3085
3086        if (lookup->uuid)
3087                return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
3088
3089        if (lookup->route) {
3090                return sw->config.route_lo == lower_32_bits(lookup->route) &&
3091                       sw->config.route_hi == upper_32_bits(lookup->route);
3092        }
3093
3094        /* Root switch is matched only by depth */
3095        if (!lookup->depth)
3096                return !sw->depth;
3097
3098        return sw->link == lookup->link && sw->depth == lookup->depth;
3099}
3100
3101/**
3102 * tb_switch_find_by_link_depth() - Find switch by link and depth
3103 * @tb: Domain the switch belongs
3104 * @link: Link number the switch is connected
3105 * @depth: Depth of the switch in link
3106 *
3107 * Returned switch has reference count increased so the caller needs to
3108 * call tb_switch_put() when done with the switch.
3109 */
3110struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
3111{
3112        struct tb_sw_lookup lookup;
3113        struct device *dev;
3114
3115        memset(&lookup, 0, sizeof(lookup));
3116        lookup.tb = tb;
3117        lookup.link = link;
3118        lookup.depth = depth;
3119
3120        dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3121        if (dev)
3122                return tb_to_switch(dev);
3123
3124        return NULL;
3125}
3126
3127/**
3128 * tb_switch_find_by_uuid() - Find switch by UUID
3129 * @tb: Domain the switch belongs
3130 * @uuid: UUID to look for
3131 *
3132 * Returned switch has reference count increased so the caller needs to
3133 * call tb_switch_put() when done with the switch.
3134 */
3135struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
3136{
3137        struct tb_sw_lookup lookup;
3138        struct device *dev;
3139
3140        memset(&lookup, 0, sizeof(lookup));
3141        lookup.tb = tb;
3142        lookup.uuid = uuid;
3143
3144        dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3145        if (dev)
3146                return tb_to_switch(dev);
3147
3148        return NULL;
3149}
3150
3151/**
3152 * tb_switch_find_by_route() - Find switch by route string
3153 * @tb: Domain the switch belongs
3154 * @route: Route string to look for
3155 *
3156 * Returned switch has reference count increased so the caller needs to
3157 * call tb_switch_put() when done with the switch.
3158 */
3159struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
3160{
3161        struct tb_sw_lookup lookup;
3162        struct device *dev;
3163
3164        if (!route)
3165                return tb_switch_get(tb->root_switch);
3166
3167        memset(&lookup, 0, sizeof(lookup));
3168        lookup.tb = tb;
3169        lookup.route = route;
3170
3171        dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3172        if (dev)
3173                return tb_to_switch(dev);
3174
3175        return NULL;
3176}
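
    /*
     * All three lookup helpers above return the switch with its
     * reference count raised, so callers must balance it; a minimal
     * sketch:
     *
     *   sw = tb_switch_find_by_route(tb, route);
     *   if (sw) {
     *           ... use the switch ...
     *           tb_switch_put(sw);
     *   }
     */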
3177
3178/**
3179 * tb_switch_find_port() - return the first port of @type on @sw or NULL
3180 * @sw: Switch to find the port from
3181 * @type: Port type to look for
3182 */
3183struct tb_port *tb_switch_find_port(struct tb_switch *sw,
3184                                    enum tb_port_type type)
3185{
3186        struct tb_port *port;
3187
3188        tb_switch_for_each_port(sw, port) {
3189                if (port->config.type == type)
3190                        return port;
3191        }
3192
3193        return NULL;
3194}
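
    /*
     * For example, DP tunneling code can pick the first DP IN adapter
     * of a router with (illustrative):
     *
     *   in = tb_switch_find_port(sw, TB_TYPE_DP_HDMI_IN);
     */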
3195