linux/drivers/thunderbolt/switch.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Thunderbolt driver - switch/port utility functions
   4 *
   5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
   6 * Copyright (C) 2018, Intel Corporation
   7 */
   8
   9#include <linux/delay.h>
  10#include <linux/idr.h>
  11#include <linux/nvmem-provider.h>
  12#include <linux/pm_runtime.h>
  13#include <linux/sched/signal.h>
  14#include <linux/sizes.h>
  15#include <linux/slab.h>
  16#include <linux/vmalloc.h>
  17
  18#include "tb.h"
  19
  20/* Switch NVM support */
  21
  22#define NVM_DEVID               0x05
  23#define NVM_VERSION             0x08
  24#define NVM_CSS                 0x10
  25#define NVM_FLASH_SIZE          0x45
  26
  27#define NVM_MIN_SIZE            SZ_32K
  28#define NVM_MAX_SIZE            SZ_512K
  29
  30static DEFINE_IDA(nvm_ida);
  31
  32struct nvm_auth_status {
  33        struct list_head list;
  34        uuid_t uuid;
  35        u32 status;
  36};
  37
  38/*
   39 * Hold NVM authentication failure status per switch. This information
  40 * needs to stay around even when the switch gets power cycled so we
  41 * keep it separately.
  42 */
  43static LIST_HEAD(nvm_auth_status_cache);
  44static DEFINE_MUTEX(nvm_auth_status_lock);
  45
  46static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
  47{
  48        struct nvm_auth_status *st;
  49
  50        list_for_each_entry(st, &nvm_auth_status_cache, list) {
  51                if (uuid_equal(&st->uuid, sw->uuid))
  52                        return st;
  53        }
  54
  55        return NULL;
  56}
  57
  58static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
  59{
  60        struct nvm_auth_status *st;
  61
  62        mutex_lock(&nvm_auth_status_lock);
  63        st = __nvm_get_auth_status(sw);
  64        mutex_unlock(&nvm_auth_status_lock);
  65
  66        *status = st ? st->status : 0;
  67}
  68
  69static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
  70{
  71        struct nvm_auth_status *st;
  72
  73        if (WARN_ON(!sw->uuid))
  74                return;
  75
  76        mutex_lock(&nvm_auth_status_lock);
  77        st = __nvm_get_auth_status(sw);
  78
  79        if (!st) {
  80                st = kzalloc(sizeof(*st), GFP_KERNEL);
  81                if (!st)
  82                        goto unlock;
  83
  84                memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
  85                INIT_LIST_HEAD(&st->list);
  86                list_add_tail(&st->list, &nvm_auth_status_cache);
  87        }
  88
  89        st->status = status;
  90unlock:
  91        mutex_unlock(&nvm_auth_status_lock);
  92}
  93
  94static void nvm_clear_auth_status(const struct tb_switch *sw)
  95{
  96        struct nvm_auth_status *st;
  97
  98        mutex_lock(&nvm_auth_status_lock);
  99        st = __nvm_get_auth_status(sw);
 100        if (st) {
 101                list_del(&st->list);
 102                kfree(st);
 103        }
 104        mutex_unlock(&nvm_auth_status_lock);
 105}
 106
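/*
 * Typical lifecycle of the cache above (an illustrative sketch, not a
 * formal state machine):
 *
 *	nvm_authenticate_device() fails  -> nvm_set_auth_status(sw, status)
 *	userspace reads nvm_authenticate -> nvm_get_auth_status(sw, &status)
 *	new upgrade attempt or unplug    -> nvm_clear_auth_status(sw)
 *
 * The entries are keyed by the switch UUID so the status survives the
 * power cycle that follows a failed NVM authentication.
 */
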
 107static int nvm_validate_and_write(struct tb_switch *sw)
 108{
 109        unsigned int image_size, hdr_size;
 110        const u8 *buf = sw->nvm->buf;
 111        u16 ds_size;
 112        int ret;
 113
 114        if (!buf)
 115                return -EINVAL;
 116
 117        image_size = sw->nvm->buf_data_size;
 118        if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
 119                return -EINVAL;
 120
 121        /*
 122         * FARB pointer must point inside the image and must at least
 123         * contain parts of the digital section we will be reading here.
 124         */
 125        hdr_size = (*(u32 *)buf) & 0xffffff;
 126        if (hdr_size + NVM_DEVID + 2 >= image_size)
 127                return -EINVAL;
 128
 129        /* Digital section start should be aligned to 4k page */
 130        if (!IS_ALIGNED(hdr_size, SZ_4K))
 131                return -EINVAL;
 132
 133        /*
 134         * Read digital section size and check that it also fits inside
 135         * the image.
 136         */
 137        ds_size = *(u16 *)(buf + hdr_size);
 138        if (ds_size >= image_size)
 139                return -EINVAL;
 140
 141        if (!sw->safe_mode) {
 142                u16 device_id;
 143
 144                /*
 145                 * Make sure the device ID in the image matches the one
 146                 * we read from the switch config space.
 147                 */
 148                device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
 149                if (device_id != sw->config.device_id)
 150                        return -EINVAL;
 151
 152                if (sw->generation < 3) {
 153                        /* Write CSS headers first */
 154                        ret = dma_port_flash_write(sw->dma_port,
 155                                DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
 156                                DMA_PORT_CSS_MAX_SIZE);
 157                        if (ret)
 158                                return ret;
 159                }
 160
 161                /* Skip headers in the image */
 162                buf += hdr_size;
 163                image_size -= hdr_size;
 164        }
 165
 166        return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
 167}
 168
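/*
 * Rough layout of the NVM image that the validation above assumes (a
 * sketch reconstructed from the checks, not a formal specification):
 *
 *	offset 0	FARB pointer; low 24 bits give hdr_size
 *	NVM_CSS		CSS headers, written separately on gen < 3 parts
 *	hdr_size	digital section, 4k aligned:
 *	  +0		  16-bit digital section size (ds_size)
 *	  +NVM_DEVID	  16-bit device ID, must match the switch
 */
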
 169static int nvm_authenticate_host(struct tb_switch *sw)
 170{
 171        int ret;
 172
 173        /*
 174         * Root switch NVM upgrade requires that we disconnect the
 175         * existing paths first (in case it is not in safe mode
 176         * already).
 177         */
 178        if (!sw->safe_mode) {
 179                ret = tb_domain_disconnect_all_paths(sw->tb);
 180                if (ret)
 181                        return ret;
 182                /*
 183                 * The host controller goes away pretty soon after this if
  184                 * everything goes well, so getting a timeout is expected.
 185                 */
 186                ret = dma_port_flash_update_auth(sw->dma_port);
 187                return ret == -ETIMEDOUT ? 0 : ret;
 188        }
 189
 190        /*
 191         * From safe mode we can get out by just power cycling the
 192         * switch.
 193         */
 194        dma_port_power_cycle(sw->dma_port);
 195        return 0;
 196}
 197
 198static int nvm_authenticate_device(struct tb_switch *sw)
 199{
 200        int ret, retries = 10;
 201
 202        ret = dma_port_flash_update_auth(sw->dma_port);
 203        if (ret && ret != -ETIMEDOUT)
 204                return ret;
 205
 206        /*
 207         * Poll here for the authentication status. It takes some time
  208         * for the device to respond (we get a timeout for a while). Once
  209         * we get a response the device needs to be power cycled in order
  210         * for the new NVM to be taken into use.
 211         */
 212        do {
 213                u32 status;
 214
 215                ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
 216                if (ret < 0 && ret != -ETIMEDOUT)
 217                        return ret;
 218                if (ret > 0) {
 219                        if (status) {
 220                                tb_sw_warn(sw, "failed to authenticate NVM\n");
 221                                nvm_set_auth_status(sw, status);
 222                        }
 223
 224                        tb_sw_info(sw, "power cycling the switch now\n");
 225                        dma_port_power_cycle(sw->dma_port);
 226                        return 0;
 227                }
 228
 229                msleep(500);
 230        } while (--retries);
 231
 232        return -ETIMEDOUT;
 233}
 234
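/*
 * Note on the nvmem accessors below: the domain lock is taken with
 * mutex_trylock() and contention is turned into restart_syscall()
 * (-ERESTARTNOINTR), so the read()/write() system call is
 * transparently restarted instead of sleeping here, presumably to
 * avoid lock ordering problems during switch removal.
 */
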
 235static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
 236                              size_t bytes)
 237{
 238        struct tb_switch *sw = priv;
 239        int ret;
 240
 241        pm_runtime_get_sync(&sw->dev);
 242
 243        if (!mutex_trylock(&sw->tb->lock)) {
 244                ret = restart_syscall();
 245                goto out;
 246        }
 247
 248        ret = dma_port_flash_read(sw->dma_port, offset, val, bytes);
 249        mutex_unlock(&sw->tb->lock);
 250
 251out:
 252        pm_runtime_mark_last_busy(&sw->dev);
 253        pm_runtime_put_autosuspend(&sw->dev);
 254
 255        return ret;
 256}
 257
 258static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
 259                               size_t bytes)
 260{
 261        struct tb_switch *sw = priv;
 262        int ret = 0;
 263
 264        if (!mutex_trylock(&sw->tb->lock))
 265                return restart_syscall();
 266
 267        /*
 268         * Since writing the NVM image might require some special steps,
 269         * for example when CSS headers are written, we cache the image
 270         * locally here and handle the special cases when the user asks
 271         * us to authenticate the image.
 272         */
 273        if (!sw->nvm->buf) {
 274                sw->nvm->buf = vmalloc(NVM_MAX_SIZE);
 275                if (!sw->nvm->buf) {
 276                        ret = -ENOMEM;
 277                        goto unlock;
 278                }
 279        }
 280
 281        sw->nvm->buf_data_size = offset + bytes;
 282        memcpy(sw->nvm->buf + offset, val, bytes);
 283
 284unlock:
 285        mutex_unlock(&sw->tb->lock);
 286
 287        return ret;
 288}
 289
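/*
 * Note that tb_switch_nvm_write() above only fills the local buffer;
 * nothing is sent to the flash until the user writes to the
 * nvm_authenticate attribute. Setting buf_data_size to offset + bytes
 * also assumes the image is written linearly from offset 0, which is
 * what a sequential write of the image file through nvmem produces.
 */
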
 290static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
 291                                           size_t size, bool active)
 292{
 293        struct nvmem_config config;
 294
 295        memset(&config, 0, sizeof(config));
 296
 297        if (active) {
 298                config.name = "nvm_active";
 299                config.reg_read = tb_switch_nvm_read;
 300                config.read_only = true;
 301        } else {
 302                config.name = "nvm_non_active";
 303                config.reg_write = tb_switch_nvm_write;
 304                config.root_only = true;
 305        }
 306
 307        config.id = id;
 308        config.stride = 4;
 309        config.word_size = 4;
 310        config.size = size;
 311        config.dev = &sw->dev;
 312        config.owner = THIS_MODULE;
 313        config.priv = sw;
 314
 315        return nvmem_register(&config);
 316}
 317
 318static int tb_switch_nvm_add(struct tb_switch *sw)
 319{
 320        struct nvmem_device *nvm_dev;
 321        struct tb_switch_nvm *nvm;
 322        u32 val;
 323        int ret;
 324
 325        if (!sw->dma_port)
 326                return 0;
 327
 328        nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
 329        if (!nvm)
 330                return -ENOMEM;
 331
 332        nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
 333
 334        /*
 335         * If the switch is in safe-mode the only accessible portion of
 336         * the NVM is the non-active one where userspace is expected to
 337         * write new functional NVM.
 338         */
 339        if (!sw->safe_mode) {
 340                u32 nvm_size, hdr_size;
 341
 342                ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
 343                                          sizeof(val));
 344                if (ret)
 345                        goto err_ida;
 346
 347                hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
 348                nvm_size = (SZ_1M << (val & 7)) / 8;
 349                nvm_size = (nvm_size - hdr_size) / 2;
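                /*
                 * Worked example of the math above: val & 7 == 1 means a
                 * 2 Mbit part, i.e. (SZ_1M << 1) / 8 = 256 KB of flash;
                 * minus the 16 KB header and halved, that leaves 120 KB
                 * per image, presumably because the flash holds both the
                 * active and the non-active image.
                 */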
 350
 351                ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
 352                                          sizeof(val));
 353                if (ret)
 354                        goto err_ida;
 355
 356                nvm->major = val >> 16;
 357                nvm->minor = val >> 8;
 358
 359                nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true);
 360                if (IS_ERR(nvm_dev)) {
 361                        ret = PTR_ERR(nvm_dev);
 362                        goto err_ida;
 363                }
 364                nvm->active = nvm_dev;
 365        }
 366
 367        nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
 368        if (IS_ERR(nvm_dev)) {
 369                ret = PTR_ERR(nvm_dev);
 370                goto err_nvm_active;
 371        }
 372        nvm->non_active = nvm_dev;
 373
 374        sw->nvm = nvm;
 375        return 0;
 376
 377err_nvm_active:
 378        if (nvm->active)
 379                nvmem_unregister(nvm->active);
 380err_ida:
 381        ida_simple_remove(&nvm_ida, nvm->id);
 382        kfree(nvm);
 383
 384        return ret;
 385}
 386
 387static void tb_switch_nvm_remove(struct tb_switch *sw)
 388{
 389        struct tb_switch_nvm *nvm;
 390
 391        nvm = sw->nvm;
 392        sw->nvm = NULL;
 393
 394        if (!nvm)
 395                return;
 396
 397        /* Remove authentication status in case the switch is unplugged */
 398        if (!nvm->authenticating)
 399                nvm_clear_auth_status(sw);
 400
 401        nvmem_unregister(nvm->non_active);
 402        if (nvm->active)
 403                nvmem_unregister(nvm->active);
 404        ida_simple_remove(&nvm_ida, nvm->id);
 405        vfree(nvm->buf);
 406        kfree(nvm);
 407}
 408
 409/* port utility functions */
 410
 411static const char *tb_port_type(struct tb_regs_port_header *port)
 412{
 413        switch (port->type >> 16) {
 414        case 0:
 415                switch ((u8) port->type) {
 416                case 0:
 417                        return "Inactive";
 418                case 1:
 419                        return "Port";
 420                case 2:
 421                        return "NHI";
 422                default:
 423                        return "unknown";
 424                }
 425        case 0x2:
 426                return "Ethernet";
 427        case 0x8:
 428                return "SATA";
 429        case 0xe:
 430                return "DP/HDMI";
 431        case 0x10:
 432                return "PCIe";
 433        case 0x20:
 434                return "USB";
 435        default:
 436                return "unknown";
 437        }
 438}
 439
 440static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
 441{
 442        tb_dbg(tb,
 443               " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
 444               port->port_number, port->vendor_id, port->device_id,
 445               port->revision, port->thunderbolt_version, tb_port_type(port),
 446               port->type);
 447        tb_dbg(tb, "  Max hop id (in/out): %d/%d\n",
 448               port->max_in_hop_id, port->max_out_hop_id);
 449        tb_dbg(tb, "  Max counters: %d\n", port->max_counters);
 450        tb_dbg(tb, "  NFC Credits: %#x\n", port->nfc_credits);
 451}
 452
 453/**
 454 * tb_port_state() - get connectedness state of a port
 455 *
  456 * The port must have a TB_PORT_CAP_PHY capability (i.e. it should be a real port).
 457 *
 458 * Return: Returns an enum tb_port_state on success or an error code on failure.
 459 */
 460static int tb_port_state(struct tb_port *port)
 461{
 462        struct tb_cap_phy phy;
 463        int res;
 464        if (port->cap_phy == 0) {
 465                tb_port_WARN(port, "does not have a PHY\n");
 466                return -EINVAL;
 467        }
 468        res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
 469        if (res)
 470                return res;
 471        return phy.state;
 472}
 473
 474/**
 475 * tb_wait_for_port() - wait for a port to become ready
 476 *
 477 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 478 * wait_if_unplugged is set then we also wait if the port is in state
 479 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 480 * switch resume). Otherwise we only wait if a device is registered but the link
 481 * has not yet been established.
 482 *
 483 * Return: Returns an error code on failure. Returns 0 if the port is not
 484 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 485 * if the port is connected and in state TB_PORT_UP.
 486 */
 487int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
 488{
 489        int retries = 10;
 490        int state;
 491        if (!port->cap_phy) {
 492                tb_port_WARN(port, "does not have PHY\n");
 493                return -EINVAL;
 494        }
 495        if (tb_is_upstream_port(port)) {
 496                tb_port_WARN(port, "is the upstream port\n");
 497                return -EINVAL;
 498        }
 499
 500        while (retries--) {
 501                state = tb_port_state(port);
 502                if (state < 0)
 503                        return state;
 504                if (state == TB_PORT_DISABLED) {
 505                        tb_port_dbg(port, "is disabled (state: 0)\n");
 506                        return 0;
 507                }
 508                if (state == TB_PORT_UNPLUGGED) {
 509                        if (wait_if_unplugged) {
 510                                /* used during resume */
 511                                tb_port_dbg(port,
 512                                            "is unplugged (state: 7), retrying...\n");
 513                                msleep(100);
 514                                continue;
 515                        }
 516                        tb_port_dbg(port, "is unplugged (state: 7)\n");
 517                        return 0;
 518                }
 519                if (state == TB_PORT_UP) {
 520                        tb_port_dbg(port, "is connected, link is up (state: 2)\n");
 521                        return 1;
 522                }
 523
 524                /*
 525                 * After plug-in the state is TB_PORT_CONNECTING. Give it some
 526                 * time.
 527                 */
 528                tb_port_dbg(port,
 529                            "is connected, link is not up (state: %d), retrying...\n",
 530                            state);
 531                msleep(100);
 532        }
 533        tb_port_warn(port,
 534                     "failed to reach state TB_PORT_UP. Ignoring port...\n");
 535        return 0;
 536}
 537
 538/**
 539 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 540 *
 541 * Change the number of NFC credits allocated to @port by @credits. To remove
 542 * NFC credits pass a negative amount of credits.
 543 *
 544 * Return: Returns 0 on success or an error code on failure.
 545 */
 546int tb_port_add_nfc_credits(struct tb_port *port, int credits)
 547{
 548        u32 nfc_credits;
 549
 550        if (credits == 0 || port->sw->is_unplugged)
 551                return 0;
 552
 553        nfc_credits = port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK;
 554        nfc_credits += credits;
 555
  556        tb_port_dbg(port, "adding %d NFC credits to %lu\n",
 557                    credits, port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK);
 558
 559        port->config.nfc_credits &= ~TB_PORT_NFC_CREDITS_MASK;
 560        port->config.nfc_credits |= nfc_credits;
 561
 562        return tb_port_write(port, &port->config.nfc_credits,
 563                             TB_CFG_PORT, 4, 1);
 564}
 565
 566/**
 567 * tb_port_set_initial_credits() - Set initial port link credits allocated
 568 * @port: Port to set the initial credits
  569 * @credits: Number of credits to allocate
 570 *
 571 * Set initial credits value to be used for ingress shared buffering.
 572 */
 573int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
 574{
 575        u32 data;
 576        int ret;
 577
 578        ret = tb_port_read(port, &data, TB_CFG_PORT, 5, 1);
 579        if (ret)
 580                return ret;
 581
 582        data &= ~TB_PORT_LCA_MASK;
 583        data |= (credits << TB_PORT_LCA_SHIFT) & TB_PORT_LCA_MASK;
 584
 585        return tb_port_write(port, &data, TB_CFG_PORT, 5, 1);
 586}
 587
 588/**
 589 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 590 *
 591 * Return: Returns 0 on success or an error code on failure.
 592 */
 593int tb_port_clear_counter(struct tb_port *port, int counter)
 594{
 595        u32 zero[3] = { 0, 0, 0 };
 596        tb_port_dbg(port, "clearing counter %d\n", counter);
 597        return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
 598}
 599
 600/**
 601 * tb_init_port() - initialize a port
 602 *
 603 * This is a helper method for tb_switch_alloc. Does not check or initialize
 604 * any downstream switches.
 605 *
 606 * Return: Returns 0 on success or an error code on failure.
 607 */
 608static int tb_init_port(struct tb_port *port)
 609{
 610        int res;
 611        int cap;
 612
 613        res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
 614        if (res)
 615                return res;
 616
 617        /* Port 0 is the switch itself and has no PHY. */
 618        if (port->config.type == TB_TYPE_PORT && port->port != 0) {
 619                cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
 620
 621                if (cap > 0)
 622                        port->cap_phy = cap;
 623                else
 624                        tb_port_WARN(port, "non switch port without a PHY\n");
 625        } else if (port->port != 0) {
 626                cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
 627                if (cap > 0)
 628                        port->cap_adap = cap;
 629        }
 630
 631        tb_dump_port(port->sw->tb, &port->config);
 632
 633        /* Control port does not need HopID allocation */
 634        if (port->port) {
 635                ida_init(&port->in_hopids);
 636                ida_init(&port->out_hopids);
 637        }
 638
 639        return 0;
 641}
 642
 643static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
 644                               int max_hopid)
 645{
 646        int port_max_hopid;
 647        struct ida *ida;
 648
 649        if (in) {
 650                port_max_hopid = port->config.max_in_hop_id;
 651                ida = &port->in_hopids;
 652        } else {
 653                port_max_hopid = port->config.max_out_hop_id;
 654                ida = &port->out_hopids;
 655        }
 656
 657        /* HopIDs 0-7 are reserved */
 658        if (min_hopid < TB_PATH_MIN_HOPID)
 659                min_hopid = TB_PATH_MIN_HOPID;
 660
 661        if (max_hopid < 0 || max_hopid > port_max_hopid)
 662                max_hopid = port_max_hopid;
 663
 664        return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
 665}
 666
 667/**
 668 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 669 * @port: Port to allocate HopID for
 670 * @min_hopid: Minimum acceptable input HopID
 671 * @max_hopid: Maximum acceptable input HopID
 672 *
 673 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 674 * case of error.
 675 */
 676int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
 677{
 678        return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
 679}
 680
 681/**
 682 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 683 * @port: Port to allocate HopID for
 684 * @min_hopid: Minimum acceptable output HopID
 685 * @max_hopid: Maximum acceptable output HopID
 686 *
 687 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 688 * case of error.
 689 */
 690int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
 691{
 692        return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
 693}
 694
 695/**
 696 * tb_port_release_in_hopid() - Release allocated input HopID from port
 697 * @port: Port whose HopID to release
 698 * @hopid: HopID to release
 699 */
 700void tb_port_release_in_hopid(struct tb_port *port, int hopid)
 701{
 702        ida_simple_remove(&port->in_hopids, hopid);
 703}
 704
 705/**
 706 * tb_port_release_out_hopid() - Release allocated output HopID from port
 707 * @port: Port whose HopID to release
 708 * @hopid: HopID to release
 709 */
 710void tb_port_release_out_hopid(struct tb_port *port, int hopid)
 711{
 712        ida_simple_remove(&port->out_hopids, hopid);
 713}
 714
 715/**
 716 * tb_next_port_on_path() - Return next port for given port on a path
 717 * @start: Start port of the walk
 718 * @end: End port of the walk
 719 * @prev: Previous port (%NULL if this is the first)
 720 *
  721 * This function can be used to walk from one port to another if they
  722 * are connected through zero or more switches. If @prev is a dual
  723 * link port, the function follows that link and returns the other end
  724 * of that same link.
 725 *
 726 * If the @end port has been reached, return %NULL.
 727 *
 728 * Domain tb->lock must be held when this function is called.
 729 */
 730struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
 731                                     struct tb_port *prev)
 732{
 733        struct tb_port *next;
 734
 735        if (!prev)
 736                return start;
 737
 738        if (prev->sw == end->sw) {
 739                if (prev == end)
 740                        return NULL;
 741                return end;
 742        }
 743
 744        if (start->sw->config.depth < end->sw->config.depth) {
 745                if (prev->remote &&
 746                    prev->remote->sw->config.depth > prev->sw->config.depth)
 747                        next = prev->remote;
 748                else
 749                        next = tb_port_at(tb_route(end->sw), prev->sw);
 750        } else {
 751                if (tb_is_upstream_port(prev)) {
 752                        next = prev->remote;
 753                } else {
 754                        next = tb_upstream_port(prev->sw);
 755                        /*
 756                         * Keep the same link if prev and next are both
 757                         * dual link ports.
 758                         */
 759                        if (next->dual_link_port &&
 760                            next->link_nr != prev->link_nr) {
 761                                next = next->dual_link_port;
 762                        }
 763                }
 764        }
 765
 766        return next;
 767}
 768
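/*
 * A minimal sketch of walking a path with the helper above, visiting
 * every port from @src to @dst inclusive (illustrative only, @src and
 * @dst are hypothetical ports on the same path):
 *
 *	struct tb_port *p = NULL;
 *
 *	while ((p = tb_next_port_on_path(src, dst, p))) {
 *		... inspect p ...
 *	}
 *
 * The loop terminates because the helper returns %NULL once @dst has
 * been handed out.
 */
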
 769/**
 770 * tb_port_is_enabled() - Is the adapter port enabled
 771 * @port: Port to check
 772 */
 773bool tb_port_is_enabled(struct tb_port *port)
 774{
 775        switch (port->config.type) {
 776        case TB_TYPE_PCIE_UP:
 777        case TB_TYPE_PCIE_DOWN:
 778                return tb_pci_port_is_enabled(port);
 779
 780        case TB_TYPE_DP_HDMI_IN:
 781        case TB_TYPE_DP_HDMI_OUT:
 782                return tb_dp_port_is_enabled(port);
 783
 784        default:
 785                return false;
 786        }
 787}
 788
 789/**
 790 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
 791 * @port: PCIe port to check
 792 */
 793bool tb_pci_port_is_enabled(struct tb_port *port)
 794{
 795        u32 data;
 796
 797        if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1))
 798                return false;
 799
 800        return !!(data & TB_PCI_EN);
 801}
 802
 803/**
 804 * tb_pci_port_enable() - Enable PCIe adapter port
 805 * @port: PCIe port to enable
 806 * @enable: Enable/disable the PCIe adapter
 807 */
 808int tb_pci_port_enable(struct tb_port *port, bool enable)
 809{
 810        u32 word = enable ? TB_PCI_EN : 0x0;
 811        if (!port->cap_adap)
 812                return -ENXIO;
 813        return tb_port_write(port, &word, TB_CFG_PORT, port->cap_adap, 1);
 814}
 815
 816/**
 817 * tb_dp_port_hpd_is_active() - Is HPD already active
 818 * @port: DP out port to check
 819 *
  820 * Checks if the DP OUT adapter port has the HPD bit already set.
 821 */
 822int tb_dp_port_hpd_is_active(struct tb_port *port)
 823{
 824        u32 data;
 825        int ret;
 826
 827        ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 2, 1);
 828        if (ret)
 829                return ret;
 830
 831        return !!(data & TB_DP_HDP);
 832}
 833
 834/**
 835 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
 836 * @port: Port to clear HPD
 837 *
  838 * If the DP IN port has HPD set, this function can be used to clear it.
 839 */
 840int tb_dp_port_hpd_clear(struct tb_port *port)
 841{
 842        u32 data;
 843        int ret;
 844
 845        ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
 846        if (ret)
 847                return ret;
 848
 849        data |= TB_DP_HPDC;
 850        return tb_port_write(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
 851}
 852
 853/**
 854 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
 855 * @port: DP IN/OUT port to set hops
 856 * @video: Video Hop ID
 857 * @aux_tx: AUX TX Hop ID
 858 * @aux_rx: AUX RX Hop ID
 859 *
 860 * Programs specified Hop IDs for DP IN/OUT port.
 861 */
 862int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
 863                        unsigned int aux_tx, unsigned int aux_rx)
 864{
 865        u32 data[2];
 866        int ret;
 867
 868        ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
 869                           ARRAY_SIZE(data));
 870        if (ret)
 871                return ret;
 872
 873        data[0] &= ~TB_DP_VIDEO_HOPID_MASK;
 874        data[1] &= ~(TB_DP_AUX_RX_HOPID_MASK | TB_DP_AUX_TX_HOPID_MASK);
 875
 876        data[0] |= (video << TB_DP_VIDEO_HOPID_SHIFT) & TB_DP_VIDEO_HOPID_MASK;
 877        data[1] |= aux_tx & TB_DP_AUX_TX_HOPID_MASK;
 878        data[1] |= (aux_rx << TB_DP_AUX_RX_HOPID_SHIFT) & TB_DP_AUX_RX_HOPID_MASK;
 879
 880        return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap,
 881                             ARRAY_SIZE(data));
 882}
 883
 884/**
 885 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 886 * @port: DP adapter port to check
 887 */
 888bool tb_dp_port_is_enabled(struct tb_port *port)
 889{
 890        u32 data;
 891
 892        if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1))
 893                return false;
 894
 895        return !!(data & (TB_DP_VIDEO_EN | TB_DP_AUX_EN));
 896}
 897
 898/**
 899 * tb_dp_port_enable() - Enables/disables DP paths of a port
 900 * @port: DP IN/OUT port
 901 * @enable: Enable/disable DP path
 902 *
 903 * Once Hop IDs are programmed DP paths can be enabled or disabled by
 904 * calling this function.
 905 */
 906int tb_dp_port_enable(struct tb_port *port, bool enable)
 907{
 908        u32 data;
 909        int ret;
 910
 911        ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1);
 912        if (ret)
 913                return ret;
 914
 915        if (enable)
 916                data |= TB_DP_VIDEO_EN | TB_DP_AUX_EN;
 917        else
 918                data &= ~(TB_DP_VIDEO_EN | TB_DP_AUX_EN);
 919
 920        return tb_port_write(port, &data, TB_CFG_PORT, port->cap_adap, 1);
 921}
 922
 923/* switch utility functions */
 924
 925static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
 926{
 927        tb_dbg(tb, " Switch: %x:%x (Revision: %d, TB Version: %d)\n",
 928               sw->vendor_id, sw->device_id, sw->revision,
 929               sw->thunderbolt_version);
 930        tb_dbg(tb, "  Max Port Number: %d\n", sw->max_port_number);
 931        tb_dbg(tb, "  Config:\n");
 932        tb_dbg(tb,
 933                "   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
 934               sw->upstream_port_number, sw->depth,
 935               (((u64) sw->route_hi) << 32) | sw->route_lo,
 936               sw->enabled, sw->plug_events_delay);
 937        tb_dbg(tb, "   unknown1: %#x unknown4: %#x\n",
 938               sw->__unknown1, sw->__unknown4);
 939}
 940
 941/**
  942 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 943 *
 944 * Return: Returns 0 on success or an error code on failure.
 945 */
 946int tb_switch_reset(struct tb *tb, u64 route)
 947{
 948        struct tb_cfg_result res;
  949        struct tb_regs_switch_header header = {
  950                .route_hi = route >> 32,
  951                .route_lo = route,
  952                .enabled = true,
  953        };
 954        tb_dbg(tb, "resetting switch at %llx\n", route);
 955        res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
 956                        0, 2, 2, 2);
 957        if (res.err)
 958                return res.err;
 959        res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
 960        if (res.err > 0)
 961                return -EIO;
 962        return res.err;
 963}
 964
 965/**
 966 * tb_plug_events_active() - enable/disable plug events on a switch
 967 *
 968 * Also configures a sane plug_events_delay of 255ms.
 969 *
 970 * Return: Returns 0 on success or an error code on failure.
 971 */
 972static int tb_plug_events_active(struct tb_switch *sw, bool active)
 973{
 974        u32 data;
 975        int res;
 976
 977        if (!sw->config.enabled)
 978                return 0;
 979
 980        sw->config.plug_events_delay = 0xff;
 981        res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
 982        if (res)
 983                return res;
 984
 985        res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
 986        if (res)
 987                return res;
 988
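        /*
         * The hard-coded masks below are undocumented: activating
         * clears bits 2-6 of the plug events register (setting bit 2
         * again on everything except the listed legacy controllers),
         * while deactivating sets bits 2-6.
         */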
 989        if (active) {
 990                data = data & 0xFFFFFF83;
 991                switch (sw->config.device_id) {
 992                case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
 993                case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
 994                case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
 995                        break;
 996                default:
 997                        data |= 4;
 998                }
 999        } else {
1000                data = data | 0x7c;
1001        }
1002        return tb_sw_write(sw, &data, TB_CFG_SWITCH,
1003                           sw->cap_plug_events + 1, 1);
1004}
1005
1006static ssize_t authorized_show(struct device *dev,
1007                               struct device_attribute *attr,
1008                               char *buf)
1009{
1010        struct tb_switch *sw = tb_to_switch(dev);
1011
1012        return sprintf(buf, "%u\n", sw->authorized);
1013}
1014
1015static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
1016{
1017        int ret = -EINVAL;
1018
1019        if (!mutex_trylock(&sw->tb->lock))
1020                return restart_syscall();
1021
1022        if (sw->authorized)
1023                goto unlock;
1024
1025        /*
1026         * Make sure there is no PCIe rescan ongoing when a new PCIe
1027         * tunnel is created. Otherwise the PCIe rescan code might find
1028         * the new tunnel too early.
1029         */
1030        pci_lock_rescan_remove();
1031
1032        switch (val) {
1033        /* Approve switch */
1034        case 1:
1035                if (sw->key)
1036                        ret = tb_domain_approve_switch_key(sw->tb, sw);
1037                else
1038                        ret = tb_domain_approve_switch(sw->tb, sw);
1039                break;
1040
1041        /* Challenge switch */
1042        case 2:
1043                if (sw->key)
1044                        ret = tb_domain_challenge_switch_key(sw->tb, sw);
1045                break;
1046
1047        default:
1048                break;
1049        }
1050
1051        pci_unlock_rescan_remove();
1052
1053        if (!ret) {
1054                sw->authorized = val;
1055                /* Notify status change to the userspace */
1056                kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
1057        }
1058
1059unlock:
1060        mutex_unlock(&sw->tb->lock);
1061        return ret;
1062}
1063
1064static ssize_t authorized_store(struct device *dev,
1065                                struct device_attribute *attr,
1066                                const char *buf, size_t count)
1067{
1068        struct tb_switch *sw = tb_to_switch(dev);
1069        unsigned int val;
1070        ssize_t ret;
1071
1072        ret = kstrtouint(buf, 0, &val);
1073        if (ret)
1074                return ret;
1075        if (val > 2)
1076                return -EINVAL;
1077
1078        pm_runtime_get_sync(&sw->dev);
1079        ret = tb_switch_set_authorized(sw, val);
1080        pm_runtime_mark_last_busy(&sw->dev);
1081        pm_runtime_put_autosuspend(&sw->dev);
1082
1083        return ret ? ret : count;
1084}
1085static DEVICE_ATTR_RW(authorized);
1086
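/*
 * The attribute above is the user-facing security interface. A device
 * is approved from userspace roughly like this (see
 * Documentation/admin-guide/thunderbolt.rst):
 *
 *	# echo 1 > /sys/bus/thunderbolt/devices/<device>/authorized
 *
 * Writing 2 instead challenges the device with the stored key.
 */
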
1087static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
1088                         char *buf)
1089{
1090        struct tb_switch *sw = tb_to_switch(dev);
1091
1092        return sprintf(buf, "%u\n", sw->boot);
1093}
1094static DEVICE_ATTR_RO(boot);
1095
1096static ssize_t device_show(struct device *dev, struct device_attribute *attr,
1097                           char *buf)
1098{
1099        struct tb_switch *sw = tb_to_switch(dev);
1100
1101        return sprintf(buf, "%#x\n", sw->device);
1102}
1103static DEVICE_ATTR_RO(device);
1104
1105static ssize_t
1106device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1107{
1108        struct tb_switch *sw = tb_to_switch(dev);
1109
1110        return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
1111}
1112static DEVICE_ATTR_RO(device_name);
1113
1114static ssize_t key_show(struct device *dev, struct device_attribute *attr,
1115                        char *buf)
1116{
1117        struct tb_switch *sw = tb_to_switch(dev);
1118        ssize_t ret;
1119
1120        if (!mutex_trylock(&sw->tb->lock))
1121                return restart_syscall();
1122
1123        if (sw->key)
1124                ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
1125        else
1126                ret = sprintf(buf, "\n");
1127
1128        mutex_unlock(&sw->tb->lock);
1129        return ret;
1130}
1131
1132static ssize_t key_store(struct device *dev, struct device_attribute *attr,
1133                         const char *buf, size_t count)
1134{
1135        struct tb_switch *sw = tb_to_switch(dev);
1136        u8 key[TB_SWITCH_KEY_SIZE];
1137        ssize_t ret = count;
1138        bool clear = false;
1139
1140        if (!strcmp(buf, "\n"))
1141                clear = true;
1142        else if (hex2bin(key, buf, sizeof(key)))
1143                return -EINVAL;
1144
1145        if (!mutex_trylock(&sw->tb->lock))
1146                return restart_syscall();
1147
1148        if (sw->authorized) {
1149                ret = -EBUSY;
1150        } else {
1151                kfree(sw->key);
1152                if (clear) {
1153                        sw->key = NULL;
1154                } else {
1155                        sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
1156                        if (!sw->key)
1157                                ret = -ENOMEM;
1158                }
1159        }
1160
1161        mutex_unlock(&sw->tb->lock);
1162        return ret;
1163}
1164static DEVICE_ATTR(key, 0600, key_show, key_store);
1165
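/*
 * From key_store() above: the key is written as 2 * TB_SWITCH_KEY_SIZE
 * hexadecimal characters, and a bare newline clears any stored key.
 * Neither is accepted once the switch has been authorized.
 */
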
1166static void nvm_authenticate_start(struct tb_switch *sw)
1167{
1168        struct pci_dev *root_port;
1169
1170        /*
 1171         * During host router NVM upgrade we should not allow the root
 1172         * port to go into D3cold because some root ports cannot trigger
 1173         * PME themselves. To be on the safe side keep the root port in
 1174         * D0 during the whole upgrade process.
1175         */
1176        root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
1177        if (root_port)
1178                pm_runtime_get_noresume(&root_port->dev);
1179}
1180
1181static void nvm_authenticate_complete(struct tb_switch *sw)
1182{
1183        struct pci_dev *root_port;
1184
1185        root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
1186        if (root_port)
1187                pm_runtime_put(&root_port->dev);
1188}
1189
1190static ssize_t nvm_authenticate_show(struct device *dev,
1191        struct device_attribute *attr, char *buf)
1192{
1193        struct tb_switch *sw = tb_to_switch(dev);
1194        u32 status;
1195
1196        nvm_get_auth_status(sw, &status);
1197        return sprintf(buf, "%#x\n", status);
1198}
1199
1200static ssize_t nvm_authenticate_store(struct device *dev,
1201        struct device_attribute *attr, const char *buf, size_t count)
1202{
1203        struct tb_switch *sw = tb_to_switch(dev);
1204        bool val;
1205        int ret;
1206
1207        pm_runtime_get_sync(&sw->dev);
1208
1209        if (!mutex_trylock(&sw->tb->lock)) {
1210                ret = restart_syscall();
1211                goto exit_rpm;
1212        }
1213
1214        /* If NVMem devices are not yet added */
1215        if (!sw->nvm) {
1216                ret = -EAGAIN;
1217                goto exit_unlock;
1218        }
1219
1220        ret = kstrtobool(buf, &val);
1221        if (ret)
1222                goto exit_unlock;
1223
1224        /* Always clear the authentication status */
1225        nvm_clear_auth_status(sw);
1226
1227        if (val) {
1228                if (!sw->nvm->buf) {
1229                        ret = -EINVAL;
1230                        goto exit_unlock;
1231                }
1232
1233                ret = nvm_validate_and_write(sw);
1234                if (ret)
1235                        goto exit_unlock;
1236
1237                sw->nvm->authenticating = true;
1238
1239                if (!tb_route(sw)) {
1240                        /*
1241                         * Keep root port from suspending as long as the
1242                         * NVM upgrade process is running.
1243                         */
1244                        nvm_authenticate_start(sw);
1245                        ret = nvm_authenticate_host(sw);
1246                        if (ret)
1247                                nvm_authenticate_complete(sw);
1248                } else {
1249                        ret = nvm_authenticate_device(sw);
1250                }
1251        }
1252
1253exit_unlock:
1254        mutex_unlock(&sw->tb->lock);
1255exit_rpm:
1256        pm_runtime_mark_last_busy(&sw->dev);
1257        pm_runtime_put_autosuspend(&sw->dev);
1258
1259        if (ret)
1260                return ret;
1261        return count;
1262}
1263static DEVICE_ATTR_RW(nvm_authenticate);
1264
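/*
 * Putting the pieces together, an NVM upgrade driven from userspace
 * looks roughly like this (paths abbreviated; see
 * Documentation/admin-guide/thunderbolt.rst for the authoritative
 * flow):
 *
 *	# dd if=firmware.nvm of=.../nvm_non_active<id>/nvmem
 *	# echo 1 > .../nvm_authenticate
 *
 * The image is validated and flashed in nvm_authenticate_store(), and
 * a non-zero value read back from nvm_authenticate afterwards is the
 * failure status cached by nvm_set_auth_status().
 */
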
1265static ssize_t nvm_version_show(struct device *dev,
1266                                struct device_attribute *attr, char *buf)
1267{
1268        struct tb_switch *sw = tb_to_switch(dev);
1269        int ret;
1270
1271        if (!mutex_trylock(&sw->tb->lock))
1272                return restart_syscall();
1273
1274        if (sw->safe_mode)
1275                ret = -ENODATA;
1276        else if (!sw->nvm)
1277                ret = -EAGAIN;
1278        else
1279                ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
1280
1281        mutex_unlock(&sw->tb->lock);
1282
1283        return ret;
1284}
1285static DEVICE_ATTR_RO(nvm_version);
1286
1287static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
1288                           char *buf)
1289{
1290        struct tb_switch *sw = tb_to_switch(dev);
1291
1292        return sprintf(buf, "%#x\n", sw->vendor);
1293}
1294static DEVICE_ATTR_RO(vendor);
1295
1296static ssize_t
1297vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1298{
1299        struct tb_switch *sw = tb_to_switch(dev);
1300
1301        return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
1302}
1303static DEVICE_ATTR_RO(vendor_name);
1304
1305static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
1306                              char *buf)
1307{
1308        struct tb_switch *sw = tb_to_switch(dev);
1309
1310        return sprintf(buf, "%pUb\n", sw->uuid);
1311}
1312static DEVICE_ATTR_RO(unique_id);
1313
1314static struct attribute *switch_attrs[] = {
1315        &dev_attr_authorized.attr,
1316        &dev_attr_boot.attr,
1317        &dev_attr_device.attr,
1318        &dev_attr_device_name.attr,
1319        &dev_attr_key.attr,
1320        &dev_attr_nvm_authenticate.attr,
1321        &dev_attr_nvm_version.attr,
1322        &dev_attr_vendor.attr,
1323        &dev_attr_vendor_name.attr,
1324        &dev_attr_unique_id.attr,
1325        NULL,
1326};
1327
1328static umode_t switch_attr_is_visible(struct kobject *kobj,
1329                                      struct attribute *attr, int n)
1330{
1331        struct device *dev = container_of(kobj, struct device, kobj);
1332        struct tb_switch *sw = tb_to_switch(dev);
1333
1334        if (attr == &dev_attr_key.attr) {
1335                if (tb_route(sw) &&
1336                    sw->tb->security_level == TB_SECURITY_SECURE &&
1337                    sw->security_level == TB_SECURITY_SECURE)
1338                        return attr->mode;
1339                return 0;
1340        } else if (attr == &dev_attr_nvm_authenticate.attr ||
1341                   attr == &dev_attr_nvm_version.attr) {
1342                if (sw->dma_port)
1343                        return attr->mode;
1344                return 0;
1345        } else if (attr == &dev_attr_boot.attr) {
1346                if (tb_route(sw))
1347                        return attr->mode;
1348                return 0;
1349        }
1350
1351        return sw->safe_mode ? 0 : attr->mode;
1352}
1353
1354static struct attribute_group switch_group = {
1355        .is_visible = switch_attr_is_visible,
1356        .attrs = switch_attrs,
1357};
1358
1359static const struct attribute_group *switch_groups[] = {
1360        &switch_group,
1361        NULL,
1362};
1363
1364static void tb_switch_release(struct device *dev)
1365{
1366        struct tb_switch *sw = tb_to_switch(dev);
1367        int i;
1368
1369        dma_port_free(sw->dma_port);
1370
1371        for (i = 1; i <= sw->config.max_port_number; i++) {
1372                if (!sw->ports[i].disabled) {
1373                        ida_destroy(&sw->ports[i].in_hopids);
1374                        ida_destroy(&sw->ports[i].out_hopids);
1375                }
1376        }
1377
1378        kfree(sw->uuid);
1379        kfree(sw->device_name);
1380        kfree(sw->vendor_name);
1381        kfree(sw->ports);
1382        kfree(sw->drom);
1383        kfree(sw->key);
1384        kfree(sw);
1385}
1386
1387/*
 1388 * Currently we only need to provide the callbacks. Everything else is handled
1389 * in the connection manager.
1390 */
1391static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
1392{
1393        struct tb_switch *sw = tb_to_switch(dev);
1394        const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
1395
1396        if (cm_ops->runtime_suspend_switch)
1397                return cm_ops->runtime_suspend_switch(sw);
1398
1399        return 0;
1400}
1401
1402static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
1403{
1404        struct tb_switch *sw = tb_to_switch(dev);
1405        const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
1406
1407        if (cm_ops->runtime_resume_switch)
1408                return cm_ops->runtime_resume_switch(sw);
1409        return 0;
1410}
1411
1412static const struct dev_pm_ops tb_switch_pm_ops = {
1413        SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
1414                           NULL)
1415};
1416
1417struct device_type tb_switch_type = {
1418        .name = "thunderbolt_device",
1419        .release = tb_switch_release,
1420        .pm = &tb_switch_pm_ops,
1421};
1422
1423static int tb_switch_get_generation(struct tb_switch *sw)
1424{
1425        switch (sw->config.device_id) {
1426        case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1427        case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
1428        case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
1429        case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
1430        case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
1431        case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
1432        case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
1433        case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
1434                return 1;
1435
1436        case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
1437        case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
1438        case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
1439                return 2;
1440
1441        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
1442        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
1443        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
1444        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
1445        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
1446        case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
1447        case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
1448        case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
1449                return 3;
1450
1451        default:
1452                /*
1453                 * For unknown switches assume generation to be 1 to be
1454                 * on the safe side.
1455                 */
1456                tb_sw_warn(sw, "unsupported switch device id %#x\n",
1457                           sw->config.device_id);
1458                return 1;
1459        }
1460}
1461
1462/**
1463 * tb_switch_alloc() - allocate a switch
1464 * @tb: Pointer to the owning domain
1465 * @parent: Parent device for this switch
1466 * @route: Route string for this switch
1467 *
1468 * Allocates and initializes a switch. Will not upload configuration to
1469 * the switch. For that you need to call tb_switch_configure()
1470 * separately. The returned switch should be released by calling
1471 * tb_switch_put().
1472 *
1473 * Return: Pointer to the allocated switch or ERR_PTR() in case of
1474 * failure.
1475 */
1476struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
1477                                  u64 route)
1478{
1479        struct tb_switch *sw;
1480        int upstream_port;
1481        int i, ret, depth;
1482
1483        /* Make sure we do not exceed maximum topology limit */
1484        depth = tb_route_length(route);
1485        if (depth > TB_SWITCH_MAX_DEPTH)
1486                return ERR_PTR(-EADDRNOTAVAIL);
1487
1488        upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
1489        if (upstream_port < 0)
1490                return ERR_PTR(upstream_port);
1491
1492        sw = kzalloc(sizeof(*sw), GFP_KERNEL);
1493        if (!sw)
1494                return ERR_PTR(-ENOMEM);
1495
1496        sw->tb = tb;
1497        ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
1498        if (ret)
1499                goto err_free_sw_ports;
1500
1501        tb_dbg(tb, "current switch config:\n");
1502        tb_dump_switch(tb, &sw->config);
1503
1504        /* configure switch */
1505        sw->config.upstream_port_number = upstream_port;
1506        sw->config.depth = depth;
1507        sw->config.route_hi = upper_32_bits(route);
1508        sw->config.route_lo = lower_32_bits(route);
1509        sw->config.enabled = 0;
1510
1511        /* initialize ports */
1512        sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
1513                                GFP_KERNEL);
1514        if (!sw->ports) {
1515                ret = -ENOMEM;
1516                goto err_free_sw_ports;
1517        }
1518
1519        for (i = 0; i <= sw->config.max_port_number; i++) {
1520                /* minimum setup for tb_find_cap and tb_drom_read to work */
1521                sw->ports[i].sw = sw;
1522                sw->ports[i].port = i;
1523        }
1524
1525        sw->generation = tb_switch_get_generation(sw);
1526
1527        ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
1528        if (ret < 0) {
 1529                tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS, aborting\n");
1530                goto err_free_sw_ports;
1531        }
1532        sw->cap_plug_events = ret;
1533
1534        ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
1535        if (ret > 0)
1536                sw->cap_lc = ret;
1537
1538        /* Root switch is always authorized */
1539        if (!route)
1540                sw->authorized = true;
1541
1542        device_initialize(&sw->dev);
1543        sw->dev.parent = parent;
1544        sw->dev.bus = &tb_bus_type;
1545        sw->dev.type = &tb_switch_type;
1546        sw->dev.groups = switch_groups;
1547        dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
1548
1549        return sw;
1550
1551err_free_sw_ports:
1552        kfree(sw->ports);
1553        kfree(sw);
1554
1555        return ERR_PTR(ret);
1556}
1557
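/*
 * A sketch of how a connection manager is expected to use the
 * allocation API above (simplified):
 *
 *	sw = tb_switch_alloc(tb, parent, route);
 *	if (IS_ERR(sw))
 *		return PTR_ERR(sw);
 *	if (tb_switch_configure(sw) || tb_switch_add(sw))
 *		tb_switch_put(sw);
 *
 * tb_switch_put() drops the reference taken in device_initialize()
 * and eventually lands in tb_switch_release().
 */
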
1558/**
1559 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
1560 * @tb: Pointer to the owning domain
1561 * @parent: Parent device for this switch
1562 * @route: Route string for this switch
1563 *
 1564 * This creates a switch in safe mode. This means the switch pretty much
 1565 * lacks all capabilities except the DMA configuration port until it is
 1566 * flashed with a valid NVM firmware.
1567 *
1568 * The returned switch must be released by calling tb_switch_put().
1569 *
1570 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
1571 */
1572struct tb_switch *
1573tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
1574{
1575        struct tb_switch *sw;
1576
1577        sw = kzalloc(sizeof(*sw), GFP_KERNEL);
1578        if (!sw)
1579                return ERR_PTR(-ENOMEM);
1580
1581        sw->tb = tb;
1582        sw->config.depth = tb_route_length(route);
1583        sw->config.route_hi = upper_32_bits(route);
1584        sw->config.route_lo = lower_32_bits(route);
1585        sw->safe_mode = true;
1586
1587        device_initialize(&sw->dev);
1588        sw->dev.parent = parent;
1589        sw->dev.bus = &tb_bus_type;
1590        sw->dev.type = &tb_switch_type;
1591        sw->dev.groups = switch_groups;
1592        dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
1593
1594        return sw;
1595}
1596
1597/**
1598 * tb_switch_configure() - Uploads configuration to the switch
1599 * @sw: Switch to configure
1600 *
1601 * Call this function before the switch is added to the system. It will
 1602 * upload the configuration to the switch and make it available for the
1603 * connection manager to use.
1604 *
1605 * Return: %0 in case of success and negative errno in case of failure
1606 */
1607int tb_switch_configure(struct tb_switch *sw)
1608{
1609        struct tb *tb = sw->tb;
1610        u64 route;
1611        int ret;
1612
1613        route = tb_route(sw);
1614        tb_dbg(tb, "initializing Switch at %#llx (depth: %d, up port: %d)\n",
1615               route, tb_route_length(route), sw->config.upstream_port_number);
1616
1617        if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
1618                tb_sw_warn(sw, "unknown switch vendor id %#x\n",
1619                           sw->config.vendor_id);
1620
1621        sw->config.enabled = 1;
1622
1623        /* upload configuration */
1624        ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3);
1625        if (ret)
1626                return ret;
1627
1628        ret = tb_lc_configure_link(sw);
1629        if (ret)
1630                return ret;
1631
1632        return tb_plug_events_active(sw, true);
1633}
1634
1635static int tb_switch_set_uuid(struct tb_switch *sw)
1636{
1637        u32 uuid[4];
1638        int ret;
1639
1640        if (sw->uuid)
1641                return 0;
1642
1643        /*
1644         * The newer controllers include fused UUID as part of link
1645         * controller specific registers
1646         */
1647        ret = tb_lc_read_uuid(sw, uuid);
1648        if (ret) {
1649                /*
1650                 * ICM generates UUID based on UID and fills the upper
1651                 * two words with ones. This is not strictly following
1652                 * UUID format but we want to be compatible with it so
1653                 * we do the same here.
1654                 */
1655                uuid[0] = sw->uid & 0xffffffff;
1656                uuid[1] = (sw->uid >> 32) & 0xffffffff;
1657                uuid[2] = 0xffffffff;
1658                uuid[3] = 0xffffffff;
1659        }
1660
1661        sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
1662        if (!sw->uuid)
1663                return -ENOMEM;
1664        return 0;
1665}
1666
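/*
 * Worked example of the fallback above: a UID of 0x0123456789abcdef
 * yields the raw UUID words { 0x89abcdef, 0x01234567, 0xffffffff,
 * 0xffffffff }; not a valid RFC 4122 UUID, but it matches what the
 * ICM firmware generates, which is what matters for the lookups in
 * the authentication status cache.
 */
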
1667static int tb_switch_add_dma_port(struct tb_switch *sw)
1668{
1669        u32 status;
1670        int ret;
1671
1672        switch (sw->generation) {
1673        case 3:
1674                break;
1675
1676        case 2:
1677                /* Only the root switch can be upgraded */
1678                if (tb_route(sw))
1679                        return 0;
1680                break;
1681
1682        default:
1683                /*
1684                 * DMA port is the only thing available when the switch
1685                 * is in safe mode.
1686                 */
1687                if (!sw->safe_mode)
1688                        return 0;
1689                break;
1690        }
1691
1692        if (sw->no_nvm_upgrade)
1693                return 0;
1694
1695        sw->dma_port = dma_port_alloc(sw);
1696        if (!sw->dma_port)
1697                return 0;
1698
1699        /*
1700         * Check the status of the previous flash authentication. If
1701         * there is one, we need to power cycle the switch in any case
1702         * to make it functional again.
1703         */
1704        ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
1705        if (ret <= 0)
1706                return ret;
1707
1708        /* Now we can allow the root port to suspend again */
1709        if (!tb_route(sw))
1710                nvm_authenticate_complete(sw);
1711
1712        if (status) {
1713                tb_sw_info(sw, "switch flash authentication failed\n");
1714                ret = tb_switch_set_uuid(sw);
1715                if (ret)
1716                        return ret;
1717                nvm_set_auth_status(sw, status);
1718        }
1719
1720        tb_sw_info(sw, "power cycling the switch now\n");
1721        dma_port_power_cycle(sw->dma_port);
1722
1723        /*
1724         * We return an error here, which makes adding the switch fail.
1725         * The switch should appear back once the power cycle is complete.
1726         */
1727        return -ESHUTDOWN;
1728}
1729
1730/**
1731 * tb_switch_add() - Add a switch to the domain
1732 * @sw: Switch to add
1733 *
1734 * This is the last step in adding a switch to the domain. It reads
1735 * identification information from the DROM and initializes the ports
1736 * so that they can be used to connect other switches. The switch is
1737 * exposed to userspace when this function successfully returns. To
1738 * remove and release the switch, call tb_switch_remove().
1739 *
1740 * Return: %0 in case of success and negative errno in case of failure
1741 */
1742int tb_switch_add(struct tb_switch *sw)
1743{
1744        int i, ret;
1745
1746        /*
1747         * Initialize the DMA control port now, before we read the DROM.
1748         * Recent host controllers have a more complete DROM in NVM that
1749         * includes vendor and model identification strings, which we
1750         * then expose to userspace. The NVM is accessed through the DMA
1751         * configuration based mailbox.
1752         */
1753        ret = tb_switch_add_dma_port(sw);
1754        if (ret)
1755                return ret;
1756
1757        if (!sw->safe_mode) {
1758                /* read drom */
1759                ret = tb_drom_read(sw);
1760                if (ret) {
1761                        tb_sw_warn(sw, "reading DROM failed\n");
1762                        return ret;
1763                }
1764                tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
1765
1766                ret = tb_switch_set_uuid(sw);
1767                if (ret)
1768                        return ret;
1769
1770                for (i = 0; i <= sw->config.max_port_number; i++) {
1771                        if (sw->ports[i].disabled) {
1772                                tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
1773                                continue;
1774                        }
1775                        ret = tb_init_port(&sw->ports[i]);
1776                        if (ret)
1777                                return ret;
1778                }
1779        }
1780
1781        ret = device_add(&sw->dev);
1782        if (ret)
1783                return ret;
1784
1785        if (tb_route(sw)) {
1786                dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
1787                         sw->vendor, sw->device);
1788                if (sw->vendor_name && sw->device_name)
1789                        dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
1790                                 sw->device_name);
1791        }
1792
1793        ret = tb_switch_nvm_add(sw);
1794        if (ret) {
1795                device_del(&sw->dev);
1796                return ret;
1797        }
1798
1799        pm_runtime_set_active(&sw->dev);
1800        if (sw->rpm) {
1801                pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
1802                pm_runtime_use_autosuspend(&sw->dev);
1803                pm_runtime_mark_last_busy(&sw->dev);
1804                pm_runtime_enable(&sw->dev);
1805                pm_request_autosuspend(&sw->dev);
1806        }
1807
1808        return 0;
1809}
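
    /*
     * Illustrative sketch, not part of the driver: the typical sequence
     * a connection manager goes through to bring up a newly discovered
     * switch. The function name, the __maybe_unused annotation and the
     * parent device choice are assumptions made for the example;
     * tb_switch_alloc() is defined earlier in this file and
     * tb_switch_put() in tb.h.
     */
    static int __maybe_unused example_add_switch(struct tb *tb,
                                                 struct device *parent,
                                                 u64 route)
    {
            struct tb_switch *sw;
            int ret;

            sw = tb_switch_alloc(tb, parent, route);
            if (IS_ERR(sw))
                    return PTR_ERR(sw);

            ret = tb_switch_configure(sw);
            if (!ret)
                    ret = tb_switch_add(sw);
            if (ret)
                    tb_switch_put(sw);
            return ret;
    }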
1810
1811/**
1812 * tb_switch_remove() - Remove and release a switch
1813 * @sw: Switch to remove
1814 *
1815 * This will remove the switch from the domain and release it once the
1816 * last reference to it is dropped. If there are switches connected below
1817 * this switch, they will be removed as well.
1818 */
1819void tb_switch_remove(struct tb_switch *sw)
1820{
1821        int i;
1822
1823        if (sw->rpm) {
1824                pm_runtime_get_sync(&sw->dev);
1825                pm_runtime_disable(&sw->dev);
1826        }
1827
1828        /* port 0 is the switch itself and never has a remote */
1829        for (i = 1; i <= sw->config.max_port_number; i++) {
1830                if (tb_port_has_remote(&sw->ports[i])) {
1831                        tb_switch_remove(sw->ports[i].remote->sw);
1832                        sw->ports[i].remote = NULL;
1833                } else if (sw->ports[i].xdomain) {
1834                        tb_xdomain_remove(sw->ports[i].xdomain);
1835                        sw->ports[i].xdomain = NULL;
1836                }
1837        }
1838
1839        if (!sw->is_unplugged)
1840                tb_plug_events_active(sw, false);
1841        tb_lc_unconfigure_link(sw);
1842
1843        tb_switch_nvm_remove(sw);
1844
1845        if (tb_route(sw))
1846                dev_info(&sw->dev, "device disconnected\n");
1847        device_unregister(&sw->dev);
1848}
1849
1850/**
1851 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
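     * @sw: Switch to mark as unplugged, including all switches below it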
1852 */
1853void tb_sw_set_unplugged(struct tb_switch *sw)
1854{
1855        int i;

1856        if (sw == sw->tb->root_switch) {
1857                tb_sw_WARN(sw, "cannot unplug root switch\n");
1858                return;
1859        }
1860        if (sw->is_unplugged) {
1861                tb_sw_WARN(sw, "is_unplugged already set\n");
1862                return;
1863        }
1864        sw->is_unplugged = true;
1865        for (i = 0; i <= sw->config.max_port_number; i++) {
1866                if (tb_port_has_remote(&sw->ports[i]))
1867                        tb_sw_set_unplugged(sw->ports[i].remote->sw);
1868                else if (sw->ports[i].xdomain)
1869                        sw->ports[i].xdomain->is_unplugged = true;
1870        }
1871}
1872
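    /**
     * tb_switch_resume() - Resume a switch after sleep
     * @sw: Switch to resume
     *
     * Verifies that the switch is still the one that was suspended (by
     * comparing UIDs), re-uploads its configuration and then resumes
     * the downstream switches, disconnecting any that did not survive
     * the sleep.
     *
     * Return: %0 in case of success and negative errno in case of failure
     */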
1873int tb_switch_resume(struct tb_switch *sw)
1874{
1875        int i, err;

1876        tb_sw_dbg(sw, "resuming switch\n");
1877
1878        /*
1879         * Check the UID of the connected switches, except for the
1880         * root switch, which we assume cannot be removed.
1881         */
1882        if (tb_route(sw)) {
1883                u64 uid;
1884
1885                /*
1886                 * Check first that we can still read the switch config
1887                 * space. It may be that there is now another domain
1888                 * connected.
1889                 */
1890                err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
1891                if (err < 0) {
1892                        tb_sw_info(sw, "switch not present anymore\n");
1893                        return err;
1894                }
1895
1896                err = tb_drom_read_uid_only(sw, &uid);
1897                if (err) {
1898                        tb_sw_warn(sw, "uid read failed\n");
1899                        return err;
1900                }
1901                if (sw->uid != uid) {
1902                        tb_sw_info(sw,
1903                                "changed while suspended (uid %#llx -> %#llx)\n",
1904                                sw->uid, uid);
1905                        return -ENODEV;
1906                }
1907        }
1908
1909        /* upload configuration */
1910        err = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3);
1911        if (err)
1912                return err;
1913
1914        err = tb_lc_configure_link(sw);
1915        if (err)
1916                return err;
1917
1918        err = tb_plug_events_active(sw, true);
1919        if (err)
1920                return err;
1921
1922        /* check for surviving downstream switches */
1923        for (i = 1; i <= sw->config.max_port_number; i++) {
1924                struct tb_port *port = &sw->ports[i];
1925
1926                if (!tb_port_has_remote(port) && !port->xdomain)
1927                        continue;
1928
1929                if (tb_wait_for_port(port, true) <= 0) {
1930                        tb_port_warn(port,
1931                                     "lost during suspend, disconnecting\n");
1932                        if (tb_port_has_remote(port))
1933                                tb_sw_set_unplugged(port->remote->sw);
1934                        else if (port->xdomain)
1935                                port->xdomain->is_unplugged = true;
1936                } else if (tb_port_has_remote(port)) {
1937                        if (tb_switch_resume(port->remote->sw)) {
1938                                tb_port_warn(port,
1939                                             "lost during suspend, disconnecting\n");
1940                                tb_sw_set_unplugged(port->remote->sw);
1941                        }
1942                }
1943        }
1944        return 0;
1945}
1946
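    /**
     * tb_switch_suspend() - Prepare a switch for sleep
     * @sw: Switch to suspend
     *
     * Disables plug events and suspends all downstream switches before
     * asking the link controller to put this switch to sleep.
     */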
1947void tb_switch_suspend(struct tb_switch *sw)
1948{
1949        int i, err;

1950        err = tb_plug_events_active(sw, false);
1951        if (err)
1952                return;
1953
1954        for (i = 1; i <= sw->config.max_port_number; i++) {
1955                if (tb_port_has_remote(&sw->ports[i]))
1956                        tb_switch_suspend(sw->ports[i].remote->sw);
1957        }
1958
1959        tb_lc_set_sleep(sw);
1960}
1961
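    /*
     * Search criteria used by tb_switch_match(): a switch is matched by
     * UUID, by route, or by link/depth pair, checked in that order.
     */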
1962struct tb_sw_lookup {
1963        struct tb *tb;
1964        u8 link;
1965        u8 depth;
1966        const uuid_t *uuid;
1967        u64 route;
1968};
1969
1970static int tb_switch_match(struct device *dev, const void *data)
1971{
1972        struct tb_switch *sw = tb_to_switch(dev);
1973        const struct tb_sw_lookup *lookup = data;
1974
1975        if (!sw)
1976                return 0;
1977        if (sw->tb != lookup->tb)
1978                return 0;
1979
1980        if (lookup->uuid)
1981                return sw->uuid && uuid_equal(sw->uuid, lookup->uuid);
1982
1983        if (lookup->route) {
1984                return sw->config.route_lo == lower_32_bits(lookup->route) &&
1985                       sw->config.route_hi == upper_32_bits(lookup->route);
1986        }
1987
1988        /* Root switch is matched only by depth */
1989        if (!lookup->depth)
1990                return !sw->depth;
1991
1992        return sw->link == lookup->link && sw->depth == lookup->depth;
1993}
1994
1995/**
1996 * tb_switch_find_by_link_depth() - Find switch by link and depth
1997 * @tb: Domain the switch belongs to
1998 * @link: Link number the switch is connected to
1999 * @depth: Depth of the switch in the link
2000 *
2001 * The returned switch has its reference count increased, so the caller
2002 * needs to call tb_switch_put() when done with the switch.
2003 */
2004struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
2005{
2006        struct tb_sw_lookup lookup;
2007        struct device *dev;
2008
2009        memset(&lookup, 0, sizeof(lookup));
2010        lookup.tb = tb;
2011        lookup.link = link;
2012        lookup.depth = depth;
2013
2014        dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
2015        if (dev)
2016                return tb_to_switch(dev);
2017
2018        return NULL;
2019}
2020
2021/**
2022 * tb_switch_find_by_uuid() - Find switch by UUID
2023 * @tb: Domain the switch belongs to
2024 * @uuid: UUID to look for
2025 *
2026 * The returned switch has its reference count increased, so the caller
2027 * needs to call tb_switch_put() when done with the switch.
2028 */
2029struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
2030{
2031        struct tb_sw_lookup lookup;
2032        struct device *dev;
2033
2034        memset(&lookup, 0, sizeof(lookup));
2035        lookup.tb = tb;
2036        lookup.uuid = uuid;
2037
2038        dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
2039        if (dev)
2040                return tb_to_switch(dev);
2041
2042        return NULL;
2043}
2044
2045/**
2046 * tb_switch_find_by_route() - Find switch by route string
2047 * @tb: Domain the switch belongs to
2048 * @route: Route string to look for
2049 *
2050 * The returned switch has its reference count increased, so the caller
2051 * needs to call tb_switch_put() when done with the switch.
2052 */
2053struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
2054{
2055        struct tb_sw_lookup lookup;
2056        struct device *dev;
2057
2058        if (!route)
2059                return tb_switch_get(tb->root_switch);
2060
2061        memset(&lookup, 0, sizeof(lookup));
2062        lookup.tb = tb;
2063        lookup.route = route;
2064
2065        dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
2066        if (dev)
2067                return tb_to_switch(dev);
2068
2069        return NULL;
2070}
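
    /*
     * Illustrative sketch, not part of the driver: each successful
     * tb_switch_find_*() lookup returns the switch with its reference
     * count increased and must be paired with tb_switch_put(). The
     * function name and __maybe_unused annotation are assumptions made
     * for the example.
     */
    static void __maybe_unused example_lookup(struct tb *tb, u64 route)
    {
            struct tb_switch *sw;

            sw = tb_switch_find_by_route(tb, route);
            if (sw) {
                    tb_sw_dbg(sw, "found switch at %#llx\n", route);
                    tb_switch_put(sw);
            }
    }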
2071
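    /*
     * tb_switch_exit() - Called when the driver is unloaded; destroys
     * the IDA used to allocate NVM device numbers.
     */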
2072void tb_switch_exit(void)
2073{
2074        ida_destroy(&nvm_ida);
2075}
2076