// SPDX-License-Identifier: GPL-2.0
/*
 * Internal Thunderbolt Connection Manager. This is firmware running on
 * the Thunderbolt host controller that performs most of the low-level
 * handling.
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "ctl.h"
#include "nhi_regs.h"
#include "tb.h"

#define PCIE2CIO_CMD                    0x30
#define PCIE2CIO_CMD_TIMEOUT            BIT(31)
#define PCIE2CIO_CMD_START              BIT(30)
#define PCIE2CIO_CMD_WRITE              BIT(21)
#define PCIE2CIO_CMD_CS_MASK            GENMASK(20, 19)
#define PCIE2CIO_CMD_CS_SHIFT           19
#define PCIE2CIO_CMD_PORT_MASK          GENMASK(18, 13)
#define PCIE2CIO_CMD_PORT_SHIFT         13

#define PCIE2CIO_WRDATA                 0x34
#define PCIE2CIO_RDDATA                 0x38

#define PHY_PORT_CS1                    0x37
#define PHY_PORT_CS1_LINK_DISABLE       BIT(14)
#define PHY_PORT_CS1_LINK_STATE_MASK    GENMASK(29, 26)
#define PHY_PORT_CS1_LINK_STATE_SHIFT   26

#define ICM_TIMEOUT                     5000    /* ms */
#define ICM_APPROVE_TIMEOUT             10000   /* ms */
#define ICM_MAX_LINK                    4

/**
 * struct icm - Internal connection manager private data
 * @request_lock: Makes sure only one message is sent to ICM at a time
 * @rescan_work: Work used to rescan the surviving switches after resume
 * @upstream_port: Pointer to the PCIe upstream port this host
 *                 controller is connected to. This is only set for systems
 *                 where ICM needs to be started manually
 * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides
 *           (only set when @upstream_port is not %NULL)
 * @safe_mode: ICM is in safe mode
 * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
 * @rpm: Does the controller support runtime PM (RTD3)
 * @is_supported: Checks if we can support ICM on this controller
 * @cio_reset: Trigger CIO reset
 * @get_mode: Read and return the ICM firmware mode (optional)
 * @get_route: Find a route string for given switch
 * @save_devices: Ask ICM to save devices to ACL when suspending (optional)
 * @driver_ready: Send driver ready message to ICM
 * @device_connected: Handle device connected ICM message
 * @device_disconnected: Handle device disconnected ICM message
 * @xdomain_connected: Handle XDomain connected ICM message
 * @xdomain_disconnected: Handle XDomain disconnected ICM message
 */
struct icm {
        struct mutex request_lock;
        struct delayed_work rescan_work;
        struct pci_dev *upstream_port;
        size_t max_boot_acl;
        int vnd_cap;
        bool safe_mode;
        bool rpm;
        bool (*is_supported)(struct tb *tb);
        int (*cio_reset)(struct tb *tb);
        int (*get_mode)(struct tb *tb);
        int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
        void (*save_devices)(struct tb *tb);
        int (*driver_ready)(struct tb *tb,
                            enum tb_security_level *security_level,
                            size_t *nboot_acl, bool *rpm);
        void (*device_connected)(struct tb *tb,
                                 const struct icm_pkg_header *hdr);
        void (*device_disconnected)(struct tb *tb,
                                    const struct icm_pkg_header *hdr);
        void (*xdomain_connected)(struct tb *tb,
                                  const struct icm_pkg_header *hdr);
        void (*xdomain_disconnected)(struct tb *tb,
                                     const struct icm_pkg_header *hdr);
};

struct icm_notification {
        struct work_struct work;
        struct icm_pkg_header *pkg;
        struct tb *tb;
};

struct ep_name_entry {
        u8 len;
        u8 type;
        u8 data[];
};

#define EP_NAME_INTEL_VSS       0x10

/* Intel Vendor specific structure */
struct intel_vss {
        u16 vendor;
        u16 model;
        u8 mc;
        u8 flags;
        u16 pci_devid;
        u32 nvm_version;
};

#define INTEL_VSS_FLAGS_RTD3    BIT(0)

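/*
 * The EP name coming with the device connected message is a list of
 * variable length {len, type, data} entries. The len field covers the
 * whole entry including the two byte header, so a zero length entry
 * terminates the walk. Returns the Intel vendor specific entry if one
 * is present.
 */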
static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size)
{
        const void *end = ep_name + size;

        while (ep_name < end) {
                const struct ep_name_entry *ep = ep_name;

                if (!ep->len)
                        break;
                if (ep_name + ep->len > end)
                        break;

                if (ep->type == EP_NAME_INTEL_VSS)
                        return (const struct intel_vss *)ep->data;

                ep_name += ep->len;
        }

        return NULL;
}

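/*
 * The connection manager private data is allocated directly after
 * struct tb (see tb_domain_alloc()), so stepping back sizeof(struct tb)
 * from the private data recovers the domain pointer.
 */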
static inline struct tb *icm_to_tb(struct icm *icm)
{
        return ((void *)icm - sizeof(struct tb));
}

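/*
 * A route string packs one downstream port number per hop, one byte
 * per hop. Extract the port used at the deepest hop and map it to the
 * physical port number.
 */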
static inline u8 phy_port_from_route(u64 route, u8 depth)
{
        u8 link;

        link = depth ? route >> ((depth - 1) * 8) : route;
        return tb_phy_port_from_link(link);
}

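/*
 * The two lanes of a physical port are numbered (1, 2) and (3, 4).
 * Flipping the low bit of the zero-based link number yields the other
 * lane of the same pair.
 */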
static inline u8 dual_link_from_link(u8 link)
{
        return link ? ((link - 1) ^ 0x01) + 1 : 0;
}

static inline u64 get_route(u32 route_hi, u32 route_lo)
{
        return (u64)route_hi << 32 | route_lo;
}

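/*
 * The parent route is the route with its deepest hop masked off; a
 * resulting length of zero means the parent is the host router itself.
 */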
static inline u64 get_parent_route(u64 route)
{
        int depth = tb_route_length(route);
        return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0;
}

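/*
 * The PCIe2CIO mailbox allows accessing the CIO configuration space of
 * the host router through vendor specific PCI config registers of the
 * upstream bridge. It is only used on systems (such as Apple hardware)
 * where the ICM firmware needs to be reset and started manually.
 */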
static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
{
        unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
        u32 cmd;

        do {
                pci_read_config_dword(icm->upstream_port,
                                      icm->vnd_cap + PCIE2CIO_CMD, &cmd);
                if (!(cmd & PCIE2CIO_CMD_START)) {
                        if (cmd & PCIE2CIO_CMD_TIMEOUT)
                                break;
                        return 0;
                }

                msleep(50);
        } while (time_before(jiffies, end));

        return -ETIMEDOUT;
}

static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
                         unsigned int port, unsigned int index, u32 *data)
{
        struct pci_dev *pdev = icm->upstream_port;
        int ret, vnd_cap = icm->vnd_cap;
        u32 cmd;

        cmd = index;
        cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
        cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
        cmd |= PCIE2CIO_CMD_START;
        pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

        ret = pci2cio_wait_completion(icm, 5000);
        if (ret)
                return ret;

        pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
        return 0;
}

static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
                          unsigned int port, unsigned int index, u32 data)
{
        struct pci_dev *pdev = icm->upstream_port;
        int vnd_cap = icm->vnd_cap;
        u32 cmd;

        pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);

        cmd = index;
        cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
        cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
        cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
        pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

        return pci2cio_wait_completion(icm, 5000);
}

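/*
 * ICM responses are matched to the pending request by packet type and
 * message code. A response may span several packets: icm_copy() places
 * each packet at its packet_id offset in the response buffer and
 * reports completion once the last packet has been seen.
 */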
static bool icm_match(const struct tb_cfg_request *req,
                      const struct ctl_pkg *pkg)
{
        const struct icm_pkg_header *res_hdr = pkg->buffer;
        const struct icm_pkg_header *req_hdr = req->request;

        if (pkg->frame.eof != req->response_type)
                return false;
        if (res_hdr->code != req_hdr->code)
                return false;

        return true;
}

static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
        const struct icm_pkg_header *hdr = pkg->buffer;

        if (hdr->packet_id < req->npackets) {
                size_t offset = hdr->packet_id * req->response_size;

                memcpy(req->response + offset, pkg->buffer, req->response_size);
        }

        return hdr->packet_id == hdr->total_packets - 1;
}

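/*
 * Send a message to the ICM and wait for the response. Only one
 * message is in flight at a time (serialized by @request_lock) and a
 * timed out request is retried a few times before giving up.
 */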
static int icm_request(struct tb *tb, const void *request, size_t request_size,
                       void *response, size_t response_size, size_t npackets,
                       unsigned int timeout_msec)
{
        struct icm *icm = tb_priv(tb);
        int retries = 3;

        do {
                struct tb_cfg_request *req;
                struct tb_cfg_result res;

                req = tb_cfg_request_alloc();
                if (!req)
                        return -ENOMEM;

                req->match = icm_match;
                req->copy = icm_copy;
                req->request = request;
                req->request_size = request_size;
                req->request_type = TB_CFG_PKG_ICM_CMD;
                req->response = response;
                req->npackets = npackets;
                req->response_size = response_size;
                req->response_type = TB_CFG_PKG_ICM_RESP;

                mutex_lock(&icm->request_lock);
                res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
                mutex_unlock(&icm->request_lock);

                tb_cfg_request_put(req);

                if (res.err != -ETIMEDOUT)
                        return res.err == 1 ? -EIO : res.err;

                usleep_range(20, 50);
        } while (retries--);

        return -ETIMEDOUT;
}

static bool icm_fr_is_supported(struct tb *tb)
{
        return !x86_apple_machine;
}

static inline int icm_fr_get_switch_index(u32 port)
{
        int index;

        if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT)
                return 0;

        index = port >> ICM_PORT_INDEX_SHIFT;
        return index != 0xff ? index : 0;
}

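/*
 * The Falcon Ridge firmware does not hand out route strings directly.
 * Instead the full topology is read and walked from the root switch
 * one hop at a time until the switch at (link, depth) is reached.
 */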
static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
        struct icm_fr_pkg_get_topology_response *switches, *sw;
        struct icm_fr_pkg_get_topology request = {
                .hdr = { .code = ICM_GET_TOPOLOGY },
        };
        size_t npackets = ICM_GET_TOPOLOGY_PACKETS;
        int ret, index;
        u8 i;

        switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);
        if (!switches)
                return -ENOMEM;

        ret = icm_request(tb, &request, sizeof(request), switches,
                          sizeof(*switches), npackets, ICM_TIMEOUT);
        if (ret)
                goto err_free;

        sw = &switches[0];
        index = icm_fr_get_switch_index(sw->ports[link]);
        if (!index) {
                ret = -ENODEV;
                goto err_free;
        }

        sw = &switches[index];
        for (i = 1; i < depth; i++) {
                unsigned int j;

                if (!(sw->first_data & ICM_SWITCH_USED)) {
                        ret = -ENODEV;
                        goto err_free;
                }

                for (j = 0; j < ARRAY_SIZE(sw->ports); j++) {
                        index = icm_fr_get_switch_index(sw->ports[j]);
                        if (index > sw->switch_index) {
                                sw = &switches[index];
                                break;
                        }
                }
        }

        *route = get_route(sw->route_hi, sw->route_lo);

err_free:
        kfree(switches);
        return ret;
}

static void icm_fr_save_devices(struct tb *tb)
{
        nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
}

static int
icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
                    size_t *nboot_acl, bool *rpm)
{
        struct icm_fr_pkg_driver_ready_response reply;
        struct icm_pkg_driver_ready request = {
                .hdr.code = ICM_DRIVER_READY,
        };
        int ret;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (security_level)
                *security_level = reply.security_level & ICM_FR_SLEVEL_MASK;

        return 0;
}

static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
        struct icm_fr_pkg_approve_device request;
        struct icm_fr_pkg_approve_device reply;
        int ret;

        memset(&request, 0, sizeof(request));
        memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
        request.hdr.code = ICM_APPROVE_DEVICE;
        request.connection_id = sw->connection_id;
        request.connection_key = sw->connection_key;

        memset(&reply, 0, sizeof(reply));
        /* Use larger timeout as establishing tunnels can take some time */
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_APPROVE_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR) {
                tb_warn(tb, "PCIe tunnel creation failed\n");
                return -EIO;
        }

        return 0;
}

static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
        struct icm_fr_pkg_add_device_key request;
        struct icm_fr_pkg_add_device_key_response reply;
        int ret;

        memset(&request, 0, sizeof(request));
        memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
        request.hdr.code = ICM_ADD_DEVICE_KEY;
        request.connection_id = sw->connection_id;
        request.connection_key = sw->connection_key;
        memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR) {
                tb_warn(tb, "Adding key to switch failed\n");
                return -EIO;
        }

        return 0;
}

static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
                                       const u8 *challenge, u8 *response)
{
        struct icm_fr_pkg_challenge_device request;
        struct icm_fr_pkg_challenge_device_response reply;
        int ret;

        memset(&request, 0, sizeof(request));
        memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
        request.hdr.code = ICM_CHALLENGE_DEVICE;
        request.connection_id = sw->connection_id;
        request.connection_key = sw->connection_key;
        memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EKEYREJECTED;
        if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
                return -ENOKEY;

        memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

        return 0;
}

static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        struct icm_fr_pkg_approve_xdomain_response reply;
        struct icm_fr_pkg_approve_xdomain request;
        int ret;

        memset(&request, 0, sizeof(request));
        request.hdr.code = ICM_APPROVE_XDOMAIN;
        request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;
        memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

        request.transmit_path = xd->transmit_path;
        request.transmit_ring = xd->transmit_ring;
        request.receive_path = xd->receive_path;
        request.receive_ring = xd->receive_ring;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EIO;

        return 0;
}

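/*
 * Disconnecting XDomain paths is a two stage operation: the same
 * mailbox command is issued twice, with stage numbers 1 and 2 and a
 * short delay in between. The Titan Ridge version below follows the
 * same pattern.
 */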
static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        u8 phy_port;
        u8 cmd;

        phy_port = tb_phy_port_from_link(xd->link);
        if (phy_port == 0)
                cmd = NHI_MAILBOX_DISCONNECT_PA;
        else
                cmd = NHI_MAILBOX_DISCONNECT_PB;

        nhi_mailbox_cmd(tb->nhi, cmd, 1);
        usleep_range(10, 50);
        nhi_mailbox_cmd(tb->nhi, cmd, 2);
        return 0;
}

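/*
 * Keep the parent runtime resumed for the whole time the new switch is
 * allocated and added so that it does not runtime suspend in the
 * middle of the enumeration.
 */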
static void add_switch(struct tb_switch *parent_sw, u64 route,
                       const uuid_t *uuid, const u8 *ep_name,
                       size_t ep_name_size, u8 connection_id, u8 connection_key,
                       u8 link, u8 depth, enum tb_security_level security_level,
                       bool authorized, bool boot)
{
        const struct intel_vss *vss;
        struct tb_switch *sw;

        pm_runtime_get_sync(&parent_sw->dev);

        sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
        if (IS_ERR(sw))
                goto out;

        sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
        if (!sw->uuid) {
                tb_sw_warn(sw, "cannot allocate memory for switch\n");
                tb_switch_put(sw);
                goto out;
        }
        sw->connection_id = connection_id;
        sw->connection_key = connection_key;
        sw->link = link;
        sw->depth = depth;
        sw->authorized = authorized;
        sw->security_level = security_level;
        sw->boot = boot;
        init_completion(&sw->rpm_complete);

        vss = parse_intel_vss(ep_name, ep_name_size);
        if (vss)
                sw->rpm = !!(vss->flags & INTEL_VSS_FLAGS_RTD3);

        /* Link the two switches now */
        tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
        tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);

        if (tb_switch_add(sw)) {
                tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
                tb_switch_put(sw);
        }

out:
        pm_runtime_mark_last_busy(&parent_sw->dev);
        pm_runtime_put_autosuspend(&parent_sw->dev);
}

static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
                          u64 route, u8 connection_id, u8 connection_key,
                          u8 link, u8 depth, bool boot)
{
        /* Disconnect from parent */
        tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
        /* Re-connect via updated port */
        tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);

        /* Update with the new addressing information */
        sw->config.route_hi = upper_32_bits(route);
        sw->config.route_lo = lower_32_bits(route);
        sw->connection_id = connection_id;
        sw->connection_key = connection_key;
        sw->link = link;
        sw->depth = depth;
        sw->boot = boot;

        /* This switch still exists */
        sw->is_unplugged = false;

        /* Runtime resume is now complete */
        complete(&sw->rpm_complete);
}

static void remove_switch(struct tb_switch *sw)
{
        struct tb_switch *parent_sw;

        parent_sw = tb_to_switch(sw->dev.parent);
        tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
        tb_switch_remove(sw);
}

static void add_xdomain(struct tb_switch *sw, u64 route,
                        const uuid_t *local_uuid, const uuid_t *remote_uuid,
                        u8 link, u8 depth)
{
        struct tb_xdomain *xd;

        pm_runtime_get_sync(&sw->dev);

        xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);
        if (!xd)
                goto out;

        xd->link = link;
        xd->depth = depth;

        tb_port_at(route, sw)->xdomain = xd;

        tb_xdomain_add(xd);

out:
        pm_runtime_mark_last_busy(&sw->dev);
        pm_runtime_put_autosuspend(&sw->dev);
}

static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link)
{
        xd->link = link;
        xd->route = route;
        xd->is_unplugged = false;
}

static void remove_xdomain(struct tb_xdomain *xd)
{
        struct tb_switch *sw;

        sw = tb_to_switch(xd->dev.parent);
        tb_port_at(xd->route, sw)->xdomain = NULL;
        tb_xdomain_remove(xd);
}

static void
icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_fr_event_device_connected *pkg =
                (const struct icm_fr_event_device_connected *)hdr;
        enum tb_security_level security_level;
        struct tb_switch *sw, *parent_sw;
        struct icm *icm = tb_priv(tb);
        bool authorized = false;
        struct tb_xdomain *xd;
        u8 link, depth;
        bool boot;
        u64 route;
        int ret;

        link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
        depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
                ICM_LINK_INFO_DEPTH_SHIFT;
        authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
        security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
                         ICM_FLAGS_SLEVEL_SHIFT;
        boot = pkg->link_info & ICM_LINK_INFO_BOOT;

        if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
                tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n",
                        link, depth);
                return;
        }

        sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
        if (sw) {
                u8 phy_port, sw_phy_port;

                parent_sw = tb_to_switch(sw->dev.parent);
                sw_phy_port = tb_phy_port_from_link(sw->link);
                phy_port = tb_phy_port_from_link(link);

                /*
                 * On resume ICM sends us connected events for the
                 * devices that are still present. However, the details
                 * may have changed, for example a switch on a
                 * dual-link connection may now have been enumerated
                 * using the other link. Make sure our bookkeeping
                 * matches that.
                 */
                if (sw->depth == depth && sw_phy_port == phy_port &&
                    !!sw->authorized == authorized) {
                        /*
                         * It was enumerated through another link so update
                         * route string accordingly.
                         */
                        if (sw->link != link) {
                                ret = icm->get_route(tb, link, depth, &route);
                                if (ret) {
                                        tb_err(tb, "failed to update route string for switch at %u.%u\n",
                                               link, depth);
                                        tb_switch_put(sw);
                                        return;
                                }
                        } else {
                                route = tb_route(sw);
                        }

                        update_switch(parent_sw, sw, route, pkg->connection_id,
                                      pkg->connection_key, link, depth, boot);
                        tb_switch_put(sw);
                        return;
                }

                /*
                 * User connected the same switch to another physical
                 * port or to another part of the topology. Remove the
                 * existing switch now before adding the new one.
                 */
                remove_switch(sw);
                tb_switch_put(sw);
        }

        /*
         * If the switch was not found by UUID, look for a switch on the
         * same physical port (taking possible link aggregation into
         * account) and depth. If we found one it is definitely a stale
         * one so remove it first.
         */
        sw = tb_switch_find_by_link_depth(tb, link, depth);
        if (!sw) {
                u8 dual_link;

                dual_link = dual_link_from_link(link);
                if (dual_link)
                        sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
        }
        if (sw) {
                remove_switch(sw);
                tb_switch_put(sw);
        }

        /* Remove existing XDomain connection if found */
        xd = tb_xdomain_find_by_link_depth(tb, link, depth);
        if (xd) {
                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }

        parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
        if (!parent_sw) {
                tb_err(tb, "failed to find parent switch for %u.%u\n",
                       link, depth);
                return;
        }

        ret = icm->get_route(tb, link, depth, &route);
        if (ret) {
                tb_err(tb, "failed to find route string for switch at %u.%u\n",
                       link, depth);
                tb_switch_put(parent_sw);
                return;
        }

        add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
                   sizeof(pkg->ep_name), pkg->connection_id,
                   pkg->connection_key, link, depth, security_level,
                   authorized, boot);

        tb_switch_put(parent_sw);
}

static void
icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_fr_event_device_disconnected *pkg =
                (const struct icm_fr_event_device_disconnected *)hdr;
        struct tb_switch *sw;
        u8 link, depth;

        link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
        depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
                ICM_LINK_INFO_DEPTH_SHIFT;

        if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) {
                tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
                return;
        }

        sw = tb_switch_find_by_link_depth(tb, link, depth);
        if (!sw) {
                tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
                        depth);
                return;
        }

        remove_switch(sw);
        tb_switch_put(sw);
}

static void
icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_fr_event_xdomain_connected *pkg =
                (const struct icm_fr_event_xdomain_connected *)hdr;
        struct tb_xdomain *xd;
        struct tb_switch *sw;
        u8 link, depth;
        u64 route;

        link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
        depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
                ICM_LINK_INFO_DEPTH_SHIFT;

        if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) {
                tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
                return;
        }

        route = get_route(pkg->local_route_hi, pkg->local_route_lo);

        xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
        if (xd) {
                u8 xd_phy_port, phy_port;

                xd_phy_port = phy_port_from_route(xd->route, xd->depth);
                phy_port = phy_port_from_route(route, depth);

                if (xd->depth == depth && xd_phy_port == phy_port) {
                        update_xdomain(xd, route, link);
                        tb_xdomain_put(xd);
                        return;
                }

                /*
                 * If we find an existing XDomain connection remove it
                 * now. We need to go through login handshake and
                 * everything anyway to be able to re-establish the
                 * connection.
                 */
                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }

        /*
         * Check whether an XDomain already exists in the same place as
         * the new one and if so remove it, because it is most likely
         * another host that got disconnected.
         */
        xd = tb_xdomain_find_by_link_depth(tb, link, depth);
        if (!xd) {
                u8 dual_link;

                dual_link = dual_link_from_link(link);
                if (dual_link)
                        xd = tb_xdomain_find_by_link_depth(tb, dual_link,
                                                           depth);
        }
        if (xd) {
                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }

        /*
         * If the user disconnected a switch during suspend and
         * connected another host to the same port, remove the switch
         * first.
         */
        sw = tb_switch_find_by_route(tb, route);
        if (sw) {
                remove_switch(sw);
                tb_switch_put(sw);
        }

        sw = tb_switch_find_by_link_depth(tb, link, depth);
        if (!sw) {
                tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
                        depth);
                return;
        }

        add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link,
                    depth);
        tb_switch_put(sw);
}

static void
icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_fr_event_xdomain_disconnected *pkg =
                (const struct icm_fr_event_xdomain_disconnected *)hdr;
        struct tb_xdomain *xd;

        /*
         * If the connection is through one or multiple devices, the
         * XDomain device is removed along with them so it is fine if we
         * cannot find it here.
         */
        xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
        if (xd) {
                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }
}

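/*
 * The Titan Ridge (TR) flavor of the protocol addresses devices by
 * route string instead of link and depth, and uses a different vendor
 * specific register for the CIO reset than Alpine Ridge below.
 */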
static int icm_tr_cio_reset(struct tb *tb)
{
        return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x777, BIT(1));
}

static int
icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
                    size_t *nboot_acl, bool *rpm)
{
        struct icm_tr_pkg_driver_ready_response reply;
        struct icm_pkg_driver_ready request = {
                .hdr.code = ICM_DRIVER_READY,
        };
        int ret;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, 20000);
        if (ret)
                return ret;

        if (security_level)
                *security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK;
        if (nboot_acl)
                *nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >>
                                ICM_TR_INFO_BOOT_ACL_SHIFT;
        if (rpm)
                *rpm = !!(reply.hdr.flags & ICM_TR_FLAGS_RTD3);

        return 0;
}

static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
        struct icm_tr_pkg_approve_device request;
        struct icm_tr_pkg_approve_device reply;
        int ret;

        memset(&request, 0, sizeof(request));
        memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
        request.hdr.code = ICM_APPROVE_DEVICE;
        request.route_lo = sw->config.route_lo;
        request.route_hi = sw->config.route_hi;
        request.connection_id = sw->connection_id;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_APPROVE_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR) {
                tb_warn(tb, "PCIe tunnel creation failed\n");
                return -EIO;
        }

        return 0;
}

static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
        struct icm_tr_pkg_add_device_key_response reply;
        struct icm_tr_pkg_add_device_key request;
        int ret;

        memset(&request, 0, sizeof(request));
        memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
        request.hdr.code = ICM_ADD_DEVICE_KEY;
        request.route_lo = sw->config.route_lo;
        request.route_hi = sw->config.route_hi;
        request.connection_id = sw->connection_id;
        memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR) {
                tb_warn(tb, "Adding key to switch failed\n");
                return -EIO;
        }

        return 0;
}

static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
                                       const u8 *challenge, u8 *response)
{
        struct icm_tr_pkg_challenge_device_response reply;
        struct icm_tr_pkg_challenge_device request;
        int ret;

        memset(&request, 0, sizeof(request));
        memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
        request.hdr.code = ICM_CHALLENGE_DEVICE;
        request.route_lo = sw->config.route_lo;
        request.route_hi = sw->config.route_hi;
        request.connection_id = sw->connection_id;
        memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EKEYREJECTED;
        if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
                return -ENOKEY;

        memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

        return 0;
}

static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        struct icm_tr_pkg_approve_xdomain_response reply;
        struct icm_tr_pkg_approve_xdomain request;
        int ret;

        memset(&request, 0, sizeof(request));
        request.hdr.code = ICM_APPROVE_XDOMAIN;
        request.route_hi = upper_32_bits(xd->route);
        request.route_lo = lower_32_bits(xd->route);
        request.transmit_path = xd->transmit_path;
        request.transmit_ring = xd->transmit_ring;
        request.receive_path = xd->receive_path;
        request.receive_ring = xd->receive_ring;
        memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EIO;

        return 0;
}

static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
                                    int stage)
{
        struct icm_tr_pkg_disconnect_xdomain_response reply;
        struct icm_tr_pkg_disconnect_xdomain request;
        int ret;

        memset(&request, 0, sizeof(request));
        request.hdr.code = ICM_DISCONNECT_XDOMAIN;
        request.stage = stage;
        request.route_hi = upper_32_bits(xd->route);
        request.route_lo = lower_32_bits(xd->route);
        memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EIO;

        return 0;
}

static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        int ret;

        ret = icm_tr_xdomain_tear_down(tb, xd, 1);
        if (ret)
                return ret;

        usleep_range(10, 50);
        return icm_tr_xdomain_tear_down(tb, xd, 2);
}

static void
icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_tr_event_device_connected *pkg =
                (const struct icm_tr_event_device_connected *)hdr;
        enum tb_security_level security_level;
        struct tb_switch *sw, *parent_sw;
        struct tb_xdomain *xd;
        bool authorized, boot;
        u64 route;

        /*
         * Currently we don't use the QoS information coming with the
         * device connected message so simply ignore that extra packet
         * for now.
         */
        if (pkg->hdr.packet_id)
                return;

        route = get_route(pkg->route_hi, pkg->route_lo);
        authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
        security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
                         ICM_FLAGS_SLEVEL_SHIFT;
        boot = pkg->link_info & ICM_LINK_INFO_BOOT;

        if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
                tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n",
                        route);
                return;
        }

        sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
        if (sw) {
                /* Update the switch if it is still in the same place */
                if (tb_route(sw) == route && !!sw->authorized == authorized) {
                        parent_sw = tb_to_switch(sw->dev.parent);
                        update_switch(parent_sw, sw, route, pkg->connection_id,
                                      0, 0, 0, boot);
                        tb_switch_put(sw);
                        return;
                }

                remove_switch(sw);
                tb_switch_put(sw);
        }

        /* Another switch with the same address */
        sw = tb_switch_find_by_route(tb, route);
        if (sw) {
                remove_switch(sw);
                tb_switch_put(sw);
        }

        /* XDomain connection with the same address */
        xd = tb_xdomain_find_by_route(tb, route);
        if (xd) {
                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }

        parent_sw = tb_switch_find_by_route(tb, get_parent_route(route));
        if (!parent_sw) {
                tb_err(tb, "failed to find parent switch for %llx\n", route);
                return;
        }

        add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
                   sizeof(pkg->ep_name), pkg->connection_id,
                   0, 0, 0, security_level, authorized, boot);

        tb_switch_put(parent_sw);
}

static void
icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_tr_event_device_disconnected *pkg =
                (const struct icm_tr_event_device_disconnected *)hdr;
        struct tb_switch *sw;
        u64 route;

        route = get_route(pkg->route_hi, pkg->route_lo);

        sw = tb_switch_find_by_route(tb, route);
        if (!sw) {
                tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
                return;
        }

        remove_switch(sw);
        tb_switch_put(sw);
}

static void
icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_tr_event_xdomain_connected *pkg =
                (const struct icm_tr_event_xdomain_connected *)hdr;
        struct tb_xdomain *xd;
        struct tb_switch *sw;
        u64 route;

        if (!tb->root_switch)
                return;

        route = get_route(pkg->local_route_hi, pkg->local_route_lo);

        xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
        if (xd) {
                if (xd->route == route) {
                        update_xdomain(xd, route, 0);
                        tb_xdomain_put(xd);
                        return;
                }

                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }

        /* An existing XDomain connection with the same address */
        xd = tb_xdomain_find_by_route(tb, route);
        if (xd) {
                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }

        /*
         * If the user disconnected a switch during suspend and
         * connected another host to the same port, remove the switch
         * first.
         */
        sw = tb_switch_find_by_route(tb, route);
        if (sw) {
                remove_switch(sw);
                tb_switch_put(sw);
        }

        sw = tb_switch_find_by_route(tb, get_parent_route(route));
        if (!sw) {
                tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
                return;
        }

        add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0);
        tb_switch_put(sw);
}

static void
icm_tr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_tr_event_xdomain_disconnected *pkg =
                (const struct icm_tr_event_xdomain_disconnected *)hdr;
        struct tb_xdomain *xd;
        u64 route;

        route = get_route(pkg->route_hi, pkg->route_lo);

        xd = tb_xdomain_find_by_route(tb, route);
        if (xd) {
                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }
}

static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
{
        struct pci_dev *parent;

        parent = pci_upstream_bridge(pdev);
        while (parent) {
                if (!pci_is_pcie(parent))
                        return NULL;
                if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
                        break;
                parent = pci_upstream_bridge(parent);
        }

        if (!parent)
                return NULL;

        switch (parent->device) {
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
        case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
        case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
                return parent;
        }

        return NULL;
}

static bool icm_ar_is_supported(struct tb *tb)
{
        struct pci_dev *upstream_port;
        struct icm *icm = tb_priv(tb);

        /*
         * Starting from Alpine Ridge we can use ICM on Apple machines
         * as well. We just need to reset and re-enable it first.
         */
        if (!x86_apple_machine)
                return true;

        /*
         * Find the upstream PCIe port in case we need to do reset
         * through its vendor specific registers.
         */
        upstream_port = get_upstream_port(tb->nhi->pdev);
        if (upstream_port) {
                int cap;

                cap = pci_find_ext_capability(upstream_port,
                                              PCI_EXT_CAP_ID_VNDR);
                if (cap > 0) {
                        icm->upstream_port = upstream_port;
                        icm->vnd_cap = cap;

                        return true;
                }
        }

        return false;
}

static int icm_ar_cio_reset(struct tb *tb)
{
        return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x50, BIT(9));
}

static int icm_ar_get_mode(struct tb *tb)
{
        struct tb_nhi *nhi = tb->nhi;
        int retries = 60;
        u32 val;

        do {
                val = ioread32(nhi->iobase + REG_FW_STS);
                if (val & REG_FW_STS_NVM_AUTH_DONE)
                        break;
                msleep(50);
        } while (--retries);

        if (!retries) {
                dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");
                return -ENODEV;
        }

        return nhi_mailbox_mode(nhi);
}

static int
icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
                    size_t *nboot_acl, bool *rpm)
{
        struct icm_ar_pkg_driver_ready_response reply;
        struct icm_pkg_driver_ready request = {
                .hdr.code = ICM_DRIVER_READY,
        };
        int ret;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (security_level)
                *security_level = reply.info & ICM_AR_INFO_SLEVEL_MASK;
        if (nboot_acl && (reply.info & ICM_AR_INFO_BOOT_ACL_SUPPORTED))
                *nboot_acl = (reply.info & ICM_AR_INFO_BOOT_ACL_MASK) >>
                                ICM_AR_INFO_BOOT_ACL_SHIFT;
        if (rpm)
                *rpm = !!(reply.hdr.flags & ICM_AR_FLAGS_RTD3);

        return 0;
}

static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
        struct icm_ar_pkg_get_route_response reply;
        struct icm_ar_pkg_get_route request = {
                .hdr = { .code = ICM_GET_ROUTE },
                .link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link,
        };
        int ret;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EIO;

        *route = get_route(reply.route_hi, reply.route_lo);
        return 0;
}

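/*
 * The preboot ACL stores only the lower two DWs of each UUID and an
 * all ones entry marks an empty slot. When reading, the missing upper
 * two DWs of a used entry are filled in with ones; when writing, they
 * must already be all ones.
 */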
static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
{
        struct icm_ar_pkg_preboot_acl_response reply;
        struct icm_ar_pkg_preboot_acl request = {
                .hdr = { .code = ICM_PREBOOT_ACL },
        };
        int ret, i;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EIO;

        for (i = 0; i < nuuids; i++) {
                u32 *uuid = (u32 *)&uuids[i];

                uuid[0] = reply.acl[i].uuid_lo;
                uuid[1] = reply.acl[i].uuid_hi;

                if (uuid[0] == 0xffffffff && uuid[1] == 0xffffffff) {
                        /* Map empty entries to null UUID */
                        uuid[0] = 0;
                        uuid[1] = 0;
                } else if (uuid[0] != 0 || uuid[1] != 0) {
                        /* Upper two DWs are always ones */
                        uuid[2] = 0xffffffff;
                        uuid[3] = 0xffffffff;
                }
        }

        return ret;
}

static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
                               size_t nuuids)
{
        struct icm_ar_pkg_preboot_acl_response reply;
        struct icm_ar_pkg_preboot_acl request = {
                .hdr = {
                        .code = ICM_PREBOOT_ACL,
                        .flags = ICM_FLAGS_WRITE,
                },
        };
        int ret, i;

        for (i = 0; i < nuuids; i++) {
                const u32 *uuid = (const u32 *)&uuids[i];

                if (uuid_is_null(&uuids[i])) {
                        /*
                         * Map null UUID to the empty (all ones) entries
                         * for ICM.
                         */
                        request.acl[i].uuid_lo = 0xffffffff;
                        request.acl[i].uuid_hi = 0xffffffff;
                } else {
                        /* Two high DWs need to be set to all ones */
                        if (uuid[2] != 0xffffffff || uuid[3] != 0xffffffff)
                                return -EINVAL;

                        request.acl[i].uuid_lo = uuid[0];
                        request.acl[i].uuid_hi = uuid[1];
                }
        }

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EIO;

        return 0;
}

static void icm_handle_notification(struct work_struct *work)
{
        struct icm_notification *n = container_of(work, typeof(*n), work);
        struct tb *tb = n->tb;
        struct icm *icm = tb_priv(tb);

        mutex_lock(&tb->lock);

        /*
         * When the domain is stopped we flush its workqueue but before
         * that the root switch is removed. In that case we should treat
         * the queued events as being canceled.
         */
        if (tb->root_switch) {
                switch (n->pkg->code) {
                case ICM_EVENT_DEVICE_CONNECTED:
                        icm->device_connected(tb, n->pkg);
                        break;
                case ICM_EVENT_DEVICE_DISCONNECTED:
                        icm->device_disconnected(tb, n->pkg);
                        break;
                case ICM_EVENT_XDOMAIN_CONNECTED:
                        icm->xdomain_connected(tb, n->pkg);
                        break;
                case ICM_EVENT_XDOMAIN_DISCONNECTED:
                        icm->xdomain_disconnected(tb, n->pkg);
                        break;
                }
        }

        mutex_unlock(&tb->lock);

        kfree(n->pkg);
        kfree(n);
}

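/*
 * Called when an ICM event packet arrives. The packet is copied and
 * the actual handling is deferred to the domain workqueue where
 * tb->lock can be taken safely.
 */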
static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
                             const void *buf, size_t size)
{
        struct icm_notification *n;

        n = kmalloc(sizeof(*n), GFP_KERNEL);
        if (!n)
                return;

        INIT_WORK(&n->work, icm_handle_notification);
        n->pkg = kmemdup(buf, size, GFP_KERNEL);
        if (!n->pkg) {
                kfree(n);
                return;
        }
        n->tb = tb;

        queue_work(tb->wq, &n->work);
}
1520
1521static int
1522__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level,
1523                   size_t *nboot_acl, bool *rpm)
1524{
1525        struct icm *icm = tb_priv(tb);
1526        unsigned int retries = 50;
1527        int ret;
1528
1529        ret = icm->driver_ready(tb, security_level, nboot_acl, rpm);
1530        if (ret) {
1531                tb_err(tb, "failed to send driver ready to ICM\n");
1532                return ret;
1533        }
1534
1535        /*
1536         * Hold on here until the switch config space is accessible so
1537         * that we can read root switch config successfully.
1538         */
1539        do {
1540                struct tb_cfg_result res;
1541                u32 tmp;
1542
1543                res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
1544                                      0, 1, 100);
1545                if (!res.err)
1546                        return 0;
1547
1548                msleep(50);
1549        } while (--retries);
1550
1551        tb_err(tb, "failed to read root switch config space, giving up\n");
1552        return -ETIMEDOUT;
1553}
1554
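    /*
     * Puts the ARC (the CPU running the ICM firmware) to wait for a
     * CIO reset event, re-enables the ICM CPU and then triggers the
     * controller specific CIO reset. Requires that the PCIe upstream
     * port of the host controller is known.
     */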
1555static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
1556{
1557        struct icm *icm = tb_priv(tb);
1558        u32 val;
1559
1560        if (!icm->upstream_port)
1561                return -ENODEV;
1562
1563        /* Set the ARC to wait for the CIO reset event to happen */
1564        val = ioread32(nhi->iobase + REG_FW_STS);
1565        val |= REG_FW_STS_CIO_RESET_REQ;
1566        iowrite32(val, nhi->iobase + REG_FW_STS);
1567
1568        /* Re-start ARC */
1569        val = ioread32(nhi->iobase + REG_FW_STS);
1570        val |= REG_FW_STS_ICM_EN_INVERT;
1571        val |= REG_FW_STS_ICM_EN_CPU;
1572        iowrite32(val, nhi->iobase + REG_FW_STS);
1573
1574        /* Trigger CIO reset now */
1575        return icm->cio_reset(tb);
1576}
1577
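    /*
     * Starts the ICM firmware if it is not already running and then
     * polls (up to 10 x 300 ms) until the firmware reports that NVM
     * authentication is done, meaning it is up and running.
     */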
1578static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
1579{
1580        unsigned int retries = 10;
1581        int ret;
1582        u32 val;
1583
1584        /* Check if the ICM firmware is already running */
1585        val = ioread32(nhi->iobase + REG_FW_STS);
1586        if (val & REG_FW_STS_ICM_EN)
1587                return 0;
1588
1589        dev_dbg(&nhi->pdev->dev, "starting ICM firmware\n");
1590
1591        ret = icm_firmware_reset(tb, nhi);
1592        if (ret)
1593                return ret;
1594
1595        /* Wait until the ICM firmware tells us it is up and running */
1596        do {
1597                /* The firmware is up once NVM authentication is done */
1598                val = ioread32(nhi->iobase + REG_FW_STS);
1599                if (val & REG_FW_STS_NVM_AUTH_DONE)
1600                        return 0;
1601
1602                msleep(300);
1603        } while (--retries);
1604
1605        return -ETIMEDOUT;
1606}
1607
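    /*
     * A physical port is made of two lane adapters (null ports). If
     * both lanes of the given physical port are up already, disable
     * them briefly through the PCIe2CIO mailbox and then re-enable
     * them so that the links get re-established. This is a no-op on
     * systems where the upstream port is not known.
     */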
1608static int icm_reset_phy_port(struct tb *tb, int phy_port)
1609{
1610        struct icm *icm = tb_priv(tb);
1611        u32 state0, state1;
1612        int port0, port1;
1613        u32 val0, val1;
1614        int ret;
1615
1616        if (!icm->upstream_port)
1617                return 0;
1618
1619        if (phy_port) {
1620                port0 = 3;
1621                port1 = 4;
1622        } else {
1623                port0 = 1;
1624                port1 = 2;
1625        }
1626
1627        /*
1628         * Read link status of both null ports belonging to a single
1629         * physical port.
1630         */
1631        ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
1632        if (ret)
1633                return ret;
1634        ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
1635        if (ret)
1636                return ret;
1637
1638        state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK;
1639        state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
1640        state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK;
1641        state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
1642
1643        /* Reset is needed only if both lanes are up; bail out otherwise */
1644        if (state0 != TB_PORT_UP || state1 != TB_PORT_UP)
1645                return 0;
1646
1647        val0 |= PHY_PORT_CS1_LINK_DISABLE;
1648        ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
1649        if (ret)
1650                return ret;
1651
1652        val1 |= PHY_PORT_CS1_LINK_DISABLE;
1653        ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
1654        if (ret)
1655                return ret;
1656
1657        /* Wait a bit and then re-enable both ports */
1658        usleep_range(10, 100);
1659
1660        ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
1661        if (ret)
1662                return ret;
1663        ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
1664        if (ret)
1665                return ret;
1666
1667        val0 &= ~PHY_PORT_CS1_LINK_DISABLE;
1668        ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
1669        if (ret)
1670                return ret;
1671
1672        val1 &= ~PHY_PORT_CS1_LINK_DISABLE;
1673        return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
1674}
1675
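    /*
     * Brings the firmware up: starts it if needed, checks which mode
     * it is running in (bailing out on modes we cannot handle) and
     * resets both physical ports in case something was connected to
     * them already.
     */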
1676static int icm_firmware_init(struct tb *tb)
1677{
1678        struct icm *icm = tb_priv(tb);
1679        struct tb_nhi *nhi = tb->nhi;
1680        int ret;
1681
1682        ret = icm_firmware_start(tb, nhi);
1683        if (ret) {
1684                dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");
1685                return ret;
1686        }
1687
1688        if (icm->get_mode) {
1689                ret = icm->get_mode(tb);
1690
1691                switch (ret) {
1692                case NHI_FW_SAFE_MODE:
1693                        icm->safe_mode = true;
1694                        break;
1695
1696                case NHI_FW_CM_MODE:
1697                        /* Ask ICM to accept all Thunderbolt devices */
1698                        nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
1699                        break;
1700
1701                default:
1702                        if (ret < 0)
1703                                return ret;
1704
1705                        tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
1706                        return -ENODEV;
1707                }
1708        }
1709
1710        /*
1711         * Reset both physical ports if there is anything connected to
1712         * them already.
1713         */
1714        ret = icm_reset_phy_port(tb, 0);
1715        if (ret)
1716                dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
1717        ret = icm_reset_phy_port(tb, 1);
1718        if (ret)
1719                dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");
1720
1721        return 0;
1722}
1723
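    /*
     * In safe mode only the NVM can be upgraded, so just tell the user
     * to update the firmware. Otherwise do the driver ready handshake
     * with the ICM and disable preboot ACL if the firmware reports
     * more entries than we support.
     */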
1724static int icm_driver_ready(struct tb *tb)
1725{
1726        struct icm *icm = tb_priv(tb);
1727        int ret;
1728
1729        ret = icm_firmware_init(tb);
1730        if (ret)
1731                return ret;
1732
1733        if (icm->safe_mode) {
1734                tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
1735                tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n");
1736                tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");
1737                return 0;
1738        }
1739
1740        ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl,
1741                                 &icm->rpm);
1742        if (ret)
1743                return ret;
1744
1745        /*
1746         * If the firmware reports more preboot ACL entries than we
1747         * support, disable the whole feature.
1748         */
1749        if (tb->nboot_acl > icm->max_boot_acl)
1750                tb->nboot_acl = 0;
1751
1752        return 0;
1753}
1754
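    /*
     * System suspend hook: runs the controller specific save_devices
     * callback (if any) and sends the driver unloads mailbox command
     * to the firmware.
     */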
1755static int icm_suspend(struct tb *tb)
1756{
1757        struct icm *icm = tb_priv(tb);
1758
1759        if (icm->save_devices)
1760                icm->save_devices(tb);
1761
1762        nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
1763        return 0;
1764}
1765
1766/*
1767 * Mark all switches below this one (except the root switch) as
1768 * unplugged. The ICM firmware will send us an updated list of switches
1769 * after we have sent it the driver ready command. Any switch not on
1770 * that list will be removed when we perform the rescan.
1771 */
1772static void icm_unplug_children(struct tb_switch *sw)
1773{
1774        unsigned int i;
1775
1776        if (tb_route(sw))
1777                sw->is_unplugged = true;
1778
1779        for (i = 1; i <= sw->config.max_port_number; i++) {
1780                struct tb_port *port = &sw->ports[i];
1781
1782                if (port->xdomain)
1783                        port->xdomain->is_unplugged = true;
1784                else if (tb_port_has_remote(port))
1785                        icm_unplug_children(port->remote->sw);
1786        }
1787}
1788
1789static int complete_rpm(struct device *dev, void *data)
1790{
1791        struct tb_switch *sw = tb_to_switch(dev);
1792
1793        if (sw)
1794                complete(&sw->rpm_complete);
1795        return 0;
1796}
1797
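    /*
     * Removes an unplugged switch while keeping its parent runtime
     * resumed for the duration of the removal.
     */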
1798static void remove_unplugged_switch(struct tb_switch *sw)
1799{
1800        pm_runtime_get_sync(sw->dev.parent);
1801
1802        /*
1803         * Signal rpm_complete for this switch and the switches below
1804         * it because tb_switch_remove() calls pm_runtime_get_sync()
1805         * which then waits for the completion.
1806         */
1807        complete_rpm(&sw->dev, NULL);
1808        bus_for_each_dev(&tb_bus_type, &sw->dev, NULL, complete_rpm);
1809        tb_switch_remove(sw);
1810
1811        pm_runtime_mark_last_busy(sw->dev.parent);
1812        pm_runtime_put_autosuspend(sw->dev.parent);
1813}
1814
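    /*
     * Walks the topology depth first and removes every switch and
     * XDomain connection still marked as unplugged, meaning the ICM
     * did not announce it again after resume.
     */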
1815static void icm_free_unplugged_children(struct tb_switch *sw)
1816{
1817        unsigned int i;
1818
1819        for (i = 1; i <= sw->config.max_port_number; i++) {
1820                struct tb_port *port = &sw->ports[i];
1821
1822                if (port->xdomain && port->xdomain->is_unplugged) {
1823                        tb_xdomain_remove(port->xdomain);
1824                        port->xdomain = NULL;
1825                } else if (tb_port_has_remote(port)) {
1826                        if (port->remote->sw->is_unplugged) {
1827                                remove_unplugged_switch(port->remote->sw);
1828                                port->remote = NULL;
1829                        } else {
1830                                icm_free_unplugged_children(port->remote->sw);
1831                        }
1832                }
1833        }
1834}
1835
1836static void icm_rescan_work(struct work_struct *work)
1837{
1838        struct icm *icm = container_of(work, struct icm, rescan_work.work);
1839        struct tb *tb = icm_to_tb(icm);
1840
1841        mutex_lock(&tb->lock);
1842        if (tb->root_switch)
1843                icm_free_unplugged_children(tb->root_switch);
1844        mutex_unlock(&tb->lock);
1845}
1846
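    /*
     * Resume hook: marks everything below the root switch unplugged,
     * redoes the driver ready handshake to restart events from the
     * ICM, and schedules a rescan to remove whatever does not get
     * announced again. Does nothing when the NHI is going away.
     */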
1847static void icm_complete(struct tb *tb)
1848{
1849        struct icm *icm = tb_priv(tb);
1850
1851        if (tb->nhi->going_away)
1852                return;
1853
1854        icm_unplug_children(tb->root_switch);
1855
1856        /*
1857         * Now that all existing children should be resumed, restart
1858         * events from the ICM to get updated status.
1859         */
1860        __icm_driver_ready(tb, NULL, NULL, NULL);
1861
1862        /*
1863         * We do not get notifications for devices that were unplugged
1864         * during suspend, so schedule a rescan to clean them up, if
1865         * any.
1866         */
1867        queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
1868}
1869
1870static int icm_runtime_suspend(struct tb *tb)
1871{
1872        nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
1873        return 0;
1874}
1875
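    /*
     * Per-switch runtime PM hooks: re-arm the completion when a switch
     * suspends; on resume wait (up to 500 ms) until the completion is
     * signaled, which happens when the firmware announces the switch
     * again.
     */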
1876static int icm_runtime_suspend_switch(struct tb_switch *sw)
1877{
1878        if (tb_route(sw))
1879                reinit_completion(&sw->rpm_complete);
1880        return 0;
1881}
1882
1883static int icm_runtime_resume_switch(struct tb_switch *sw)
1884{
1885        if (tb_route(sw)) {
1886                if (!wait_for_completion_timeout(&sw->rpm_complete,
1887                                                 msecs_to_jiffies(500))) {
1888                        dev_dbg(&sw->dev, "runtime resuming timed out\n");
1889                }
1890        }
1891        return 0;
1892}
1893
1894static int icm_runtime_resume(struct tb *tb)
1895{
1896        /*
1897         * We can reuse the same resume functionality as with system
1898         * suspend.
1899         */
1900        icm_complete(tb);
1901        return 0;
1902}
1903
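    /*
     * Allocates and adds the root switch. When the firmware is in safe
     * mode a minimal root switch is allocated instead so that the NVM
     * can still be upgraded.
     */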
1904static int icm_start(struct tb *tb)
1905{
1906        struct icm *icm = tb_priv(tb);
1907        int ret;
1908
1909        if (icm->safe_mode)
1910                tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
1911        else
1912                tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
1913        if (IS_ERR(tb->root_switch))
1914                return PTR_ERR(tb->root_switch);
1915
1916        /*
1917         * NVM upgrade has not been tested on Apple systems and they
1918         * don't provide images publicly either. To be on the safe side
1919         * prevent root switch NVM upgrade on Macs for now.
1920         */
1921        tb->root_switch->no_nvm_upgrade = x86_apple_machine;
1922        tb->root_switch->rpm = icm->rpm;
1923
1924        ret = tb_switch_add(tb->root_switch);
1925        if (ret) {
1926                tb_switch_put(tb->root_switch);
1927                tb->root_switch = NULL;
1928        }
1929
1930        return ret;
1931}
1932
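    /*
     * Stops the domain: cancels the pending rescan, removes the root
     * switch and tells the firmware that the driver is unloading.
     */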
1933static void icm_stop(struct tb *tb)
1934{
1935        struct icm *icm = tb_priv(tb);
1936
1937        cancel_delayed_work(&icm->rescan_work);
1938        tb_switch_remove(tb->root_switch);
1939        tb->root_switch = NULL;
1940        nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
1941}
1942
1943static int icm_disconnect_pcie_paths(struct tb *tb)
1944{
1945        return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
1946}
1947
1948/* Falcon Ridge */
1949static const struct tb_cm_ops icm_fr_ops = {
1950        .driver_ready = icm_driver_ready,
1951        .start = icm_start,
1952        .stop = icm_stop,
1953        .suspend = icm_suspend,
1954        .complete = icm_complete,
1955        .handle_event = icm_handle_event,
1956        .approve_switch = icm_fr_approve_switch,
1957        .add_switch_key = icm_fr_add_switch_key,
1958        .challenge_switch_key = icm_fr_challenge_switch_key,
1959        .disconnect_pcie_paths = icm_disconnect_pcie_paths,
1960        .approve_xdomain_paths = icm_fr_approve_xdomain_paths,
1961        .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
1962};
1963
1964/* Alpine Ridge */
1965static const struct tb_cm_ops icm_ar_ops = {
1966        .driver_ready = icm_driver_ready,
1967        .start = icm_start,
1968        .stop = icm_stop,
1969        .suspend = icm_suspend,
1970        .complete = icm_complete,
1971        .runtime_suspend = icm_runtime_suspend,
1972        .runtime_resume = icm_runtime_resume,
1973        .runtime_suspend_switch = icm_runtime_suspend_switch,
1974        .runtime_resume_switch = icm_runtime_resume_switch,
1975        .handle_event = icm_handle_event,
1976        .get_boot_acl = icm_ar_get_boot_acl,
1977        .set_boot_acl = icm_ar_set_boot_acl,
1978        .approve_switch = icm_fr_approve_switch,
1979        .add_switch_key = icm_fr_add_switch_key,
1980        .challenge_switch_key = icm_fr_challenge_switch_key,
1981        .disconnect_pcie_paths = icm_disconnect_pcie_paths,
1982        .approve_xdomain_paths = icm_fr_approve_xdomain_paths,
1983        .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
1984};
1985
1986/* Titan Ridge */
1987static const struct tb_cm_ops icm_tr_ops = {
1988        .driver_ready = icm_driver_ready,
1989        .start = icm_start,
1990        .stop = icm_stop,
1991        .suspend = icm_suspend,
1992        .complete = icm_complete,
1993        .runtime_suspend = icm_runtime_suspend,
1994        .runtime_resume = icm_runtime_resume,
1995        .runtime_suspend_switch = icm_runtime_suspend_switch,
1996        .runtime_resume_switch = icm_runtime_resume_switch,
1997        .handle_event = icm_handle_event,
1998        .get_boot_acl = icm_ar_get_boot_acl,
1999        .set_boot_acl = icm_ar_set_boot_acl,
2000        .approve_switch = icm_tr_approve_switch,
2001        .add_switch_key = icm_tr_add_switch_key,
2002        .challenge_switch_key = icm_tr_challenge_switch_key,
2003        .disconnect_pcie_paths = icm_disconnect_pcie_paths,
2004        .approve_xdomain_paths = icm_tr_approve_xdomain_paths,
2005        .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
2006};
2007
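    /*
     * Entry point called from the NHI driver. Allocates the domain,
     * selects the controller specific callbacks based on the NHI PCI
     * device ID and verifies that the ICM is actually supported on
     * this controller. Returns %NULL if it is not.
     */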
2008struct tb *icm_probe(struct tb_nhi *nhi)
2009{
2010        struct icm *icm;
2011        struct tb *tb;
2012
2013        tb = tb_domain_alloc(nhi, sizeof(struct icm));
2014        if (!tb)
2015                return NULL;
2016
2017        icm = tb_priv(tb);
2018        INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work);
2019        mutex_init(&icm->request_lock);
2020
2021        switch (nhi->pdev->device) {
2022        case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
2023        case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
2024                icm->is_supported = icm_fr_is_supported;
2025                icm->get_route = icm_fr_get_route;
2026                icm->save_devices = icm_fr_save_devices;
2027                icm->driver_ready = icm_fr_driver_ready;
2028                icm->device_connected = icm_fr_device_connected;
2029                icm->device_disconnected = icm_fr_device_disconnected;
2030                icm->xdomain_connected = icm_fr_xdomain_connected;
2031                icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
2032                tb->cm_ops = &icm_fr_ops;
2033                break;
2034
2035        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI:
2036        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI:
2037        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
2038        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
2039        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
2040                icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
2041                icm->is_supported = icm_ar_is_supported;
2042                icm->cio_reset = icm_ar_cio_reset;
2043                icm->get_mode = icm_ar_get_mode;
2044                icm->get_route = icm_ar_get_route;
2045                icm->save_devices = icm_fr_save_devices;
2046                icm->driver_ready = icm_ar_driver_ready;
2047                icm->device_connected = icm_fr_device_connected;
2048                icm->device_disconnected = icm_fr_device_disconnected;
2049                icm->xdomain_connected = icm_fr_xdomain_connected;
2050                icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
2051                tb->cm_ops = &icm_ar_ops;
2052                break;
2053
2054        case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI:
2055        case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI:
2056                icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
2057                icm->is_supported = icm_ar_is_supported;
2058                icm->cio_reset = icm_tr_cio_reset;
2059                icm->get_mode = icm_ar_get_mode;
2060                icm->driver_ready = icm_tr_driver_ready;
2061                icm->device_connected = icm_tr_device_connected;
2062                icm->device_disconnected = icm_tr_device_disconnected;
2063                icm->xdomain_connected = icm_tr_xdomain_connected;
2064                icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
2065                tb->cm_ops = &icm_tr_ops;
2066                break;
2067        }
2068
2069        if (!icm->is_supported || !icm->is_supported(tb)) {
2070                dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");
2071                tb_domain_put(tb);
2072                return NULL;
2073        }
2074
2075        return tb;
2076}
2077