linux/drivers/thunderbolt/tb.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt Cactus Ridge driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel_pci.h"

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @hotplug_active: tb_handle_hotplug() will stop processing plug
 *                  events and exit if this is not set (it needs to
 *                  acquire the lock one more time). Used to drain wq
 *                  after cfg has been paused.
 */
struct tb_cm {
        struct list_head tunnel_list;
        bool hotplug_active;
};

/* enumeration & hot plug handling */


static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 * @sw: Switch to scan
 */
static void tb_scan_switch(struct tb_switch *sw)
{
        int i;
        for (i = 1; i <= sw->config.max_port_number; i++)
                tb_scan_port(&sw->ports[i]);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 * @port: Port to scan
 */
static void tb_scan_port(struct tb_port *port)
{
        struct tb_switch *sw;
        if (tb_is_upstream_port(port))
                return;
        if (port->config.type != TB_TYPE_PORT)
                return;
        if (port->dual_link_port && port->link_nr)
                return; /*
                         * Downstream switch is reachable through two ports.
                         * Only scan on the primary port (link_nr == 0).
                         */
        if (tb_wait_for_port(port, false) <= 0)
                return;
        if (port->remote) {
                tb_port_WARN(port, "port already has a remote!\n");
                return;
        }
        sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
                             tb_downstream_route(port));
        if (!sw)
                return;

        if (tb_switch_configure(sw)) {
                tb_switch_put(sw);
                return;
        }

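        /*
         * With TB_SECURITY_NONE (set in tb_probe()) there is no user
         * visible security level, so every discovered device is
         * authorized right away.
         */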
        sw->authorized = true;

        if (tb_switch_add(sw)) {
                tb_switch_put(sw);
                return;
        }

        port->remote = tb_upstream_port(sw);
        tb_upstream_port(sw)->remote = port;
        tb_scan_switch(sw);
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 * @tb: Domain whose tunnels to check
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_pci_tunnel *tunnel;
        struct tb_pci_tunnel *n;

        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
                if (tb_pci_is_invalid(tunnel)) {
                        tb_pci_deactivate(tunnel);
                        list_del(&tunnel->list);
                        tb_pci_free(tunnel);
                }
        }
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 * @sw: Switch whose children to check
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
        int i;
        for (i = 1; i <= sw->config.max_port_number; i++) {
                struct tb_port *port = &sw->ports[i];
                if (tb_is_upstream_port(port))
                        continue;
                if (!port->remote)
                        continue;
                if (port->remote->sw->is_unplugged) {
                        tb_switch_remove(port->remote->sw);
                        port->remote = NULL;
                } else {
                        tb_free_unplugged_children(port->remote->sw);
                }
        }
}


/**
 * tb_find_pci_up_port() - return the first PCIe up port on @sw or NULL
 * @sw: Switch to search
 */
static struct tb_port *tb_find_pci_up_port(struct tb_switch *sw)
{
        int i;
        for (i = 1; i <= sw->config.max_port_number; i++)
                if (sw->ports[i].config.type == TB_TYPE_PCIE_UP)
                        return &sw->ports[i];
        return NULL;
}

/**
 * tb_find_unused_down_port() - return the first inactive PCIe down port on @sw
 * @sw: Switch to search
 */
static struct tb_port *tb_find_unused_down_port(struct tb_switch *sw)
{
        int i;
        int cap;
        int res;
        int data;
        for (i = 1; i <= sw->config.max_port_number; i++) {
                if (tb_is_upstream_port(&sw->ports[i]))
                        continue;
                if (sw->ports[i].config.type != TB_TYPE_PCIE_DOWN)
                        continue;
                cap = tb_port_find_cap(&sw->ports[i], TB_PORT_CAP_ADAP);
                if (cap < 0)
                        continue;
                res = tb_port_read(&sw->ports[i], &data, TB_CFG_PORT, cap, 1);
                if (res < 0)
                        continue;
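                /*
                 * Bit 31 of the config dword at the adapter capability
                 * offset is the enable bit; skip down adapters that
                 * already carry an active path.
                 */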
                if (data & 0x80000000)
                        continue;
                return &sw->ports[i];
        }
        return NULL;
}

/**
 * tb_activate_pcie_devices() - scan for and activate PCIe devices
 * @tb: Domain to scan
 *
 * This method is somewhat ad hoc. For now it only supports one device
 * per port and only devices at depth 1.
 */
static void tb_activate_pcie_devices(struct tb *tb)
{
        int i;
        int cap;
        u32 data;
        struct tb_switch *sw;
        struct tb_port *up_port;
        struct tb_port *down_port;
        struct tb_pci_tunnel *tunnel;
        struct tb_cm *tcm = tb_priv(tb);

        /* scan for pcie devices at depth 1 */
        for (i = 1; i <= tb->root_switch->config.max_port_number; i++) {
                if (tb_is_upstream_port(&tb->root_switch->ports[i]))
                        continue;
                if (tb->root_switch->ports[i].config.type != TB_TYPE_PORT)
                        continue;
                if (!tb->root_switch->ports[i].remote)
                        continue;
                sw = tb->root_switch->ports[i].remote->sw;
                up_port = tb_find_pci_up_port(sw);
                if (!up_port) {
                        tb_sw_info(sw, "no PCIe devices found, aborting\n");
                        continue;
                }

                /* check whether port is already activated */
                cap = tb_port_find_cap(up_port, TB_PORT_CAP_ADAP);
                if (cap < 0)
                        continue;
                if (tb_port_read(up_port, &data, TB_CFG_PORT, cap, 1))
                        continue;
                if (data & 0x80000000) {
                        tb_port_info(up_port,
                                     "PCIe port already activated, aborting\n");
                        continue;
                }

                down_port = tb_find_unused_down_port(tb->root_switch);
                if (!down_port) {
                        tb_port_info(up_port,
                                     "All PCIe down ports are occupied, aborting\n");
                        continue;
                }
                tunnel = tb_pci_alloc(tb, up_port, down_port);
                if (!tunnel) {
                        tb_port_info(up_port,
                                     "PCIe tunnel allocation failed, aborting\n");
                        continue;
                }

                if (tb_pci_activate(tunnel)) {
                        tb_port_info(up_port,
                                     "PCIe tunnel activation failed, aborting\n");
                        tb_pci_free(tunnel);
                        continue;
                }

                list_add(&tunnel->list, &tcm->tunnel_list);
        }
}

/* hotplug handling */

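/**
 * struct tb_hotplug_event - hotplug event queued from the control channel
 * @work: Work item, executed on the domain workqueue (tb->wq)
 * @tb: Domain the event belongs to
 * @route: Route string of the switch the event originated from
 * @port: Port number on that switch
 * @unplug: True for an unplug event, false for a plug event
 */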
struct tb_hotplug_event {
        struct work_struct work;
        struct tb *tb;
        u64 route;
        u8 port;
        bool unplug;
};

/**
 * tb_handle_hotplug() - handle hotplug event
 * @work: Work item of the queued &struct tb_hotplug_event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
        struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
        struct tb *tb = ev->tb;
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_switch *sw;
        struct tb_port *port;
        mutex_lock(&tb->lock);
        if (!tcm->hotplug_active)
                goto out; /* during init, suspend or shutdown */

        sw = get_switch_at_route(tb->root_switch, ev->route);
        if (!sw) {
                tb_warn(tb,
                        "hotplug event from non existent switch %llx:%x (unplug: %d)\n",
                        ev->route, ev->port, ev->unplug);
                goto out;
        }
        if (ev->port > sw->config.max_port_number) {
                tb_warn(tb,
                        "hotplug event from non existent port %llx:%x (unplug: %d)\n",
                        ev->route, ev->port, ev->unplug);
                goto out;
        }
        port = &sw->ports[ev->port];
        if (tb_is_upstream_port(port)) {
                tb_warn(tb,
                        "hotplug event for upstream port %llx:%x (unplug: %d)\n",
                        ev->route, ev->port, ev->unplug);
                goto out;
        }
        if (ev->unplug) {
                if (port->remote) {
                        tb_port_info(port, "unplugged\n");
                        tb_sw_set_unplugged(port->remote->sw);
                        tb_free_invalid_tunnels(tb);
                        tb_switch_remove(port->remote->sw);
                        port->remote = NULL;
                } else {
                        tb_port_info(port,
                                     "got unplug event for disconnected port, ignoring\n");
                }
        } else if (port->remote) {
                tb_port_info(port,
                             "got plug event for connected port, ignoring\n");
        } else {
                tb_port_info(port, "hotplug: scanning\n");
                tb_scan_port(port);
                if (!port->remote) {
                        tb_port_info(port, "hotplug: no switch found\n");
                } else if (port->remote->sw->config.depth > 1) {
                        tb_sw_warn(port->remote->sw,
                                   "hotplug: chaining not supported\n");
                } else {
                        tb_sw_info(port->remote->sw,
                                   "hotplug: activating pcie devices\n");
                        tb_activate_pcie_devices(tb);
                }
        }
out:
        mutex_unlock(&tb->lock);
        kfree(ev);
}

/**
 * tb_handle_event() - callback function for the control channel
 * @tb: Domain the event arrived on
 * @type: Packet type of the event
 * @buf: Event packet
 * @size: Size of @buf
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
                            const void *buf, size_t size)
{
        const struct cfg_event_pkg *pkg = buf;
        struct tb_hotplug_event *ev;
        u64 route;

        if (type != TB_CFG_PKG_EVENT) {
                tb_warn(tb, "unexpected event %#x, ignoring\n", type);
                return;
        }

        route = tb_cfg_get_route(&pkg->header);

        if (tb_cfg_error(tb->ctl, route, pkg->port,
                         TB_CFG_ERROR_ACK_PLUG_EVENT)) {
                tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
                        pkg->port);
        }

        ev = kmalloc(sizeof(*ev), GFP_KERNEL);
        if (!ev)
                return;
        INIT_WORK(&ev->work, tb_handle_hotplug);
        ev->tb = tb;
        ev->route = route;
        ev->port = pkg->port;
        ev->unplug = pkg->unplug;
        queue_work(tb->wq, &ev->work);
}

static void tb_stop(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_pci_tunnel *tunnel;
        struct tb_pci_tunnel *n;

        /* tunnels are only present after everything has been initialized */
        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
                tb_pci_deactivate(tunnel);
                tb_pci_free(tunnel);
        }
        tb_switch_remove(tb->root_switch);
        tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_start(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        int ret;

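        /* Route 0 addresses the host router (root switch) itself */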
        tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
        if (!tb->root_switch)
                return -ENOMEM;

        /*
         * ICM firmware upgrade needs running firmware, which is not
         * available in native mode, so disable firmware upgrade of the
         * root switch.
         */
        tb->root_switch->no_nvm_upgrade = true;

        ret = tb_switch_configure(tb->root_switch);
        if (ret) {
                tb_switch_put(tb->root_switch);
                return ret;
        }

        /* Announce the switch to the world */
        ret = tb_switch_add(tb->root_switch);
        if (ret) {
                tb_switch_put(tb->root_switch);
                return ret;
        }

        /* Full scan to discover devices added before the driver was loaded. */
        tb_scan_switch(tb->root_switch);
        tb_activate_pcie_devices(tb);

        /* Allow tb_handle_hotplug to process events */
        tcm->hotplug_active = true;
        return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);

        tb_info(tb, "suspending...\n");
        tb_switch_suspend(tb->root_switch);
        tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
        tb_info(tb, "suspend finished\n");

        return 0;
}

static int tb_resume_noirq(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_pci_tunnel *tunnel, *n;

        tb_info(tb, "resuming...\n");

        /* remove any pci devices the firmware might have set up */
        tb_switch_reset(tb, 0);

        tb_switch_resume(tb->root_switch);
        tb_free_invalid_tunnels(tb);
        tb_free_unplugged_children(tb->root_switch);
        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
                tb_pci_restart(tunnel);
        if (!list_empty(&tcm->tunnel_list)) {
                /*
                 * the pcie links need some time to get going.
                 * 100ms works for me...
                 */
                tb_info(tb, "tunnels restarted, sleeping for 100ms\n");
                msleep(100);
        }
        /* Allow tb_handle_hotplug to process events */
        tcm->hotplug_active = true;
        tb_info(tb, "resume finished\n");

        return 0;
}

static const struct tb_cm_ops tb_cm_ops = {
        .start = tb_start,
        .stop = tb_stop,
        .suspend_noirq = tb_suspend_noirq,
        .resume_noirq = tb_resume_noirq,
        .handle_event = tb_handle_event,
};

struct tb *tb_probe(struct tb_nhi *nhi)
{
        struct tb_cm *tcm;
        struct tb *tb;

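        /*
         * This native connection manager is only used on Apple hardware;
         * other systems are expected to be driven by the firmware
         * connection manager (ICM) instead.
         */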
        if (!x86_apple_machine)
                return NULL;

        tb = tb_domain_alloc(nhi, sizeof(*tcm));
        if (!tb)
                return NULL;

        tb->security_level = TB_SECURITY_NONE;
        tb->cm_ops = &tb_cm_ops;

        tcm = tb_priv(tb);
        INIT_LIST_HEAD(&tcm->tunnel_list);

        return tb;
}