linux/drivers/thunderbolt/nhi.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Thunderbolt driver - NHI driver
   4 *
   5 * The NHI (native host interface) is the PCI device that allows us to send and
   6 * receive frames from the Thunderbolt bus.
   7 *
   8 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
   9 * Copyright (C) 2018, Intel Corporation
  10 */
  11
  12#include <linux/pm_runtime.h>
  13#include <linux/slab.h>
  14#include <linux/errno.h>
  15#include <linux/pci.h>
  16#include <linux/interrupt.h>
  17#include <linux/module.h>
  18#include <linux/delay.h>
  19#include <linux/property.h>
  20#include <linux/platform_data/x86/apple.h>
  21
  22#include "nhi.h"
  23#include "nhi_regs.h"
  24#include "tb.h"
  25
  26#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
  27
  28#define RING_FIRST_USABLE_HOPID 1
  29
  30/*
  31 * Minimum number of vectors when we use MSI-X: two for the control channel
  32 * Rx/Tx and the remaining four for cross-domain DMA paths.
  33 */
  34#define MSIX_MIN_VECS           6
  35#define MSIX_MAX_VECS           16
  36
  37#define NHI_MAILBOX_TIMEOUT     500 /* ms */
  38
  39static int ring_interrupt_index(struct tb_ring *ring)
  40{
  41        int bit = ring->hop;
  42        if (!ring->is_tx)
  43                bit += ring->nhi->hop_count;
  44        return bit;
  45}
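
/*
 * Editor's note (illustrative, assuming a hypothetical hop_count of 12):
 * TX ring 3 maps to interrupt bit 3 while RX ring 3 maps to bit 3 + 12 = 15,
 * i.e. TX rings occupy bits 0..hop_count-1 and RX rings occupy bits
 * hop_count..2*hop_count-1 of the ring interrupt registers.
 */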
  46
  47/**
  48 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
  49 *
  50 * ring->nhi->lock must be held.
  51 */
  52static void ring_interrupt_active(struct tb_ring *ring, bool active)
  53{
  54        int reg = REG_RING_INTERRUPT_BASE +
  55                  ring_interrupt_index(ring) / 32 * 4;
  56        int bit = ring_interrupt_index(ring) & 31;
  57        int mask = 1 << bit;
  58        u32 old, new;
  59
  60        if (ring->irq > 0) {
  61                u32 step, shift, ivr, misc;
  62                void __iomem *ivr_base;
  63                int index;
  64
  65                if (ring->is_tx)
  66                        index = ring->hop;
  67                else
  68                        index = ring->hop + ring->nhi->hop_count;
  69
  70                /*
  71                 * Ask the hardware to clear interrupt status bits automatically
  72                 * since we already know which interrupt was triggered.
  73                 */
  74                misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
  75                if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
  76                        misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
  77                        iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
  78                }
  79
  80                ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
  81                step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
  82                shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
  83                ivr = ioread32(ivr_base + step);
  84                ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift);
  85                if (active)
  86                        ivr |= ring->vector << shift;
  87                iowrite32(ivr, ivr_base + step);
  88        }
  89
  90        old = ioread32(ring->nhi->iobase + reg);
  91        if (active)
  92                new = old | mask;
  93        else
  94                new = old & ~mask;
  95
  96        dev_dbg(&ring->nhi->pdev->dev,
  97                "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
  98                active ? "enabling" : "disabling", reg, bit, old, new);
  99
 100        if (new == old)
 101                dev_WARN(&ring->nhi->pdev->dev,
 102                                         "interrupt for %s %d is already %s\n",
 103                                         RING_TYPE(ring), ring->hop,
 104                                         active ? "enabled" : "disabled");
 105        iowrite32(new, ring->nhi->iobase + reg);
 106}
 107
 108/**
 109 * nhi_disable_interrupts() - disable interrupts for all rings
 110 *
 111 * Use only during init and shutdown.
 112 */
 113static void nhi_disable_interrupts(struct tb_nhi *nhi)
 114{
 115        int i = 0;
 116        /* disable interrupts */
 117        for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
 118                iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);
 119
 120        /* clear interrupt status bits */
 121        for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
 122                ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
 123}
 124
 125/* ring helper methods */
 126
 127static void __iomem *ring_desc_base(struct tb_ring *ring)
 128{
 129        void __iomem *io = ring->nhi->iobase;
 130        io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
 131        io += ring->hop * 16;
 132        return io;
 133}
 134
 135static void __iomem *ring_options_base(struct tb_ring *ring)
 136{
 137        void __iomem *io = ring->nhi->iobase;
 138        io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
 139        io += ring->hop * 32;
 140        return io;
 141}
 142
 143static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
 144{
 145        /*
 146         * The other 16 bits in the register are read-only and writes to them
 147         * are ignored by the hardware, so we can save one ioread32() by
 148         * filling the read-only bits with zeroes.
 149         */
 150        iowrite32(cons, ring_desc_base(ring) + 8);
 151}
 152
 153static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
 154{
 155        /* See ring_iowrite_cons() above for explanation */
 156        iowrite32(prod << 16, ring_desc_base(ring) + 8);
 157}
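
/*
 * Editor's note: the register at ring_desc_base() + 8 packs both indexes,
 * consumer in bits 15:0 and producer in bits 31:16, which is why
 * ring_iowrite_prod() shifts its value left by 16 while ring_iowrite_cons()
 * writes it as-is.
 */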
 158
 159static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
 160{
 161        iowrite32(value, ring_desc_base(ring) + offset);
 162}
 163
 164static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
 165{
 166        iowrite32(value, ring_desc_base(ring) + offset);
 167        iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
 168}
 169
 170static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
 171{
 172        iowrite32(value, ring_options_base(ring) + offset);
 173}
 174
 175static bool ring_full(struct tb_ring *ring)
 176{
 177        return ((ring->head + 1) % ring->size) == ring->tail;
 178}
 179
 180static bool ring_empty(struct tb_ring *ring)
 181{
 182        return ring->head == ring->tail;
 183}
 184
 185/**
 186 * ring_write_descriptors() - post frames from ring->queue to the controller
 187 *
 188 * ring->lock is held.
 189 */
 190static void ring_write_descriptors(struct tb_ring *ring)
 191{
 192        struct ring_frame *frame, *n;
 193        struct ring_desc *descriptor;
 194        list_for_each_entry_safe(frame, n, &ring->queue, list) {
 195                if (ring_full(ring))
 196                        break;
 197                list_move_tail(&frame->list, &ring->in_flight);
 198                descriptor = &ring->descriptors[ring->head];
 199                descriptor->phys = frame->buffer_phy;
 200                descriptor->time = 0;
 201                descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
 202                if (ring->is_tx) {
 203                        descriptor->length = frame->size;
 204                        descriptor->eof = frame->eof;
 205                        descriptor->sof = frame->sof;
 206                }
 207                ring->head = (ring->head + 1) % ring->size;
 208                if (ring->is_tx)
 209                        ring_iowrite_prod(ring, ring->head);
 210                else
 211                        ring_iowrite_cons(ring, ring->head);
 212        }
 213}
 214
 215/**
 216 * ring_work() - progress completed frames
 217 *
 218 * If the ring is shutting down then all frames are marked as canceled and
 219 * their callbacks are invoked.
 220 *
 221 * Otherwise we collect all completed frames from the ring buffer, write new
 222 * frames to the ring buffer and invoke the callbacks for the completed frames.
 223 */
 224static void ring_work(struct work_struct *work)
 225{
 226        struct tb_ring *ring = container_of(work, typeof(*ring), work);
 227        struct ring_frame *frame;
 228        bool canceled = false;
 229        unsigned long flags;
 230        LIST_HEAD(done);
 231
 232        spin_lock_irqsave(&ring->lock, flags);
 233
 234        if (!ring->running) {
 235                /*  Move all frames to done and mark them as canceled. */
 236                list_splice_tail_init(&ring->in_flight, &done);
 237                list_splice_tail_init(&ring->queue, &done);
 238                canceled = true;
 239                goto invoke_callback;
 240        }
 241
 242        while (!ring_empty(ring)) {
 243                if (!(ring->descriptors[ring->tail].flags
 244                                & RING_DESC_COMPLETED))
 245                        break;
 246                frame = list_first_entry(&ring->in_flight, typeof(*frame),
 247                                         list);
 248                list_move_tail(&frame->list, &done);
 249                if (!ring->is_tx) {
 250                        frame->size = ring->descriptors[ring->tail].length;
 251                        frame->eof = ring->descriptors[ring->tail].eof;
 252                        frame->sof = ring->descriptors[ring->tail].sof;
 253                        frame->flags = ring->descriptors[ring->tail].flags;
 254                }
 255                ring->tail = (ring->tail + 1) % ring->size;
 256        }
 257        ring_write_descriptors(ring);
 258
 259invoke_callback:
 260        /* allow callbacks to schedule new work */
 261        spin_unlock_irqrestore(&ring->lock, flags);
 262        while (!list_empty(&done)) {
 263                frame = list_first_entry(&done, typeof(*frame), list);
 264                /*
 265                 * The callback may reenqueue or delete frame.
 266                 * Do not hold on to it.
 267                 */
 268                list_del_init(&frame->list);
 269                if (frame->callback)
 270                        frame->callback(ring, frame, canceled);
 271        }
 272}
 273
 274int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
 275{
 276        unsigned long flags;
 277        int ret = 0;
 278
 279        spin_lock_irqsave(&ring->lock, flags);
 280        if (ring->running) {
 281                list_add_tail(&frame->list, &ring->queue);
 282                ring_write_descriptors(ring);
 283        } else {
 284                ret = -ESHUTDOWN;
 285        }
 286        spin_unlock_irqrestore(&ring->lock, flags);
 287        return ret;
 288}
 289EXPORT_SYMBOL_GPL(__tb_ring_enqueue);
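
/*
 * Editor's note: a minimal sketch (not part of the driver) of how a client
 * might queue a TX frame, assuming a hypothetical completion handler and an
 * already DMA-mapped buffer:
 *
 *	static void my_frame_done(struct tb_ring *ring,
 *				  struct ring_frame *frame, bool canceled)
 *	{
 *		// Invoked from ring->work when the descriptor completes, or
 *		// with canceled set to true when the ring is stopped.
 *	}
 *
 *	frame->buffer_phy = dma_addr;		// DMA address of the payload
 *	frame->size = len;			// used for the TX descriptor
 *	frame->sof = sof_pdf;			// start-of-frame PDF
 *	frame->eof = eof_pdf;			// end-of-frame PDF
 *	frame->callback = my_frame_done;
 *	ret = __tb_ring_enqueue(ring, frame);	// -ESHUTDOWN if ring stopped
 */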
 290
 291/**
 292 * tb_ring_poll() - Poll one completed frame from the ring
 293 * @ring: Ring to poll
 294 *
 295 * This function can be called when the @start_poll callback of the @ring
 296 * has been called. It will read one completed frame from the ring and
 297 * return it to the caller. Returns %NULL if there are no more completed
 298 * frames.
 299 */
 300struct ring_frame *tb_ring_poll(struct tb_ring *ring)
 301{
 302        struct ring_frame *frame = NULL;
 303        unsigned long flags;
 304
 305        spin_lock_irqsave(&ring->lock, flags);
 306        if (!ring->running)
 307                goto unlock;
 308        if (ring_empty(ring))
 309                goto unlock;
 310
 311        if (ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED) {
 312                frame = list_first_entry(&ring->in_flight, typeof(*frame),
 313                                         list);
 314                list_del_init(&frame->list);
 315
 316                if (!ring->is_tx) {
 317                        frame->size = ring->descriptors[ring->tail].length;
 318                        frame->eof = ring->descriptors[ring->tail].eof;
 319                        frame->sof = ring->descriptors[ring->tail].sof;
 320                        frame->flags = ring->descriptors[ring->tail].flags;
 321                }
 322
 323                ring->tail = (ring->tail + 1) % ring->size;
 324        }
 325
 326unlock:
 327        spin_unlock_irqrestore(&ring->lock, flags);
 328        return frame;
 329}
 330EXPORT_SYMBOL_GPL(tb_ring_poll);
 331
 332static void __ring_interrupt_mask(struct tb_ring *ring, bool mask)
 333{
 334        int idx = ring_interrupt_index(ring);
 335        int reg = REG_RING_INTERRUPT_BASE + idx / 32 * 4;
 336        int bit = idx % 32;
 337        u32 val;
 338
 339        val = ioread32(ring->nhi->iobase + reg);
 340        if (mask)
 341                val &= ~BIT(bit);
 342        else
 343                val |= BIT(bit);
 344        iowrite32(val, ring->nhi->iobase + reg);
 345}
 346
 347/* Both @nhi->lock and @ring->lock should be held */
 348static void __ring_interrupt(struct tb_ring *ring)
 349{
 350        if (!ring->running)
 351                return;
 352
 353        if (ring->start_poll) {
 354                __ring_interrupt_mask(ring, true);
 355                ring->start_poll(ring->poll_data);
 356        } else {
 357                schedule_work(&ring->work);
 358        }
 359}
 360
 361/**
 362 * tb_ring_poll_complete() - Re-start interrupt for the ring
 363 * @ring: Ring to re-start the interrupt
 364 *
 365 * This will re-start (unmask) the ring interrupt once the user is done
 366 * with polling.
 367 */
 368void tb_ring_poll_complete(struct tb_ring *ring)
 369{
 370        unsigned long flags;
 371
 372        spin_lock_irqsave(&ring->nhi->lock, flags);
 373        spin_lock(&ring->lock);
 374        if (ring->start_poll)
 375                __ring_interrupt_mask(ring, false);
 376        spin_unlock(&ring->lock);
 377        spin_unlock_irqrestore(&ring->nhi->lock, flags);
 378}
 379EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
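
/*
 * Editor's note: a minimal sketch (hypothetical names) of the polling flow
 * for rings created with a @start_poll callback. The interrupt is masked
 * before @start_poll runs, so the client drains completed frames and then
 * re-enables the interrupt:
 *
 *	static void my_start_poll(void *data)
 *	{
 *		struct my_ctx *ctx = data;
 *
 *		schedule_work(&ctx->poll_work);	// defer out of IRQ context
 *	}
 *
 *	static void my_poll_work(struct work_struct *work)
 *	{
 *		struct my_ctx *ctx = container_of(work, struct my_ctx, poll_work);
 *		struct ring_frame *frame;
 *
 *		while ((frame = tb_ring_poll(ctx->ring)))
 *			my_handle_frame(ctx, frame);
 *
 *		tb_ring_poll_complete(ctx->ring);	// unmask the interrupt
 *	}
 */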
 380
 381static irqreturn_t ring_msix(int irq, void *data)
 382{
 383        struct tb_ring *ring = data;
 384
 385        spin_lock(&ring->nhi->lock);
 386        spin_lock(&ring->lock);
 387        __ring_interrupt(ring);
 388        spin_unlock(&ring->lock);
 389        spin_unlock(&ring->nhi->lock);
 390
 391        return IRQ_HANDLED;
 392}
 393
 394static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
 395{
 396        struct tb_nhi *nhi = ring->nhi;
 397        unsigned long irqflags;
 398        int ret;
 399
 400        if (!nhi->pdev->msix_enabled)
 401                return 0;
 402
 403        ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
 404        if (ret < 0)
 405                return ret;
 406
 407        ring->vector = ret;
 408
 409        ret = pci_irq_vector(ring->nhi->pdev, ring->vector);
 410        if (ret < 0)
 411                goto err_ida_remove;
 412
 413        ring->irq = ret;
 414
 415        irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
 416        ret = request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
 417        if (ret)
 418                goto err_ida_remove;
 419
 420        return 0;
 421
 422err_ida_remove:
 423        ida_simple_remove(&nhi->msix_ida, ring->vector);
 424
 425        return ret;
 426}
 427
 428static void ring_release_msix(struct tb_ring *ring)
 429{
 430        if (ring->irq <= 0)
 431                return;
 432
 433        free_irq(ring->irq, ring);
 434        ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
 435        ring->vector = 0;
 436        ring->irq = 0;
 437}
 438
 439static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
 440{
 441        int ret = 0;
 442
 443        spin_lock_irq(&nhi->lock);
 444
 445        if (ring->hop < 0) {
 446                unsigned int i;
 447
 448                /*
 449                 * Automatically allocate HopID from the non-reserved
 450                 * range 1 .. hop_count - 1.
 451                 */
 452                for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
 453                        if (ring->is_tx) {
 454                                if (!nhi->tx_rings[i]) {
 455                                        ring->hop = i;
 456                                        break;
 457                                }
 458                        } else {
 459                                if (!nhi->rx_rings[i]) {
 460                                        ring->hop = i;
 461                                        break;
 462                                }
 463                        }
 464                }
 465        }
 466
 467        if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
 468                dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
 469                ret = -EINVAL;
 470                goto err_unlock;
 471        }
 472        if (ring->is_tx && nhi->tx_rings[ring->hop]) {
 473                dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n",
 474                         ring->hop);
 475                ret = -EBUSY;
 476                goto err_unlock;
 477        } else if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
 478                dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n",
 479                         ring->hop);
 480                ret = -EBUSY;
 481                goto err_unlock;
 482        }
 483
 484        if (ring->is_tx)
 485                nhi->tx_rings[ring->hop] = ring;
 486        else
 487                nhi->rx_rings[ring->hop] = ring;
 488
 489err_unlock:
 490        spin_unlock_irq(&nhi->lock);
 491
 492        return ret;
 493}
 494
 495static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
 496                                     bool transmit, unsigned int flags,
 497                                     u16 sof_mask, u16 eof_mask,
 498                                     void (*start_poll)(void *),
 499                                     void *poll_data)
 500{
 501        struct tb_ring *ring = NULL;
 502
 503        dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
 504                transmit ? "TX" : "RX", hop, size);
 505
 506        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 507        if (!ring)
 508                return NULL;
 509
 510        spin_lock_init(&ring->lock);
 511        INIT_LIST_HEAD(&ring->queue);
 512        INIT_LIST_HEAD(&ring->in_flight);
 513        INIT_WORK(&ring->work, ring_work);
 514
 515        ring->nhi = nhi;
 516        ring->hop = hop;
 517        ring->is_tx = transmit;
 518        ring->size = size;
 519        ring->flags = flags;
 520        ring->sof_mask = sof_mask;
 521        ring->eof_mask = eof_mask;
 522        ring->head = 0;
 523        ring->tail = 0;
 524        ring->running = false;
 525        ring->start_poll = start_poll;
 526        ring->poll_data = poll_data;
 527
 528        ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
 529                        size * sizeof(*ring->descriptors),
 530                        &ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
 531        if (!ring->descriptors)
 532                goto err_free_ring;
 533
 534        if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
 535                goto err_free_descs;
 536
 537        if (nhi_alloc_hop(nhi, ring))
 538                goto err_release_msix;
 539
 540        return ring;
 541
 542err_release_msix:
 543        ring_release_msix(ring);
 544err_free_descs:
 545        dma_free_coherent(&ring->nhi->pdev->dev,
 546                          ring->size * sizeof(*ring->descriptors),
 547                          ring->descriptors, ring->descriptors_dma);
 548err_free_ring:
 549        kfree(ring);
 550
 551        return NULL;
 552}
 553
 554/**
 555 * tb_ring_alloc_tx() - Allocate DMA ring for transmit
 556 * @nhi: Pointer to the NHI the ring is to be allocated from
 557 * @hop: HopID (ring) to allocate
 558 * @size: Number of entries in the ring
 559 * @flags: Flags for the ring
 560 */
 561struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
 562                                 unsigned int flags)
 563{
 564        return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, NULL, NULL);
 565}
 566EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);
 567
 568/**
 569 * tb_ring_alloc_rx() - Allocate DMA ring for receive
 570 * @nhi: Pointer to the NHI the ring is to be allocated from
 571 * @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation.
 572 * @size: Number of entries in the ring
 573 * @flags: Flags for the ring
 574 * @sof_mask: Mask of PDF values that start a frame
 575 * @eof_mask: Mask of PDF values that end a frame
 576 * @start_poll: If not %NULL the ring will call this function when an
 577 *              interrupt is triggered and masked, instead of invoking the
 578 *              callback for each Rx frame.
 579 * @poll_data: Optional data passed to @start_poll
 580 */
 581struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
 582                                 unsigned int flags, u16 sof_mask, u16 eof_mask,
 583                                 void (*start_poll)(void *), void *poll_data)
 584{
 585        return tb_ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask,
 586                             start_poll, poll_data);
 587}
 588EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
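
/*
 * Editor's note: a minimal allocation sketch with hypothetical HopID, size,
 * flag and PDF mask values (not part of the driver). Passing -1 as the RX
 * HopID asks nhi_alloc_hop() to pick a free one:
 *
 *	tx_ring = tb_ring_alloc_tx(nhi, 2, 256, RING_FLAG_NO_SUSPEND);
 *	rx_ring = tb_ring_alloc_rx(nhi, -1, 256, RING_FLAG_NO_SUSPEND,
 *				   sof_mask, eof_mask, NULL, NULL);
 *
 * Both return NULL on failure.
 */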
 589
 590/**
 591 * tb_ring_start() - enable a ring
 592 *
 593 * Must not be invoked in parallel with tb_ring_stop().
 594 */
 595void tb_ring_start(struct tb_ring *ring)
 596{
 597        u16 frame_size;
 598        u32 flags;
 599
 600        spin_lock_irq(&ring->nhi->lock);
 601        spin_lock(&ring->lock);
 602        if (ring->nhi->going_away)
 603                goto err;
 604        if (ring->running) {
 605                dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
 606                goto err;
 607        }
 608        dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n",
 609                RING_TYPE(ring), ring->hop);
 610
 611        if (ring->flags & RING_FLAG_FRAME) {
 612                /* Means 4096 */
 613                frame_size = 0;
 614                flags = RING_FLAG_ENABLE;
 615        } else {
 616                frame_size = TB_FRAME_SIZE;
 617                flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
 618        }
 619
 620        ring_iowrite64desc(ring, ring->descriptors_dma, 0);
 621        if (ring->is_tx) {
 622                ring_iowrite32desc(ring, ring->size, 12);
 623                ring_iowrite32options(ring, 0, 4); /* time related? */
 624                ring_iowrite32options(ring, flags, 0);
 625        } else {
 626                u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask;
 627
 628                ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12);
 629                ring_iowrite32options(ring, sof_eof_mask, 4);
 630                ring_iowrite32options(ring, flags, 0);
 631        }
 632        ring_interrupt_active(ring, true);
 633        ring->running = true;
 634err:
 635        spin_unlock(&ring->lock);
 636        spin_unlock_irq(&ring->nhi->lock);
 637}
 638EXPORT_SYMBOL_GPL(tb_ring_start);
 639
 640/**
 641 * tb_ring_stop() - shutdown a ring
 642 *
 643 * Must not be invoked from a callback.
 644 *
 645 * This method will disable the ring. Further calls to
 646 * tb_ring_tx/tb_ring_rx will return -ESHUTDOWN until the ring has been
 647 * started again with tb_ring_start().
 648 *
 649 * All enqueued frames will be canceled and their callbacks will be executed
 650 * with frame->canceled set to true (on the callback thread). This method
 651 * returns only after all callback invocations have finished.
 652 */
 653void tb_ring_stop(struct tb_ring *ring)
 654{
 655        spin_lock_irq(&ring->nhi->lock);
 656        spin_lock(&ring->lock);
 657        dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n",
 658                RING_TYPE(ring), ring->hop);
 659        if (ring->nhi->going_away)
 660                goto err;
 661        if (!ring->running) {
 662                dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
 663                         RING_TYPE(ring), ring->hop);
 664                goto err;
 665        }
 666        ring_interrupt_active(ring, false);
 667
 668        ring_iowrite32options(ring, 0, 0);
 669        ring_iowrite64desc(ring, 0, 0);
 670        ring_iowrite32desc(ring, 0, 8);
 671        ring_iowrite32desc(ring, 0, 12);
 672        ring->head = 0;
 673        ring->tail = 0;
 674        ring->running = false;
 675
 676err:
 677        spin_unlock(&ring->lock);
 678        spin_unlock_irq(&ring->nhi->lock);
 679
 680        /*
 681         * schedule ring->work to invoke callbacks on all remaining frames.
 682         */
 683        schedule_work(&ring->work);
 684        flush_work(&ring->work);
 685}
 686EXPORT_SYMBOL_GPL(tb_ring_stop);
 687
 688/*
 689 * tb_ring_free() - free ring
 690 *
 691 * When this method returns all invocations of ring->callback will have
 692 * finished.
 693 *
 694 * Ring must be stopped.
 695 *
 696 * Must NOT be called from ring_frame->callback!
 697 */
 698void tb_ring_free(struct tb_ring *ring)
 699{
 700        spin_lock_irq(&ring->nhi->lock);
 701        /*
 702         * Dissociate the ring from the NHI. This also ensures that
 703         * nhi_interrupt_work cannot reschedule ring->work.
 704         */
 705        if (ring->is_tx)
 706                ring->nhi->tx_rings[ring->hop] = NULL;
 707        else
 708                ring->nhi->rx_rings[ring->hop] = NULL;
 709
 710        if (ring->running) {
 711                dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
 712                         RING_TYPE(ring), ring->hop);
 713        }
 714        spin_unlock_irq(&ring->nhi->lock);
 715
 716        ring_release_msix(ring);
 717
 718        dma_free_coherent(&ring->nhi->pdev->dev,
 719                          ring->size * sizeof(*ring->descriptors),
 720                          ring->descriptors, ring->descriptors_dma);
 721
 722        ring->descriptors = NULL;
 723        ring->descriptors_dma = 0;
 724
 725
 726        dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
 727                ring->hop);
 728
 729        /*
 730         * ring->work can no longer be scheduled (it is scheduled only
 731         * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
 732         * to finish before freeing the ring.
 733         */
 734        flush_work(&ring->work);
 735        kfree(ring);
 736}
 737EXPORT_SYMBOL_GPL(tb_ring_free);
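
/*
 * Editor's note: the ring lifecycle implied by the checks above
 * (illustrative only):
 *
 *	ring = tb_ring_alloc_rx(...);	// or tb_ring_alloc_tx()
 *	tb_ring_start(ring);
 *	...				// enqueue frames, handle callbacks
 *	tb_ring_stop(ring);		// cancels outstanding frames
 *	tb_ring_free(ring);		// ring must already be stopped
 */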
 738
 739/**
 740 * nhi_mailbox_cmd() - Send a command through NHI mailbox
 741 * @nhi: Pointer to the NHI structure
 742 * @cmd: Command to send
 743 * @data: Data to be sent with the command
 744 *
 745 * Sends a mailbox command to the firmware running on the NHI. Returns %0 in
 746 * case of success and negative errno in case of failure.
 747 */
 748int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
 749{
 750        ktime_t timeout;
 751        u32 val;
 752
 753        iowrite32(data, nhi->iobase + REG_INMAIL_DATA);
 754
 755        val = ioread32(nhi->iobase + REG_INMAIL_CMD);
 756        val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR);
 757        val |= REG_INMAIL_OP_REQUEST | cmd;
 758        iowrite32(val, nhi->iobase + REG_INMAIL_CMD);
 759
 760        timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT);
 761        do {
 762                val = ioread32(nhi->iobase + REG_INMAIL_CMD);
 763                if (!(val & REG_INMAIL_OP_REQUEST))
 764                        break;
 765                usleep_range(10, 20);
 766        } while (ktime_before(ktime_get(), timeout));
 767
 768        if (val & REG_INMAIL_OP_REQUEST)
 769                return -ETIMEDOUT;
 770        if (val & REG_INMAIL_ERROR)
 771                return -EIO;
 772
 773        return 0;
 774}
 775
 776/**
 777 * nhi_mailbox_mode() - Return current firmware operation mode
 778 * @nhi: Pointer to the NHI structure
 779 *
 780 * The function reads the current firmware operation mode using NHI mailbox
 781 * registers and returns it to the caller.
 782 */
 783enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
 784{
 785        u32 val;
 786
 787        val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
 788        val &= REG_OUTMAIL_CMD_OPMODE_MASK;
 789        val >>= REG_OUTMAIL_CMD_OPMODE_SHIFT;
 790
 791        return (enum nhi_fw_mode)val;
 792}
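
/*
 * Editor's note: a hedged usage sketch for the mailbox helpers, assuming
 * NHI_FW_CM_MODE and @cmd come from the enums declared in nhi.h:
 *
 *	if (nhi_mailbox_mode(nhi) == NHI_FW_CM_MODE)
 *		ret = nhi_mailbox_cmd(nhi, cmd, 0);
 */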
 793
 794static void nhi_interrupt_work(struct work_struct *work)
 795{
 796        struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
 797        int value = 0; /* Suppress uninitialized usage warning. */
 798        int bit;
 799        int hop = -1;
 800        int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
 801        struct tb_ring *ring;
 802
 803        spin_lock_irq(&nhi->lock);
 804
 805        /*
 806         * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
 807         * (TX, RX, RX overflow). We iterate over the bits and read new
 808         * dwords as required. The registers are cleared on read.
 809         */
 810        for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
 811                if (bit % 32 == 0)
 812                        value = ioread32(nhi->iobase
 813                                         + REG_RING_NOTIFY_BASE
 814                                         + 4 * (bit / 32));
 815                if (++hop == nhi->hop_count) {
 816                        hop = 0;
 817                        type++;
 818                }
 819                if ((value & (1 << (bit % 32))) == 0)
 820                        continue;
 821                if (type == 2) {
 822                        dev_warn(&nhi->pdev->dev,
 823                                 "RX overflow for ring %d\n",
 824                                 hop);
 825                        continue;
 826                }
 827                if (type == 0)
 828                        ring = nhi->tx_rings[hop];
 829                else
 830                        ring = nhi->rx_rings[hop];
 831                if (ring == NULL) {
 832                        dev_warn(&nhi->pdev->dev,
 833                                 "got interrupt for inactive %s ring %d\n",
 834                                 type ? "RX" : "TX",
 835                                 hop);
 836                        continue;
 837                }
 838
 839                spin_lock(&ring->lock);
 840                __ring_interrupt(ring);
 841                spin_unlock(&ring->lock);
 842        }
 843        spin_unlock_irq(&nhi->lock);
 844}
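
/*
 * Editor's note, worked example for the loop above: with a hypothetical
 * hop_count of 12 there are 3 * 12 = 36 status bits. Bits 0..11 report TX
 * completions, bits 12..23 RX completions and bits 24..35 RX overflow, so
 * one pass reads two 32-bit dwords starting at REG_RING_NOTIFY_BASE.
 */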
 845
 846static irqreturn_t nhi_msi(int irq, void *data)
 847{
 848        struct tb_nhi *nhi = data;
 849        schedule_work(&nhi->interrupt_work);
 850        return IRQ_HANDLED;
 851}
 852
 853static int __nhi_suspend_noirq(struct device *dev, bool wakeup)
 854{
 855        struct pci_dev *pdev = to_pci_dev(dev);
 856        struct tb *tb = pci_get_drvdata(pdev);
 857        struct tb_nhi *nhi = tb->nhi;
 858        int ret;
 859
 860        ret = tb_domain_suspend_noirq(tb);
 861        if (ret)
 862                return ret;
 863
 864        if (nhi->ops && nhi->ops->suspend_noirq) {
 865                ret = nhi->ops->suspend_noirq(tb->nhi, wakeup);
 866                if (ret)
 867                        return ret;
 868        }
 869
 870        return 0;
 871}
 872
 873static int nhi_suspend_noirq(struct device *dev)
 874{
 875        return __nhi_suspend_noirq(dev, device_may_wakeup(dev));
 876}
 877
 878static int nhi_freeze_noirq(struct device *dev)
 879{
 880        struct pci_dev *pdev = to_pci_dev(dev);
 881        struct tb *tb = pci_get_drvdata(pdev);
 882
 883        return tb_domain_freeze_noirq(tb);
 884}
 885
 886static int nhi_thaw_noirq(struct device *dev)
 887{
 888        struct pci_dev *pdev = to_pci_dev(dev);
 889        struct tb *tb = pci_get_drvdata(pdev);
 890
 891        return tb_domain_thaw_noirq(tb);
 892}
 893
 894static bool nhi_wake_supported(struct pci_dev *pdev)
 895{
 896        u8 val;
 897
 898        /*
 899         * If power rails are sustainable for wakeup from S4 this
 900         * property is set by the BIOS.
 901         */
 902        if (!device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val))
 903                return !!val;
 904
 905        return true;
 906}
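
/*
 * Editor's note: device_property_read_u8() returns 0 on success, so this
 * helper (like nhi_imr_valid() below) reports the BIOS-provided value when
 * the property exists and defaults to true when it does not.
 */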
 907
 908static int nhi_poweroff_noirq(struct device *dev)
 909{
 910        struct pci_dev *pdev = to_pci_dev(dev);
 911        bool wakeup;
 912
 913        wakeup = device_may_wakeup(dev) && nhi_wake_supported(pdev);
 914        return __nhi_suspend_noirq(dev, wakeup);
 915}
 916
 917static void nhi_enable_int_throttling(struct tb_nhi *nhi)
 918{
 919        /* Throttling is specified in 256ns increments */
 920        u32 throttle = DIV_ROUND_UP(128 * NSEC_PER_USEC, 256);
 921        unsigned int i;
 922
 923        /*
 924         * Configure interrupt throttling for all vectors even if we
 925         * only use a few.
 926         */
 927        for (i = 0; i < MSIX_MAX_VECS; i++) {
 928                u32 reg = REG_INT_THROTTLING_RATE + i * 4;
 929                iowrite32(throttle, nhi->iobase + reg);
 930        }
 931}
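
/*
 * Editor's note, worked arithmetic: 128 * NSEC_PER_USEC = 128000 ns and
 * DIV_ROUND_UP(128000, 256) = 500, so each vector is throttled to roughly
 * one interrupt per 500 * 256 ns = 128 us.
 */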
 932
 933static int nhi_resume_noirq(struct device *dev)
 934{
 935        struct pci_dev *pdev = to_pci_dev(dev);
 936        struct tb *tb = pci_get_drvdata(pdev);
 937        struct tb_nhi *nhi = tb->nhi;
 938        int ret;
 939
 940        /*
 941         * Check that the device is still there. It may be that the user
 942         * unplugged the last device which causes the host controller to go
 943         * away on PCs.
 944         */
 945        if (!pci_device_is_present(pdev)) {
 946                nhi->going_away = true;
 947        } else {
 948                if (nhi->ops && nhi->ops->resume_noirq) {
 949                        ret = nhi->ops->resume_noirq(nhi);
 950                        if (ret)
 951                                return ret;
 952                }
 953                nhi_enable_int_throttling(tb->nhi);
 954        }
 955
 956        return tb_domain_resume_noirq(tb);
 957}
 958
 959static int nhi_suspend(struct device *dev)
 960{
 961        struct pci_dev *pdev = to_pci_dev(dev);
 962        struct tb *tb = pci_get_drvdata(pdev);
 963
 964        return tb_domain_suspend(tb);
 965}
 966
 967static void nhi_complete(struct device *dev)
 968{
 969        struct pci_dev *pdev = to_pci_dev(dev);
 970        struct tb *tb = pci_get_drvdata(pdev);
 971
 972        /*
 973         * If we were runtime suspended when system suspend started,
 974         * schedule runtime resume now. It should bring the domain back
 975         * to functional state.
 976         */
 977        if (pm_runtime_suspended(&pdev->dev))
 978                pm_runtime_resume(&pdev->dev);
 979        else
 980                tb_domain_complete(tb);
 981}
 982
 983static int nhi_runtime_suspend(struct device *dev)
 984{
 985        struct pci_dev *pdev = to_pci_dev(dev);
 986        struct tb *tb = pci_get_drvdata(pdev);
 987        struct tb_nhi *nhi = tb->nhi;
 988        int ret;
 989
 990        ret = tb_domain_runtime_suspend(tb);
 991        if (ret)
 992                return ret;
 993
 994        if (nhi->ops && nhi->ops->runtime_suspend) {
 995                ret = nhi->ops->runtime_suspend(tb->nhi);
 996                if (ret)
 997                        return ret;
 998        }
 999        return 0;
1000}
1001
1002static int nhi_runtime_resume(struct device *dev)
1003{
1004        struct pci_dev *pdev = to_pci_dev(dev);
1005        struct tb *tb = pci_get_drvdata(pdev);
1006        struct tb_nhi *nhi = tb->nhi;
1007        int ret;
1008
1009        if (nhi->ops && nhi->ops->runtime_resume) {
1010                ret = nhi->ops->runtime_resume(nhi);
1011                if (ret)
1012                        return ret;
1013        }
1014
1015        nhi_enable_int_throttling(nhi);
1016        return tb_domain_runtime_resume(tb);
1017}
1018
1019static void nhi_shutdown(struct tb_nhi *nhi)
1020{
1021        int i;
1022
1023        dev_dbg(&nhi->pdev->dev, "shutdown\n");
1024
1025        for (i = 0; i < nhi->hop_count; i++) {
1026                if (nhi->tx_rings[i])
1027                        dev_WARN(&nhi->pdev->dev,
1028                                 "TX ring %d is still active\n", i);
1029                if (nhi->rx_rings[i])
1030                        dev_WARN(&nhi->pdev->dev,
1031                                 "RX ring %d is still active\n", i);
1032        }
1033        nhi_disable_interrupts(nhi);
1034        /*
1035         * We have to release the irq before calling flush_work. Otherwise an
1036         * already executing IRQ handler could call schedule_work again.
1037         */
1038        if (!nhi->pdev->msix_enabled) {
1039                devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
1040                flush_work(&nhi->interrupt_work);
1041        }
1042        ida_destroy(&nhi->msix_ida);
1043
1044        if (nhi->ops && nhi->ops->shutdown)
1045                nhi->ops->shutdown(nhi);
1046}
1047
1048static int nhi_init_msi(struct tb_nhi *nhi)
1049{
1050        struct pci_dev *pdev = nhi->pdev;
1051        int res, irq, nvec;
1052
1053        /* In case someone left them on. */
1054        nhi_disable_interrupts(nhi);
1055
1056        nhi_enable_int_throttling(nhi);
1057
1058        ida_init(&nhi->msix_ida);
1059
1060        /*
1061         * The NHI has 16 MSI-X vectors or a single MSI. We first try to
1062         * get all MSI-X vectors and if we succeed, each ring will have
1063         * one MSI-X. If for some reason that does not work out, we
1064         * fall back to a single MSI.
1065         */
1066        nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
1067                                     PCI_IRQ_MSIX);
1068        if (nvec < 0) {
1069                nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
1070                if (nvec < 0)
1071                        return nvec;
1072
1073                INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
1074
1075                irq = pci_irq_vector(nhi->pdev, 0);
1076                if (irq < 0)
1077                        return irq;
1078
1079                res = devm_request_irq(&pdev->dev, irq, nhi_msi,
1080                                       IRQF_NO_SUSPEND, "thunderbolt", nhi);
1081                if (res) {
1082                        dev_err(&pdev->dev, "request_irq failed, aborting\n");
1083                        return res;
1084                }
1085        }
1086
1087        return 0;
1088}
1089
1090static bool nhi_imr_valid(struct pci_dev *pdev)
1091{
1092        u8 val;
1093
1094        if (!device_property_read_u8(&pdev->dev, "IMR_VALID", &val))
1095                return !!val;
1096
1097        return true;
1098}
1099
1100/*
1101 * During suspend the Thunderbolt controller is reset and all PCIe
1102 * tunnels are lost. The NHI driver will try to reestablish all tunnels
1103 * during resume. This adds device links between the tunneled PCIe
1104 * downstream ports and the NHI so that the device core will make sure
1105 * NHI is resumed first before the rest.
1106 */
1107static void tb_apple_add_links(struct tb_nhi *nhi)
1108{
1109        struct pci_dev *upstream, *pdev;
1110
1111        if (!x86_apple_machine)
1112                return;
1113
1114        switch (nhi->pdev->device) {
1115        case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1116        case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
1117        case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
1118        case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
1119                break;
1120        default:
1121                return;
1122        }
1123
1124        upstream = pci_upstream_bridge(nhi->pdev);
1125        while (upstream) {
1126                if (!pci_is_pcie(upstream))
1127                        return;
1128                if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
1129                        break;
1130                upstream = pci_upstream_bridge(upstream);
1131        }
1132
1133        if (!upstream)
1134                return;
1135
1136        /*
1137         * For each hotplug downstream port, add a device link back to
1138         * the NHI so that PCIe tunnels can be re-established after
1139         * sleep.
1140         */
1141        for_each_pci_bridge(pdev, upstream->subordinate) {
1142                const struct device_link *link;
1143
1144                if (!pci_is_pcie(pdev))
1145                        continue;
1146                if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
1147                    !pdev->is_hotplug_bridge)
1148                        continue;
1149
1150                link = device_link_add(&pdev->dev, &nhi->pdev->dev,
1151                                       DL_FLAG_AUTOREMOVE_SUPPLIER |
1152                                       DL_FLAG_PM_RUNTIME);
1153                if (link) {
1154                        dev_dbg(&nhi->pdev->dev, "created link from %s\n",
1155                                dev_name(&pdev->dev));
1156                } else {
1157                        dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
1158                                 dev_name(&pdev->dev));
1159                }
1160        }
1161}
1162
1163static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1164{
1165        struct tb_nhi *nhi;
1166        struct tb *tb;
1167        int res;
1168
1169        if (!nhi_imr_valid(pdev)) {
1170                dev_warn(&pdev->dev, "firmware image not valid, aborting\n");
1171                return -ENODEV;
1172        }
1173
1174        res = pcim_enable_device(pdev);
1175        if (res) {
1176                dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
1177                return res;
1178        }
1179
1180        res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
1181        if (res) {
1182                dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
1183                return res;
1184        }
1185
1186        nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
1187        if (!nhi)
1188                return -ENOMEM;
1189
1190        nhi->pdev = pdev;
1191        nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
1192        /* cannot fail - table is allocated in pcim_iomap_regions */
1193        nhi->iobase = pcim_iomap_table(pdev)[0];
1194        nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
1195        dev_dbg(&pdev->dev, "total paths: %d\n", nhi->hop_count);
1196
1197        nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
1198                                     sizeof(*nhi->tx_rings), GFP_KERNEL);
1199        nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
1200                                     sizeof(*nhi->rx_rings), GFP_KERNEL);
1201        if (!nhi->tx_rings || !nhi->rx_rings)
1202                return -ENOMEM;
1203
1204        res = nhi_init_msi(nhi);
1205        if (res) {
1206                dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
1207                return res;
1208        }
1209
1210        spin_lock_init(&nhi->lock);
1211
1212        res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1213        if (res)
1214                res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1215        if (res) {
1216                dev_err(&pdev->dev, "failed to set DMA mask\n");
1217                return res;
1218        }
1219
1220        pci_set_master(pdev);
1221
1222        if (nhi->ops && nhi->ops->init) {
1223                res = nhi->ops->init(nhi);
1224                if (res)
1225                        return res;
1226        }
1227
1228        tb_apple_add_links(nhi);
1229        tb_acpi_add_links(nhi);
1230
1231        tb = icm_probe(nhi);
1232        if (!tb)
1233                tb = tb_probe(nhi);
1234        if (!tb) {
1235                dev_err(&nhi->pdev->dev,
1236                        "failed to determine connection manager, aborting\n");
1237                return -ENODEV;
1238        }
1239
1240        dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
1241
1242        res = tb_domain_add(tb);
1243        if (res) {
1244                /*
1245                 * At this point the RX/TX rings might already have been
1246                 * activated. Do a proper shutdown.
1247                 */
1248                tb_domain_put(tb);
1249                nhi_shutdown(nhi);
1250                return res;
1251        }
1252        pci_set_drvdata(pdev, tb);
1253
1254        device_wakeup_enable(&pdev->dev);
1255
1256        pm_runtime_allow(&pdev->dev);
1257        pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY);
1258        pm_runtime_use_autosuspend(&pdev->dev);
1259        pm_runtime_put_autosuspend(&pdev->dev);
1260
1261        return 0;
1262}
1263
1264static void nhi_remove(struct pci_dev *pdev)
1265{
1266        struct tb *tb = pci_get_drvdata(pdev);
1267        struct tb_nhi *nhi = tb->nhi;
1268
1269        pm_runtime_get_sync(&pdev->dev);
1270        pm_runtime_dont_use_autosuspend(&pdev->dev);
1271        pm_runtime_forbid(&pdev->dev);
1272
1273        tb_domain_remove(tb);
1274        nhi_shutdown(nhi);
1275}
1276
1277/*
1278 * The tunneled PCI bridges are siblings of us. Use resume_noirq to re-enable
1279 * the tunnels as soon as possible. A corresponding PCI quirk blocks the
1280 * downstream bridges' resume_noirq until we are done.
1281 */
1282static const struct dev_pm_ops nhi_pm_ops = {
1283        .suspend_noirq = nhi_suspend_noirq,
1284        .resume_noirq = nhi_resume_noirq,
1285        .freeze_noirq = nhi_freeze_noirq,  /*
1286                                            * we just disable hotplug, the
1287                                            * pci-tunnels stay alive.
1288                                            */
1289        .thaw_noirq = nhi_thaw_noirq,
1290        .restore_noirq = nhi_resume_noirq,
1291        .suspend = nhi_suspend,
1292        .poweroff_noirq = nhi_poweroff_noirq,
1293        .poweroff = nhi_suspend,
1294        .complete = nhi_complete,
1295        .runtime_suspend = nhi_runtime_suspend,
1296        .runtime_resume = nhi_runtime_resume,
1297};
1298
1299static struct pci_device_id nhi_ids[] = {
1300        /*
1301         * We have to specify the class because the TB bridges use the same
1302         * device and vendor (sub)IDs on gen 1 and gen 2 controllers.
1303         */
1304        {
1305                .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
1306                .vendor = PCI_VENDOR_ID_INTEL,
1307                .device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
1308                .subvendor = 0x2222, .subdevice = 0x1111,
1309        },
1310        {
1311                .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
1312                .vendor = PCI_VENDOR_ID_INTEL,
1313                .device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
1314                .subvendor = 0x2222, .subdevice = 0x1111,
1315        },
1316        {
1317                .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
1318                .vendor = PCI_VENDOR_ID_INTEL,
1319                .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
1320                .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
1321        },
1322        {
1323                .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
1324                .vendor = PCI_VENDOR_ID_INTEL,
1325                .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
1326                .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
1327        },
1328
1329        /* Thunderbolt 3 */
1330        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) },
1331        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) },
1332        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) },
1333        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) },
1334        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) },
1335        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) },
1336        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) },
1337        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
1338        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) },
1339        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) },
1340        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI0),
1341          .driver_data = (kernel_ulong_t)&icl_nhi_ops },
1342        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
1343          .driver_data = (kernel_ulong_t)&icl_nhi_ops },
1344        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI0),
1345          .driver_data = (kernel_ulong_t)&icl_nhi_ops },
1346        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI1),
1347          .driver_data = (kernel_ulong_t)&icl_nhi_ops },
1348        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI0),
1349          .driver_data = (kernel_ulong_t)&icl_nhi_ops },
1350        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI1),
1351          .driver_data = (kernel_ulong_t)&icl_nhi_ops },
1352
1353        /* Any USB4 compliant host */
1354        { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
1355
1356        { 0,}
1357};
1358
1359MODULE_DEVICE_TABLE(pci, nhi_ids);
1360MODULE_LICENSE("GPL");
1361
1362static struct pci_driver nhi_driver = {
1363        .name = "thunderbolt",
1364        .id_table = nhi_ids,
1365        .probe = nhi_probe,
1366        .remove = nhi_remove,
1367        .shutdown = nhi_remove,
1368        .driver.pm = &nhi_pm_ops,
1369};
1370
1371static int __init nhi_init(void)
1372{
1373        int ret;
1374
1375        ret = tb_domain_init();
1376        if (ret)
1377                return ret;
1378        ret = pci_register_driver(&nhi_driver);
1379        if (ret)
1380                tb_domain_exit();
1381        return ret;
1382}
1383
1384static void __exit nhi_unload(void)
1385{
1386        pci_unregister_driver(&nhi_driver);
1387        tb_domain_exit();
1388}
1389
1390rootfs_initcall(nhi_init);
1391module_exit(nhi_unload);
1392