linux/drivers/thunderbolt/nhi.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Thunderbolt driver - NHI driver
   4 *
   5 * The NHI (native host interface) is the pci device that allows us to send and
   6 * receive frames from the thunderbolt bus.
   7 *
   8 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
   9 * Copyright (C) 2018, Intel Corporation
  10 */
  11
  12#include <linux/pm_runtime.h>
  13#include <linux/slab.h>
  14#include <linux/errno.h>
  15#include <linux/pci.h>
  16#include <linux/interrupt.h>
  17#include <linux/module.h>
  18#include <linux/delay.h>
  19#include <linux/property.h>
  20
  21#include "nhi.h"
  22#include "nhi_regs.h"
  23#include "tb.h"
  24
  25#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
  26
  27#define RING_FIRST_USABLE_HOPID 1
  28
  29/*
   30 * Minimum number of vectors when we use MSI-X. Two are for the control
   31 * channel Rx/Tx and the remaining four are for cross domain DMA paths.
  32 */
  33#define MSIX_MIN_VECS           6
  34#define MSIX_MAX_VECS           16
  35
  36#define NHI_MAILBOX_TIMEOUT     500 /* ms */
  37
  38#define QUIRK_AUTO_CLEAR_INT    BIT(0)
  39
  40static int ring_interrupt_index(struct tb_ring *ring)
  41{
  42        int bit = ring->hop;
  43        if (!ring->is_tx)
  44                bit += ring->nhi->hop_count;
  45        return bit;
  46}
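/*
 * As ring_interrupt_index() above shows, the interrupt status/mask bits
 * place the TX rings first (bits 0 .. hop_count - 1) and the RX rings
 * after them (bits hop_count .. 2 * hop_count - 1).
 */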
  47
  48/*
  49 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
  50 *
  51 * ring->nhi->lock must be held.
  52 */
  53static void ring_interrupt_active(struct tb_ring *ring, bool active)
  54{
  55        int reg = REG_RING_INTERRUPT_BASE +
  56                  ring_interrupt_index(ring) / 32 * 4;
  57        int bit = ring_interrupt_index(ring) & 31;
  58        int mask = 1 << bit;
  59        u32 old, new;
  60
  61        if (ring->irq > 0) {
  62                u32 step, shift, ivr, misc;
  63                void __iomem *ivr_base;
  64                int index;
  65
  66                if (ring->is_tx)
  67                        index = ring->hop;
  68                else
  69                        index = ring->hop + ring->nhi->hop_count;
  70
  71                if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
  72                        /*
  73                         * Ask the hardware to clear interrupt status
  74                         * bits automatically since we already know
  75                         * which interrupt was triggered.
  76                         */
  77                        misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
  78                        if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
  79                                misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
  80                                iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
  81                        }
  82                }
  83
  84                ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
  85                step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
  86                shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
  87                ivr = ioread32(ivr_base + step);
  88                ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift);
  89                if (active)
  90                        ivr |= ring->vector << shift;
  91                iowrite32(ivr, ivr_base + step);
  92        }
  93
  94        old = ioread32(ring->nhi->iobase + reg);
  95        if (active)
  96                new = old | mask;
  97        else
  98                new = old & ~mask;
  99
 100        dev_dbg(&ring->nhi->pdev->dev,
 101                "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
 102                active ? "enabling" : "disabling", reg, bit, old, new);
 103
 104        if (new == old)
 105                dev_WARN(&ring->nhi->pdev->dev,
 106                                         "interrupt for %s %d is already %s\n",
 107                                         RING_TYPE(ring), ring->hop,
 108                                         active ? "enabled" : "disabled");
 109        iowrite32(new, ring->nhi->iobase + reg);
 110}
 111
 112/*
 113 * nhi_disable_interrupts() - disable interrupts for all rings
 114 *
 115 * Use only during init and shutdown.
 116 */
 117static void nhi_disable_interrupts(struct tb_nhi *nhi)
 118{
 119        int i = 0;
 120        /* disable interrupts */
 121        for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
 122                iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);
 123
 124        /* clear interrupt status bits */
 125        for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
 126                ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
 127}
 128
 129/* ring helper methods */
 130
 131static void __iomem *ring_desc_base(struct tb_ring *ring)
 132{
 133        void __iomem *io = ring->nhi->iobase;
 134        io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
 135        io += ring->hop * 16;
 136        return io;
 137}
 138
 139static void __iomem *ring_options_base(struct tb_ring *ring)
 140{
 141        void __iomem *io = ring->nhi->iobase;
 142        io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
 143        io += ring->hop * 32;
 144        return io;
 145}
 146
 147static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
 148{
 149        /*
  150         * The other 16 bits in the register are read-only and writes to them
  151         * are ignored by the hardware, so we can save one ioread32() by
 152         * filling the read-only bits with zeroes.
 153         */
 154        iowrite32(cons, ring_desc_base(ring) + 8);
 155}
 156
 157static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
 158{
 159        /* See ring_iowrite_cons() above for explanation */
 160        iowrite32(prod << 16, ring_desc_base(ring) + 8);
 161}
 162
 163static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
 164{
 165        iowrite32(value, ring_desc_base(ring) + offset);
 166}
 167
 168static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
 169{
 170        iowrite32(value, ring_desc_base(ring) + offset);
 171        iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
 172}
 173
 174static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
 175{
 176        iowrite32(value, ring_options_base(ring) + offset);
 177}
 178
 179static bool ring_full(struct tb_ring *ring)
 180{
 181        return ((ring->head + 1) % ring->size) == ring->tail;
 182}
 183
 184static bool ring_empty(struct tb_ring *ring)
 185{
 186        return ring->head == ring->tail;
 187}
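/*
 * Note that ring_full() intentionally leaves one descriptor slot unused:
 * the ring holds at most size - 1 posted frames so that head == tail
 * always means "empty" rather than "full".
 */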
 188
 189/*
 190 * ring_write_descriptors() - post frames from ring->queue to the controller
 191 *
 192 * ring->lock is held.
 193 */
 194static void ring_write_descriptors(struct tb_ring *ring)
 195{
 196        struct ring_frame *frame, *n;
 197        struct ring_desc *descriptor;
 198        list_for_each_entry_safe(frame, n, &ring->queue, list) {
 199                if (ring_full(ring))
 200                        break;
 201                list_move_tail(&frame->list, &ring->in_flight);
 202                descriptor = &ring->descriptors[ring->head];
 203                descriptor->phys = frame->buffer_phy;
 204                descriptor->time = 0;
 205                descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
 206                if (ring->is_tx) {
 207                        descriptor->length = frame->size;
 208                        descriptor->eof = frame->eof;
 209                        descriptor->sof = frame->sof;
 210                }
 211                ring->head = (ring->head + 1) % ring->size;
 212                if (ring->is_tx)
 213                        ring_iowrite_prod(ring, ring->head);
 214                else
 215                        ring_iowrite_cons(ring, ring->head);
 216        }
 217}
 218
 219/*
 220 * ring_work() - progress completed frames
 221 *
 222 * If the ring is shutting down then all frames are marked as canceled and
 223 * their callbacks are invoked.
 224 *
  225 * Otherwise we collect all completed frames from the ring buffer, write new
  226 * frames to the ring buffer and invoke the callbacks for the completed frames.
 227 */
 228static void ring_work(struct work_struct *work)
 229{
 230        struct tb_ring *ring = container_of(work, typeof(*ring), work);
 231        struct ring_frame *frame;
 232        bool canceled = false;
 233        unsigned long flags;
 234        LIST_HEAD(done);
 235
 236        spin_lock_irqsave(&ring->lock, flags);
 237
 238        if (!ring->running) {
 239                /*  Move all frames to done and mark them as canceled. */
 240                list_splice_tail_init(&ring->in_flight, &done);
 241                list_splice_tail_init(&ring->queue, &done);
 242                canceled = true;
 243                goto invoke_callback;
 244        }
 245
 246        while (!ring_empty(ring)) {
 247                if (!(ring->descriptors[ring->tail].flags
 248                                & RING_DESC_COMPLETED))
 249                        break;
 250                frame = list_first_entry(&ring->in_flight, typeof(*frame),
 251                                         list);
 252                list_move_tail(&frame->list, &done);
 253                if (!ring->is_tx) {
 254                        frame->size = ring->descriptors[ring->tail].length;
 255                        frame->eof = ring->descriptors[ring->tail].eof;
 256                        frame->sof = ring->descriptors[ring->tail].sof;
 257                        frame->flags = ring->descriptors[ring->tail].flags;
 258                }
 259                ring->tail = (ring->tail + 1) % ring->size;
 260        }
 261        ring_write_descriptors(ring);
 262
 263invoke_callback:
 264        /* allow callbacks to schedule new work */
 265        spin_unlock_irqrestore(&ring->lock, flags);
 266        while (!list_empty(&done)) {
 267                frame = list_first_entry(&done, typeof(*frame), list);
 268                /*
 269                 * The callback may reenqueue or delete frame.
 270                 * Do not hold on to it.
 271                 */
 272                list_del_init(&frame->list);
 273                if (frame->callback)
 274                        frame->callback(ring, frame, canceled);
 275        }
 276}
 277
 278int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
 279{
 280        unsigned long flags;
 281        int ret = 0;
 282
 283        spin_lock_irqsave(&ring->lock, flags);
 284        if (ring->running) {
 285                list_add_tail(&frame->list, &ring->queue);
 286                ring_write_descriptors(ring);
 287        } else {
 288                ret = -ESHUTDOWN;
 289        }
 290        spin_unlock_irqrestore(&ring->lock, flags);
 291        return ret;
 292}
 293EXPORT_SYMBOL_GPL(__tb_ring_enqueue);
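/*
 * Rough usage sketch for __tb_ring_enqueue() (hypothetical caller, not code
 * from this driver): for a TX frame the caller describes a DMA-mapped buffer
 * and provides the completion callback that ring_work() invokes once the
 * descriptor completes, or with canceled == true when the ring is stopped.
 * In-tree users normally go through the tb_ring_tx()/tb_ring_rx() wrappers
 * from the public header instead of calling this directly.
 *
 *	frame->buffer_phy = dma_addr;		// from dma_map_single() etc.
 *	frame->callback = my_frame_done;	// illustrative callback
 *	frame->size = len;
 *	frame->sof = my_sof_pdf;		// illustrative PDF values
 *	frame->eof = my_eof_pdf;
 *	ret = __tb_ring_enqueue(ring, frame);	// -ESHUTDOWN if not running
 */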
 294
 295/**
 296 * tb_ring_poll() - Poll one completed frame from the ring
 297 * @ring: Ring to poll
 298 *
  299 * This function can be called once the @start_poll callback of the @ring
  300 * has been called. It will read one completed frame from the ring and
  301 * return it to the caller. Returns %NULL if there are no more completed
  302 * frames.
 303 */
 304struct ring_frame *tb_ring_poll(struct tb_ring *ring)
 305{
 306        struct ring_frame *frame = NULL;
 307        unsigned long flags;
 308
 309        spin_lock_irqsave(&ring->lock, flags);
 310        if (!ring->running)
 311                goto unlock;
 312        if (ring_empty(ring))
 313                goto unlock;
 314
 315        if (ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED) {
 316                frame = list_first_entry(&ring->in_flight, typeof(*frame),
 317                                         list);
 318                list_del_init(&frame->list);
 319
 320                if (!ring->is_tx) {
 321                        frame->size = ring->descriptors[ring->tail].length;
 322                        frame->eof = ring->descriptors[ring->tail].eof;
 323                        frame->sof = ring->descriptors[ring->tail].sof;
 324                        frame->flags = ring->descriptors[ring->tail].flags;
 325                }
 326
 327                ring->tail = (ring->tail + 1) % ring->size;
 328        }
 329
 330unlock:
 331        spin_unlock_irqrestore(&ring->lock, flags);
 332        return frame;
 333}
 334EXPORT_SYMBOL_GPL(tb_ring_poll);
 335
 336static void __ring_interrupt_mask(struct tb_ring *ring, bool mask)
 337{
 338        int idx = ring_interrupt_index(ring);
 339        int reg = REG_RING_INTERRUPT_BASE + idx / 32 * 4;
 340        int bit = idx % 32;
 341        u32 val;
 342
 343        val = ioread32(ring->nhi->iobase + reg);
 344        if (mask)
 345                val &= ~BIT(bit);
 346        else
 347                val |= BIT(bit);
 348        iowrite32(val, ring->nhi->iobase + reg);
 349}
 350
 351/* Both @nhi->lock and @ring->lock should be held */
 352static void __ring_interrupt(struct tb_ring *ring)
 353{
 354        if (!ring->running)
 355                return;
 356
 357        if (ring->start_poll) {
 358                __ring_interrupt_mask(ring, true);
 359                ring->start_poll(ring->poll_data);
 360        } else {
 361                schedule_work(&ring->work);
 362        }
 363}
 364
 365/**
 366 * tb_ring_poll_complete() - Re-start interrupt for the ring
 367 * @ring: Ring to re-start the interrupt
 368 *
 369 * This will re-start (unmask) the ring interrupt once the user is done
 370 * with polling.
 371 */
 372void tb_ring_poll_complete(struct tb_ring *ring)
 373{
 374        unsigned long flags;
 375
 376        spin_lock_irqsave(&ring->nhi->lock, flags);
 377        spin_lock(&ring->lock);
 378        if (ring->start_poll)
 379                __ring_interrupt_mask(ring, false);
 380        spin_unlock(&ring->lock);
 381        spin_unlock_irqrestore(&ring->nhi->lock, flags);
 382}
 383EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
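/*
 * Typical polling flow (illustrative sketch, not code from this file): when
 * the ring was allocated with @start_poll set, __ring_interrupt() masks the
 * ring interrupt and calls @start_poll, and the consumer then drains the
 * ring before re-enabling the interrupt:
 *
 *	while ((frame = tb_ring_poll(ring)) != NULL)
 *		consume(frame);			// illustrative helper
 *	tb_ring_poll_complete(ring);		// unmask the ring interrupt
 */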
 384
 385static void ring_clear_msix(const struct tb_ring *ring)
 386{
 387        if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
 388                return;
 389
 390        if (ring->is_tx)
 391                ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE);
 392        else
 393                ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE +
 394                         4 * (ring->nhi->hop_count / 32));
 395}
 396
 397static irqreturn_t ring_msix(int irq, void *data)
 398{
 399        struct tb_ring *ring = data;
 400
 401        spin_lock(&ring->nhi->lock);
 402        ring_clear_msix(ring);
 403        spin_lock(&ring->lock);
 404        __ring_interrupt(ring);
 405        spin_unlock(&ring->lock);
 406        spin_unlock(&ring->nhi->lock);
 407
 408        return IRQ_HANDLED;
 409}
 410
 411static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
 412{
 413        struct tb_nhi *nhi = ring->nhi;
 414        unsigned long irqflags;
 415        int ret;
 416
 417        if (!nhi->pdev->msix_enabled)
 418                return 0;
 419
 420        ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
 421        if (ret < 0)
 422                return ret;
 423
 424        ring->vector = ret;
 425
 426        ret = pci_irq_vector(ring->nhi->pdev, ring->vector);
 427        if (ret < 0)
 428                goto err_ida_remove;
 429
 430        ring->irq = ret;
 431
 432        irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
 433        ret = request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
 434        if (ret)
 435                goto err_ida_remove;
 436
 437        return 0;
 438
 439err_ida_remove:
 440        ida_simple_remove(&nhi->msix_ida, ring->vector);
 441
 442        return ret;
 443}
 444
 445static void ring_release_msix(struct tb_ring *ring)
 446{
 447        if (ring->irq <= 0)
 448                return;
 449
 450        free_irq(ring->irq, ring);
 451        ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
 452        ring->vector = 0;
 453        ring->irq = 0;
 454}
 455
 456static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
 457{
 458        int ret = 0;
 459
 460        spin_lock_irq(&nhi->lock);
 461
 462        if (ring->hop < 0) {
 463                unsigned int i;
 464
 465                /*
 466                 * Automatically allocate HopID from the non-reserved
 467                 * range 1 .. hop_count - 1.
 468                 */
 469                for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
 470                        if (ring->is_tx) {
 471                                if (!nhi->tx_rings[i]) {
 472                                        ring->hop = i;
 473                                        break;
 474                                }
 475                        } else {
 476                                if (!nhi->rx_rings[i]) {
 477                                        ring->hop = i;
 478                                        break;
 479                                }
 480                        }
 481                }
 482        }
 483
 484        if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
 485                dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
 486                ret = -EINVAL;
 487                goto err_unlock;
 488        }
 489        if (ring->is_tx && nhi->tx_rings[ring->hop]) {
 490                dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n",
 491                         ring->hop);
 492                ret = -EBUSY;
 493                goto err_unlock;
 494        } else if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
 495                dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n",
 496                         ring->hop);
 497                ret = -EBUSY;
 498                goto err_unlock;
 499        }
 500
 501        if (ring->is_tx)
 502                nhi->tx_rings[ring->hop] = ring;
 503        else
 504                nhi->rx_rings[ring->hop] = ring;
 505
 506err_unlock:
 507        spin_unlock_irq(&nhi->lock);
 508
 509        return ret;
 510}
 511
 512static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
 513                                     bool transmit, unsigned int flags,
 514                                     int e2e_tx_hop, u16 sof_mask, u16 eof_mask,
 515                                     void (*start_poll)(void *),
 516                                     void *poll_data)
 517{
 518        struct tb_ring *ring = NULL;
 519
 520        dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
 521                transmit ? "TX" : "RX", hop, size);
 522
 523        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 524        if (!ring)
 525                return NULL;
 526
 527        spin_lock_init(&ring->lock);
 528        INIT_LIST_HEAD(&ring->queue);
 529        INIT_LIST_HEAD(&ring->in_flight);
 530        INIT_WORK(&ring->work, ring_work);
 531
 532        ring->nhi = nhi;
 533        ring->hop = hop;
 534        ring->is_tx = transmit;
 535        ring->size = size;
 536        ring->flags = flags;
 537        ring->e2e_tx_hop = e2e_tx_hop;
 538        ring->sof_mask = sof_mask;
 539        ring->eof_mask = eof_mask;
 540        ring->head = 0;
 541        ring->tail = 0;
 542        ring->running = false;
 543        ring->start_poll = start_poll;
 544        ring->poll_data = poll_data;
 545
 546        ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
 547                        size * sizeof(*ring->descriptors),
 548                        &ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
 549        if (!ring->descriptors)
 550                goto err_free_ring;
 551
 552        if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
 553                goto err_free_descs;
 554
 555        if (nhi_alloc_hop(nhi, ring))
 556                goto err_release_msix;
 557
 558        return ring;
 559
 560err_release_msix:
 561        ring_release_msix(ring);
 562err_free_descs:
 563        dma_free_coherent(&ring->nhi->pdev->dev,
 564                          ring->size * sizeof(*ring->descriptors),
 565                          ring->descriptors, ring->descriptors_dma);
 566err_free_ring:
 567        kfree(ring);
 568
 569        return NULL;
 570}
 571
 572/**
 573 * tb_ring_alloc_tx() - Allocate DMA ring for transmit
  574 * @nhi: Pointer to the NHI the ring is to be allocated for
 575 * @hop: HopID (ring) to allocate
 576 * @size: Number of entries in the ring
 577 * @flags: Flags for the ring
 578 */
 579struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
 580                                 unsigned int flags)
 581{
 582        return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, 0, NULL, NULL);
 583}
 584EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);
 585
 586/**
 587 * tb_ring_alloc_rx() - Allocate DMA ring for receive
  588 * @nhi: Pointer to the NHI the ring is to be allocated for
 589 * @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation.
 590 * @size: Number of entries in the ring
 591 * @flags: Flags for the ring
 592 * @e2e_tx_hop: Transmit HopID when E2E is enabled in @flags
 593 * @sof_mask: Mask of PDF values that start a frame
 594 * @eof_mask: Mask of PDF values that end a frame
 595 * @start_poll: If not %NULL the ring will call this function when an
  596 *              interrupt is triggered and masked, instead of invoking the
  597 *              callback for each Rx frame.
 598 * @poll_data: Optional data passed to @start_poll
 599 */
 600struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
 601                                 unsigned int flags, int e2e_tx_hop,
 602                                 u16 sof_mask, u16 eof_mask,
 603                                 void (*start_poll)(void *), void *poll_data)
 604{
 605        return tb_ring_alloc(nhi, hop, size, false, flags, e2e_tx_hop, sof_mask, eof_mask,
 606                             start_poll, poll_data);
 607}
 608EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
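/*
 * Allocation sketch (illustrative parameters, not taken from this file): a
 * protocol driver would typically let the NHI pick the HopIDs (-1) and use
 * frame mode, checking both results for %NULL before starting the rings:
 *
 *	tx = tb_ring_alloc_tx(nhi, -1, 256, RING_FLAG_FRAME);
 *	rx = tb_ring_alloc_rx(nhi, -1, 256, RING_FLAG_FRAME, 0, sof_mask,
 *			      eof_mask, NULL, NULL);
 */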
 609
 610/**
 611 * tb_ring_start() - enable a ring
 612 * @ring: Ring to start
 613 *
 614 * Must not be invoked in parallel with tb_ring_stop().
 615 */
 616void tb_ring_start(struct tb_ring *ring)
 617{
 618        u16 frame_size;
 619        u32 flags;
 620
 621        spin_lock_irq(&ring->nhi->lock);
 622        spin_lock(&ring->lock);
 623        if (ring->nhi->going_away)
 624                goto err;
 625        if (ring->running) {
 626                dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
 627                goto err;
 628        }
 629        dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n",
 630                RING_TYPE(ring), ring->hop);
 631
 632        if (ring->flags & RING_FLAG_FRAME) {
 633                /* Means 4096 */
 634                frame_size = 0;
 635                flags = RING_FLAG_ENABLE;
 636        } else {
 637                frame_size = TB_FRAME_SIZE;
 638                flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
 639        }
 640
 641        ring_iowrite64desc(ring, ring->descriptors_dma, 0);
 642        if (ring->is_tx) {
 643                ring_iowrite32desc(ring, ring->size, 12);
  644                ring_iowrite32options(ring, 0, 4); /* time related? */
 645                ring_iowrite32options(ring, flags, 0);
 646        } else {
 647                u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask;
 648
 649                ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12);
 650                ring_iowrite32options(ring, sof_eof_mask, 4);
 651                ring_iowrite32options(ring, flags, 0);
 652        }
 653
 654        /*
 655         * Now that the ring valid bit is set we can configure E2E if
 656         * enabled for the ring.
 657         */
 658        if (ring->flags & RING_FLAG_E2E) {
 659                if (!ring->is_tx) {
 660                        u32 hop;
 661
 662                        hop = ring->e2e_tx_hop << REG_RX_OPTIONS_E2E_HOP_SHIFT;
 663                        hop &= REG_RX_OPTIONS_E2E_HOP_MASK;
 664                        flags |= hop;
 665
 666                        dev_dbg(&ring->nhi->pdev->dev,
 667                                "enabling E2E for %s %d with TX HopID %d\n",
 668                                RING_TYPE(ring), ring->hop, ring->e2e_tx_hop);
 669                } else {
 670                        dev_dbg(&ring->nhi->pdev->dev, "enabling E2E for %s %d\n",
 671                                RING_TYPE(ring), ring->hop);
 672                }
 673
 674                flags |= RING_FLAG_E2E_FLOW_CONTROL;
 675                ring_iowrite32options(ring, flags, 0);
 676        }
 677
 678        ring_interrupt_active(ring, true);
 679        ring->running = true;
 680err:
 681        spin_unlock(&ring->lock);
 682        spin_unlock_irq(&ring->nhi->lock);
 683}
 684EXPORT_SYMBOL_GPL(tb_ring_start);
 685
 686/**
 687 * tb_ring_stop() - shutdown a ring
 688 * @ring: Ring to stop
 689 *
 690 * Must not be invoked from a callback.
 691 *
 692 * This method will disable the ring. Further calls to
  693 * tb_ring_tx/tb_ring_rx will return -ESHUTDOWN until tb_ring_start()
  694 * has been called again.
 695 *
 696 * All enqueued frames will be canceled and their callbacks will be executed
 697 * with frame->canceled set to true (on the callback thread). This method
 698 * returns only after all callback invocations have finished.
 699 */
 700void tb_ring_stop(struct tb_ring *ring)
 701{
 702        spin_lock_irq(&ring->nhi->lock);
 703        spin_lock(&ring->lock);
 704        dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n",
 705                RING_TYPE(ring), ring->hop);
 706        if (ring->nhi->going_away)
 707                goto err;
 708        if (!ring->running) {
 709                dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
 710                         RING_TYPE(ring), ring->hop);
 711                goto err;
 712        }
 713        ring_interrupt_active(ring, false);
 714
 715        ring_iowrite32options(ring, 0, 0);
 716        ring_iowrite64desc(ring, 0, 0);
 717        ring_iowrite32desc(ring, 0, 8);
 718        ring_iowrite32desc(ring, 0, 12);
 719        ring->head = 0;
 720        ring->tail = 0;
 721        ring->running = false;
 722
 723err:
 724        spin_unlock(&ring->lock);
 725        spin_unlock_irq(&ring->nhi->lock);
 726
 727        /*
 728         * schedule ring->work to invoke callbacks on all remaining frames.
 729         */
 730        schedule_work(&ring->work);
 731        flush_work(&ring->work);
 732}
 733EXPORT_SYMBOL_GPL(tb_ring_stop);
 734
 735/*
 736 * tb_ring_free() - free ring
 737 *
 738 * When this method returns all invocations of ring->callback will have
 739 * finished.
 740 *
 741 * Ring must be stopped.
 742 *
 743 * Must NOT be called from ring_frame->callback!
 744 */
 745void tb_ring_free(struct tb_ring *ring)
 746{
 747        spin_lock_irq(&ring->nhi->lock);
 748        /*
 749         * Dissociate the ring from the NHI. This also ensures that
 750         * nhi_interrupt_work cannot reschedule ring->work.
 751         */
 752        if (ring->is_tx)
 753                ring->nhi->tx_rings[ring->hop] = NULL;
 754        else
 755                ring->nhi->rx_rings[ring->hop] = NULL;
 756
 757        if (ring->running) {
 758                dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
 759                         RING_TYPE(ring), ring->hop);
 760        }
 761        spin_unlock_irq(&ring->nhi->lock);
 762
 763        ring_release_msix(ring);
 764
 765        dma_free_coherent(&ring->nhi->pdev->dev,
 766                          ring->size * sizeof(*ring->descriptors),
 767                          ring->descriptors, ring->descriptors_dma);
 768
 769        ring->descriptors = NULL;
 770        ring->descriptors_dma = 0;
 771
 772
 773        dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
 774                ring->hop);
 775
 776        /*
 777         * ring->work can no longer be scheduled (it is scheduled only
 778         * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
 779         * to finish before freeing the ring.
 780         */
 781        flush_work(&ring->work);
 782        kfree(ring);
 783}
 784EXPORT_SYMBOL_GPL(tb_ring_free);
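/*
 * Summary of the ring lifecycle implied by the checks above:
 * tb_ring_alloc_tx()/tb_ring_alloc_rx(), then tb_ring_start(), then
 * enqueue/poll, then tb_ring_stop() and finally tb_ring_free(). Enqueueing
 * on a ring that is not running fails with -ESHUTDOWN, and freeing a ring
 * that is still running triggers the dev_WARN() above.
 */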
 785
 786/**
 787 * nhi_mailbox_cmd() - Send a command through NHI mailbox
 788 * @nhi: Pointer to the NHI structure
 789 * @cmd: Command to send
  790 * @data: Data to be sent with the command
  791 *
  792 * Sends a mailbox command to the firmware running on the NHI. Returns %0 in
 793 * case of success and negative errno in case of failure.
 794 */
 795int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
 796{
 797        ktime_t timeout;
 798        u32 val;
 799
 800        iowrite32(data, nhi->iobase + REG_INMAIL_DATA);
 801
 802        val = ioread32(nhi->iobase + REG_INMAIL_CMD);
 803        val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR);
 804        val |= REG_INMAIL_OP_REQUEST | cmd;
 805        iowrite32(val, nhi->iobase + REG_INMAIL_CMD);
 806
 807        timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT);
 808        do {
 809                val = ioread32(nhi->iobase + REG_INMAIL_CMD);
 810                if (!(val & REG_INMAIL_OP_REQUEST))
 811                        break;
 812                usleep_range(10, 20);
 813        } while (ktime_before(ktime_get(), timeout));
 814
 815        if (val & REG_INMAIL_OP_REQUEST)
 816                return -ETIMEDOUT;
 817        if (val & REG_INMAIL_ERROR)
 818                return -EIO;
 819
 820        return 0;
 821}
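/*
 * Example (sketch; the actual call sites live in the ICM support code):
 * telling the firmware connection manager that the driver is unloading
 * could look like
 *
 *	ret = nhi_mailbox_cmd(nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
 *	if (ret)
 *		dev_warn(&nhi->pdev->dev, "mailbox command failed: %d\n", ret);
 */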
 822
 823/**
 824 * nhi_mailbox_mode() - Return current firmware operation mode
 825 * @nhi: Pointer to the NHI structure
 826 *
 827 * The function reads current firmware operation mode using NHI mailbox
 828 * registers and returns it to the caller.
 829 */
 830enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
 831{
 832        u32 val;
 833
 834        val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
 835        val &= REG_OUTMAIL_CMD_OPMODE_MASK;
 836        val >>= REG_OUTMAIL_CMD_OPMODE_SHIFT;
 837
 838        return (enum nhi_fw_mode)val;
 839}
 840
 841static void nhi_interrupt_work(struct work_struct *work)
 842{
 843        struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
 844        int value = 0; /* Suppress uninitialized usage warning. */
 845        int bit;
 846        int hop = -1;
 847        int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
 848        struct tb_ring *ring;
 849
 850        spin_lock_irq(&nhi->lock);
 851
 852        /*
 853         * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
  854         * (TX, RX, RX overflow). We iterate over the bits and read new
  855         * dwords as required. The registers are cleared on read.
 856         */
 857        for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
 858                if (bit % 32 == 0)
 859                        value = ioread32(nhi->iobase
 860                                         + REG_RING_NOTIFY_BASE
 861                                         + 4 * (bit / 32));
 862                if (++hop == nhi->hop_count) {
 863                        hop = 0;
 864                        type++;
 865                }
 866                if ((value & (1 << (bit % 32))) == 0)
 867                        continue;
 868                if (type == 2) {
 869                        dev_warn(&nhi->pdev->dev,
 870                                 "RX overflow for ring %d\n",
 871                                 hop);
 872                        continue;
 873                }
 874                if (type == 0)
 875                        ring = nhi->tx_rings[hop];
 876                else
 877                        ring = nhi->rx_rings[hop];
 878                if (ring == NULL) {
 879                        dev_warn(&nhi->pdev->dev,
 880                                 "got interrupt for inactive %s ring %d\n",
 881                                 type ? "RX" : "TX",
 882                                 hop);
 883                        continue;
 884                }
 885
 886                spin_lock(&ring->lock);
 887                __ring_interrupt(ring);
 888                spin_unlock(&ring->lock);
 889        }
 890        spin_unlock_irq(&nhi->lock);
 891}
 892
 893static irqreturn_t nhi_msi(int irq, void *data)
 894{
 895        struct tb_nhi *nhi = data;
 896        schedule_work(&nhi->interrupt_work);
 897        return IRQ_HANDLED;
 898}
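/*
 * Note that this path is only used in the single-MSI case. With MSI-X every
 * ring has its own vector and its interrupt is handled directly in
 * ring_msix() without going through nhi_interrupt_work().
 */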
 899
 900static int __nhi_suspend_noirq(struct device *dev, bool wakeup)
 901{
 902        struct pci_dev *pdev = to_pci_dev(dev);
 903        struct tb *tb = pci_get_drvdata(pdev);
 904        struct tb_nhi *nhi = tb->nhi;
 905        int ret;
 906
 907        ret = tb_domain_suspend_noirq(tb);
 908        if (ret)
 909                return ret;
 910
 911        if (nhi->ops && nhi->ops->suspend_noirq) {
 912                ret = nhi->ops->suspend_noirq(tb->nhi, wakeup);
 913                if (ret)
 914                        return ret;
 915        }
 916
 917        return 0;
 918}
 919
 920static int nhi_suspend_noirq(struct device *dev)
 921{
 922        return __nhi_suspend_noirq(dev, device_may_wakeup(dev));
 923}
 924
 925static int nhi_freeze_noirq(struct device *dev)
 926{
 927        struct pci_dev *pdev = to_pci_dev(dev);
 928        struct tb *tb = pci_get_drvdata(pdev);
 929
 930        return tb_domain_freeze_noirq(tb);
 931}
 932
 933static int nhi_thaw_noirq(struct device *dev)
 934{
 935        struct pci_dev *pdev = to_pci_dev(dev);
 936        struct tb *tb = pci_get_drvdata(pdev);
 937
 938        return tb_domain_thaw_noirq(tb);
 939}
 940
 941static bool nhi_wake_supported(struct pci_dev *pdev)
 942{
 943        u8 val;
 944
 945        /*
 946         * If power rails are sustainable for wakeup from S4 this
 947         * property is set by the BIOS.
 948         */
  949        if (!device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val))
 950                return !!val;
 951
 952        return true;
 953}
 954
 955static int nhi_poweroff_noirq(struct device *dev)
 956{
 957        struct pci_dev *pdev = to_pci_dev(dev);
 958        bool wakeup;
 959
 960        wakeup = device_may_wakeup(dev) && nhi_wake_supported(pdev);
 961        return __nhi_suspend_noirq(dev, wakeup);
 962}
 963
 964static void nhi_enable_int_throttling(struct tb_nhi *nhi)
 965{
 966        /* Throttling is specified in 256ns increments */
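        /*
         * For example, 128 * NSEC_PER_USEC is 128000 ns, so the value
         * programmed below is DIV_ROUND_UP(128000, 256) = 500 units,
         * i.e. at most one interrupt roughly every 128 us per vector.
         */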
 967        u32 throttle = DIV_ROUND_UP(128 * NSEC_PER_USEC, 256);
 968        unsigned int i;
 969
 970        /*
 971         * Configure interrupt throttling for all vectors even if we
  972         * only use a few.
 973         */
 974        for (i = 0; i < MSIX_MAX_VECS; i++) {
 975                u32 reg = REG_INT_THROTTLING_RATE + i * 4;
 976                iowrite32(throttle, nhi->iobase + reg);
 977        }
 978}
 979
 980static int nhi_resume_noirq(struct device *dev)
 981{
 982        struct pci_dev *pdev = to_pci_dev(dev);
 983        struct tb *tb = pci_get_drvdata(pdev);
 984        struct tb_nhi *nhi = tb->nhi;
 985        int ret;
 986
 987        /*
 988         * Check that the device is still there. It may be that the user
  989         * unplugged the last device, which causes the host controller to go
 990         * away on PCs.
 991         */
 992        if (!pci_device_is_present(pdev)) {
 993                nhi->going_away = true;
 994        } else {
 995                if (nhi->ops && nhi->ops->resume_noirq) {
 996                        ret = nhi->ops->resume_noirq(nhi);
 997                        if (ret)
 998                                return ret;
 999                }
1000                nhi_enable_int_throttling(tb->nhi);
1001        }
1002
1003        return tb_domain_resume_noirq(tb);
1004}
1005
1006static int nhi_suspend(struct device *dev)
1007{
1008        struct pci_dev *pdev = to_pci_dev(dev);
1009        struct tb *tb = pci_get_drvdata(pdev);
1010
1011        return tb_domain_suspend(tb);
1012}
1013
1014static void nhi_complete(struct device *dev)
1015{
1016        struct pci_dev *pdev = to_pci_dev(dev);
1017        struct tb *tb = pci_get_drvdata(pdev);
1018
1019        /*
1020         * If we were runtime suspended when system suspend started,
1021         * schedule runtime resume now. It should bring the domain back
1022         * to functional state.
1023         */
1024        if (pm_runtime_suspended(&pdev->dev))
1025                pm_runtime_resume(&pdev->dev);
1026        else
1027                tb_domain_complete(tb);
1028}
1029
1030static int nhi_runtime_suspend(struct device *dev)
1031{
1032        struct pci_dev *pdev = to_pci_dev(dev);
1033        struct tb *tb = pci_get_drvdata(pdev);
1034        struct tb_nhi *nhi = tb->nhi;
1035        int ret;
1036
1037        ret = tb_domain_runtime_suspend(tb);
1038        if (ret)
1039                return ret;
1040
1041        if (nhi->ops && nhi->ops->runtime_suspend) {
1042                ret = nhi->ops->runtime_suspend(tb->nhi);
1043                if (ret)
1044                        return ret;
1045        }
1046        return 0;
1047}
1048
1049static int nhi_runtime_resume(struct device *dev)
1050{
1051        struct pci_dev *pdev = to_pci_dev(dev);
1052        struct tb *tb = pci_get_drvdata(pdev);
1053        struct tb_nhi *nhi = tb->nhi;
1054        int ret;
1055
1056        if (nhi->ops && nhi->ops->runtime_resume) {
1057                ret = nhi->ops->runtime_resume(nhi);
1058                if (ret)
1059                        return ret;
1060        }
1061
1062        nhi_enable_int_throttling(nhi);
1063        return tb_domain_runtime_resume(tb);
1064}
1065
1066static void nhi_shutdown(struct tb_nhi *nhi)
1067{
1068        int i;
1069
1070        dev_dbg(&nhi->pdev->dev, "shutdown\n");
1071
1072        for (i = 0; i < nhi->hop_count; i++) {
1073                if (nhi->tx_rings[i])
1074                        dev_WARN(&nhi->pdev->dev,
1075                                 "TX ring %d is still active\n", i);
1076                if (nhi->rx_rings[i])
1077                        dev_WARN(&nhi->pdev->dev,
1078                                 "RX ring %d is still active\n", i);
1079        }
1080        nhi_disable_interrupts(nhi);
1081        /*
1082         * We have to release the irq before calling flush_work. Otherwise an
1083         * already executing IRQ handler could call schedule_work again.
1084         */
1085        if (!nhi->pdev->msix_enabled) {
1086                devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
1087                flush_work(&nhi->interrupt_work);
1088        }
1089        ida_destroy(&nhi->msix_ida);
1090
1091        if (nhi->ops && nhi->ops->shutdown)
1092                nhi->ops->shutdown(nhi);
1093}
1094
1095static void nhi_check_quirks(struct tb_nhi *nhi)
1096{
1097        /*
1098         * Intel hardware supports auto clear of the interrupt status
 1099         * register right after the interrupt is issued.
1100         */
1101        if (nhi->pdev->vendor == PCI_VENDOR_ID_INTEL)
1102                nhi->quirks |= QUIRK_AUTO_CLEAR_INT;
1103}
1104
1105static int nhi_init_msi(struct tb_nhi *nhi)
1106{
1107        struct pci_dev *pdev = nhi->pdev;
1108        int res, irq, nvec;
1109
1110        /* In case someone left them on. */
1111        nhi_disable_interrupts(nhi);
1112
1113        nhi_enable_int_throttling(nhi);
1114
1115        ida_init(&nhi->msix_ida);
1116
1117        /*
1118         * The NHI has 16 MSI-X vectors or a single MSI. We first try to
1119         * get all MSI-X vectors and if we succeed, each ring will have
 1120         * one MSI-X vector. If for some reason that does not work out, we
 1121         * fall back to a single MSI.
1122         */
1123        nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
1124                                     PCI_IRQ_MSIX);
1125        if (nvec < 0) {
1126                nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
1127                if (nvec < 0)
1128                        return nvec;
1129
1130                INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
1131
1132                irq = pci_irq_vector(nhi->pdev, 0);
1133                if (irq < 0)
1134                        return irq;
1135
1136                res = devm_request_irq(&pdev->dev, irq, nhi_msi,
1137                                       IRQF_NO_SUSPEND, "thunderbolt", nhi);
1138                if (res) {
1139                        dev_err(&pdev->dev, "request_irq failed, aborting\n");
1140                        return res;
1141                }
1142        }
1143
1144        return 0;
1145}
1146
1147static bool nhi_imr_valid(struct pci_dev *pdev)
1148{
1149        u8 val;
1150
1151        if (!device_property_read_u8(&pdev->dev, "IMR_VALID", &val))
1152                return !!val;
1153
1154        return true;
1155}
1156
1157static struct tb *nhi_select_cm(struct tb_nhi *nhi)
1158{
1159        struct tb *tb;
1160
1161        /*
 1162         * The USB4 case is simple. If we got control of any of the
 1163         * capabilities, we use the software CM.
1164         */
1165        if (tb_acpi_is_native())
1166                return tb_probe(nhi);
1167
1168        /*
 1169         * Either a firmware based CM is running (we did not get control
 1170         * from the firmware) or this is a pre-USB4 PC, so try the
 1171         * firmware CM first and then fall back to the software CM.
1172         */
1173        tb = icm_probe(nhi);
1174        if (!tb)
1175                tb = tb_probe(nhi);
1176
1177        return tb;
1178}
1179
1180static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1181{
1182        struct tb_nhi *nhi;
1183        struct tb *tb;
1184        int res;
1185
1186        if (!nhi_imr_valid(pdev)) {
1187                dev_warn(&pdev->dev, "firmware image not valid, aborting\n");
1188                return -ENODEV;
1189        }
1190
1191        res = pcim_enable_device(pdev);
1192        if (res) {
1193                dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
1194                return res;
1195        }
1196
1197        res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
1198        if (res) {
1199                dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
1200                return res;
1201        }
1202
1203        nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
1204        if (!nhi)
1205                return -ENOMEM;
1206
1207        nhi->pdev = pdev;
1208        nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
 1209        /* cannot fail - table is allocated in pcim_iomap_regions */
1210        nhi->iobase = pcim_iomap_table(pdev)[0];
1211        nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
1212        dev_dbg(&pdev->dev, "total paths: %d\n", nhi->hop_count);
1213
1214        nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
1215                                     sizeof(*nhi->tx_rings), GFP_KERNEL);
1216        nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
1217                                     sizeof(*nhi->rx_rings), GFP_KERNEL);
1218        if (!nhi->tx_rings || !nhi->rx_rings)
1219                return -ENOMEM;
1220
1221        nhi_check_quirks(nhi);
1222
1223        res = nhi_init_msi(nhi);
1224        if (res) {
1225                dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
1226                return res;
1227        }
1228
1229        spin_lock_init(&nhi->lock);
1230
1231        res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1232        if (res)
1233                res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1234        if (res) {
1235                dev_err(&pdev->dev, "failed to set DMA mask\n");
1236                return res;
1237        }
1238
1239        pci_set_master(pdev);
1240
1241        if (nhi->ops && nhi->ops->init) {
1242                res = nhi->ops->init(nhi);
1243                if (res)
1244                        return res;
1245        }
1246
1247        tb = nhi_select_cm(nhi);
1248        if (!tb) {
1249                dev_err(&nhi->pdev->dev,
1250                        "failed to determine connection manager, aborting\n");
1251                return -ENODEV;
1252        }
1253
1254        dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
1255
1256        res = tb_domain_add(tb);
1257        if (res) {
1258                /*
1259                 * At this point the RX/TX rings might already have been
1260                 * activated. Do a proper shutdown.
1261                 */
1262                tb_domain_put(tb);
1263                nhi_shutdown(nhi);
1264                return res;
1265        }
1266        pci_set_drvdata(pdev, tb);
1267
1268        device_wakeup_enable(&pdev->dev);
1269
1270        pm_runtime_allow(&pdev->dev);
1271        pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY);
1272        pm_runtime_use_autosuspend(&pdev->dev);
1273        pm_runtime_put_autosuspend(&pdev->dev);
1274
1275        return 0;
1276}
1277
1278static void nhi_remove(struct pci_dev *pdev)
1279{
1280        struct tb *tb = pci_get_drvdata(pdev);
1281        struct tb_nhi *nhi = tb->nhi;
1282
1283        pm_runtime_get_sync(&pdev->dev);
1284        pm_runtime_dont_use_autosuspend(&pdev->dev);
1285        pm_runtime_forbid(&pdev->dev);
1286
1287        tb_domain_remove(tb);
1288        nhi_shutdown(nhi);
1289}
1290
1291/*
 1292 * The tunneled pci bridges are siblings of us. Use resume_noirq to reenable
 1293 * the tunnels as early as possible. A corresponding pci quirk blocks the
 1294 * downstream bridges' resume_noirq until we are done.
1295 */
1296static const struct dev_pm_ops nhi_pm_ops = {
1297        .suspend_noirq = nhi_suspend_noirq,
1298        .resume_noirq = nhi_resume_noirq,
1299        .freeze_noirq = nhi_freeze_noirq,  /*
1300                                            * we just disable hotplug, the
1301                                            * pci-tunnels stay alive.
1302                                            */
1303        .thaw_noirq = nhi_thaw_noirq,
1304        .restore_noirq = nhi_resume_noirq,
1305        .suspend = nhi_suspend,
1306        .poweroff_noirq = nhi_poweroff_noirq,
1307        .poweroff = nhi_suspend,
1308        .complete = nhi_complete,
1309        .runtime_suspend = nhi_runtime_suspend,
1310        .runtime_resume = nhi_runtime_resume,
1311};
1312
1313static struct pci_device_id nhi_ids[] = {
1314        /*
 1315         * We have to specify class because the TB bridges use the same device
 1316         * and vendor (sub)id on gen 1 and gen 2 controllers.
1317         */
1318        {
1319                .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
1320                .vendor = PCI_VENDOR_ID_INTEL,
1321                .device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
1322                .subvendor = 0x2222, .subdevice = 0x1111,
1323        },
1324        {
1325                .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
1326                .vendor = PCI_VENDOR_ID_INTEL,
1327                .device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
1328                .subvendor = 0x2222, .subdevice = 0x1111,
1329        },
1330        {
1331                .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
1332                .vendor = PCI_VENDOR_ID_INTEL,
1333                .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
1334                .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
1335        },
1336        {
1337                .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
1338                .vendor = PCI_VENDOR_ID_INTEL,
1339                .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
1340                .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
1341        },
1342
1343        /* Thunderbolt 3 */
1344        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) },
1345        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) },
1346        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) },
1347        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) },
1348        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) },
1349        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) },
1350        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) },
1351        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
1352        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) },
1353        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) },
1354        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI0),
1355          .driver_data = (kernel_ulong_t)&icl_nhi_ops },
1356        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
1357          .driver_data = (kernel_ulong_t)&icl_nhi_ops },
1358        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI0),
1359          .driver_data = (kernel_ulong_t)&icl_nhi_ops },
1360        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI1),
1361          .driver_data = (kernel_ulong_t)&icl_nhi_ops },
1362        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI0),
1363          .driver_data = (kernel_ulong_t)&icl_nhi_ops },
1364        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI1),
1365          .driver_data = (kernel_ulong_t)&icl_nhi_ops },
1366        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADL_NHI0),
1367          .driver_data = (kernel_ulong_t)&icl_nhi_ops },
1368        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADL_NHI1),
1369          .driver_data = (kernel_ulong_t)&icl_nhi_ops },
1370
1371        /* Any USB4 compliant host */
1372        { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
1373
1374        { 0,}
1375};
1376
1377MODULE_DEVICE_TABLE(pci, nhi_ids);
1378MODULE_LICENSE("GPL");
1379
1380static struct pci_driver nhi_driver = {
1381        .name = "thunderbolt",
1382        .id_table = nhi_ids,
1383        .probe = nhi_probe,
1384        .remove = nhi_remove,
1385        .shutdown = nhi_remove,
1386        .driver.pm = &nhi_pm_ops,
1387};
1388
1389static int __init nhi_init(void)
1390{
1391        int ret;
1392
1393        ret = tb_domain_init();
1394        if (ret)
1395                return ret;
1396        ret = pci_register_driver(&nhi_driver);
1397        if (ret)
1398                tb_domain_exit();
1399        return ret;
1400}
1401
1402static void __exit nhi_unload(void)
1403{
1404        pci_unregister_driver(&nhi_driver);
1405        tb_domain_exit();
1406}
1407
1408rootfs_initcall(nhi_init);
1409module_exit(nhi_unload);
1410