linux/drivers/thunderbolt/nhi.c
/*
 * Thunderbolt Cactus Ridge driver - NHI driver
 *
 * The NHI (native host interface) is the PCI device that allows us to send and
 * receive frames from the Thunderbolt bus.
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dmi.h>

#include "nhi.h"
#include "nhi_regs.h"
#include "tb.h"

#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")


static int ring_interrupt_index(struct tb_ring *ring)
{
        int bit = ring->hop;
        if (!ring->is_tx)
                bit += ring->nhi->hop_count;
        return bit;
}
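
/*
 * Illustration (not part of the original driver): with the hop_count of 12
 * that nhi_probe() expects, the interrupt bits are laid out as
 *
 *      TX ring 0..11 -> bits  0..11
 *      RX ring 0..11 -> bits 12..23
 *
 * so e.g. RX ring 3 maps to bit 15 of the dword at REG_RING_INTERRUPT_BASE.
 */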

/**
 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
 *
 * ring->nhi->lock must be held.
 */
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
        int reg = REG_RING_INTERRUPT_BASE + ring_interrupt_index(ring) / 32;
        int bit = ring_interrupt_index(ring) & 31;
        int mask = 1 << bit;
        u32 old, new;
        old = ioread32(ring->nhi->iobase + reg);
        if (active)
                new = old | mask;
        else
                new = old & ~mask;

        dev_info(&ring->nhi->pdev->dev,
                 "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
                 active ? "enabling" : "disabling", reg, bit, old, new);

        if (new == old)
                dev_WARN(&ring->nhi->pdev->dev,
                                         "interrupt for %s %d is already %s\n",
                                         RING_TYPE(ring), ring->hop,
                                         active ? "enabled" : "disabled");
        iowrite32(new, ring->nhi->iobase + reg);
}

/**
 * nhi_disable_interrupts() - disable interrupts for all rings
 *
 * Use only during init and shutdown.
 */
static void nhi_disable_interrupts(struct tb_nhi *nhi)
{
        int i = 0;
        /* disable interrupts */
        for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
                iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);

        /* clear interrupt status bits */
        for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
                ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
}

/* ring helper methods */

static void __iomem *ring_desc_base(struct tb_ring *ring)
{
        void __iomem *io = ring->nhi->iobase;
        io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
        io += ring->hop * 16;
        return io;
}

static void __iomem *ring_options_base(struct tb_ring *ring)
{
        void __iomem *io = ring->nhi->iobase;
        io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
        io += ring->hop * 32;
        return io;
}

static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
{
        iowrite16(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
{
        iowrite32(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
{
        iowrite32(value, ring_desc_base(ring) + offset);
        iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
}

static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
{
        iowrite32(value, ring_options_base(ring) + offset);
}

static bool ring_full(struct tb_ring *ring)
{
        return ((ring->head + 1) % ring->size) == ring->tail;
}

static bool ring_empty(struct tb_ring *ring)
{
        return ring->head == ring->tail;
}
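
/*
 * Note (added for clarity): head == tail means empty and
 * (head + 1) % size == tail means full, so one descriptor slot is always
 * left unused. A ring allocated with size 16, for example, can have at
 * most 15 frames posted at any time.
 */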

/**
 * ring_write_descriptors() - post frames from ring->queue to the controller
 *
 * ring->lock is held.
 */
static void ring_write_descriptors(struct tb_ring *ring)
{
        struct ring_frame *frame, *n;
        struct ring_desc *descriptor;
        list_for_each_entry_safe(frame, n, &ring->queue, list) {
                if (ring_full(ring))
                        break;
                list_move_tail(&frame->list, &ring->in_flight);
                descriptor = &ring->descriptors[ring->head];
                descriptor->phys = frame->buffer_phy;
                descriptor->time = 0;
                descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
                if (ring->is_tx) {
                        descriptor->length = frame->size;
                        descriptor->eof = frame->eof;
                        descriptor->sof = frame->sof;
                }
                ring->head = (ring->head + 1) % ring->size;
                ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
        }
}

/**
 * ring_work() - progress completed frames
 *
 * If the ring is shutting down then all frames are marked as canceled and
 * their callbacks are invoked.
 *
 * Otherwise we collect all completed frames from the ring buffer, write new
 * frames to the ring buffer and invoke the callbacks for the completed frames.
 */
static void ring_work(struct work_struct *work)
{
        struct tb_ring *ring = container_of(work, typeof(*ring), work);
        struct ring_frame *frame;
        bool canceled = false;
        LIST_HEAD(done);
        mutex_lock(&ring->lock);

        if (!ring->running) {
                /* Move all frames to done and mark them as canceled. */
                list_splice_tail_init(&ring->in_flight, &done);
                list_splice_tail_init(&ring->queue, &done);
                canceled = true;
                goto invoke_callback;
        }

        while (!ring_empty(ring)) {
                if (!(ring->descriptors[ring->tail].flags
                                & RING_DESC_COMPLETED))
                        break;
                frame = list_first_entry(&ring->in_flight, typeof(*frame),
                                         list);
                list_move_tail(&frame->list, &done);
                if (!ring->is_tx) {
                        frame->size = ring->descriptors[ring->tail].length;
                        frame->eof = ring->descriptors[ring->tail].eof;
                        frame->sof = ring->descriptors[ring->tail].sof;
                        frame->flags = ring->descriptors[ring->tail].flags;
                        if (frame->sof != 0)
                                dev_WARN(&ring->nhi->pdev->dev,
                                         "%s %d got unexpected SOF: %#x\n",
                                         RING_TYPE(ring), ring->hop,
                                         frame->sof);
                        /*
                         * known flags:
                         * raw not enabled, interrupt not set: 0x2=0010
                         * raw enabled: 0xa=1010
                         * raw not enabled: 0xb=1011
                         * partial frame (>MAX_FRAME_SIZE): 0xe=1110
                         */
                        if (frame->flags != 0xa)
                                dev_WARN(&ring->nhi->pdev->dev,
                                         "%s %d got unexpected flags: %#x\n",
                                         RING_TYPE(ring), ring->hop,
                                         frame->flags);
                }
                ring->tail = (ring->tail + 1) % ring->size;
        }
        ring_write_descriptors(ring);

invoke_callback:
        mutex_unlock(&ring->lock); /* allow callbacks to schedule new work */
        while (!list_empty(&done)) {
                frame = list_first_entry(&done, typeof(*frame), list);
                /*
                 * The callback may re-enqueue or delete the frame.
                 * Do not hold on to it.
                 */
                list_del_init(&frame->list);
                frame->callback(ring, frame, canceled);
        }
}

int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
{
        int ret = 0;
        mutex_lock(&ring->lock);
        if (ring->running) {
                list_add_tail(&frame->list, &ring->queue);
                ring_write_descriptors(ring);
        } else {
                ret = -ESHUTDOWN;
        }
        mutex_unlock(&ring->lock);
        return ret;
}
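
/*
 * Usage sketch (illustrative only; my_callback and dma_handle are
 * hypothetical): a caller prepares a ring_frame with a DMA-mapped buffer
 * and a completion callback, then submits it through the ring_tx()/ring_rx()
 * wrappers (see nhi.h), which end up in __ring_enqueue():
 *
 *      static void my_callback(struct tb_ring *ring, struct ring_frame *frame,
 *                              bool canceled)
 *      {
 *              // For RX frames size/eof/sof/flags have been filled in by
 *              // ring_work(); canceled is true if the ring was stopped.
 *      }
 *
 *      frame->buffer_phy = dma_handle;
 *      frame->callback = my_callback;
 *      if (ring_rx(ring, frame))
 *              // -ESHUTDOWN: the ring is not running
 */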

static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
                                  bool transmit)
{
        struct tb_ring *ring = NULL;
        dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
                 transmit ? "TX" : "RX", hop, size);

        mutex_lock(&nhi->lock);
        if (hop >= nhi->hop_count) {
                dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
                goto err;
        }
        if (transmit && nhi->tx_rings[hop]) {
                dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop);
                goto err;
        } else if (!transmit && nhi->rx_rings[hop]) {
                dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop);
                goto err;
        }
        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring)
                goto err;

        mutex_init(&ring->lock);
        INIT_LIST_HEAD(&ring->queue);
        INIT_LIST_HEAD(&ring->in_flight);
        INIT_WORK(&ring->work, ring_work);

        ring->nhi = nhi;
        ring->hop = hop;
        ring->is_tx = transmit;
        ring->size = size;
        ring->head = 0;
        ring->tail = 0;
        ring->running = false;
        ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
                        size * sizeof(*ring->descriptors),
                        &ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
        if (!ring->descriptors)
                goto err;

        if (transmit)
                nhi->tx_rings[hop] = ring;
        else
                nhi->rx_rings[hop] = ring;
        mutex_unlock(&nhi->lock);
        return ring;

err:
        if (ring)
                mutex_destroy(&ring->lock);
        kfree(ring);
        mutex_unlock(&nhi->lock);
        return NULL;
}

struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size)
{
        return ring_alloc(nhi, hop, size, true);
}

struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size)
{
        return ring_alloc(nhi, hop, size, false);
}
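
/*
 * Typical setup (sketch; the hop/size values are only examples): allocate a
 * ring pair and start it before enqueueing frames:
 *
 *      struct tb_ring *tx = ring_alloc_tx(nhi, 0, 10);
 *      struct tb_ring *rx = ring_alloc_rx(nhi, 0, 10);
 *      if (!tx || !rx)
 *              // ring_alloc() returns NULL on any error
 *      ring_start(tx);
 *      ring_start(rx);
 *
 * hop selects which of the controller's rings is used (0..hop_count-1) and
 * size is the number of descriptors in the ring.
 */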

/**
 * ring_start() - enable a ring
 *
 * Must not be invoked in parallel with ring_stop().
 */
void ring_start(struct tb_ring *ring)
{
        mutex_lock(&ring->nhi->lock);
        mutex_lock(&ring->lock);
        if (ring->running) {
                dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
                goto err;
        }
        dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
                 RING_TYPE(ring), ring->hop);

        ring_iowrite64desc(ring, ring->descriptors_dma, 0);
        if (ring->is_tx) {
                ring_iowrite32desc(ring, ring->size, 12);
                ring_iowrite32options(ring, 0, 4); /* time related? */
                ring_iowrite32options(ring,
                                      RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
        } else {
                ring_iowrite32desc(ring,
                                   (TB_FRAME_SIZE << 16) | ring->size, 12);
                ring_iowrite32options(ring, 0xffffffff, 4); /* SOF EOF mask */
                ring_iowrite32options(ring,
                                      RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
        }
        ring_interrupt_active(ring, true);
        ring->running = true;
err:
        mutex_unlock(&ring->lock);
        mutex_unlock(&ring->nhi->lock);
}
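
/*
 * For reference (informal summary derived from ring_start() and
 * ring_write_descriptors(); the hardware register names may differ):
 *
 *      desc base + 0x00: physical address of the descriptor array (64 bit)
 *      desc base + 0x08: RX producer index, + 0x0a: TX producer index
 *      desc base + 0x0c: ring size (RX also carries TB_FRAME_SIZE << 16)
 *      opts base + 0x00: ring flags (RING_FLAG_ENABLE | RING_FLAG_RAW)
 *      opts base + 0x04: 0 for TX, SOF/EOF mask (0xffffffff) for RX
 */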


/**
 * ring_stop() - shutdown a ring
 *
 * Must not be invoked from a callback.
 *
 * This method will disable the ring. Further calls to ring_tx/ring_rx will
 * return -ESHUTDOWN until the ring is restarted with ring_start().
 *
 * All enqueued frames will be canceled and their callbacks will be executed
 * with frame->canceled set to true (on the callback thread). This method
 * returns only after all callback invocations have finished.
 */
void ring_stop(struct tb_ring *ring)
{
        mutex_lock(&ring->nhi->lock);
        mutex_lock(&ring->lock);
        dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
                 RING_TYPE(ring), ring->hop);
        if (!ring->running) {
                dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
                         RING_TYPE(ring), ring->hop);
                goto err;
        }
        ring_interrupt_active(ring, false);

        ring_iowrite32options(ring, 0, 0);
        ring_iowrite64desc(ring, 0, 0);
        ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
        ring_iowrite32desc(ring, 0, 12);
        ring->head = 0;
        ring->tail = 0;
        ring->running = false;

err:
        mutex_unlock(&ring->lock);
        mutex_unlock(&ring->nhi->lock);

        /*
         * Schedule ring->work to invoke callbacks on all remaining frames.
         */
        schedule_work(&ring->work);
        flush_work(&ring->work);
}
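
/*
 * Teardown sketch (illustrative): always stop before freeing and never do
 * either from a frame callback:
 *
 *      ring_stop(ring);        // cancels pending frames, runs their callbacks
 *      ring_free(ring);        // waits for ring->work, then releases the ring
 */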

/**
 * ring_free() - free ring
 *
 * When this method returns all invocations of ring->callback will have
 * finished.
 *
 * Ring must be stopped.
 *
 * Must NOT be called from ring_frame->callback!
 */
void ring_free(struct tb_ring *ring)
{
        mutex_lock(&ring->nhi->lock);
        /*
         * Dissociate the ring from the NHI. This also ensures that
         * nhi_interrupt_work cannot reschedule ring->work.
         */
        if (ring->is_tx)
                ring->nhi->tx_rings[ring->hop] = NULL;
        else
                ring->nhi->rx_rings[ring->hop] = NULL;

        if (ring->running) {
                dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
                         RING_TYPE(ring), ring->hop);
        }

        dma_free_coherent(&ring->nhi->pdev->dev,
                          ring->size * sizeof(*ring->descriptors),
                          ring->descriptors, ring->descriptors_dma);

        ring->descriptors = NULL;
        ring->descriptors_dma = 0;

        dev_info(&ring->nhi->pdev->dev,
                 "freeing %s %d\n",
                 RING_TYPE(ring),
                 ring->hop);

        mutex_unlock(&ring->nhi->lock);
        /*
         * ring->work can no longer be scheduled (it is scheduled only by
         * nhi_interrupt_work and ring_stop). Wait for it to finish before
         * freeing the ring.
         */
        flush_work(&ring->work);
        mutex_destroy(&ring->lock);
        kfree(ring);
}

static void nhi_interrupt_work(struct work_struct *work)
{
        struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
        int value = 0; /* Suppress uninitialized usage warning. */
        int bit;
        int hop = -1;
        int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
        struct tb_ring *ring;

        mutex_lock(&nhi->lock);

        /*
         * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
         * (TX, RX, RX overflow). We iterate over the bits and read new
         * dwords as required. The registers are cleared on read.
         */
        for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
                if (bit % 32 == 0)
                        value = ioread32(nhi->iobase
                                         + REG_RING_NOTIFY_BASE
                                         + 4 * (bit / 32));
                if (++hop == nhi->hop_count) {
                        hop = 0;
                        type++;
                }
                if ((value & (1 << (bit % 32))) == 0)
                        continue;
                if (type == 2) {
                        dev_warn(&nhi->pdev->dev,
                                 "RX overflow for ring %d\n",
                                 hop);
                        continue;
                }
                if (type == 0)
                        ring = nhi->tx_rings[hop];
                else
                        ring = nhi->rx_rings[hop];
                if (ring == NULL) {
                        dev_warn(&nhi->pdev->dev,
                                 "got interrupt for inactive %s ring %d\n",
                                 type ? "RX" : "TX",
                                 hop);
                        continue;
                }
                /* we do not check ring->running, this is done in ring->work */
                schedule_work(&ring->work);
        }
        mutex_unlock(&nhi->lock);
}
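
/*
 * Example (assuming the expected hop_count of 12): in the notify bitfields
 * scanned by nhi_interrupt_work() above, bit 5 means "TX ring 5 completed",
 * bit 17 means "RX ring 5 completed" and bit 29 means "RX ring 5 overflow".
 */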

static irqreturn_t nhi_msi(int irq, void *data)
{
        struct tb_nhi *nhi = data;
        schedule_work(&nhi->interrupt_work);
        return IRQ_HANDLED;
}

static int nhi_suspend_noirq(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct tb *tb = pci_get_drvdata(pdev);
        thunderbolt_suspend(tb);
        return 0;
}

static int nhi_resume_noirq(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct tb *tb = pci_get_drvdata(pdev);
        thunderbolt_resume(tb);
        return 0;
}

static void nhi_shutdown(struct tb_nhi *nhi)
{
        int i;
        dev_info(&nhi->pdev->dev, "shutdown\n");

        for (i = 0; i < nhi->hop_count; i++) {
                if (nhi->tx_rings[i])
                        dev_WARN(&nhi->pdev->dev,
                                 "TX ring %d is still active\n", i);
                if (nhi->rx_rings[i])
                        dev_WARN(&nhi->pdev->dev,
                                 "RX ring %d is still active\n", i);
        }
        nhi_disable_interrupts(nhi);
        /*
         * We have to release the irq before calling flush_work. Otherwise an
         * already executing IRQ handler could call schedule_work again.
         */
        devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
        flush_work(&nhi->interrupt_work);
        mutex_destroy(&nhi->lock);
}

static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct tb_nhi *nhi;
        struct tb *tb;
        int res;

        res = pcim_enable_device(pdev);
        if (res) {
                dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
                return res;
        }

        res = pci_enable_msi(pdev);
        if (res) {
                dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
                return res;
        }

        res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
        if (res) {
                dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
                return res;
        }

        nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
        if (!nhi)
                return -ENOMEM;

        nhi->pdev = pdev;
        /* cannot fail - table is allocated in pcim_iomap_regions */
        nhi->iobase = pcim_iomap_table(pdev)[0];
        nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
        if (nhi->hop_count != 12)
                dev_warn(&pdev->dev, "unexpected hop count: %d\n",
                         nhi->hop_count);
        INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);

        nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
                                     sizeof(*nhi->tx_rings), GFP_KERNEL);
        nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
                                     sizeof(*nhi->rx_rings), GFP_KERNEL);
        if (!nhi->tx_rings || !nhi->rx_rings)
                return -ENOMEM;

        nhi_disable_interrupts(nhi); /* In case someone left them on. */
        res = devm_request_irq(&pdev->dev, pdev->irq, nhi_msi,
                               IRQF_NO_SUSPEND, /* must work during _noirq */
                               "thunderbolt", nhi);
        if (res) {
                dev_err(&pdev->dev, "request_irq failed, aborting\n");
                return res;
        }

        mutex_init(&nhi->lock);

        pci_set_master(pdev);

        /* magic value - clock related? */
        iowrite32(3906250 / 10000, nhi->iobase + 0x38c00);

        dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
        tb = thunderbolt_alloc_and_start(nhi);
        if (!tb) {
                /*
                 * At this point the RX/TX rings might already have been
                 * activated. Do a proper shutdown.
                 */
                nhi_shutdown(nhi);
                return -EIO;
        }
        pci_set_drvdata(pdev, tb);

        return 0;
}

static void nhi_remove(struct pci_dev *pdev)
{
        struct tb *tb = pci_get_drvdata(pdev);
        struct tb_nhi *nhi = tb->nhi;
        thunderbolt_shutdown_and_free(tb);
        nhi_shutdown(nhi);
}

/*
 * The tunneled PCI bridges are siblings of ours. Use resume_noirq to reenable
 * the tunnels asap. A corresponding PCI quirk blocks the downstream bridges'
 * resume_noirq until we are done.
 */
static const struct dev_pm_ops nhi_pm_ops = {
        .suspend_noirq = nhi_suspend_noirq,
        .resume_noirq = nhi_resume_noirq,
        .freeze_noirq = nhi_suspend_noirq, /*
                                            * we just disable hotplug, the
                                            * pci-tunnels stay alive.
                                            */
        .restore_noirq = nhi_resume_noirq,
};

static struct pci_device_id nhi_ids[] = {
        /*
         * We have to specify class, as the TB bridges use the same device and
         * vendor (sub)id.
         */
        {
                .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
                .vendor = PCI_VENDOR_ID_INTEL, .device = 0x1547,
                .subvendor = 0x2222, .subdevice = 0x1111,
        },
        {
                .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
                .vendor = PCI_VENDOR_ID_INTEL, .device = 0x156c,
                .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
        },
        { 0,}
};

MODULE_DEVICE_TABLE(pci, nhi_ids);
MODULE_LICENSE("GPL");

static struct pci_driver nhi_driver = {
        .name = "thunderbolt",
        .id_table = nhi_ids,
        .probe = nhi_probe,
        .remove = nhi_remove,
        .driver.pm = &nhi_pm_ops,
};

static int __init nhi_init(void)
{
        if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
                return -ENOSYS;
        return pci_register_driver(&nhi_driver);
}

static void __exit nhi_unload(void)
{
        pci_unregister_driver(&nhi_driver);
}

module_init(nhi_init);
module_exit(nhi_unload);