/*
 * Thunderbolt Cactus Ridge driver - NHI driver
 *
 * The NHI (native host interface) is the pci device that allows us to send
 * and receive frames from the thunderbolt bus.
 */

#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/delay.h>

#include "nhi.h"
#include "nhi_regs.h"
#include "tb.h"

#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")

/*
 * Minimal number of vectors when we use MSI-X. Two for control channel
 * Rx/Tx and the rest four are for cross domain DMA paths.
 */
#define MSIX_MIN_VECS		6
#define MSIX_MAX_VECS		16

#define NHI_MAILBOX_TIMEOUT	500 /* ms */

static int ring_interrupt_index(struct tb_ring *ring)
{
	int bit = ring->hop;
	if (!ring->is_tx)
		bit += ring->nhi->hop_count;
	return bit;
}

/*
 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
 *
 * ring->nhi->lock must be held.
 */
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
	int reg = REG_RING_INTERRUPT_BASE +
		  ring_interrupt_index(ring) / 32 * 4;
	int bit = ring_interrupt_index(ring) & 31;
	int mask = 1 << bit;
	u32 old, new;

	if (ring->irq > 0) {
		u32 step, shift, ivr, misc;
		void __iomem *ivr_base;
		int index;

		if (ring->is_tx)
			index = ring->hop;
		else
			index = ring->hop + ring->nhi->hop_count;

		/*
		 * Ask the hardware to clear interrupt status bits automatically
		 * since we already know which interrupt was triggered.
		 */
		misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
		if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
			misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
			iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
		}

		/* Route this ring's interrupt to its MSI-X vector. */
		ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
		step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		ivr = ioread32(ivr_base + step);
		ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift);
		if (active)
			ivr |= ring->vector << shift;
		iowrite32(ivr, ivr_base + step);
	}

	old = ioread32(ring->nhi->iobase + reg);
	if (active)
		new = old | mask;
	else
		new = old & ~mask;

	dev_info(&ring->nhi->pdev->dev,
		 "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
		 active ? "enabling" : "disabling", reg, bit, old, new);

	if (new == old)
		dev_WARN(&ring->nhi->pdev->dev,
			 "interrupt for %s %d is already %s\n",
			 RING_TYPE(ring), ring->hop,
			 active ? "enabled" : "disabled");
	iowrite32(new, ring->nhi->iobase + reg);
}

/*
 * nhi_disable_interrupts() - disable interrupts for all rings
 *
 * Use only during init and shutdown.
 */
static void nhi_disable_interrupts(struct tb_nhi *nhi)
{
	int i = 0;
	/* disable interrupts */
	for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
		iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);

	/* clear interrupt status bits */
	for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
		ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
}

/* ring helper methods */

static void __iomem *ring_desc_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
	io += ring->hop * 16;
	return io;
}

static void __iomem *ring_options_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
	io += ring->hop * 32;
	return io;
}

static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite16(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
	iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
}
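
/*
 * The descriptor base is a 64-bit value, written above as two 32-bit
 * accesses (low dword at offset, high dword at offset + 4) since the
 * io accessors used here are 32 bits wide.
 */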

static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_options_base(ring) + offset);
}

static bool ring_full(struct tb_ring *ring)
{
	return ((ring->head + 1) % ring->size) == ring->tail;
}

static bool ring_empty(struct tb_ring *ring)
{
	return ring->head == ring->tail;
}
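
/*
 * head and tail implement a standard circular buffer: the ring is empty
 * when head == tail and full when advancing head would make them equal
 * again, so one descriptor slot is always left unused. For example, with
 * size == 8, head == 3 and tail == 1, two frames are in flight and five
 * more can be posted.
 */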

/*
 * ring_write_descriptors() - post frames from ring->queue to the controller
 *
 * ring->lock is held.
 */
static void ring_write_descriptors(struct tb_ring *ring)
{
	struct ring_frame *frame, *n;
	struct ring_desc *descriptor;
	list_for_each_entry_safe(frame, n, &ring->queue, list) {
		if (ring_full(ring))
			break;
		list_move_tail(&frame->list, &ring->in_flight);
		descriptor = &ring->descriptors[ring->head];
		descriptor->phys = frame->buffer_phy;
		descriptor->time = 0;
		descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
		if (ring->is_tx) {
			descriptor->length = frame->size;
			descriptor->eof = frame->eof;
			descriptor->sof = frame->sof;
		}
		ring->head = (ring->head + 1) % ring->size;
		ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
	}
}

/*
 * ring_work() - progress completed frames
 *
 * If the ring is shutting down then all frames are marked as canceled and
 * their callbacks are invoked.
 *
 * Otherwise we collect all completed frames from the ring buffer, write new
 * frames to the ring buffer and invoke the callbacks for the completed
 * frames.
 */
static void ring_work(struct work_struct *work)
{
	struct tb_ring *ring = container_of(work, typeof(*ring), work);
	struct ring_frame *frame;
	bool canceled = false;
	LIST_HEAD(done);
	mutex_lock(&ring->lock);

	if (!ring->running) {
		/* Move all frames to done and mark them as canceled. */
		list_splice_tail_init(&ring->in_flight, &done);
		list_splice_tail_init(&ring->queue, &done);
		canceled = true;
		goto invoke_callback;
	}

	while (!ring_empty(ring)) {
		if (!(ring->descriptors[ring->tail].flags
				& RING_DESC_COMPLETED))
			break;
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_move_tail(&frame->list, &done);
		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
			if (frame->sof != 0)
				dev_WARN(&ring->nhi->pdev->dev,
					 "%s %d got unexpected SOF: %#x\n",
					 RING_TYPE(ring), ring->hop,
					 frame->sof);
			/*
			 * known flags:
			 * raw not enabled, interrupt not set: 0x2=0010
			 * raw enabled: 0xa=1010
			 * raw not enabled: 0xb=1011
			 * partial frame (>MAX_FRAME_SIZE): 0xe=1110
			 */
			if (frame->flags != 0xa)
				dev_WARN(&ring->nhi->pdev->dev,
					 "%s %d got unexpected flags: %#x\n",
					 RING_TYPE(ring), ring->hop,
					 frame->flags);
		}
		ring->tail = (ring->tail + 1) % ring->size;
	}
	ring_write_descriptors(ring);

invoke_callback:
	mutex_unlock(&ring->lock); /* allow callbacks to schedule new work */
	while (!list_empty(&done)) {
		frame = list_first_entry(&done, typeof(*frame), list);
		/*
		 * The callback may reenqueue or delete frame.
		 * Do not hold on to it.
		 */
		list_del_init(&frame->list);
		frame->callback(ring, frame, canceled);
	}
}

int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
{
	int ret = 0;
	mutex_lock(&ring->lock);
	if (ring->running) {
		list_add_tail(&frame->list, &ring->queue);
		ring_write_descriptors(ring);
	} else {
		ret = -ESHUTDOWN;
	}
	mutex_unlock(&ring->lock);
	return ret;
}
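
/*
 * Illustrative producer sketch (not a path in this file; the callback
 * name and DMA address are hypothetical). A caller fills in a
 * struct ring_frame and enqueues it; frame->callback runs once the
 * descriptor completes or the ring is shut down:
 *
 *	frame->buffer_phy = dma_addr;
 *	frame->callback = my_frame_done;
 *	frame->size = len;
 *	frame->sof = sof_pdf;
 *	frame->eof = eof_pdf;
 *	ret = __ring_enqueue(ring, frame);
 */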

static irqreturn_t ring_msix(int irq, void *data)
{
	struct tb_ring *ring = data;

	schedule_work(&ring->work);
	return IRQ_HANDLED;
}

static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
{
	struct tb_nhi *nhi = ring->nhi;
	unsigned long irqflags;
	int ret;

	if (!nhi->pdev->msix_enabled)
		return 0;

	ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
	if (ret < 0)
		return ret;

	ring->vector = ret;

	ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector);
	if (ring->irq < 0)
		return ring->irq;

	irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
	return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
}

static void ring_release_msix(struct tb_ring *ring)
{
	if (ring->irq <= 0)
		return;

	free_irq(ring->irq, ring);
	ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
	ring->vector = 0;
	ring->irq = 0;
}

static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
				  bool transmit, unsigned int flags)
{
	struct tb_ring *ring = NULL;
	dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
		 transmit ? "TX" : "RX", hop, size);

	mutex_lock(&nhi->lock);
	if (hop >= nhi->hop_count) {
		dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
		goto err;
	}
	if (transmit && nhi->tx_rings[hop]) {
		dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop);
		goto err;
	} else if (!transmit && nhi->rx_rings[hop]) {
		dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop);
		goto err;
	}
	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto err;

	mutex_init(&ring->lock);
	INIT_LIST_HEAD(&ring->queue);
	INIT_LIST_HEAD(&ring->in_flight);
	INIT_WORK(&ring->work, ring_work);

	ring->nhi = nhi;
	ring->hop = hop;
	ring->is_tx = transmit;
	ring->size = size;
	ring->flags = flags;
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;

	if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
		goto err;

	ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
			size * sizeof(*ring->descriptors),
			&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
	if (!ring->descriptors)
		goto err;

	if (transmit)
		nhi->tx_rings[hop] = ring;
	else
		nhi->rx_rings[hop] = ring;
	mutex_unlock(&nhi->lock);
	return ring;

err:
	if (ring)
		mutex_destroy(&ring->lock);
	kfree(ring);
	mutex_unlock(&nhi->lock);
	return NULL;
}

struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
			      unsigned int flags)
{
	return ring_alloc(nhi, hop, size, true, flags);
}

struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
			      unsigned int flags)
{
	return ring_alloc(nhi, hop, size, false, flags);
}
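
/*
 * Typical ring lifecycle as seen by a consumer of this API (illustrative
 * sketch; the hop and size values are made up):
 *
 *	struct tb_ring *tx = ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
 *	if (tx) {
 *		ring_start(tx);
 *		... enqueue frames via __ring_enqueue() ...
 *		ring_stop(tx);
 *		ring_free(tx);
 *	}
 */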

/*
 * ring_start() - enable a ring
 *
 * Must not be invoked in parallel with ring_stop().
 */
void ring_start(struct tb_ring *ring)
{
	mutex_lock(&ring->nhi->lock);
	mutex_lock(&ring->lock);
	if (ring->nhi->going_away)
		goto err;
	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
		goto err;
	}
	dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
		 RING_TYPE(ring), ring->hop);

	ring_iowrite64desc(ring, ring->descriptors_dma, 0);
	if (ring->is_tx) {
		ring_iowrite32desc(ring, ring->size, 12);
		ring_iowrite32options(ring, 0, 4); /* time related? */
		ring_iowrite32options(ring,
				      RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
	} else {
		ring_iowrite32desc(ring,
				   (TB_FRAME_SIZE << 16) | ring->size, 12);
		ring_iowrite32options(ring, 0xffffffff, 4); /* SOF EOF mask */
		ring_iowrite32options(ring,
				      RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
	}
	ring_interrupt_active(ring, true);
	ring->running = true;
err:
	mutex_unlock(&ring->lock);
	mutex_unlock(&ring->nhi->lock);
}

/*
 * ring_stop() - shutdown a ring
 *
 * Must not be invoked from a callback.
 *
 * This method will disable the ring. Further calls to ring_tx/ring_rx will
 * return -ESHUTDOWN until ring_stop has been called.
 *
 * All enqueued frames will be canceled and their callbacks will be executed
 * with frame->canceled set to true (on the callback thread). This method
 * returns only after all callback invocations have finished.
 */
void ring_stop(struct tb_ring *ring)
{
	mutex_lock(&ring->nhi->lock);
	mutex_lock(&ring->lock);
	dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
		 RING_TYPE(ring), ring->hop);
	if (ring->nhi->going_away)
		goto err;
	if (!ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
			 RING_TYPE(ring), ring->hop);
		goto err;
	}
	ring_interrupt_active(ring, false);

	ring_iowrite32options(ring, 0, 0);
	ring_iowrite64desc(ring, 0, 0);
	ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
	ring_iowrite32desc(ring, 0, 12);
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;

err:
	mutex_unlock(&ring->lock);
	mutex_unlock(&ring->nhi->lock);

	/*
	 * schedule ring->work to invoke callbacks on all remaining frames.
	 */
	schedule_work(&ring->work);
	flush_work(&ring->work);
}

/*
 * ring_free() - free ring
 *
 * When this method returns all invocations of ring->callback will have
 * finished.
 *
 * Ring must be stopped.
 *
 * Must NOT be called from ring_frame->callback!
 */
void ring_free(struct tb_ring *ring)
{
	mutex_lock(&ring->nhi->lock);
	/*
	 * Dissociate the ring from the NHI. This also ensures that
	 * nhi_interrupt_work cannot reschedule ring->work.
	 */
	if (ring->is_tx)
		ring->nhi->tx_rings[ring->hop] = NULL;
	else
		ring->nhi->rx_rings[ring->hop] = NULL;

	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
			 RING_TYPE(ring), ring->hop);
	}

	ring_release_msix(ring);

	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);

	ring->descriptors = NULL;
	ring->descriptors_dma = 0;

	dev_info(&ring->nhi->pdev->dev,
		 "freeing %s %d\n",
		 RING_TYPE(ring),
		 ring->hop);

	mutex_unlock(&ring->nhi->lock);
	/*
	 * ring->work can no longer be scheduled (it is scheduled only
	 * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
	 * to finish before freeing the ring.
	 */
	flush_work(&ring->work);
	mutex_destroy(&ring->lock);
	kfree(ring);
}

/**
 * nhi_mailbox_cmd() - Send a command through NHI mailbox
 * @nhi: Pointer to the NHI structure
 * @cmd: Command to send
 * @data: Data to be sent with the command
 *
 * Sends mailbox command to the firmware running on NHI. Returns %0 in
 * case of success and negative errno in case of failure.
 */
int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
{
	ktime_t timeout;
	u32 val;

	iowrite32(data, nhi->iobase + REG_INMAIL_DATA);

	val = ioread32(nhi->iobase + REG_INMAIL_CMD);
	val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR);
	val |= REG_INMAIL_OP_REQUEST | cmd;
	iowrite32(val, nhi->iobase + REG_INMAIL_CMD);

	timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT);
	do {
		val = ioread32(nhi->iobase + REG_INMAIL_CMD);
		if (!(val & REG_INMAIL_OP_REQUEST))
			break;
		usleep_range(10, 20);
	} while (ktime_before(ktime_get(), timeout));

	if (val & REG_INMAIL_OP_REQUEST)
		return -ETIMEDOUT;
	if (val & REG_INMAIL_ERROR)
		return -EIO;

	return 0;
}
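
/*
 * Example invocation (assuming the nhi_mailbox_cmd values declared in
 * nhi.h, as used by the firmware connection manager code):
 *
 *	ret = nhi_mailbox_cmd(nhi, NHI_MAILBOX_SAVE_DEVS, 0);
 */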

/**
 * nhi_mailbox_mode() - Return current firmware operation mode
 * @nhi: Pointer to the NHI structure
 *
 * The function reads current firmware operation mode using NHI mailbox
 * registers and returns it to the caller.
 */
enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
{
	u32 val;

	val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
	val &= REG_OUTMAIL_CMD_OPMODE_MASK;
	val >>= REG_OUTMAIL_CMD_OPMODE_SHIFT;

	return (enum nhi_fw_mode)val;
}
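
/*
 * Example (assuming the nhi_fw_mode values declared in nhi.h):
 *
 *	if (nhi_mailbox_mode(nhi) == NHI_FW_SAFE_MODE)
 *		...the firmware is in safe mode and needs an upgrade...
 */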

static void nhi_interrupt_work(struct work_struct *work)
{
	struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
	int value = 0; /* Suppress uninitialized usage warning. */
	int bit;
	int hop = -1;
	int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
	struct tb_ring *ring;

	mutex_lock(&nhi->lock);

	/*
	 * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
	 * (TX, RX, RX overflow). We iterate over the bits and read new
	 * dwords as required. The registers are cleared on read.
	 */
	for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
		if (bit % 32 == 0)
			value = ioread32(nhi->iobase
					 + REG_RING_NOTIFY_BASE
					 + 4 * (bit / 32));
		if (++hop == nhi->hop_count) {
			hop = 0;
			type++;
		}
		if ((value & (1 << (bit % 32))) == 0)
			continue;
		if (type == 2) {
			dev_warn(&nhi->pdev->dev,
				 "RX overflow for ring %d\n",
				 hop);
			continue;
		}
		if (type == 0)
			ring = nhi->tx_rings[hop];
		else
			ring = nhi->rx_rings[hop];
		if (ring == NULL) {
			dev_warn(&nhi->pdev->dev,
				 "got interrupt for inactive %s ring %d\n",
				 type ? "RX" : "TX",
				 hop);
			continue;
		}
		/* we do not check ring->running, this is done in ring->work */
		schedule_work(&ring->work);
	}
	mutex_unlock(&nhi->lock);
}
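
/*
 * For example, with hop_count == 12 the loop above walks 36 bits across
 * two notify registers: TX rings occupy bits 0..11, RX rings bits 12..23
 * and RX overflow bits 24..35.
 */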

static irqreturn_t nhi_msi(int irq, void *data)
{
	struct tb_nhi *nhi = data;
	schedule_work(&nhi->interrupt_work);
	return IRQ_HANDLED;
}

static int nhi_suspend_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_suspend_noirq(tb);
}

static int nhi_resume_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	/*
	 * Check that the device is still there. It may be that the user
	 * unplugged last device which causes the host controller to go
	 * away on PCs.
	 */
	if (!pci_device_is_present(pdev))
		tb->nhi->going_away = true;

	return tb_domain_resume_noirq(tb);
}

static int nhi_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_suspend(tb);
}

static void nhi_complete(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	tb_domain_complete(tb);
}

static void nhi_shutdown(struct tb_nhi *nhi)
{
	int i;
	dev_info(&nhi->pdev->dev, "shutdown\n");

	for (i = 0; i < nhi->hop_count; i++) {
		if (nhi->tx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "TX ring %d is still active\n", i);
		if (nhi->rx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "RX ring %d is still active\n", i);
	}
	nhi_disable_interrupts(nhi);
	/*
	 * We have to release the irq before calling flush_work. Otherwise an
	 * already executing IRQ handler could call schedule_work again.
	 */
	if (!nhi->pdev->msix_enabled) {
		devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
		flush_work(&nhi->interrupt_work);
	}
	mutex_destroy(&nhi->lock);
	ida_destroy(&nhi->msix_ida);
}

static int nhi_init_msi(struct tb_nhi *nhi)
{
	struct pci_dev *pdev = nhi->pdev;
	int res, irq, nvec;

	/* In case someone left them on. */
	nhi_disable_interrupts(nhi);

	ida_init(&nhi->msix_ida);

	/*
	 * The NHI has 16 MSI-X vectors or a single MSI. We first try to
	 * get all MSI-X vectors and if we succeed, each ring will have
	 * one MSI-X. If for some reason that does not work out, we
	 * fallback to a single MSI.
	 */
	nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
				     PCI_IRQ_MSIX);
	if (nvec < 0) {
		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
		if (nvec < 0)
			return nvec;

		INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);

		irq = pci_irq_vector(nhi->pdev, 0);
		if (irq < 0)
			return irq;

		res = devm_request_irq(&pdev->dev, irq, nhi_msi,
				       IRQF_NO_SUSPEND, "thunderbolt", nhi);
		if (res) {
			dev_err(&pdev->dev, "request_irq failed, aborting\n");
			return res;
		}
	}

	return 0;
}

static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct tb_nhi *nhi;
	struct tb *tb;
	int res;

	res = pcim_enable_device(pdev);
	if (res) {
		dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
		return res;
	}

	res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
	if (res) {
		dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
		return res;
	}

	nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
	if (!nhi)
		return -ENOMEM;

	nhi->pdev = pdev;
	/* cannot fail - table is allocated in pcim_iomap_regions */
	nhi->iobase = pcim_iomap_table(pdev)[0];
	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
	if (nhi->hop_count != 12 && nhi->hop_count != 32)
		dev_warn(&pdev->dev, "unexpected hop count: %d\n",
			 nhi->hop_count);

	nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->tx_rings), GFP_KERNEL);
	nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->rx_rings), GFP_KERNEL);
	if (!nhi->tx_rings || !nhi->rx_rings)
		return -ENOMEM;

	res = nhi_init_msi(nhi);
	if (res) {
		dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
		return res;
	}

	mutex_init(&nhi->lock);

	pci_set_master(pdev);

	/* magic value - clock related? */
	iowrite32(3906250 / 10000, nhi->iobase + 0x38c00);

	tb = icm_probe(nhi);
	if (!tb)
		tb = tb_probe(nhi);
	if (!tb) {
		dev_err(&nhi->pdev->dev,
			"failed to determine connection manager, aborting\n");
		return -ENODEV;
	}

	dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");

	res = tb_domain_add(tb);
	if (res) {
		/*
		 * At this point the RX/TX rings might already have been
		 * activated. Do a proper shutdown.
		 */
		tb_domain_put(tb);
		nhi_shutdown(nhi);
		return -EIO;
	}
	pci_set_drvdata(pdev, tb);

	return 0;
}

static void nhi_remove(struct pci_dev *pdev)
{
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;

	tb_domain_remove(tb);
	nhi_shutdown(nhi);
}

/*
 * The tunneled pci bridges are siblings of us. Use resume_noirq to reenable
 * the tunnels asap. A corresponding pci quirk blocks the downstream bridges
 * resume_noirq until we are done.
 */
static const struct dev_pm_ops nhi_pm_ops = {
	.suspend_noirq = nhi_suspend_noirq,
	.resume_noirq = nhi_resume_noirq,
	.freeze_noirq = nhi_suspend_noirq, /*
					    * we just disable hotplug, the
					    * pci-tunnels stay alive.
					    */
	.restore_noirq = nhi_resume_noirq,
	.suspend = nhi_suspend,
	.freeze = nhi_suspend,
	.poweroff = nhi_suspend,
	.complete = nhi_complete,
};

static struct pci_device_id nhi_ids[] = {
	/*
	 * We have to specify class, the TB bridges use the same device and
	 * vendor (sub)id on gen 1 and gen 2 controllers.
	 */
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
		.subvendor = 0x2222, .subdevice = 0x1111,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
		.subvendor = 0x2222, .subdevice = 0x1111,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	},

	/* Thunderbolt 3 */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },

	{ 0,}
};

MODULE_DEVICE_TABLE(pci, nhi_ids);
MODULE_LICENSE("GPL");

static struct pci_driver nhi_driver = {
	.name = "thunderbolt",
	.id_table = nhi_ids,
	.probe = nhi_probe,
	.remove = nhi_remove,
	.driver.pm = &nhi_pm_ops,
};

static int __init nhi_init(void)
{
	int ret;

	ret = tb_domain_init();
	if (ret)
		return ret;
	ret = pci_register_driver(&nhi_driver);
	if (ret)
		tb_domain_exit();
	return ret;
}

static void __exit nhi_unload(void)
{
	pci_unregister_driver(&nhi_driver);
	tb_domain_exit();
}

module_init(nhi_init);
module_exit(nhi_unload);