/*
 * Volume Management Device driver
 * Copyright (c) 2015, Intel Corporation.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

#include <asm/irqdomain.h>
#include <asm/device.h>
#include <asm/msi.h>
#include <asm/msidef.h>

#define VMD_CFGBAR	0
#define VMD_MEMBAR1	2
#define VMD_MEMBAR2	4

/*
 * Lock for manipulating VMD IRQ lists.
 */
static DEFINE_RAW_SPINLOCK(list_lock);

/**
 * struct vmd_irq - private data to map driver IRQ to the VMD shared vector
 * @node:	list item for parent traversal.
 * @irq:	back pointer to parent.
 * @enabled:	true if driver enabled IRQ
 * @virq:	the virtual IRQ value provided to the requesting driver.
 *
 * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped
 * to a VMD IRQ using this structure.
 */
struct vmd_irq {
	struct list_head	node;
	struct vmd_irq_list	*irq;
	bool			enabled;
	unsigned int		virq;
};

/**
 * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector
 * @irq_list:	the list of IRQs the VMD one demuxes to.
 * @srcu:	SRCU struct for local synchronization.
 * @count:	number of child IRQs assigned to this vector; used to track
 *		sharing.
 */
struct vmd_irq_list {
	struct list_head	irq_list;
	struct srcu_struct	srcu;
	unsigned int		count;
};

struct vmd_dev {
	struct pci_dev		*dev;

	spinlock_t		cfg_lock;
	char __iomem		*cfgbar;

	int			msix_count;
	struct vmd_irq_list	*irqs;

	struct pci_sysdata	sysdata;
	struct resource		resources[3];
	struct irq_domain	*irq_domain;
	struct pci_bus		*bus;

#ifdef CONFIG_X86_DEV_DMA_OPS
	struct dma_map_ops	dma_ops;
	struct dma_domain	dma_domain;
#endif
};

static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
{
	return container_of(bus->sysdata, struct vmd_dev, sysdata);
}

static inline unsigned int index_from_irqs(struct vmd_dev *vmd,
					   struct vmd_irq_list *irqs)
{
	return irqs - vmd->irqs;
}

/*
 * Drivers managing a device in a VMD domain allocate their own IRQs as
 * before, but the MSI entry for the hardware it's driving will be programmed
 * with a destination ID for the VMD MSI-X table.  The VMD muxes interrupts in
 * its domain into one of its own, and the VMD driver de-multiplexes these for
 * the handlers sharing that VMD IRQ.  The vmd irq_domain provides the
 * operations and irq_chip to set this up.
 */
static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct vmd_irq *vmdirq = data->chip_data;
	struct vmd_irq_list *irq = vmdirq->irq;
	struct vmd_dev *vmd = irq_data_get_irq_handler_data(data);

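	/*
	 * The index of the chosen VMD vector rides in the destination ID
	 * field of the MSI address; VMD maps it to the matching entry of
	 * its own MSI-X table.
	 */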
	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->address_lo = MSI_ADDR_BASE_LO |
			  MSI_ADDR_DEST_ID(index_from_irqs(vmd, irq));
	msg->data = 0;
}

/*
 * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops.
 */
static void vmd_irq_enable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&list_lock, flags);
	WARN_ON(vmdirq->enabled);
	list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
	vmdirq->enabled = true;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	data->chip->irq_unmask(data);
}

static void vmd_irq_disable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;
	unsigned long flags;

	data->chip->irq_mask(data);

	raw_spin_lock_irqsave(&list_lock, flags);
	if (vmdirq->enabled) {
		list_del_rcu(&vmdirq->node);
		vmdirq->enabled = false;
	}
	raw_spin_unlock_irqrestore(&list_lock, flags);
}

/*
 * XXX: Stop our MSI descriptor from being rewritten somewhere deeper in the
 * kernel code.
 */
static int vmd_irq_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	return -EINVAL;
}

static struct irq_chip vmd_msi_controller = {
	.name			= "VMD-MSI",
	.irq_enable		= vmd_irq_enable,
	.irq_disable		= vmd_irq_disable,
	.irq_compose_msi_msg	= vmd_compose_msi_msg,
	.irq_set_affinity	= vmd_irq_set_affinity,
};

static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
				     msi_alloc_info_t *arg)
{
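	/*
	 * The real hwirq is assigned later in vmd_msi_init() from the
	 * parent VMD vector, so nothing useful can be computed here.
	 */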
	return 0;
}

/*
 * XXX: We can be even smarter selecting the best IRQ once we solve the
 * affinity problem.
 */
static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc)
{
	int i, best = 1;
	unsigned long flags;

	if (pci_is_bridge(msi_desc_to_pci_dev(desc)) || vmd->msix_count == 1)
		return &vmd->irqs[0];

	raw_spin_lock_irqsave(&list_lock, flags);
	for (i = 1; i < vmd->msix_count; i++)
		if (vmd->irqs[i].count < vmd->irqs[best].count)
			best = i;
	vmd->irqs[best].count++;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	return &vmd->irqs[best];
}

static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
			unsigned int virq, irq_hw_number_t hwirq,
			msi_alloc_info_t *arg)
{
	struct msi_desc *desc = arg->desc;
	struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
	struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
	unsigned int index, vector;

	if (!vmdirq)
		return -ENOMEM;

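	/* Bind this MSI to the least-loaded VMD vector. */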
	INIT_LIST_HEAD(&vmdirq->node);
	vmdirq->irq = vmd_next_irq(vmd, desc);
	vmdirq->virq = virq;
	index = index_from_irqs(vmd, vmdirq->irq);
	vector = pci_irq_vector(vmd->dev, index);

	irq_domain_set_info(domain, virq, vector, info->chip, vmdirq,
			    handle_untracked_irq, vmd, NULL);
	return 0;
}

static void vmd_msi_free(struct irq_domain *domain,
			 struct msi_domain_info *info, unsigned int virq)
{
	struct vmd_irq *vmdirq = irq_get_chip_data(virq);
	unsigned long flags;

	synchronize_srcu(&vmdirq->irq->srcu);

	/* XXX: Potential optimization to rebalance */
	raw_spin_lock_irqsave(&list_lock, flags);
	vmdirq->irq->count--;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	kfree(vmdirq);
}

static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *arg)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);

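	/* Cap the request at the number of vectors the VMD hardware has. */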
	if (nvec > vmd->msix_count)
		return vmd->msix_count;

	memset(arg, 0, sizeof(*arg));
	return 0;
}

static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
	arg->desc = desc;
}

static struct msi_domain_ops vmd_msi_domain_ops = {
	.get_hwirq	= vmd_get_hwirq,
	.msi_init	= vmd_msi_init,
	.msi_free	= vmd_msi_free,
	.msi_prepare	= vmd_msi_prepare,
	.set_desc	= vmd_set_desc,
};

static struct msi_domain_info vmd_msi_domain_info = {
	.flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
			  MSI_FLAG_PCI_MSIX,
	.ops		= &vmd_msi_domain_ops,
	.chip		= &vmd_msi_controller,
};

#ifdef CONFIG_X86_DEV_DMA_OPS
/*
 * VMD replaces the requester ID with its own.  DMA mappings for devices in a
 * VMD domain need to be mapped for the VMD, not the device requiring
 * the mapping.
 */
static struct device *to_vmd_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);

	return &vmd->dev->dev;
}

static const struct dma_map_ops *vmd_dma_ops(struct device *dev)
{
	return get_dma_ops(to_vmd_dev(dev));
}

static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
		       gfp_t flag, unsigned long attrs)
{
	return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag,
				       attrs);
}

static void vmd_free(struct device *dev, size_t size, void *vaddr,
		     dma_addr_t addr, unsigned long attrs)
{
	return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr,
				      attrs);
}

static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t addr, size_t size,
		    unsigned long attrs)
{
	return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr,
				      size, attrs);
}

static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
			   void *cpu_addr, dma_addr_t addr, size_t size,
			   unsigned long attrs)
{
	return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr,
					     addr, size, attrs);
}

static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs)
{
	return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size,
					  dir, attrs);
}

static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
			   enum dma_data_direction dir, unsigned long attrs)
{
	vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs);
}

static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction dir, unsigned long attrs)
{
	return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
}

static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			 enum dma_data_direction dir, unsigned long attrs)
{
	vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
}

static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
				    size_t size, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
}

static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
				       size_t size, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_single_for_device(to_vmd_dev(dev), addr, size,
						 dir);
}

static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
}

static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
}

static int vmd_mapping_error(struct device *dev, dma_addr_t addr)
{
	return vmd_dma_ops(dev)->mapping_error(to_vmd_dev(dev), addr);
}

static int vmd_dma_supported(struct device *dev, u64 mask)
{
	return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask);
}

#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
static u64 vmd_get_required_mask(struct device *dev)
{
	return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev));
}
#endif

static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
{
	struct dma_domain *domain = &vmd->dma_domain;

	if (get_dma_ops(&vmd->dev->dev))
		del_dma_domain(domain);
}

#define ASSIGN_VMD_DMA_OPS(source, dest, fn)	\
	do {					\
		if (source->fn)			\
			dest->fn = vmd_##fn;	\
	} while (0)

static void vmd_setup_dma_ops(struct vmd_dev *vmd)
{
	const struct dma_map_ops *source = get_dma_ops(&vmd->dev->dev);
	struct dma_map_ops *dest = &vmd->dma_ops;
	struct dma_domain *domain = &vmd->dma_domain;

	domain->domain_nr = vmd->sysdata.domain;
	domain->dma_ops = dest;

	if (!source)
		return;
	ASSIGN_VMD_DMA_OPS(source, dest, alloc);
	ASSIGN_VMD_DMA_OPS(source, dest, free);
	ASSIGN_VMD_DMA_OPS(source, dest, mmap);
	ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable);
	ASSIGN_VMD_DMA_OPS(source, dest, map_page);
	ASSIGN_VMD_DMA_OPS(source, dest, unmap_page);
	ASSIGN_VMD_DMA_OPS(source, dest, map_sg);
	ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_cpu);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
	ASSIGN_VMD_DMA_OPS(source, dest, mapping_error);
	ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
#endif
	add_dma_domain(domain);
}
#undef ASSIGN_VMD_DMA_OPS
#else
static void vmd_teardown_dma_ops(struct vmd_dev *vmd) {}
static void vmd_setup_dma_ops(struct vmd_dev *vmd) {}
#endif

static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
				  unsigned int devfn, int reg, int len)
{
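	/* Config space is laid out ECAM-style: 1MB per bus, 4KB per function. */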
	char __iomem *addr = vmd->cfgbar +
			     (bus->number << 20) + (devfn << 12) + reg;

	if ((addr - vmd->cfgbar) + len >=
	    resource_size(&vmd->dev->resource[VMD_CFGBAR]))
		return NULL;

	return addr;
}

/*
 * CPU may deadlock if config space is not serialized on some versions of this
 * hardware, so all config space access is done under a spinlock.
 */
static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
			int len, u32 *value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		*value = readb(addr);
		break;
	case 2:
		*value = readw(addr);
		break;
	case 4:
		*value = readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

/*
 * VMD hardware converts non-posted config writes to posted memory writes. The
 * read-back in this function forces the completion so it returns only after
 * the config space was written, as expected.
 */
static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
			 int len, u32 value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		writeb(value, addr);
		readb(addr);
		break;
	case 2:
		writew(value, addr);
		readw(addr);
		break;
	case 4:
		writel(value, addr);
		readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

static struct pci_ops vmd_ops = {
	.read		= vmd_pci_read,
	.write		= vmd_pci_write,
};

static void vmd_attach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1];
	vmd->dev->resource[VMD_MEMBAR2].child = &vmd->resources[2];
}

static void vmd_detach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = NULL;
	vmd->dev->resource[VMD_MEMBAR2].child = NULL;
}

/*
 * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
 * Per ACPI r6.0, sec 6.5.6, _SEG returns an integer of DWORD length.
 */
static int vmd_find_free_domain(void)
{
	int domain = 0xffff;
	struct pci_bus *bus = NULL;

	while ((bus = pci_find_next_bus(bus)) != NULL)
		domain = max_t(int, domain, pci_domain_nr(bus));
	return domain + 1;
}

static int vmd_enable_domain(struct vmd_dev *vmd)
{
	struct pci_sysdata *sd = &vmd->sysdata;
	struct fwnode_handle *fn;
	struct resource *res;
	u32 upper_bits;
	unsigned long flags;
	LIST_HEAD(resources);

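	/*
	 * CFGBAR provides 1MB of config space per bus, so its size bounds
	 * the bus range of the new domain.
	 */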
	res = &vmd->dev->resource[VMD_CFGBAR];
	vmd->resources[0] = (struct resource) {
		.name  = "VMD CFGBAR",
		.start = 0,
		.end   = (resource_size(res) >> 20) - 1,
		.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
	};

	/*
	 * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can
	 * put 32-bit resources in the window.
	 *
	 * There's no hardware reason why a 64-bit window *couldn't*
	 * contain a 32-bit resource, but pbus_size_mem() computes the
	 * bridge window size assuming a 64-bit window will contain no
	 * 32-bit resources.  __pci_assign_resource() enforces that
	 * artificial restriction to make sure everything will fit.
	 *
	 * The only way we could use a 64-bit non-prefetchable MEMBAR is
	 * if its address is <4GB so we can convert it to a 32-bit
	 * resource.  To be visible to devices on all buses behind VMD,
	 * the upper 32 bits of the resource must be the same on all of
	 * them, so if the window is below 4GB just clear
	 * IORESOURCE_MEM_64.
	 */
	res = &vmd->dev->resource[VMD_MEMBAR1];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[1] = (struct resource) {
		.name  = "VMD MEMBAR1",
		.start = res->start,
		.end   = res->end,
		.flags = flags,
		.parent = res,
	};

	res = &vmd->dev->resource[VMD_MEMBAR2];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[2] = (struct resource) {
		.name  = "VMD MEMBAR2",
		.start = res->start + 0x2000,
		.end   = res->end,
		.flags = flags,
		.parent = res,
	};

	sd->vmd_domain = true;
	sd->domain = vmd_find_free_domain();
	if (sd->domain < 0)
		return sd->domain;

	sd->node = pcibus_to_node(vmd->dev->bus);

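	/*
	 * Build the VMD MSI domain on top of the x86 vector domain; child
	 * device MSIs are allocated there and muxed onto VMD's own vectors.
	 */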
	fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
	if (!fn)
		return -ENODEV;

	vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info,
						    x86_vector_domain);
	irq_domain_free_fwnode(fn);
	if (!vmd->irq_domain)
		return -ENODEV;

	pci_add_resource(&resources, &vmd->resources[0]);
	pci_add_resource(&resources, &vmd->resources[1]);
	pci_add_resource(&resources, &vmd->resources[2]);
	vmd->bus = pci_create_root_bus(&vmd->dev->dev, 0, &vmd_ops, sd,
				       &resources);
	if (!vmd->bus) {
		pci_free_resource_list(&resources);
		irq_domain_remove(vmd->irq_domain);
		return -ENODEV;
	}

	vmd_attach_resources(vmd);
	vmd_setup_dma_ops(vmd);
	dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
	pci_rescan_bus(vmd->bus);

	WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
			       "domain"), "Can't create symlink to domain\n");
	return 0;
}

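/*
 * Demultiplex a VMD vector: walk the (SRCU-protected) list of child IRQs
 * attached to this vector and invoke each handler.
 */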
static irqreturn_t vmd_irq(int irq, void *data)
{
	struct vmd_irq_list *irqs = data;
	struct vmd_irq *vmdirq;
	int idx;

	idx = srcu_read_lock(&irqs->srcu);
	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
		generic_handle_irq(vmdirq->virq);
	srcu_read_unlock(&irqs->srcu, idx);

	return IRQ_HANDLED;
}

static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct vmd_dev *vmd;
	int i, err;

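	/* The CFGBAR must cover at least one bus (1MB of config space). */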
	if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
		return -ENOMEM;

	vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL);
	if (!vmd)
		return -ENOMEM;

	vmd->dev = dev;
	err = pcim_enable_device(dev);
	if (err < 0)
		return err;

	vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0);
	if (!vmd->cfgbar)
		return -ENOMEM;

	pci_set_master(dev);
	if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)))
		return -ENODEV;

	vmd->msix_count = pci_msix_vec_count(dev);
	if (vmd->msix_count < 0)
		return -ENODEV;

	vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count,
						PCI_IRQ_MSIX);
	if (vmd->msix_count < 0)
		return vmd->msix_count;

	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
				 GFP_KERNEL);
	if (!vmd->irqs)
		return -ENOMEM;

	for (i = 0; i < vmd->msix_count; i++) {
		err = init_srcu_struct(&vmd->irqs[i].srcu);
		if (err)
			return err;

		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
				       vmd_irq, IRQF_NO_THREAD,
				       "vmd", &vmd->irqs[i]);
		if (err)
			return err;
	}

	spin_lock_init(&vmd->cfg_lock);
	pci_set_drvdata(dev, vmd);
	err = vmd_enable_domain(vmd);
	if (err)
		return err;

	dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n",
		 vmd->sysdata.domain);
	return 0;
}

static void vmd_cleanup_srcu(struct vmd_dev *vmd)
{
	int i;

	for (i = 0; i < vmd->msix_count; i++)
		cleanup_srcu_struct(&vmd->irqs[i].srcu);
}

static void vmd_remove(struct pci_dev *dev)
{
	struct vmd_dev *vmd = pci_get_drvdata(dev);

	vmd_detach_resources(vmd);
	sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
	pci_stop_root_bus(vmd->bus);
	pci_remove_root_bus(vmd->bus);
	vmd_cleanup_srcu(vmd);
	vmd_teardown_dma_ops(vmd);
	irq_domain_remove(vmd->irq_domain);
}

#ifdef CONFIG_PM_SLEEP
static int vmd_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = pci_get_drvdata(pdev);
	int i;

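	/* Release the demux IRQs here; vmd_resume() re-requests them. */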
	for (i = 0; i < vmd->msix_count; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);

	pci_save_state(pdev);
	return 0;
}

static int vmd_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = pci_get_drvdata(pdev);
	int err, i;

	for (i = 0; i < vmd->msix_count; i++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
				       vmd_irq, IRQF_NO_THREAD,
				       "vmd", &vmd->irqs[i]);
		if (err)
			return err;
	}

	pci_restore_state(pdev);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);

static const struct pci_device_id vmd_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x201d),},
	{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);

static struct pci_driver vmd_drv = {
	.name		= "vmd",
	.id_table	= vmd_ids,
	.probe		= vmd_probe,
	.remove		= vmd_remove,
	.driver		= {
		.pm	= &vmd_dev_pm_ops,
	},
};
module_pci_driver(vmd_drv);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.6");