/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"

#define PCI_DEVICE_ID_VMWARE_VMCI	0x0740

#define VMCI_UTIL_NUM_RESOURCES 1

static bool vmci_disable_msi;
module_param_named(disable_msi, vmci_disable_msi, bool, 0);
MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");

static bool vmci_disable_msix;
module_param_named(disable_msix, vmci_disable_msix, bool, 0);
MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");

static u32 ctx_update_sub_id = VMCI_INVALID_ID;
static u32 vm_context_id = VMCI_INVALID_ID;

struct vmci_guest_device {
	struct device *dev;
	void __iomem *iobase;

	unsigned int irq;
	unsigned int intr_type;
	bool exclusive_vectors;
	struct msix_entry msix_entries[VMCI_MAX_INTRS];

	struct tasklet_struct datagram_tasklet;
	struct tasklet_struct bm_tasklet;

	void *data_buffer;
	void *notification_bitmap;
	dma_addr_t notification_base;
};

struct pci_dev *vmci_pdev;
static struct vmci_guest_device *vmci_dev_g;
static DEFINE_SPINLOCK(vmci_dev_spinlock);

static atomic_t vmci_num_guest_devices = ATOMIC_INIT(0);

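/* True as long as at least one VMCI guest device has been probed. */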
bool vmci_guest_code_active(void)
{
	return atomic_read(&vmci_num_guest_devices) != 0;
}

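/*
 * Return the context ID the hypervisor assigned to this VM. The ID is
 * fetched once with a VMCI_GET_CONTEXT_ID datagram and cached in
 * vm_context_id.
 */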
u32 vmci_get_vm_context_id(void)
{
	if (vm_context_id == VMCI_INVALID_ID) {
		struct vmci_datagram get_cid_msg;
		get_cid_msg.dst =
		    vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
				     VMCI_GET_CONTEXT_ID);
		get_cid_msg.src = VMCI_ANON_SRC_HANDLE;
		get_cid_msg.payload_size = 0;
		vm_context_id = vmci_send_datagram(&get_cid_msg);
	}
	return vm_context_id;
}

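/*
 * Send a datagram to the host: the datagram is written byte-wise to the
 * device's data-out port and the hypercall result is read back from the
 * result register.
 */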
int vmci_send_datagram(struct vmci_datagram *dg)
{
	unsigned long flags;
	int result;

	if (dg == NULL)
		return VMCI_ERROR_INVALID_ARGS;

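	/*
	 * Serialize against other senders and the interrupt path: the
	 * datagram is written to the port as a stream of bytes, so the
	 * device lock is held with interrupts disabled for the whole
	 * write plus the result read.
	 */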
	spin_lock_irqsave(&vmci_dev_spinlock, flags);

	if (vmci_dev_g) {
		iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR,
			     dg, VMCI_DG_SIZE(dg));
		result = ioread32(vmci_dev_g->iobase + VMCI_RESULT_LOW_ADDR);
	} else {
		result = VMCI_ERROR_UNAVAILABLE;
	}

	spin_unlock_irqrestore(&vmci_dev_spinlock, flags);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_send_datagram);

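/*
 * VMCI_EVENT_CTX_ID_UPDATE handler: the hypervisor assigned the VM a new
 * context ID, so refresh the cached vm_context_id.
 */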
static void vmci_guest_cid_update(u32 sub_id,
				  const struct vmci_event_data *event_data,
				  void *client_data)
{
	const struct vmci_event_payld_ctx *ev_payload =
				vmci_event_data_const_payload(event_data);

	if (sub_id != ctx_update_sub_id) {
		pr_devel("Invalid subscriber (ID=0x%x)\n", sub_id);
		return;
	}

	if (!event_data || ev_payload->context_id == VMCI_INVALID_ID) {
		pr_devel("Invalid event data\n");
		return;
	}

	pr_devel("Updating context from (ID=0x%x) to (ID=0x%x) on event (type=%d)\n",
		 vm_context_id, ev_payload->context_id, event_data->event);

	vm_context_id = ev_payload->context_id;
}

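/*
 * Verify that the host supports the resources we need. Currently only
 * VMCI_GET_CONTEXT_ID is required, and there is no fallback, so the probe
 * fails if it is unavailable.
 */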
static int vmci_check_host_caps(struct pci_dev *pdev)
{
	bool result;
	struct vmci_resource_query_msg *msg;
	u32 msg_size = sizeof(struct vmci_resource_query_hdr) +
				VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
	struct vmci_datagram *check_msg;

	check_msg = kmalloc(msg_size, GFP_KERNEL);
	if (!check_msg) {
		dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
		return -ENOMEM;
	}

	check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					  VMCI_RESOURCES_QUERY);
	check_msg->src = VMCI_ANON_SRC_HANDLE;
	check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE;
	msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(check_msg);

	msg->num_resources = VMCI_UTIL_NUM_RESOURCES;
	msg->resources[0] = VMCI_GET_CONTEXT_ID;

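	/*
	 * The resources query is expected to return a mask with one bit
	 * per supported resource; with a single resource queried, 0x01
	 * means it is available.
	 */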
	result = vmci_send_datagram(check_msg) == 0x01;
	kfree(check_msg);

	dev_dbg(&pdev->dev, "%s: Host capability check: %s\n",
		__func__, result ? "PASSED" : "FAILED");

	return result ? 0 : -ENXIO;
}

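/*
 * Datagram tasklet: read incoming datagrams from the data-in port one
 * PAGE_SIZE chunk at a time and dispatch them. Event datagrams from the
 * hypervisor go to the event subsystem, everything else to the registered
 * guest datagram handlers; datagrams larger than the receive buffer are
 * dropped.
 */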
static void vmci_dispatch_dgs(unsigned long data)
{
	struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data;
	u8 *dg_in_buffer = vmci_dev->data_buffer;
	struct vmci_datagram *dg;
	size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
	size_t current_dg_in_buffer_size = PAGE_SIZE;
	size_t remaining_bytes;

	BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE);

	ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
		    vmci_dev->data_buffer, current_dg_in_buffer_size);
	dg = (struct vmci_datagram *)dg_in_buffer;
	remaining_bytes = current_dg_in_buffer_size;

	while (dg->dst.resource != VMCI_INVALID_ID ||
	       remaining_bytes > PAGE_SIZE) {
		unsigned dg_in_size;

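		/*
		 * An invalid destination resource marks the end of the
		 * datagrams in the current page; the next datagram, if
		 * any, starts at the following page boundary.
		 */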
		if (dg->dst.resource == VMCI_INVALID_ID) {
			dg = (struct vmci_datagram *)roundup(
				(uintptr_t)dg + 1, PAGE_SIZE);
			remaining_bytes =
				(size_t)(dg_in_buffer +
					 current_dg_in_buffer_size -
					 (u8 *)dg);
			continue;
		}

		dg_in_size = VMCI_DG_SIZE_ALIGNED(dg);

		if (dg_in_size <= dg_in_buffer_size) {
			int result;

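			/*
			 * Only part of the datagram is in the buffer:
			 * move what we have to the front, switch to the
			 * full-sized buffer and read the rest from the
			 * device before dispatching.
			 */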
			if (dg_in_size > remaining_bytes) {
				if (remaining_bytes !=
				    current_dg_in_buffer_size) {
					memmove(dg_in_buffer, dg_in_buffer +
						current_dg_in_buffer_size -
						remaining_bytes,
						remaining_bytes);
					dg = (struct vmci_datagram *)
					    dg_in_buffer;
				}

				if (current_dg_in_buffer_size !=
				    dg_in_buffer_size)
					current_dg_in_buffer_size =
					    dg_in_buffer_size;

				ioread8_rep(vmci_dev->iobase +
						VMCI_DATA_IN_ADDR,
					    vmci_dev->data_buffer +
						remaining_bytes,
					    current_dg_in_buffer_size -
						remaining_bytes);
			}

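			/*
			 * Route the datagram: events from the hypervisor
			 * go to the event subsystem, everything else to
			 * the guest datagram handlers.
			 */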
			if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
			    dg->dst.resource == VMCI_EVENT_HANDLER) {
				result = vmci_event_dispatch(dg);
			} else {
				result = vmci_datagram_invoke_guest_handler(dg);
			}
			if (result < VMCI_SUCCESS)
				dev_dbg(vmci_dev->dev,
					"Datagram with resource (ID=0x%x) failed (err=%d)\n",
					dg->dst.resource, result);

			dg = (struct vmci_datagram *)((u8 *)dg +
						      dg_in_size);
		} else {
			size_t bytes_to_skip;

			dev_dbg(vmci_dev->dev,
				"Failed to receive datagram (size=%u bytes)\n",
				dg_in_size);

			bytes_to_skip = dg_in_size - remaining_bytes;
			if (current_dg_in_buffer_size != dg_in_buffer_size)
				current_dg_in_buffer_size = dg_in_buffer_size;

			for (;;) {
				ioread8_rep(vmci_dev->iobase +
						VMCI_DATA_IN_ADDR,
					    vmci_dev->data_buffer,
					    current_dg_in_buffer_size);
				if (bytes_to_skip <= current_dg_in_buffer_size)
					break;

				bytes_to_skip -= current_dg_in_buffer_size;
			}
			dg = (struct vmci_datagram *)(dg_in_buffer +
						      bytes_to_skip);
		}

		remaining_bytes =
			(size_t) (dg_in_buffer + current_dg_in_buffer_size -
				  (u8 *)dg);

		if (remaining_bytes < VMCI_DG_HEADERSIZE) {
			ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
				    vmci_dev->data_buffer,
				    current_dg_in_buffer_size);
			dg = (struct vmci_datagram *)dg_in_buffer;
			remaining_bytes = current_dg_in_buffer_size;
		}
	}
}

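/*
 * Notification tasklet: scan the shared notification bitmap and invoke the
 * doorbell callbacks for any entries the hypervisor has marked pending.
 */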
static void vmci_process_bitmap(unsigned long data)
{
	struct vmci_guest_device *dev = (struct vmci_guest_device *)data;

	if (!dev->notification_bitmap) {
		dev_dbg(dev->dev, "No bitmap present in %s\n", __func__);
		return;
	}

	vmci_dbell_scan_notification_entries(dev->notification_bitmap);
}

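/*
 * Enable MSI-X: try for the full set of exclusive vectors first; if fewer
 * are available, fall back to a single shared vector.
 */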
static int vmci_enable_msix(struct pci_dev *pdev,
			    struct vmci_guest_device *vmci_dev)
{
	int i;
	int result;

	for (i = 0; i < VMCI_MAX_INTRS; ++i) {
		vmci_dev->msix_entries[i].entry = i;
		vmci_dev->msix_entries[i].vector = i;
	}

	result = pci_enable_msix(pdev, vmci_dev->msix_entries, VMCI_MAX_INTRS);
	if (result == 0)
		vmci_dev->exclusive_vectors = true;
	else if (result > 0)
		result = pci_enable_msix(pdev, vmci_dev->msix_entries, 1);

	return result;
}

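/*
 * Interrupt handler for INTx, MSI and the first MSI-X vector. Schedules
 * the datagram and/or notification tasklets as indicated by the device.
 */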
static irqreturn_t vmci_interrupt(int irq, void *_dev)
{
	struct vmci_guest_device *dev = _dev;

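	/*
	 * With exclusive MSI-X vectors this vector only signals incoming
	 * datagrams, so schedule the datagram tasklet directly. Otherwise
	 * read (and thereby acknowledge) the ICR to see which causes are
	 * pending.
	 */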
	if (dev->intr_type == VMCI_INTR_TYPE_MSIX && dev->exclusive_vectors) {
		tasklet_schedule(&dev->datagram_tasklet);
	} else {
		unsigned int icr;

		icr = ioread32(dev->iobase + VMCI_ICR_ADDR);
		if (icr == 0 || icr == ~0)
			return IRQ_NONE;

		if (icr & VMCI_ICR_DATAGRAM) {
			tasklet_schedule(&dev->datagram_tasklet);
			icr &= ~VMCI_ICR_DATAGRAM;
		}

		if (icr & VMCI_ICR_NOTIFICATION) {
			tasklet_schedule(&dev->bm_tasklet);
			icr &= ~VMCI_ICR_NOTIFICATION;
		}

		if (icr != 0)
			dev_warn(dev->dev,
				 "Ignoring unknown interrupt cause (%d)\n",
				 icr);
	}

	return IRQ_HANDLED;
}

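/*
 * Handler for the second MSI-X vector, used only with exclusive vectors:
 * it signals notification bitmap activity, so schedule that tasklet.
 */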
static irqreturn_t vmci_interrupt_bm(int irq, void *_dev)
{
	struct vmci_guest_device *dev = _dev;

	tasklet_schedule(&dev->bm_tasklet);

	return IRQ_HANDLED;
}

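/*
 * Probe a VMCI guest PCI device: map its I/O region, negotiate
 * capabilities with the host, set up interrupts and enable the device.
 */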
static int vmci_guest_probe_device(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	struct vmci_guest_device *vmci_dev;
	void __iomem *iobase;
	unsigned int capabilities;
	unsigned long cmd;
	int vmci_err;
	int error;

	dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n");

	error = pcim_enable_device(pdev);
	if (error) {
		dev_err(&pdev->dev,
			"Failed to enable VMCI device: %d\n", error);
		return error;
	}

	error = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
	if (error) {
		dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
		return error;
	}

	iobase = pcim_iomap_table(pdev)[0];

	dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, irq %u\n",
		 (unsigned long)iobase, pdev->irq);

	vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL);
	if (!vmci_dev) {
		dev_err(&pdev->dev,
			"Can't allocate memory for VMCI device\n");
		return -ENOMEM;
	}

	vmci_dev->dev = &pdev->dev;
	vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
	vmci_dev->exclusive_vectors = false;
	vmci_dev->iobase = iobase;

	tasklet_init(&vmci_dev->datagram_tasklet,
		     vmci_dispatch_dgs, (unsigned long)vmci_dev);
	tasklet_init(&vmci_dev->bm_tasklet,
		     vmci_process_bitmap, (unsigned long)vmci_dev);

	vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE);
	if (!vmci_dev->data_buffer) {
		dev_err(&pdev->dev,
			"Can't allocate memory for datagram buffer\n");
		return -ENOMEM;
	}

	pci_set_master(pdev);

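	/*
	 * Verify that the device supports the capabilities we need.
	 * Datagrams are mandatory; without them the device is useless to
	 * us, so fail the probe.
	 */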
	capabilities = ioread32(vmci_dev->iobase + VMCI_CAPS_ADDR);
	if (!(capabilities & VMCI_CAPS_DATAGRAM)) {
		dev_err(&pdev->dev, "Device does not support datagrams\n");
		error = -ENXIO;
		goto err_free_data_buffer;
	}

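	/*
	 * If the device also supports notifications, try to allocate a
	 * page for the notification bitmap shared with the hypervisor.
	 * An allocation failure only produces a warning here.
	 */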
	if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
		vmci_dev->notification_bitmap = dma_alloc_coherent(
			&pdev->dev, PAGE_SIZE, &vmci_dev->notification_base,
			GFP_KERNEL);
		if (!vmci_dev->notification_bitmap) {
			dev_warn(&pdev->dev,
				 "Unable to allocate notification bitmap\n");
		} else {
			memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE);
			capabilities |= VMCI_CAPS_NOTIFICATIONS;
		}
	}

	dev_info(&pdev->dev, "Using capabilities 0x%x\n", capabilities);

	/* Let the host know which capabilities we intend to use. */
	iowrite32(capabilities, vmci_dev->iobase + VMCI_CAPS_ADDR);

	/* Set up the global device so we can start sending datagrams. */
	spin_lock_irq(&vmci_dev_spinlock);
	vmci_dev_g = vmci_dev;
	vmci_pdev = pdev;
	spin_unlock_irq(&vmci_dev_spinlock);

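	/*
	 * When notifications are in use, register the bitmap's physical
	 * page number (PPN) with the hypervisor so it can post
	 * notifications there. Failure to register aborts the probe.
	 */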
	if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
		unsigned long bitmap_ppn =
			vmci_dev->notification_base >> PAGE_SHIFT;
		if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
			dev_warn(&pdev->dev,
				 "VMCI device unable to register notification bitmap with PPN 0x%x\n",
				 (u32) bitmap_ppn);
			error = -ENXIO;
			goto err_remove_vmci_dev_g;
		}
	}

	error = vmci_check_host_caps(pdev);
	if (error)
		goto err_remove_bitmap;

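	/*
	 * Subscribe to context ID update events so the cached VM context
	 * ID stays current; a failure here is logged but not fatal.
	 */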
	vmci_err = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
					vmci_guest_cid_update, NULL,
					&ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to subscribe to event (type=%d): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, vmci_err);

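	/*
	 * Set up interrupts: prefer MSI-X (exclusive vectors if the
	 * device grants them), then MSI, and fall back to legacy INTx.
	 */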
	if (!vmci_disable_msix && !vmci_enable_msix(pdev, vmci_dev)) {
		vmci_dev->intr_type = VMCI_INTR_TYPE_MSIX;
		vmci_dev->irq = vmci_dev->msix_entries[0].vector;
	} else if (!vmci_disable_msi && !pci_enable_msi(pdev)) {
		vmci_dev->intr_type = VMCI_INTR_TYPE_MSI;
		vmci_dev->irq = pdev->irq;
	} else {
		vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
		vmci_dev->irq = pdev->irq;
	}

	error = request_irq(vmci_dev->irq, vmci_interrupt, IRQF_SHARED,
			    KBUILD_MODNAME, vmci_dev);
	if (error) {
		dev_err(&pdev->dev, "Irq %u in use: %d\n",
			vmci_dev->irq, error);
		goto err_disable_msi;
	}

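	/*
	 * With exclusive MSI-X vectors the second vector is dedicated to
	 * notification bitmap interrupts and gets its own handler.
	 */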
	if (vmci_dev->exclusive_vectors) {
		error = request_irq(vmci_dev->msix_entries[1].vector,
				    vmci_interrupt_bm, 0, KBUILD_MODNAME,
				    vmci_dev);
		if (error) {
			dev_err(&pdev->dev,
				"Failed to allocate irq %u: %d\n",
				vmci_dev->msix_entries[1].vector, error);
			goto err_free_irq;
		}
	}

	dev_dbg(&pdev->dev, "Registered device\n");

	atomic_inc(&vmci_num_guest_devices);

	/* Enable specific interrupt bits. */
	cmd = VMCI_IMR_DATAGRAM;
	if (capabilities & VMCI_CAPS_NOTIFICATIONS)
		cmd |= VMCI_IMR_NOTIFICATION;
	iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR);

	/* Enable interrupts. */
	iowrite32(VMCI_CONTROL_INT_ENABLE,
		  vmci_dev->iobase + VMCI_CONTROL_ADDR);

	pci_set_drvdata(pdev, vmci_dev);
	return 0;

err_free_irq:
	free_irq(vmci_dev->irq, vmci_dev);
	tasklet_kill(&vmci_dev->datagram_tasklet);
	tasklet_kill(&vmci_dev->bm_tasklet);

err_disable_msi:
	if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX)
		pci_disable_msix(pdev);
	else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI)
		pci_disable_msi(pdev);

	vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);

err_remove_bitmap:
	if (vmci_dev->notification_bitmap) {
		iowrite32(VMCI_CONTROL_RESET,
			  vmci_dev->iobase + VMCI_CONTROL_ADDR);
		dma_free_coherent(&pdev->dev, PAGE_SIZE,
				  vmci_dev->notification_bitmap,
				  vmci_dev->notification_base);
	}

err_remove_vmci_dev_g:
	spin_lock_irq(&vmci_dev_spinlock);
	vmci_pdev = NULL;
	vmci_dev_g = NULL;
	spin_unlock_irq(&vmci_dev_spinlock);

err_free_data_buffer:
	vfree(vmci_dev->data_buffer);

	return error;
}

static void vmci_guest_remove_device(struct pci_dev *pdev)
{
	struct vmci_guest_device *vmci_dev = pci_get_drvdata(pdev);
	int vmci_err;

	dev_dbg(&pdev->dev, "Removing device\n");

	atomic_dec(&vmci_num_guest_devices);

	vmci_qp_guest_endpoints_exit();

	vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);

	spin_lock_irq(&vmci_dev_spinlock);
	vmci_dev_g = NULL;
	vmci_pdev = NULL;
	spin_unlock_irq(&vmci_dev_spinlock);

	dev_dbg(&pdev->dev, "Resetting vmci device\n");
	iowrite32(VMCI_CONTROL_RESET, vmci_dev->iobase + VMCI_CONTROL_ADDR);

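	/*
	 * Free the IRQs first, then disable MSI/MSI-X as appropriate.
	 * With exclusive MSI-X vectors there is a second IRQ to release.
	 */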
	free_irq(vmci_dev->irq, vmci_dev);
	if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX) {
		if (vmci_dev->exclusive_vectors)
			free_irq(vmci_dev->msix_entries[1].vector, vmci_dev);
		pci_disable_msix(pdev);
	} else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI) {
		pci_disable_msi(pdev);
	}

	tasklet_kill(&vmci_dev->datagram_tasklet);
	tasklet_kill(&vmci_dev->bm_tasklet);

	if (vmci_dev->notification_bitmap) {
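		/*
		 * The reset above should have cleared the device's use of
		 * the notification bitmap, so the page can be freed now.
		 */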
		dma_free_coherent(&pdev->dev, PAGE_SIZE,
				  vmci_dev->notification_bitmap,
				  vmci_dev->notification_base);
	}

	vfree(vmci_dev->data_buffer);

	/* Remaining resources are devm-managed and freed by the PCI core. */
}

static const struct pci_device_id vmci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), },
	{ 0 },
};
MODULE_DEVICE_TABLE(pci, vmci_ids);

static struct pci_driver vmci_guest_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= vmci_ids,
	.probe		= vmci_guest_probe_device,
	.remove		= vmci_guest_remove_device,
};

int __init vmci_guest_init(void)
{
	return pci_register_driver(&vmci_guest_driver);
}

void __exit vmci_guest_exit(void)
{
	pci_unregister_driver(&vmci_guest_driver);
}