// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/io.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"

#define VMCI_UTIL_NUM_RESOURCES 1

enum {
	VMCI_NOTIFY_RESOURCE_QUEUE_PAIR = 0,
	VMCI_NOTIFY_RESOURCE_DOOR_BELL = 1,
};

enum {
	VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY = 0,
	VMCI_NOTIFY_RESOURCE_ACTION_CREATE = 1,
	VMCI_NOTIFY_RESOURCE_ACTION_DESTROY = 2,
};

/*
 * VMCI driver initialization. This block can also be used to
 * pass initial group membership etc.
 */
struct vmci_init_blk {
	u32 cid;
	u32 flags;
};

/* VMCIqueue_pairAllocInfo_VMToVM */
struct vmci_qp_alloc_info_vmvm {
	struct vmci_handle handle;
	u32 peer;
	u32 flags;
	u64 produce_size;
	u64 consume_size;
	u64 produce_page_file;	  /* User VA. */
	u64 consume_page_file;	  /* User VA. */
	u64 produce_page_file_size;  /* Size of the file name array. */
	u64 consume_page_file_size;  /* Size of the file name array. */
	s32 result;
	u32 _pad;
};

/* VMCISetNotifyInfo: Used to pass notify flag's address to the host driver. */
struct vmci_set_notify_info {
	u64 notify_uva;
	s32 result;
	u32 _pad;
};

/*
 * Per-instance host state
 */
struct vmci_host_dev {
	struct vmci_ctx *context;
	int user_version;
	enum vmci_obj_type ct_type;
	struct mutex lock;  /* Mutex lock for vmci context access */
};

static struct vmci_ctx *host_context;
static bool vmci_host_device_initialized;
static atomic_t vmci_host_active_users = ATOMIC_INIT(0);

/*
 * Determines whether the VMCI host personality is
 * available. Since the core functionality of the host driver is
 * always present, all guests could possibly use the host
 * personality. However, to minimize the deviation from the
 * pre-unified driver state of affairs, we only consider the host
 * device active if there is no active guest device or if there
 * are VMX'en with active contexts using the host device.
 */
bool vmci_host_code_active(void)
{
	return vmci_host_device_initialized &&
	    (!vmci_guest_code_active() ||
	     atomic_read(&vmci_host_active_users) > 0);
}

/*
 * Called on open of /dev/vmci.
 */
static int vmci_host_open(struct inode *inode, struct file *filp)
{
	struct vmci_host_dev *vmci_host_dev;

	vmci_host_dev = kzalloc(sizeof(struct vmci_host_dev), GFP_KERNEL);
	if (vmci_host_dev == NULL)
		return -ENOMEM;

	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
	mutex_init(&vmci_host_dev->lock);
	filp->private_data = vmci_host_dev;

	return 0;
}

/*
 * Called on close of /dev/vmci, most often when the process
 * exits.
 */
static int vmci_host_close(struct inode *inode, struct file *filp)
{
	struct vmci_host_dev *vmci_host_dev = filp->private_data;

	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
		vmci_ctx_destroy(vmci_host_dev->context);
		vmci_host_dev->context = NULL;

		/*
		 * The number of active contexts is used to track whether any
		 * VMX'en are using the host personality. It is incremented
		 * when a context is created through the
		 * IOCTL_VMCI_INIT_CONTEXT ioctl, and decremented here, when
		 * the context is destroyed.
		 */
		atomic_dec(&vmci_host_active_users);
	}
	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;

	kfree(vmci_host_dev);
	filp->private_data = NULL;
	return 0;
}

/*
 * This is used to wake up the VMX when a VMCI call arrives, or
 * to wake up select() or poll() at the next clock tick.
 */
static __poll_t vmci_host_poll(struct file *filp, poll_table *wait)
{
	struct vmci_host_dev *vmci_host_dev = filp->private_data;
	struct vmci_ctx *context = vmci_host_dev->context;
	__poll_t mask = 0;

	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
		/* Check for VMCI calls to this VM context. */
		if (wait)
			poll_wait(filp, &context->host_context.wait_queue,
				  wait);

		spin_lock(&context->lock);
		if (context->pending_datagrams > 0 ||
		    vmci_handle_arr_get_size(
				context->pending_doorbell_array) > 0) {
			mask = EPOLLIN;
		}
		spin_unlock(&context->lock);
	}
	return mask;
}

/*
 * Copies the handles of a handle array into a user buffer, and
 * returns the new length in user_buf_size. If the copy to the
 * user buffer fails, the function still returns VMCI_SUCCESS,
 * but retval != 0.
 */
static int drv_cp_harray_to_user(void __user *user_buf_uva,
				 u64 *user_buf_size,
				 struct vmci_handle_arr *handle_array,
				 int *retval)
{
	u32 array_size = 0;
	struct vmci_handle *handles;

	if (handle_array)
		array_size = vmci_handle_arr_get_size(handle_array);

	if (array_size * sizeof(*handles) > *user_buf_size)
		return VMCI_ERROR_MORE_DATA;

	*user_buf_size = array_size * sizeof(*handles);
	if (*user_buf_size)
		*retval = copy_to_user(user_buf_uva,
				       vmci_handle_arr_get_handles
				       (handle_array), *user_buf_size);

	return VMCI_SUCCESS;
}

/*
 * Sets up a given context for notify to work. Maps the notify
 * boolean in user VA into kernel space.
 */
static int vmci_host_setup_notify(struct vmci_ctx *context,
				  unsigned long uva)
{
	int retval;

	if (context->notify_page) {
		pr_devel("%s: Notify mechanism is already set up\n", __func__);
		return VMCI_ERROR_DUPLICATE_ENTRY;
	}

	/*
	 * We use 'bool' internally, but the flag must be exactly one
	 * byte for the mapping below to work.
	 */
	BUILD_BUG_ON(sizeof(bool) != sizeof(u8));
	if (!access_ok((void __user *)uva, sizeof(u8)))
		return VMCI_ERROR_GENERIC;

	/*
	 * Lock physical page backing a given user VA.
	 */
	retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &context->notify_page);
	if (retval != 1) {
		context->notify_page = NULL;
		return VMCI_ERROR_GENERIC;
	}

	/*
	 * Map the locked page and set up notify pointer.
	 */
	context->notify = kmap(context->notify_page) + (uva & (PAGE_SIZE - 1));
	vmci_ctx_check_signal_notify(context);

	return VMCI_SUCCESS;
}

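/*
 * Handles the IOCTL_VMCI_VERSION and IOCTL_VMCI_VERSION2 ioctls: records
 * the version the VMX claims to speak (VERSION2 only) and returns the
 * version this driver will use for the connection.
 */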
static int vmci_host_get_version(struct vmci_host_dev *vmci_host_dev,
				 unsigned int cmd, void __user *uptr)
{
	if (cmd == IOCTL_VMCI_VERSION2) {
		int __user *vptr = uptr;
		if (get_user(vmci_host_dev->user_version, vptr))
			return -EFAULT;
	}

	/*
	 * The basic logic here is:
	 *
	 * If the user sends in a version of 0, tell it our version.
	 * If the user didn't send in a version, tell it our version.
	 * If the user did send in a version, and it predates host
	 * queue pair support (VMCI_VERSION_HOSTQP), echo the user's
	 * own version back so both sides keep speaking the older
	 * protocol. Otherwise, report the full driver version.
	 */
	if (vmci_host_dev->user_version > 0 &&
	    vmci_host_dev->user_version < VMCI_VERSION_HOSTQP) {
		return vmci_host_dev->user_version;
	}

	return VMCI_VERSION;
}

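/*
 * Logging helper for the ioctl handlers below; each handler receives the
 * name of the ioctl it serves as ioctl_name, which this macro folds into
 * its debug output.
 */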
#define vmci_ioctl_err(fmt, ...)	\
	pr_devel("%s: " fmt, ioctl_name, ##__VA_ARGS__)

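/*
 * IOCTL_VMCI_INIT_CONTEXT handler: binds this file handle to a new VMCI
 * context with the cid and privilege flags supplied by the VMX.
 */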
static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
				     const char *ioctl_name,
				     void __user *uptr)
{
	struct vmci_init_blk init_block;
	const struct cred *cred;
	int retval;

	if (copy_from_user(&init_block, uptr, sizeof(init_block))) {
		vmci_ioctl_err("error reading init block\n");
		return -EFAULT;
	}

	mutex_lock(&vmci_host_dev->lock);

	if (vmci_host_dev->ct_type != VMCIOBJ_NOT_SET) {
		vmci_ioctl_err("received VMCI init on initialized handle\n");
		retval = -EINVAL;
		goto out;
	}

	if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) {
		vmci_ioctl_err("unsupported VMCI restriction flag\n");
		retval = -EINVAL;
		goto out;
	}

	cred = get_current_cred();
	vmci_host_dev->context = vmci_ctx_create(init_block.cid,
						 init_block.flags, 0,
						 vmci_host_dev->user_version,
						 cred);
	put_cred(cred);
	if (IS_ERR(vmci_host_dev->context)) {
		retval = PTR_ERR(vmci_host_dev->context);
		vmci_ioctl_err("error initializing context\n");
		goto out;
	}

	/*
	 * Copy cid to userlevel, we do this to allow the VMX
	 * to enforce its policy on cid generation.
	 */
	init_block.cid = vmci_ctx_get_id(vmci_host_dev->context);
	if (copy_to_user(uptr, &init_block, sizeof(init_block))) {
		vmci_ctx_destroy(vmci_host_dev->context);
		vmci_host_dev->context = NULL;
		vmci_ioctl_err("error writing init block\n");
		retval = -EFAULT;
		goto out;
	}

	vmci_host_dev->ct_type = VMCIOBJ_CONTEXT;
	atomic_inc(&vmci_host_active_users);

	retval = 0;

out:
	mutex_unlock(&vmci_host_dev->lock);
	return retval;
}

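/*
 * IOCTL_VMCI_DATAGRAM_SEND handler: copies a datagram in from user space,
 * validates its size, and dispatches it on behalf of the calling context.
 */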
static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev,
				      const char *ioctl_name,
				      void __user *uptr)
{
	struct vmci_datagram_snd_rcv_info send_info;
	struct vmci_datagram *dg = NULL;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&send_info, uptr, sizeof(send_info)))
		return -EFAULT;

	if (send_info.len > VMCI_MAX_DG_SIZE) {
		vmci_ioctl_err("datagram is too big (size=%d)\n",
			       send_info.len);
		return -EINVAL;
	}

	if (send_info.len < sizeof(*dg)) {
		vmci_ioctl_err("datagram is too small (size=%d)\n",
			       send_info.len);
		return -EINVAL;
	}

	dg = memdup_user((void __user *)(uintptr_t)send_info.addr,
			 send_info.len);
	if (IS_ERR(dg)) {
		vmci_ioctl_err(
			"cannot allocate memory to dispatch datagram\n");
		return PTR_ERR(dg);
	}

	if (VMCI_DG_SIZE(dg) != send_info.len) {
		vmci_ioctl_err("datagram size mismatch\n");
		kfree(dg);
		return -EINVAL;
	}

	pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n",
		 dg->dst.context, dg->dst.resource,
		 dg->src.context, dg->src.resource,
		 (unsigned long long)dg->payload_size);

	/* Get source context id. */
	cid = vmci_ctx_get_id(vmci_host_dev->context);
	send_info.result = vmci_datagram_dispatch(cid, dg, true);
	kfree(dg);

	return copy_to_user(uptr, &send_info, sizeof(send_info)) ? -EFAULT : 0;
}

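/*
 * IOCTL_VMCI_DATAGRAM_RECEIVE handler: dequeues the next pending datagram
 * for this context and copies it out to the buffer supplied by the VMX.
 */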
static int vmci_host_do_receive_datagram(struct vmci_host_dev *vmci_host_dev,
					 const char *ioctl_name,
					 void __user *uptr)
{
	struct vmci_datagram_snd_rcv_info recv_info;
	struct vmci_datagram *dg = NULL;
	int retval;
	size_t size;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&recv_info, uptr, sizeof(recv_info)))
		return -EFAULT;

	size = recv_info.len;
	recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context,
						     &size, &dg);

	if (recv_info.result >= VMCI_SUCCESS) {
		void __user *ubuf = (void __user *)(uintptr_t)recv_info.addr;
		retval = copy_to_user(ubuf, dg, VMCI_DG_SIZE(dg));
		kfree(dg);
		if (retval != 0)
			return -EFAULT;
	}

	return copy_to_user(uptr, &recv_info, sizeof(recv_info)) ? -EFAULT : 0;
}

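/*
 * IOCTL_VMCI_QUEUEPAIR_ALLOC handler: allocates (or attaches to) a queue
 * pair through the broker. VMX versions older than VMCI_VERSION_NOVMVM
 * use the legacy vm-to-vm info layout and pass no page store. If the
 * result cannot be copied back to user space, a successful allocation is
 * rolled back by detaching again.
 */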
static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_handle handle;
	int vmci_status;
	int __user *retptr;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		struct vmci_qp_alloc_info_vmvm alloc_info;
		struct vmci_qp_alloc_info_vmvm __user *info = uptr;

		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
			return -EFAULT;

		handle = alloc_info.handle;
		retptr = &info->result;

		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
						   alloc_info.peer,
						   alloc_info.flags,
						   VMCI_NO_PRIVILEGE_FLAGS,
						   alloc_info.produce_size,
						   alloc_info.consume_size,
						   NULL,
						   vmci_host_dev->context);

		if (vmci_status == VMCI_SUCCESS)
			vmci_status = VMCI_SUCCESS_QUEUEPAIR_CREATE;
	} else {
		struct vmci_qp_alloc_info alloc_info;
		struct vmci_qp_alloc_info __user *info = uptr;
		struct vmci_qp_page_store page_store;

		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
			return -EFAULT;

		handle = alloc_info.handle;
		retptr = &info->result;

		page_store.pages = alloc_info.ppn_va;
		page_store.len = alloc_info.num_ppns;

		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
						   alloc_info.peer,
						   alloc_info.flags,
						   VMCI_NO_PRIVILEGE_FLAGS,
						   alloc_info.produce_size,
						   alloc_info.consume_size,
						   &page_store,
						   vmci_host_dev->context);
	}

	if (put_user(vmci_status, retptr)) {
		if (vmci_status >= VMCI_SUCCESS) {
			vmci_status = vmci_qp_broker_detach(handle,
							vmci_host_dev->context);
		}
		return -EFAULT;
	}

	return 0;
}

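/*
 * IOCTL_VMCI_QUEUEPAIR_SETVA handler: maps or unmaps a queue pair at the
 * user VA supplied by the VMX (a VA of 0 requests an unmap).
 */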
static int vmci_host_do_queuepair_setva(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_qp_set_va_info set_va_info;
	struct vmci_qp_set_va_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		vmci_ioctl_err("is not allowed\n");
		return -EINVAL;
	}

	if (copy_from_user(&set_va_info, uptr, sizeof(set_va_info)))
		return -EFAULT;

	if (set_va_info.va) {
		/*
		 * VMX is passing down a new VA for the queue
		 * pair mapping.
		 */
		result = vmci_qp_broker_map(set_va_info.handle,
					    vmci_host_dev->context,
					    set_va_info.va);
	} else {
		/*
		 * The queue pair is about to be unmapped by
		 * the VMX.
		 */
		result = vmci_qp_broker_unmap(set_va_info.handle,
					      vmci_host_dev->context, 0);
	}

	return put_user(result, &info->result) ? -EFAULT : 0;
}

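/*
 * IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE handler: hands the broker the user VAs
 * backing the produce and consume queues. Only supported for the VMX
 * version window [VMCI_VERSION_HOSTQP, VMCI_VERSION_NOVMVM).
 */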
static int vmci_host_do_queuepair_setpf(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_qp_page_file_info page_file_info;
	struct vmci_qp_page_file_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->user_version < VMCI_VERSION_HOSTQP ||
	    vmci_host_dev->user_version >= VMCI_VERSION_NOVMVM) {
		vmci_ioctl_err("not supported on this VMX (version=%d)\n",
			       vmci_host_dev->user_version);
		return -EINVAL;
	}

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&page_file_info, uptr, sizeof(*info)))
		return -EFAULT;

	/*
	 * Communicate success pre-emptively to the caller.  Note that the
	 * basic premise is that it is incumbent upon the caller not to look
	 * at the info.result field until after the ioctl() returns.  And
	 * then, only if the ioctl() result indicates no error.  We send up
	 * the SUCCESS status before calling SetPageStore() because failing
	 * to copy up the result code means unwinding the SetPageStore().
	 *
	 * It turns out the logic to unwind a SetPageStore() opens a can of
	 * worms.  For example, if a host had created the queue_pair and a
	 * guest attaches and SetPageStore() is successful but writing
	 * success fails, then the host has to be stopped from writing (any
	 * more) data into the queue_pair.  That means an additional test in
	 * the VMCI_Enqueue() code path.  Ugh.
	 */
	if (put_user(VMCI_SUCCESS, &info->result)) {
		/*
		 * In this case, we can't write a result field of the
		 * caller's info block.  So, we don't even try to
		 * SetPageStore().
		 */
		return -EFAULT;
	}

	result = vmci_qp_broker_set_page_store(page_file_info.handle,
					       page_file_info.produce_va,
					       page_file_info.consume_va,
					       vmci_host_dev->context);
	if (result < VMCI_SUCCESS) {
		if (put_user(result, &info->result)) {
			/*
			 * The SetPageStore() call failed and, on top of
			 * that, the result code could not be copied back
			 * to the caller.  All we can do is return -EFAULT;
			 * the caller sees a failed ioctl() and, per the
			 * convention above, must not trust the
			 * pre-populated info.result field.
			 */
			return -EFAULT;
		}
	}

	return 0;
}

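/*
 * IOCTL_VMCI_QUEUEPAIR_DETACH handler: detaches the calling context from a
 * queue pair; old VMX versions expect VMCI_SUCCESS_LAST_DETACH on success.
 */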
static int vmci_host_do_qp_detach(struct vmci_host_dev *vmci_host_dev,
				  const char *ioctl_name,
				  void __user *uptr)
{
	struct vmci_qp_dtch_info detach_info;
	struct vmci_qp_dtch_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&detach_info, uptr, sizeof(detach_info)))
		return -EFAULT;

	result = vmci_qp_broker_detach(detach_info.handle,
				       vmci_host_dev->context);
	if (result == VMCI_SUCCESS &&
	    vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		result = VMCI_SUCCESS_LAST_DETACH;
	}

	return put_user(result, &info->result) ? -EFAULT : 0;
}

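/*
 * IOCTL_VMCI_CTX_ADD_NOTIFICATION handler: subscribes the calling context
 * to notifications about the given remote context id.
 */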
static int vmci_host_do_ctx_add_notify(struct vmci_host_dev *vmci_host_dev,
				       const char *ioctl_name,
				       void __user *uptr)
{
	struct vmci_ctx_info ar_info;
	struct vmci_ctx_info __user *info = uptr;
	s32 result;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	result = vmci_ctx_add_notification(cid, ar_info.remote_cid);

	return put_user(result, &info->result) ? -EFAULT : 0;
}

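/*
 * IOCTL_VMCI_CTX_REMOVE_NOTIFICATION handler: drops a subscription added
 * by vmci_host_do_ctx_add_notify().
 */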
static int vmci_host_do_ctx_remove_notify(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_info ar_info;
	struct vmci_ctx_info __user *info = uptr;
	u32 cid;
	int result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	result = vmci_ctx_remove_notification(cid,
					      ar_info.remote_cid);

	return put_user(result, &info->result) ? -EFAULT : 0;
}

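/*
 * IOCTL_VMCI_CTX_GET_CPT_STATE handler: retrieves a block of checkpoint
 * state of the requested type for the calling context and copies it to
 * the user buffer.
 */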
static int vmci_host_do_ctx_get_cpt_state(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_chkpt_buf_info get_info;
	u32 cid;
	void *cpt_buf;
	int retval;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&get_info, uptr, sizeof(get_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	get_info.result = vmci_ctx_get_chkpt_state(cid, get_info.cpt_type,
						   &get_info.buf_size,
						   &cpt_buf);
	if (get_info.result == VMCI_SUCCESS && get_info.buf_size) {
		void __user *ubuf = (void __user *)(uintptr_t)get_info.cpt_buf;
		retval = copy_to_user(ubuf, cpt_buf, get_info.buf_size);
		kfree(cpt_buf);

		if (retval)
			return -EFAULT;
	}

	return copy_to_user(uptr, &get_info, sizeof(get_info)) ? -EFAULT : 0;
}

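/*
 * IOCTL_VMCI_CTX_SET_CPT_STATE handler: restores previously saved
 * checkpoint state into the calling context.
 */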
static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_chkpt_buf_info set_info;
	u32 cid;
	void *cpt_buf;
	int retval;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&set_info, uptr, sizeof(set_info)))
		return -EFAULT;

	cpt_buf = memdup_user((void __user *)(uintptr_t)set_info.cpt_buf,
			      set_info.buf_size);
	if (IS_ERR(cpt_buf))
		return PTR_ERR(cpt_buf);

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type,
						   set_info.buf_size, cpt_buf);

	retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0;

	kfree(cpt_buf);
	return retval;
}

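/*
 * IOCTL_VMCI_GET_CONTEXT_ID handler: the host side always reports
 * VMCI_HOST_CONTEXT_ID.
 */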
static int vmci_host_do_get_context_id(struct vmci_host_dev *vmci_host_dev,
				       const char *ioctl_name,
				       void __user *uptr)
{
	u32 __user *u32ptr = uptr;

	return put_user(VMCI_HOST_CONTEXT_ID, u32ptr) ? -EFAULT : 0;
}

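/*
 * IOCTL_VMCI_SET_NOTIFY handler: installs (or, for a zero UVA, tears down)
 * the user-space notify flag mapped by vmci_host_setup_notify().
 */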
static int vmci_host_do_set_notify(struct vmci_host_dev *vmci_host_dev,
				   const char *ioctl_name,
				   void __user *uptr)
{
	struct vmci_set_notify_info notify_info;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&notify_info, uptr, sizeof(notify_info)))
		return -EFAULT;

	if (notify_info.notify_uva) {
		notify_info.result =
			vmci_host_setup_notify(vmci_host_dev->context,
					       notify_info.notify_uva);
	} else {
		vmci_ctx_unset_notify(vmci_host_dev->context);
		notify_info.result = VMCI_SUCCESS;
	}

	return copy_to_user(uptr, &notify_info, sizeof(notify_info)) ?
		-EFAULT : 0;
}

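/*
 * IOCTL_VMCI_NOTIFY_RESOURCE handler: creates, destroys or rings a
 * doorbell on behalf of the calling context. Doorbells are the only
 * notify resource that can be rung.
 */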
static int vmci_host_do_notify_resource(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_dbell_notify_resource_info info;
	u32 cid;

	if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
		vmci_ioctl_err("invalid for current VMX versions\n");
		return -EINVAL;
	}

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&info, uptr, sizeof(info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	switch (info.action) {
	case VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY:
		if (info.resource == VMCI_NOTIFY_RESOURCE_DOOR_BELL) {
			u32 flags = VMCI_NO_PRIVILEGE_FLAGS;
			info.result = vmci_ctx_notify_dbell(cid, info.handle,
							    flags);
		} else {
			info.result = VMCI_ERROR_UNAVAILABLE;
		}
		break;

	case VMCI_NOTIFY_RESOURCE_ACTION_CREATE:
		info.result = vmci_ctx_dbell_create(cid, info.handle);
		break;

	case VMCI_NOTIFY_RESOURCE_ACTION_DESTROY:
		info.result = vmci_ctx_dbell_destroy(cid, info.handle);
		break;

	default:
		vmci_ioctl_err("got unknown action (action=%d)\n",
			       info.action);
		info.result = VMCI_ERROR_INVALID_ARGS;
	}

	return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
}

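/*
 * IOCTL_VMCI_NOTIFICATIONS_RECEIVE handler: pulls the arrays of pending
 * doorbell and queue pair handles for this context and copies them into
 * the user buffers described in the info block.
 */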
static int vmci_host_do_recv_notifications(struct vmci_host_dev *vmci_host_dev,
					   const char *ioctl_name,
					   void __user *uptr)
{
	struct vmci_ctx_notify_recv_info info;
	struct vmci_handle_arr *db_handle_array;
	struct vmci_handle_arr *qp_handle_array;
	void __user *ubuf;
	u32 cid;
	int retval = 0;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
		vmci_ioctl_err("not supported for the current vmx version\n");
		return -EINVAL;
	}

	if (copy_from_user(&info, uptr, sizeof(info)))
		return -EFAULT;

	if ((info.db_handle_buf_size && !info.db_handle_buf_uva) ||
	    (info.qp_handle_buf_size && !info.qp_handle_buf_uva)) {
		return -EINVAL;
	}

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	info.result = vmci_ctx_rcv_notifications_get(cid,
				&db_handle_array, &qp_handle_array);
	if (info.result != VMCI_SUCCESS)
		return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;

	ubuf = (void __user *)(uintptr_t)info.db_handle_buf_uva;
	info.result = drv_cp_harray_to_user(ubuf, &info.db_handle_buf_size,
					    db_handle_array, &retval);
	if (info.result == VMCI_SUCCESS && !retval) {
		ubuf = (void __user *)(uintptr_t)info.qp_handle_buf_uva;
		info.result = drv_cp_harray_to_user(ubuf,
						    &info.qp_handle_buf_size,
						    qp_handle_array, &retval);
	}

	if (!retval && copy_to_user(uptr, &info, sizeof(info)))
		retval = -EFAULT;

	vmci_ctx_rcv_notifications_release(cid,
				db_handle_array, qp_handle_array,
				info.result == VMCI_SUCCESS && !retval);

	return retval;
}

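/*
 * Top-level ioctl dispatch for /dev/vmci: each IOCTL_VMCI_* command is
 * forwarded to the matching vmci_host_do_*() handler above.
 */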
static long vmci_host_unlocked_ioctl(struct file *filp,
				     unsigned int iocmd, unsigned long ioarg)
{
#define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do {			\
		char *name = __stringify(IOCTL_VMCI_ ## ioctl_name);	\
		return vmci_host_do_ ## ioctl_fn(			\
			vmci_host_dev, name, uptr);			\
	} while (0)

	struct vmci_host_dev *vmci_host_dev = filp->private_data;
	void __user *uptr = (void __user *)ioarg;

	switch (iocmd) {
	case IOCTL_VMCI_INIT_CONTEXT:
		VMCI_DO_IOCTL(INIT_CONTEXT, init_context);
	case IOCTL_VMCI_DATAGRAM_SEND:
		VMCI_DO_IOCTL(DATAGRAM_SEND, send_datagram);
	case IOCTL_VMCI_DATAGRAM_RECEIVE:
		VMCI_DO_IOCTL(DATAGRAM_RECEIVE, receive_datagram);
	case IOCTL_VMCI_QUEUEPAIR_ALLOC:
		VMCI_DO_IOCTL(QUEUEPAIR_ALLOC, alloc_queuepair);
	case IOCTL_VMCI_QUEUEPAIR_SETVA:
		VMCI_DO_IOCTL(QUEUEPAIR_SETVA, queuepair_setva);
	case IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE:
		VMCI_DO_IOCTL(QUEUEPAIR_SETPAGEFILE, queuepair_setpf);
	case IOCTL_VMCI_QUEUEPAIR_DETACH:
		VMCI_DO_IOCTL(QUEUEPAIR_DETACH, qp_detach);
	case IOCTL_VMCI_CTX_ADD_NOTIFICATION:
		VMCI_DO_IOCTL(CTX_ADD_NOTIFICATION, ctx_add_notify);
	case IOCTL_VMCI_CTX_REMOVE_NOTIFICATION:
		VMCI_DO_IOCTL(CTX_REMOVE_NOTIFICATION, ctx_remove_notify);
	case IOCTL_VMCI_CTX_GET_CPT_STATE:
		VMCI_DO_IOCTL(CTX_GET_CPT_STATE, ctx_get_cpt_state);
	case IOCTL_VMCI_CTX_SET_CPT_STATE:
		VMCI_DO_IOCTL(CTX_SET_CPT_STATE, ctx_set_cpt_state);
	case IOCTL_VMCI_GET_CONTEXT_ID:
		VMCI_DO_IOCTL(GET_CONTEXT_ID, get_context_id);
	case IOCTL_VMCI_SET_NOTIFY:
		VMCI_DO_IOCTL(SET_NOTIFY, set_notify);
	case IOCTL_VMCI_NOTIFY_RESOURCE:
		VMCI_DO_IOCTL(NOTIFY_RESOURCE, notify_resource);
	case IOCTL_VMCI_NOTIFICATIONS_RECEIVE:
		VMCI_DO_IOCTL(NOTIFICATIONS_RECEIVE, recv_notifications);

	case IOCTL_VMCI_VERSION:
	case IOCTL_VMCI_VERSION2:
		return vmci_host_get_version(vmci_host_dev, iocmd, uptr);

	default:
		pr_devel("%s: Unknown ioctl (iocmd=%d)\n", __func__, iocmd);
		return -EINVAL;
	}

#undef VMCI_DO_IOCTL
}

static const struct file_operations vmuser_fops = {
	.owner		= THIS_MODULE,
	.open		= vmci_host_open,
	.release	= vmci_host_close,
	.poll		= vmci_host_poll,
	.unlocked_ioctl	= vmci_host_unlocked_ioctl,
	.compat_ioctl	= vmci_host_unlocked_ioctl,
};

static struct miscdevice vmci_host_miscdev = {
	.name = "vmci",
	.minor = MISC_DYNAMIC_MINOR,
	.fops = &vmuser_fops,
};

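/*
 * Module init for the host personality: create the well-known host
 * context and register the /dev/vmci misc device.
 */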
int __init vmci_host_init(void)
{
	int error;

	host_context = vmci_ctx_create(VMCI_HOST_CONTEXT_ID,
				       VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
				       -1, VMCI_VERSION, NULL);
	if (IS_ERR(host_context)) {
		error = PTR_ERR(host_context);
		pr_warn("Failed to initialize VMCIContext (error=%d)\n",
			error);
		return error;
	}

	error = misc_register(&vmci_host_miscdev);
	if (error) {
		pr_warn("Module registration error (name=%s, major=%d, minor=%d, err=%d)\n",
			vmci_host_miscdev.name,
			MISC_MAJOR, vmci_host_miscdev.minor,
			error);
		pr_warn("Unable to initialize host personality\n");
		vmci_ctx_destroy(host_context);
		return error;
	}

	pr_info("VMCI host device registered (name=%s, major=%d, minor=%d)\n",
		vmci_host_miscdev.name, MISC_MAJOR, vmci_host_miscdev.minor);

	vmci_host_device_initialized = true;
	return 0;
}

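/*
 * Module exit: tear down in the reverse order of vmci_host_init(), then
 * let the queue pair broker clean up any lingering host-side state.
 */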
void __exit vmci_host_exit(void)
{
	vmci_host_device_initialized = false;

	misc_deregister(&vmci_host_miscdev);
	vmci_ctx_destroy(host_context);
	vmci_qp_broker_exit();

	pr_debug("VMCI host driver module unloaded\n");
}