/*
 * VMware VMCI driver - queue pair (qp) implementation.
 */
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/skbuff.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_route.h"

/*
 * A queue pair consists of two unidirectional queues, a produce queue
 * and a consume queue, shared between the two endpoints of the pair.
 * Each queue is a ring of data pages preceded by one header page that
 * holds the producer tail and consumer head pointers.  An endpoint
 * writes into its produce queue and reads from its consume queue; the
 * peer sees the same two queues with the roles swapped.
 *
 * Guest endpoints allocate their queue memory locally and announce the
 * backing page frame numbers (PPNs) to the hypervisor with the
 * VMCI_QUEUEPAIR_ALLOC hypercall.  The host side keeps a broker that
 * tracks create/attach/detach operations for each queue pair and maps
 * the guest-supplied pages into the host kernel on demand.
 */

typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue,
				      u64 queue_offset, const void *src,
				      size_t src_offset, size_t size);
typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
					const struct vmci_queue *queue,
					u64 queue_offset, size_t size);

struct vmci_queue_kern_if {
	struct mutex __mutex;	/* Protects the queue. */
	struct mutex *mutex;	/* Shared by producer and consumer queues. */
	size_t num_pages;	/* Number of pages incl. header. */
	bool host;		/* Host or guest? */
	union {
		struct {
			dma_addr_t *pas;
			void **vas;
		} g;		/* Used by the guest. */
		struct {
			struct page **page;
			struct page **header_page;
		} h;		/* Used by the host. */
	} u;
};

/*
 * This structure is opaque to the clients.
 */
struct vmci_qp {
	struct vmci_handle handle;
	struct vmci_queue *produce_q;
	struct vmci_queue *consume_q;
	u64 produce_q_size;
	u64 consume_q_size;
	u32 peer;
	u32 flags;
	u32 priv_flags;
	bool guest_endpoint;
	unsigned int blocked;
	unsigned int generation;
	wait_queue_head_t event;
};

enum qp_broker_state {
	VMCIQPB_NEW,
	VMCIQPB_CREATED_NO_MEM,
	VMCIQPB_CREATED_MEM,
	VMCIQPB_ATTACHED_NO_MEM,
	VMCIQPB_ATTACHED_MEM,
	VMCIQPB_SHUTDOWN_NO_MEM,
	VMCIQPB_SHUTDOWN_MEM,
	VMCIQPB_GONE
};

#define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \
				     _qpb->state == VMCIQPB_ATTACHED_MEM || \
				     _qpb->state == VMCIQPB_SHUTDOWN_MEM)

/*
 * In the queue pair broker, we always use the guest point of view for
 * the produce and consume queue values and references, e.g., the
 * produce queue size stored is the guest's produce queue size.  The
 * host endpoint will need to swap these around.  The only exception is
 * the local queue pairs on the host, in which case the host endpoint
 * that creates the queue pair will have the right orientation, and
 * the attaching host endpoint will need to swap.
 */
struct qp_entry {
	struct list_head list_item;
	struct vmci_handle handle;
	u32 peer;
	u32 flags;
	u64 produce_size;
	u64 consume_size;
	u32 ref_count;
};

struct qp_broker_entry {
	struct vmci_resource resource;
	struct qp_entry qp;
	u32 create_id;
	u32 attach_id;
	enum qp_broker_state state;
	bool require_trusted_attach;
	bool created_by_trusted;
	bool vmci_page_files;
	struct vmci_queue *produce_q;
	struct vmci_queue *consume_q;
	struct vmci_queue_header saved_produce_q;
	struct vmci_queue_header saved_consume_q;
	vmci_event_release_cb wakeup_cb;
	void *client_data;
	void *local_mem;
};

struct qp_guest_endpoint {
	struct vmci_resource resource;
	struct qp_entry qp;
	u64 num_ppns;
	void *produce_q;
	void *consume_q;
	struct ppn_set ppn_set;
};

struct qp_list {
	struct list_head head;
	struct mutex mutex;
};

static struct qp_list qp_broker_list = {
	.head = LIST_HEAD_INIT(qp_broker_list.head),
	.mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex),
};

static struct qp_list qp_guest_endpoints = {
	.head = LIST_HEAD_INIT(qp_guest_endpoints.head),
	.mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex),
};

#define INVALID_VMCI_GUEST_MEM_ID  0
#define QPE_NUM_PAGES(_QPE) ((u32) \
			     (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
			      DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))

/*
 * Frees kernel VA space for a given queue and its queue header, and
 * frees physical data pages.
 */
static void qp_free_queue(void *q, u64 size)
{
	struct vmci_queue *queue = q;

	if (queue) {
		u64 i;

		/* Given size does not include header, so add in a page here. */
		for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) {
			dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE,
					  queue->kernel_if->u.g.vas[i],
					  queue->kernel_if->u.g.pas[i]);
		}

		vfree(queue);
	}
}

/*
 * Allocates kernel queue pages of specified size with IOMMU mappings,
 * plus space for the queue structure/kernel interface and the queue
 * header.
 */
static void *qp_alloc_queue(u64 size, u32 flags)
{
	u64 i;
	struct vmci_queue *queue;
	size_t pas_size;
	size_t vas_size;
	size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
	const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;

	if (num_pages >
		 (SIZE_MAX - queue_size) /
		 (sizeof(*queue->kernel_if->u.g.pas) +
		  sizeof(*queue->kernel_if->u.g.vas)))
		return NULL;

	pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
	vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
	queue_size += pas_size + vas_size;

	queue = vmalloc(queue_size);
	if (!queue)
		return NULL;

	queue->q_header = NULL;
	queue->saved_header = NULL;
	queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
	queue->kernel_if->mutex = NULL;
	queue->kernel_if->num_pages = num_pages;
	queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1);
	queue->kernel_if->u.g.vas =
		(void **)((u8 *)queue->kernel_if->u.g.pas + pas_size);
	queue->kernel_if->host = false;

	for (i = 0; i < num_pages; i++) {
		queue->kernel_if->u.g.vas[i] =
			dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE,
					   &queue->kernel_if->u.g.pas[i],
					   GFP_KERNEL);
		if (!queue->kernel_if->u.g.vas[i]) {
			/* Size excl. the header. */
			qp_free_queue(queue, i * PAGE_SIZE);
			return NULL;
		}
	}

	/* Queue header is the first page. */
	queue->q_header = queue->kernel_if->u.g.vas[0];

	return queue;
}

/*
 * Copies from a given buffer or iovector to a VMCI Queue.  Uses
 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
 * by traversing the offset -> page translation structure for the queue.
 * Assumes that offset + size does not wrap around in the queue.
 */
static int __qp_memcpy_to_queue(struct vmci_queue *queue,
				u64 queue_offset,
				const void *src,
				size_t size,
				bool is_iovec)
{
	struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
	size_t bytes_copied = 0;

	while (bytes_copied < size) {
		const u64 page_index =
			(queue_offset + bytes_copied) / PAGE_SIZE;
		const size_t page_offset =
		    (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
		void *va;
		size_t to_copy;

		if (kernel_if->host)
			va = kmap(kernel_if->u.h.page[page_index]);
		else
			va = kernel_if->u.g.vas[page_index + 1];
			/* Skip header. */

		if (size - bytes_copied > PAGE_SIZE - page_offset)
			/* Enough payload to fill up from this page. */
			to_copy = PAGE_SIZE - page_offset;
		else
			to_copy = size - bytes_copied;

		if (is_iovec) {
			struct msghdr *msg = (struct msghdr *)src;
			int err;

			/* The iovec will track bytes_copied internally. */
			err = memcpy_from_msg((u8 *)va + page_offset,
					      msg, to_copy);
			if (err != 0) {
				if (kernel_if->host)
					kunmap(kernel_if->u.h.page[page_index]);
				return VMCI_ERROR_INVALID_ARGS;
			}
		} else {
			memcpy((u8 *)va + page_offset,
			       (u8 *)src + bytes_copied, to_copy);
		}

		bytes_copied += to_copy;
		if (kernel_if->host)
			kunmap(kernel_if->u.h.page[page_index]);
	}

	return VMCI_SUCCESS;
}

/*
 * Copies to a given buffer or iovector from a VMCI Queue.  Uses
 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
 * by traversing the offset -> page translation structure for the queue.
 * Assumes that offset + size does not wrap around in the queue.
 */
static int __qp_memcpy_from_queue(void *dest,
				  const struct vmci_queue *queue,
				  u64 queue_offset,
				  size_t size,
				  bool is_iovec)
{
	struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
	size_t bytes_copied = 0;

	while (bytes_copied < size) {
		const u64 page_index =
			(queue_offset + bytes_copied) / PAGE_SIZE;
		const size_t page_offset =
		    (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
		void *va;
		size_t to_copy;

		if (kernel_if->host)
			va = kmap(kernel_if->u.h.page[page_index]);
		else
			va = kernel_if->u.g.vas[page_index + 1];
			/* Skip header. */

		if (size - bytes_copied > PAGE_SIZE - page_offset)
			/* Enough payload to fill up this page. */
			to_copy = PAGE_SIZE - page_offset;
		else
			to_copy = size - bytes_copied;

		if (is_iovec) {
			struct msghdr *msg = dest;
			int err;

			/* The iovec will track bytes_copied internally. */
			err = memcpy_to_msg(msg, (u8 *)va + page_offset,
					    to_copy);
			if (err != 0) {
				if (kernel_if->host)
					kunmap(kernel_if->u.h.page[page_index]);
				return VMCI_ERROR_INVALID_ARGS;
			}
		} else {
			memcpy((u8 *)dest + bytes_copied,
			       (u8 *)va + page_offset, to_copy);
		}

		bytes_copied += to_copy;
		if (kernel_if->host)
			kunmap(kernel_if->u.h.page[page_index]);
	}

	return VMCI_SUCCESS;
}

/*
 * Allocates two lists of PPNs --- one for the pages backing the produce
 * queue, and one for the pages backing the consume queue.  The PPNs are
 * derived from the DMA addresses of the already allocated queue pages
 * (including the queue headers).
 */
471static int qp_alloc_ppn_set(void *prod_q,
472 u64 num_produce_pages,
473 void *cons_q,
474 u64 num_consume_pages, struct ppn_set *ppn_set)
475{
476 u32 *produce_ppns;
477 u32 *consume_ppns;
478 struct vmci_queue *produce_q = prod_q;
479 struct vmci_queue *consume_q = cons_q;
480 u64 i;
481
482 if (!produce_q || !num_produce_pages || !consume_q ||
483 !num_consume_pages || !ppn_set)
484 return VMCI_ERROR_INVALID_ARGS;
485
486 if (ppn_set->initialized)
487 return VMCI_ERROR_ALREADY_EXISTS;
488
489 produce_ppns =
490 kmalloc(num_produce_pages * sizeof(*produce_ppns), GFP_KERNEL);
491 if (!produce_ppns)
492 return VMCI_ERROR_NO_MEM;
493
494 consume_ppns =
495 kmalloc(num_consume_pages * sizeof(*consume_ppns), GFP_KERNEL);
496 if (!consume_ppns) {
497 kfree(produce_ppns);
498 return VMCI_ERROR_NO_MEM;
499 }
500
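	/*
	 * Convert the DMA address of every queue page to a page frame
	 * number.  The hypercall uses 32-bit PPNs, so fail if a PPN does
	 * not round-trip through the narrower type.
	 */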
501 for (i = 0; i < num_produce_pages; i++) {
502 unsigned long pfn;
503
504 produce_ppns[i] =
505 produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
506 pfn = produce_ppns[i];
507
508
509 if (sizeof(pfn) > sizeof(*produce_ppns)
510 && pfn != produce_ppns[i])
511 goto ppn_error;
512 }
513
514 for (i = 0; i < num_consume_pages; i++) {
515 unsigned long pfn;
516
517 consume_ppns[i] =
518 consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
519 pfn = consume_ppns[i];
520
521
522 if (sizeof(pfn) > sizeof(*consume_ppns)
523 && pfn != consume_ppns[i])
524 goto ppn_error;
525 }
526
527 ppn_set->num_produce_pages = num_produce_pages;
528 ppn_set->num_consume_pages = num_consume_pages;
529 ppn_set->produce_ppns = produce_ppns;
530 ppn_set->consume_ppns = consume_ppns;
531 ppn_set->initialized = true;
532 return VMCI_SUCCESS;
533
534 ppn_error:
535 kfree(produce_ppns);
536 kfree(consume_ppns);
537 return VMCI_ERROR_INVALID_ARGS;
538}

/*
 * Frees the two lists of PPNs for a queue pair.
 */
543static void qp_free_ppn_set(struct ppn_set *ppn_set)
544{
545 if (ppn_set->initialized) {
546
547 kfree(ppn_set->produce_ppns);
548 kfree(ppn_set->consume_ppns);
549 }
550 memset(ppn_set, 0, sizeof(*ppn_set));
551}

/*
 * Populates the list of PPNs in the hypercall structure with the PPNs
 * of both the produce and the consume queue.
 */
557static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
558{
559 memcpy(call_buf, ppn_set->produce_ppns,
560 ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns));
561 memcpy(call_buf +
562 ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns),
563 ppn_set->consume_ppns,
564 ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns));
565
566 return VMCI_SUCCESS;
567}
568
569static int qp_memcpy_to_queue(struct vmci_queue *queue,
570 u64 queue_offset,
571 const void *src, size_t src_offset, size_t size)
572{
573 return __qp_memcpy_to_queue(queue, queue_offset,
574 (u8 *)src + src_offset, size, false);
575}
576
577static int qp_memcpy_from_queue(void *dest,
578 size_t dest_offset,
579 const struct vmci_queue *queue,
580 u64 queue_offset, size_t size)
581{
582 return __qp_memcpy_from_queue((u8 *)dest + dest_offset,
583 queue, queue_offset, size, false);
584}

/*
 * Copies from a given msghdr/iovec into a VMCI queue.
 */
589static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
590 u64 queue_offset,
591 const void *msg,
592 size_t src_offset, size_t size)
593{
	/*
	 * We ignore src_offset because src is really a struct msghdr * and
	 * will maintain the offset internally.
	 */
599 return __qp_memcpy_to_queue(queue, queue_offset, msg, size, true);
600}

/*
 * Copies from a VMCI queue into a given msghdr/iovec.
 */
605static int qp_memcpy_from_queue_iov(void *dest,
606 size_t dest_offset,
607 const struct vmci_queue *queue,
608 u64 queue_offset, size_t size)
609{
	/*
	 * We ignore dest_offset because dest is really a struct msghdr * and
	 * will maintain the offset internally.
	 */
614 return __qp_memcpy_from_queue(dest, queue, queue_offset, size, true);
615}

/*
 * Allocates kernel VA space of specified size, plus space for the
 * queue structure/kernel interface and the queue header.  Does not
 * allocate the backing pages for the queues themselves: for host-side
 * queues the data pages are supplied by the guest and registered later.
 */
623static struct vmci_queue *qp_host_alloc_queue(u64 size)
624{
625 struct vmci_queue *queue;
626 size_t queue_page_size;
627 const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
628 const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
629
630 if (num_pages > (SIZE_MAX - queue_size) /
631 sizeof(*queue->kernel_if->u.h.page))
632 return NULL;
633
634 queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);
635
636 queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
637 if (queue) {
638 queue->q_header = NULL;
639 queue->saved_header = NULL;
640 queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
641 queue->kernel_if->host = true;
642 queue->kernel_if->mutex = NULL;
643 queue->kernel_if->num_pages = num_pages;
644 queue->kernel_if->u.h.header_page =
645 (struct page **)((u8 *)queue + queue_size);
646 queue->kernel_if->u.h.page =
647 &queue->kernel_if->u.h.header_page[1];
648 }
649
650 return queue;
651}

/*
 * Frees kernel memory for a given queue (header plus translation
 * structure).
 */
657static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size)
658{
659 kfree(queue);
660}

/*
 * Initialize the mutex for the pair of queues.  This mutex is used to
 * protect the q_header and the buffer from changing out from under any
 * users of either queue.  Of course, it's only any good if the mutexes
 * are actually acquired.  Queue structure must lie on non-paged memory
 * or we cannot guarantee access to the mutex.
 */
669static void qp_init_queue_mutex(struct vmci_queue *produce_q,
670 struct vmci_queue *consume_q)
671{
	/*
	 * Only the host queue has shared state - the guest queues do not
	 * need to synchronize access using a queue mutex.
	 */

677 if (produce_q->kernel_if->host) {
678 produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
679 consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
680 mutex_init(produce_q->kernel_if->mutex);
681 }
682}

/*
 * Cleans up the mutex for the pair of queues.
 */
687static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q,
688 struct vmci_queue *consume_q)
689{
690 if (produce_q->kernel_if->host) {
691 produce_q->kernel_if->mutex = NULL;
692 consume_q->kernel_if->mutex = NULL;
693 }
694}

/*
 * Acquire the mutex for the queue.  Note that the produce_q and
 * the consume_q share a mutex, so only one of the two needs to be
 * passed in to this routine.
 */
701static void qp_acquire_queue_mutex(struct vmci_queue *queue)
702{
703 if (queue->kernel_if->host)
704 mutex_lock(queue->kernel_if->mutex);
705}

/*
 * Release the mutex for the queue.  Note that the produce_q and
 * the consume_q share a mutex, so only one of the two needs to be
 * passed in to this routine.
 */
712static void qp_release_queue_mutex(struct vmci_queue *queue)
713{
714 if (queue->kernel_if->host)
715 mutex_unlock(queue->kernel_if->mutex);
716}

/*
 * Helper function to release pinned user pages previously obtained
 * with get_user_pages_fast(), optionally marking them dirty first.
 */
722static void qp_release_pages(struct page **pages,
723 u64 num_pages, bool dirty)
724{
725 int i;
726
727 for (i = 0; i < num_pages; i++) {
728 if (dirty)
729 set_page_dirty(pages[i]);
730
731 put_page(pages[i]);
732 pages[i] = NULL;
733 }
734}

/*
 * Lock the user pages referenced by the produce and consume queue
 * user VAs into memory and populate the page arrays in the queues'
 * kernel interface structures with them.
 */
741static int qp_host_get_user_memory(u64 produce_uva,
742 u64 consume_uva,
743 struct vmci_queue *produce_q,
744 struct vmci_queue *consume_q)
745{
746 int retval;
747 int err = VMCI_SUCCESS;
748
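	/*
	 * Pin the produce queue's header and data pages in user memory;
	 * the third argument (1) asks for writable pages.
	 */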
749 retval = get_user_pages_fast((uintptr_t) produce_uva,
750 produce_q->kernel_if->num_pages, 1,
751 produce_q->kernel_if->u.h.header_page);
752 if (retval < produce_q->kernel_if->num_pages) {
753 pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
754 retval);
755 qp_release_pages(produce_q->kernel_if->u.h.header_page,
756 retval, false);
757 err = VMCI_ERROR_NO_MEM;
758 goto out;
759 }
760
761 retval = get_user_pages_fast((uintptr_t) consume_uva,
762 consume_q->kernel_if->num_pages, 1,
763 consume_q->kernel_if->u.h.header_page);
764 if (retval < consume_q->kernel_if->num_pages) {
765 pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
766 retval);
767 qp_release_pages(consume_q->kernel_if->u.h.header_page,
768 retval, false);
769 qp_release_pages(produce_q->kernel_if->u.h.header_page,
770 produce_q->kernel_if->num_pages, false);
771 err = VMCI_ERROR_NO_MEM;
772 }
773
774 out:
775 return err;
776}

/*
 * Registers the specification of the user pages used for backing a queue
 * pair.  Enough information to map in pages is stored in the OS specific
 * part of the struct vmci_queue structure.
 */
783static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
784 struct vmci_queue *produce_q,
785 struct vmci_queue *consume_q)
786{
787 u64 produce_uva;
788 u64 consume_uva;

	/*
	 * The new style and the old style mapping only differ in that we
	 * either get a single or two UVAs, so we split the single UVA range
	 * at the appropriate spot.
	 */
795 produce_uva = page_store->pages;
796 consume_uva = page_store->pages +
797 produce_q->kernel_if->num_pages * PAGE_SIZE;
798 return qp_host_get_user_memory(produce_uva, consume_uva, produce_q,
799 consume_q);
800}

/*
 * Releases the references to the user pages backing the two queues.
 * The pages are released back to the page cache and may become
 * swappable again.
 */
807static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
808 struct vmci_queue *consume_q)
809{
810 qp_release_pages(produce_q->kernel_if->u.h.header_page,
811 produce_q->kernel_if->num_pages, true);
812 memset(produce_q->kernel_if->u.h.header_page, 0,
813 sizeof(*produce_q->kernel_if->u.h.header_page) *
814 produce_q->kernel_if->num_pages);
815 qp_release_pages(consume_q->kernel_if->u.h.header_page,
816 consume_q->kernel_if->num_pages, true);
817 memset(consume_q->kernel_if->u.h.header_page, 0,
818 sizeof(*consume_q->kernel_if->u.h.header_page) *
819 consume_q->kernel_if->num_pages);
820}

/*
 * Once qp_host_register_user_memory has been performed on a queue, the
 * queue pair headers can be mapped into the kernel address space with
 * this function.  Once mapped, they must be unmapped with
 * qp_host_unmap_queues prior to calling qp_host_unregister_user_memory.
 */
830static int qp_host_map_queues(struct vmci_queue *produce_q,
831 struct vmci_queue *consume_q)
832{
833 int result;
834
835 if (!produce_q->q_header || !consume_q->q_header) {
836 struct page *headers[2];
837
838 if (produce_q->q_header != consume_q->q_header)
839 return VMCI_ERROR_QUEUEPAIR_MISMATCH;
840
841 if (produce_q->kernel_if->u.h.header_page == NULL ||
842 *produce_q->kernel_if->u.h.header_page == NULL)
843 return VMCI_ERROR_UNAVAILABLE;
844
845 headers[0] = *produce_q->kernel_if->u.h.header_page;
846 headers[1] = *consume_q->kernel_if->u.h.header_page;
847
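		/*
		 * Map both header pages with a single vmap() so that the
		 * produce and consume headers end up in two consecutive
		 * pages of kernel virtual address space.
		 */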
848 produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
849 if (produce_q->q_header != NULL) {
850 consume_q->q_header =
851 (struct vmci_queue_header *)((u8 *)
852 produce_q->q_header +
853 PAGE_SIZE);
854 result = VMCI_SUCCESS;
855 } else {
856 pr_warn("vmap failed\n");
857 result = VMCI_ERROR_NO_MEM;
858 }
859 } else {
860 result = VMCI_SUCCESS;
861 }
862
863 return result;
864}

/*
 * Unmaps previously mapped queue pair headers from the kernel address
 * space.
 */
870static int qp_host_unmap_queues(u32 gid,
871 struct vmci_queue *produce_q,
872 struct vmci_queue *consume_q)
873{
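	/*
	 * The two headers were mapped as one two-page vmap() area, so a
	 * single vunmap() of whichever header sits at the lower address
	 * releases both.
	 */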
874 if (produce_q->q_header) {
875 if (produce_q->q_header < consume_q->q_header)
876 vunmap(produce_q->q_header);
877 else
878 vunmap(consume_q->q_header);
879
880 produce_q->q_header = NULL;
881 consume_q->q_header = NULL;
882 }
883
884 return VMCI_SUCCESS;
885}

/*
 * Finds the entry in the list corresponding to a given handle. Assumes
 * that the list is locked.
 */
891static struct qp_entry *qp_list_find(struct qp_list *qp_list,
892 struct vmci_handle handle)
893{
894 struct qp_entry *entry;
895
896 if (vmci_handle_is_invalid(handle))
897 return NULL;
898
899 list_for_each_entry(entry, &qp_list->head, list_item) {
900 if (vmci_handle_is_equal(entry->handle, handle))
901 return entry;
902 }
903
904 return NULL;
905}

/*
 * Finds the entry in the guest endpoint list corresponding to a given
 * handle.
 */
910static struct qp_guest_endpoint *
911qp_guest_handle_to_entry(struct vmci_handle handle)
912{
913 struct qp_guest_endpoint *entry;
914 struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle);
915
916 entry = qp ? container_of(
917 qp, struct qp_guest_endpoint, qp) : NULL;
918 return entry;
919}

/*
 * Finds the entry in the broker list corresponding to a given handle.
 */
924static struct qp_broker_entry *
925qp_broker_handle_to_entry(struct vmci_handle handle)
926{
927 struct qp_broker_entry *entry;
928 struct qp_entry *qp = qp_list_find(&qp_broker_list, handle);
929
930 entry = qp ? container_of(
931 qp, struct qp_broker_entry, qp) : NULL;
932 return entry;
933}

/*
 * Dispatches a queue pair event message directly into the local event
 * queue.
 */
939static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
940{
941 u32 context_id = vmci_get_context_id();
942 struct vmci_event_qp ev;
943
944 ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
945 ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
946 VMCI_CONTEXT_RESOURCE_ID);
947 ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
948 ev.msg.event_data.event =
949 attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
950 ev.payload.peer_id = context_id;
951 ev.payload.handle = handle;
952
953 return vmci_event_dispatch(&ev.msg.hdr);
954}

/*
 * Allocates and initializes a qp_guest_endpoint structure.
 * Allocates a queue pair rid (and handle) if the given handle is
 * invalid.  Assumes that the QP list mutex is held by the caller.
 */
963static struct qp_guest_endpoint *
964qp_guest_endpoint_create(struct vmci_handle handle,
965 u32 peer,
966 u32 flags,
967 u64 produce_size,
968 u64 consume_size,
969 void *produce_q,
970 void *consume_q)
971{
972 int result;
973 struct qp_guest_endpoint *entry;
974
975 const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) +
976 DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;
977
978 if (vmci_handle_is_invalid(handle)) {
979 u32 context_id = vmci_get_context_id();
980
981 handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
982 }
983
984 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
985 if (entry) {
986 entry->qp.peer = peer;
987 entry->qp.flags = flags;
988 entry->qp.produce_size = produce_size;
989 entry->qp.consume_size = consume_size;
990 entry->qp.ref_count = 0;
991 entry->num_ppns = num_ppns;
992 entry->produce_q = produce_q;
993 entry->consume_q = consume_q;
994 INIT_LIST_HEAD(&entry->qp.list_item);
995
996
997 result = vmci_resource_add(&entry->resource,
998 VMCI_RESOURCE_TYPE_QPAIR_GUEST,
999 handle);
1000 entry->qp.handle = vmci_resource_handle(&entry->resource);
1001 if ((result != VMCI_SUCCESS) ||
1002 qp_list_find(&qp_guest_endpoints, entry->qp.handle)) {
1003 pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
1004 handle.context, handle.resource, result);
1005 kfree(entry);
1006 entry = NULL;
1007 }
1008 }
1009 return entry;
1010}

/*
 * Frees a qp_guest_endpoint structure.
 */
1015static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
1016{
1017 qp_free_ppn_set(&entry->ppn_set);
1018 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
1019 qp_free_queue(entry->produce_q, entry->qp.produce_size);
1020 qp_free_queue(entry->consume_q, entry->qp.consume_size);
1021
1022 vmci_resource_remove(&entry->resource);
1023
1024 kfree(entry);
1025}

/*
 * Helper to make a queue pair alloc hypercall when the driver is
 * supporting a guest device.
 */
1031static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
1032{
1033 struct vmci_qp_alloc_msg *alloc_msg;
1034 size_t msg_size;
1035 int result;
1036
1037 if (!entry || entry->num_ppns <= 2)
1038 return VMCI_ERROR_INVALID_ARGS;
1039
1040 msg_size = sizeof(*alloc_msg) +
1041 (size_t) entry->num_ppns * sizeof(u32);
1042 alloc_msg = kmalloc(msg_size, GFP_KERNEL);
1043 if (!alloc_msg)
1044 return VMCI_ERROR_NO_MEM;
1045
1046 alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
1047 VMCI_QUEUEPAIR_ALLOC);
1048 alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
1049 alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
1050 alloc_msg->handle = entry->qp.handle;
1051 alloc_msg->peer = entry->qp.peer;
1052 alloc_msg->flags = entry->qp.flags;
1053 alloc_msg->produce_size = entry->qp.produce_size;
1054 alloc_msg->consume_size = entry->qp.consume_size;
1055 alloc_msg->num_ppns = entry->num_ppns;
1056
1057 result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg),
1058 &entry->ppn_set);
1059 if (result == VMCI_SUCCESS)
1060 result = vmci_send_datagram(&alloc_msg->hdr);
1061
1062 kfree(alloc_msg);
1063
1064 return result;
1065}

/*
 * Helper to make a queue pair detach hypercall when the driver is
 * supporting a guest device.
 */
1071static int qp_detatch_hypercall(struct vmci_handle handle)
1072{
1073 struct vmci_qp_detach_msg detach_msg;
1074
1075 detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
1076 VMCI_QUEUEPAIR_DETACH);
1077 detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
1078 detach_msg.hdr.payload_size = sizeof(handle);
1079 detach_msg.handle = handle;
1080
1081 return vmci_send_datagram(&detach_msg.hdr);
1082}

/*
 * Adds the given entry to the list. Assumes that the list is locked.
 */
1087static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry)
1088{
1089 if (entry)
1090 list_add(&entry->list_item, &qp_list->head);
1091}

/*
 * Removes the given entry from the list. Assumes that the list is
 * locked.
 */
1096static void qp_list_remove_entry(struct qp_list *qp_list,
1097 struct qp_entry *entry)
1098{
1099 if (entry)
1100 list_del(&entry->list_item);
1101}

/*
 * Helper for the VMCI queue pair detach interface. Frees the physical
 * pages for the queue pair.
 */
1107static int qp_detatch_guest_work(struct vmci_handle handle)
1108{
1109 int result;
1110 struct qp_guest_endpoint *entry;
1111 u32 ref_count = ~0;
1112
1113 mutex_lock(&qp_guest_endpoints.mutex);
1114
1115 entry = qp_guest_handle_to_entry(handle);
1116 if (!entry) {
1117 mutex_unlock(&qp_guest_endpoints.mutex);
1118 return VMCI_ERROR_NOT_FOUND;
1119 }
1120
1121 if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
1122 result = VMCI_SUCCESS;
1123
1124 if (entry->qp.ref_count > 1) {
1125 result = qp_notify_peer_local(false, handle);

			/*
			 * We can fail to notify a local queue pair
			 * because we can't allocate.  We still want
			 * to release the entry if that happens, so
			 * don't bail out yet.
			 */
1132 }
1133 } else {
1134 result = qp_detatch_hypercall(handle);
1135 if (result < VMCI_SUCCESS) {
			/*
			 * We failed to notify a non-local queue pair.
			 * That other queue pair might still be
			 * accessing the shared memory, so don't
			 * release the entry yet.  It will get cleaned
			 * up by vmci_qp_guest_endpoints_exit() if
			 * necessary (assuming a hypervisor crash or
			 * something similar).
			 */
1146 mutex_unlock(&qp_guest_endpoints.mutex);
1147 return result;
1148 }
1149 }

	/*
	 * If we get here then we either failed to notify a local queue pair,
	 * or we succeeded in all cases.  Release the entry if required.
	 */

1156 entry->qp.ref_count--;
1157 if (entry->qp.ref_count == 0)
1158 qp_list_remove_entry(&qp_guest_endpoints, &entry->qp);
1159
1160
1161 if (entry)
1162 ref_count = entry->qp.ref_count;
1163
1164 mutex_unlock(&qp_guest_endpoints.mutex);
1165
1166 if (ref_count == 0)
1167 qp_guest_endpoint_destroy(entry);
1168
1169 return result;
1170}

/*
 * Handles a queue pair allocation request from a guest endpoint.
 * Allocates (or attaches to) the queues and issues the alloc hypercall
 * for non-local queue pairs.
 */
1177static int qp_alloc_guest_work(struct vmci_handle *handle,
1178 struct vmci_queue **produce_q,
1179 u64 produce_size,
1180 struct vmci_queue **consume_q,
1181 u64 consume_size,
1182 u32 peer,
1183 u32 flags,
1184 u32 priv_flags)
1185{
1186 const u64 num_produce_pages =
1187 DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
1188 const u64 num_consume_pages =
1189 DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
1190 void *my_produce_q = NULL;
1191 void *my_consume_q = NULL;
1192 int result;
1193 struct qp_guest_endpoint *queue_pair_entry = NULL;
1194
1195 if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
1196 return VMCI_ERROR_NO_ACCESS;
1197
1198 mutex_lock(&qp_guest_endpoints.mutex);
1199
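	/*
	 * If a valid handle was passed in, this may be an attach to an
	 * existing local queue pair created earlier in this context.
	 */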
1200 queue_pair_entry = qp_guest_handle_to_entry(*handle);
1201 if (queue_pair_entry) {
1202 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
1203
1204 if (queue_pair_entry->qp.ref_count > 1) {
1205 pr_devel("Error attempting to attach more than once\n");
1206 result = VMCI_ERROR_UNAVAILABLE;
1207 goto error_keep_entry;
1208 }
1209
1210 if (queue_pair_entry->qp.produce_size != consume_size ||
1211 queue_pair_entry->qp.consume_size !=
1212 produce_size ||
1213 queue_pair_entry->qp.flags !=
1214 (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
1215 pr_devel("Error mismatched queue pair in local attach\n");
1216 result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
1217 goto error_keep_entry;
1218 }
1219
1220
1221
1222
1223
1224
1225 result = qp_notify_peer_local(true, *handle);
1226 if (result < VMCI_SUCCESS)
1227 goto error_keep_entry;
1228
1229 my_produce_q = queue_pair_entry->consume_q;
1230 my_consume_q = queue_pair_entry->produce_q;
1231 goto out;
1232 }
1233
1234 result = VMCI_ERROR_ALREADY_EXISTS;
1235 goto error_keep_entry;
1236 }
1237
1238 my_produce_q = qp_alloc_queue(produce_size, flags);
1239 if (!my_produce_q) {
1240 pr_warn("Error allocating pages for produce queue\n");
1241 result = VMCI_ERROR_NO_MEM;
1242 goto error;
1243 }
1244
1245 my_consume_q = qp_alloc_queue(consume_size, flags);
1246 if (!my_consume_q) {
1247 pr_warn("Error allocating pages for consume queue\n");
1248 result = VMCI_ERROR_NO_MEM;
1249 goto error;
1250 }
1251
1252 queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
1253 produce_size, consume_size,
1254 my_produce_q, my_consume_q);
1255 if (!queue_pair_entry) {
1256 pr_warn("Error allocating memory in %s\n", __func__);
1257 result = VMCI_ERROR_NO_MEM;
1258 goto error;
1259 }
1260
1261 result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q,
1262 num_consume_pages,
1263 &queue_pair_entry->ppn_set);
1264 if (result < VMCI_SUCCESS) {
1265 pr_warn("qp_alloc_ppn_set failed\n");
1266 goto error;
1267 }

	/*
	 * It's only necessary to notify the host if this queue pair will be
	 * attached to from another context.
	 */
1273 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
1274
1275 u32 context_id = vmci_get_context_id();
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286 if (queue_pair_entry->qp.handle.context != context_id ||
1287 (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
1288 queue_pair_entry->qp.peer != context_id)) {
1289 result = VMCI_ERROR_NO_ACCESS;
1290 goto error;
1291 }
1292
1293 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
1294 result = VMCI_ERROR_NOT_FOUND;
1295 goto error;
1296 }
1297 } else {
1298 result = qp_alloc_hypercall(queue_pair_entry);
1299 if (result < VMCI_SUCCESS) {
1300 pr_warn("qp_alloc_hypercall result = %d\n", result);
1301 goto error;
1302 }
1303 }
1304
1305 qp_init_queue_mutex((struct vmci_queue *)my_produce_q,
1306 (struct vmci_queue *)my_consume_q);
1307
1308 qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);
1309
1310 out:
1311 queue_pair_entry->qp.ref_count++;
1312 *handle = queue_pair_entry->qp.handle;
1313 *produce_q = (struct vmci_queue *)my_produce_q;
1314 *consume_q = (struct vmci_queue *)my_consume_q;

	/*
	 * We should initialize the queue pair header pages on a local
	 * queue pair create.  For non-local queue pairs, the hypervisor
	 * initializes the header pages in the create step.
	 */
1321 if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
1322 queue_pair_entry->qp.ref_count == 1) {
1323 vmci_q_header_init((*produce_q)->q_header, *handle);
1324 vmci_q_header_init((*consume_q)->q_header, *handle);
1325 }
1326
1327 mutex_unlock(&qp_guest_endpoints.mutex);
1328
1329 return VMCI_SUCCESS;
1330
1331 error:
1332 mutex_unlock(&qp_guest_endpoints.mutex);
1333 if (queue_pair_entry) {
1334
1335 qp_guest_endpoint_destroy(queue_pair_entry);
1336 } else {
1337 qp_free_queue(my_produce_q, produce_size);
1338 qp_free_queue(my_consume_q, consume_size);
1339 }
1340 return result;
1341
1342 error_keep_entry:
1343
1344 mutex_unlock(&qp_guest_endpoints.mutex);
1345 return result;
1346}

/*
 * The first endpoint issuing a queue pair allocation will create the state
 * of the queue pair in the queue pair broker.
 *
 * If the creator is a guest, it associates a region of VMX memory with the
 * queue pair through the supplied page_store.  Older VMX'en supply the
 * memory in a separate vmci_qp_broker_set_page_store() step and pass a NULL
 * page_store here.  If the creator is the host, a NULL page_store is used
 * as well, since the host cannot supply a page store for the queue pair.
 *
 * Depending on the above, the new entry ends up in either the
 * VMCIQPB_CREATED_MEM or the VMCIQPB_CREATED_NO_MEM state.
 */
1366static int qp_broker_create(struct vmci_handle handle,
1367 u32 peer,
1368 u32 flags,
1369 u32 priv_flags,
1370 u64 produce_size,
1371 u64 consume_size,
1372 struct vmci_qp_page_store *page_store,
1373 struct vmci_ctx *context,
1374 vmci_event_release_cb wakeup_cb,
1375 void *client_data, struct qp_broker_entry **ent)
1376{
1377 struct qp_broker_entry *entry = NULL;
1378 const u32 context_id = vmci_ctx_get_id(context);
1379 bool is_local = flags & VMCI_QPFLAG_LOCAL;
1380 int result;
1381 u64 guest_produce_size;
1382 u64 guest_consume_size;
1383
1384
1385 if (flags & VMCI_QPFLAG_ATTACH_ONLY)
1386 return VMCI_ERROR_NOT_FOUND;
1387
1388
1389
1390
1391
1392 if (handle.context != context_id && handle.context != peer)
1393 return VMCI_ERROR_NO_ACCESS;
1394
1395 if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer))
1396 return VMCI_ERROR_DST_UNREACHABLE;
1397
1398
1399
1400
1401
1402 if (is_local && peer != VMCI_INVALID_ID && context_id != peer)
1403 return VMCI_ERROR_NO_ACCESS;
1404
1405 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1406 if (!entry)
1407 return VMCI_ERROR_NO_MEM;
1408
1409 if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) {
		/*
		 * The queue pair broker entry stores values from the guest
		 * point of view, so a creating host side endpoint should swap
		 * produce and consume values -- unless it is a local queue
		 * pair, in which case no swapping is necessary, since it is
		 * already the guest point of view.
		 */
1418 guest_produce_size = consume_size;
1419 guest_consume_size = produce_size;
1420 } else {
1421 guest_produce_size = produce_size;
1422 guest_consume_size = consume_size;
1423 }
1424
1425 entry->qp.handle = handle;
1426 entry->qp.peer = peer;
1427 entry->qp.flags = flags;
1428 entry->qp.produce_size = guest_produce_size;
1429 entry->qp.consume_size = guest_consume_size;
1430 entry->qp.ref_count = 1;
1431 entry->create_id = context_id;
1432 entry->attach_id = VMCI_INVALID_ID;
1433 entry->state = VMCIQPB_NEW;
1434 entry->require_trusted_attach =
1435 !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED);
1436 entry->created_by_trusted =
1437 !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED);
1438 entry->vmci_page_files = false;
1439 entry->wakeup_cb = wakeup_cb;
1440 entry->client_data = client_data;
1441 entry->produce_q = qp_host_alloc_queue(guest_produce_size);
1442 if (entry->produce_q == NULL) {
1443 result = VMCI_ERROR_NO_MEM;
1444 goto error;
1445 }
1446 entry->consume_q = qp_host_alloc_queue(guest_consume_size);
1447 if (entry->consume_q == NULL) {
1448 result = VMCI_ERROR_NO_MEM;
1449 goto error;
1450 }
1451
1452 qp_init_queue_mutex(entry->produce_q, entry->consume_q);
1453
1454 INIT_LIST_HEAD(&entry->qp.list_item);
1455
1456 if (is_local) {
1457 u8 *tmp;
1458
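		/*
		 * Local (single context) queue pairs are backed by kernel
		 * memory: one contiguous allocation covers both queue
		 * headers and all data pages.
		 */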
1459 entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp),
1460 PAGE_SIZE, GFP_KERNEL);
1461 if (entry->local_mem == NULL) {
1462 result = VMCI_ERROR_NO_MEM;
1463 goto error;
1464 }
1465 entry->state = VMCIQPB_CREATED_MEM;
1466 entry->produce_q->q_header = entry->local_mem;
1467 tmp = (u8 *)entry->local_mem + PAGE_SIZE *
1468 (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1);
1469 entry->consume_q->q_header = (struct vmci_queue_header *)tmp;
1470 } else if (page_store) {
		/*
		 * The VMX already initialized the queue pair headers, so no
		 * need for the kernel side to do that.
		 */
1475 result = qp_host_register_user_memory(page_store,
1476 entry->produce_q,
1477 entry->consume_q);
1478 if (result < VMCI_SUCCESS)
1479 goto error;
1480
1481 entry->state = VMCIQPB_CREATED_MEM;
1482 } else {
		/*
		 * A create without a page_store may be either a host
		 * side create (in which case we are waiting for the
		 * guest side to supply the memory) or an old style
		 * queue pair create (in which case we expect a
		 * set page store call as the next step).
		 */
1490 entry->state = VMCIQPB_CREATED_NO_MEM;
1491 }
1492
1493 qp_list_add_entry(&qp_broker_list, &entry->qp);
1494 if (ent != NULL)
1495 *ent = entry;
1496
1497
1498 result = vmci_resource_add(&entry->resource,
1499 VMCI_RESOURCE_TYPE_QPAIR_HOST,
1500 handle);
1501 if (result != VMCI_SUCCESS) {
1502 pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
1503 handle.context, handle.resource, result);
1504 goto error;
1505 }
1506
1507 entry->qp.handle = vmci_resource_handle(&entry->resource);
1508 if (is_local) {
1509 vmci_q_header_init(entry->produce_q->q_header,
1510 entry->qp.handle);
1511 vmci_q_header_init(entry->consume_q->q_header,
1512 entry->qp.handle);
1513 }
1514
1515 vmci_ctx_qp_create(context, entry->qp.handle);
1516
1517 return VMCI_SUCCESS;
1518
1519 error:
1520 if (entry != NULL) {
1521 qp_host_free_queue(entry->produce_q, guest_produce_size);
1522 qp_host_free_queue(entry->consume_q, guest_consume_size);
1523 kfree(entry);
1524 }
1525
1526 return result;
1527}

/*
 * Enqueues an event datagram to notify the peer VM attached to
 * the given queue pair handle about an attach/detach event by the
 * given VM.  Returns the payload size of the datagram enqueued on
 * success, error code otherwise.
 */
1535static int qp_notify_peer(bool attach,
1536 struct vmci_handle handle,
1537 u32 my_id,
1538 u32 peer_id)
1539{
1540 int rv;
1541 struct vmci_event_qp ev;
1542
1543 if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID ||
1544 peer_id == VMCI_INVALID_ID)
1545 return VMCI_ERROR_INVALID_ARGS;

	/*
	 * The context code enforces an upper limit on the number of pending
	 * events from the hypervisor to a given VM; otherwise a rogue VM
	 * could trigger an arbitrary number of attach and detach
	 * notifications and cause memory pressure in the host kernel.
	 */

1555 ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
1556 ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
1557 VMCI_CONTEXT_RESOURCE_ID);
1558 ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
1559 ev.msg.event_data.event = attach ?
1560 VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
1561 ev.payload.handle = handle;
1562 ev.payload.peer_id = my_id;
1563
1564 rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
1565 &ev.msg.hdr, false);
1566 if (rv < VMCI_SUCCESS)
1567 pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n",
1568 attach ? "ATTACH" : "DETACH", peer_id);
1569
1570 return rv;
1571}

/*
 * The second endpoint issuing a queue pair allocation attaches to the
 * queue pair already registered with the queue pair broker.
 *
 * If the attacher is a guest, it associates its memory with the queue pair
 * through the supplied page_store, and the already attached host endpoint
 * may then start using the queue pair; an attach event is sent to it.
 * Older VMX'en register the memory in a later
 * vmci_qp_broker_set_page_store() call and pass a NULL page_store here, in
 * which case the attach event is generated once the page store has been
 * set.
 *
 * If the attacher is the host, a NULL page_store is used as well, since
 * the page store information was already supplied by the guest creator.
 *
 * Depending on the above, the entry moves to either the
 * VMCIQPB_ATTACHED_MEM or the VMCIQPB_ATTACHED_NO_MEM state.
 */
1594static int qp_broker_attach(struct qp_broker_entry *entry,
1595 u32 peer,
1596 u32 flags,
1597 u32 priv_flags,
1598 u64 produce_size,
1599 u64 consume_size,
1600 struct vmci_qp_page_store *page_store,
1601 struct vmci_ctx *context,
1602 vmci_event_release_cb wakeup_cb,
1603 void *client_data,
1604 struct qp_broker_entry **ent)
1605{
1606 const u32 context_id = vmci_ctx_get_id(context);
1607 bool is_local = flags & VMCI_QPFLAG_LOCAL;
1608 int result;
1609
1610 if (entry->state != VMCIQPB_CREATED_NO_MEM &&
1611 entry->state != VMCIQPB_CREATED_MEM)
1612 return VMCI_ERROR_UNAVAILABLE;
1613
1614 if (is_local) {
1615 if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) ||
1616 context_id != entry->create_id) {
1617 return VMCI_ERROR_INVALID_ARGS;
1618 }
1619 } else if (context_id == entry->create_id ||
1620 context_id == entry->attach_id) {
1621 return VMCI_ERROR_ALREADY_EXISTS;
1622 }
1623
1624 if (VMCI_CONTEXT_IS_VM(context_id) &&
1625 VMCI_CONTEXT_IS_VM(entry->create_id))
1626 return VMCI_ERROR_DST_UNREACHABLE;

	/*
	 * If we are attaching from a restricted context then the queue pair
	 * must have been created by a trusted endpoint.
	 */
1632 if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
1633 !entry->created_by_trusted)
1634 return VMCI_ERROR_NO_ACCESS;

	/*
	 * If we are attaching to a queue pair that was created by a
	 * restricted context then we must be trusted.
	 */
1640 if (entry->require_trusted_attach &&
1641 (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED)))
1642 return VMCI_ERROR_NO_ACCESS;

	/*
	 * If the creator specified VMCI_INVALID_ID in the "peer" field, the
	 * access control check is not performed.
	 */
1648 if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
1649 return VMCI_ERROR_NO_ACCESS;
1650
1651 if (entry->create_id == VMCI_HOST_CONTEXT_ID) {
		/*
		 * Do not attach if the caller doesn't support host queue
		 * pairs and a host created this queue pair.
		 */

1657 if (!vmci_ctx_supports_host_qp(context))
1658 return VMCI_ERROR_INVALID_RESOURCE;
1659
1660 } else if (context_id == VMCI_HOST_CONTEXT_ID) {
1661 struct vmci_ctx *create_context;
1662 bool supports_host_qp;
1663

		/*
		 * Do not attach a host to a user created queue pair if that
		 * user doesn't support host queue pairs.
		 */
1669 create_context = vmci_ctx_get(entry->create_id);
1670 supports_host_qp = vmci_ctx_supports_host_qp(create_context);
1671 vmci_ctx_put(create_context);
1672
1673 if (!supports_host_qp)
1674 return VMCI_ERROR_INVALID_RESOURCE;
1675 }
1676
1677 if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER))
1678 return VMCI_ERROR_QUEUEPAIR_MISMATCH;
1679
1680 if (context_id != VMCI_HOST_CONTEXT_ID) {
		/*
		 * The queue pair broker entry stores values from the guest
		 * point of view, so an attaching guest should match the
		 * values stored in the entry.
		 */
1687 if (entry->qp.produce_size != produce_size ||
1688 entry->qp.consume_size != consume_size) {
1689 return VMCI_ERROR_QUEUEPAIR_MISMATCH;
1690 }
1691 } else if (entry->qp.produce_size != consume_size ||
1692 entry->qp.consume_size != produce_size) {
1693 return VMCI_ERROR_QUEUEPAIR_MISMATCH;
1694 }
1695
1696 if (context_id != VMCI_HOST_CONTEXT_ID) {
		/*
		 * An attaching guest supplies the backing memory for the
		 * queue pair, either through the page_store passed in here
		 * or, for older VMX'en, through a later
		 * vmci_qp_broker_set_page_store() call.  In either case the
		 * created entry must not already have memory associated
		 * with it.
		 */
1710 if (entry->state != VMCIQPB_CREATED_NO_MEM)
1711 return VMCI_ERROR_INVALID_ARGS;
1712
1713 if (page_store != NULL) {
			/*
			 * Patch up the host state to point to the guest
			 * supplied memory.  The VMX already initialized
			 * the queue pair headers, so no need for the
			 * kernel side to do that.
			 */
1721 result = qp_host_register_user_memory(page_store,
1722 entry->produce_q,
1723 entry->consume_q);
1724 if (result < VMCI_SUCCESS)
1725 return result;
1726
1727 entry->state = VMCIQPB_ATTACHED_MEM;
1728 } else {
1729 entry->state = VMCIQPB_ATTACHED_NO_MEM;
1730 }
1731 } else if (entry->state == VMCIQPB_CREATED_NO_MEM) {
		/*
		 * The host side is attempting to attach to a queue pair
		 * that doesn't have any memory associated with it.  This
		 * must be an older VMX that hasn't set the page store
		 * information yet, or a quiesced VM.
		 */
1739 return VMCI_ERROR_UNAVAILABLE;
1740 } else {
1741
1742 entry->state = VMCIQPB_ATTACHED_MEM;
1743 }
1744
1745 if (entry->state == VMCIQPB_ATTACHED_MEM) {
1746 result =
1747 qp_notify_peer(true, entry->qp.handle, context_id,
1748 entry->create_id);
1749 if (result < VMCI_SUCCESS)
1750 pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
1751 entry->create_id, entry->qp.handle.context,
1752 entry->qp.handle.resource);
1753 }
1754
1755 entry->attach_id = context_id;
1756 entry->qp.ref_count++;
1757 if (wakeup_cb) {
1758 entry->wakeup_cb = wakeup_cb;
1759 entry->client_data = client_data;
1760 }
1761

	/*
	 * When attaching to local queue pairs, the context already has
	 * an entry tracking the queue pair, so don't add another one.
	 */
1766 if (!is_local)
1767 vmci_ctx_qp_create(context, entry->qp.handle);
1768
1769 if (ent != NULL)
1770 *ent = entry;
1771
1772 return VMCI_SUCCESS;
1773}

/*
 * Queue pair alloc for use when setting up queue pair endpoints
 * on the host.
 */
1779static int qp_broker_alloc(struct vmci_handle handle,
1780 u32 peer,
1781 u32 flags,
1782 u32 priv_flags,
1783 u64 produce_size,
1784 u64 consume_size,
1785 struct vmci_qp_page_store *page_store,
1786 struct vmci_ctx *context,
1787 vmci_event_release_cb wakeup_cb,
1788 void *client_data,
1789 struct qp_broker_entry **ent,
1790 bool *swap)
1791{
1792 const u32 context_id = vmci_ctx_get_id(context);
1793 bool create;
1794 struct qp_broker_entry *entry = NULL;
1795 bool is_local = flags & VMCI_QPFLAG_LOCAL;
1796 int result;
1797
1798 if (vmci_handle_is_invalid(handle) ||
1799 (flags & ~VMCI_QP_ALL_FLAGS) || is_local ||
1800 !(produce_size || consume_size) ||
1801 !context || context_id == VMCI_INVALID_ID ||
1802 handle.context == VMCI_INVALID_ID) {
1803 return VMCI_ERROR_INVALID_ARGS;
1804 }
1805
1806 if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store))
1807 return VMCI_ERROR_INVALID_ARGS;

	/*
	 * Note that the argument check above rejects VMCI_QPFLAG_LOCAL:
	 * local queue pairs are not supported by this host personality.
	 */

1814 mutex_lock(&qp_broker_list.mutex);
1815
1816 if (!is_local && vmci_ctx_qp_exists(context, handle)) {
1817 pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n",
1818 context_id, handle.context, handle.resource);
1819 mutex_unlock(&qp_broker_list.mutex);
1820 return VMCI_ERROR_ALREADY_EXISTS;
1821 }
1822
1823 if (handle.resource != VMCI_INVALID_ID)
1824 entry = qp_broker_handle_to_entry(handle);
1825
1826 if (!entry) {
1827 create = true;
1828 result =
1829 qp_broker_create(handle, peer, flags, priv_flags,
1830 produce_size, consume_size, page_store,
1831 context, wakeup_cb, client_data, ent);
1832 } else {
1833 create = false;
1834 result =
1835 qp_broker_attach(entry, peer, flags, priv_flags,
1836 produce_size, consume_size, page_store,
1837 context, wakeup_cb, client_data, ent);
1838 }
1839
1840 mutex_unlock(&qp_broker_list.mutex);
1841
1842 if (swap)
1843 *swap = (context_id == VMCI_HOST_CONTEXT_ID) &&
1844 !(create && is_local);
1845
1846 return result;
1847}

/*
 * This function implements the kernel API for allocating a queue
 * pair on the host.
 */
1853static int qp_alloc_host_work(struct vmci_handle *handle,
1854 struct vmci_queue **produce_q,
1855 u64 produce_size,
1856 struct vmci_queue **consume_q,
1857 u64 consume_size,
1858 u32 peer,
1859 u32 flags,
1860 u32 priv_flags,
1861 vmci_event_release_cb wakeup_cb,
1862 void *client_data)
1863{
1864 struct vmci_handle new_handle;
1865 struct vmci_ctx *context;
1866 struct qp_broker_entry *entry;
1867 int result;
1868 bool swap;
1869
1870 if (vmci_handle_is_invalid(*handle)) {
1871 new_handle = vmci_make_handle(
1872 VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID);
1873 } else
1874 new_handle = *handle;
1875
1876 context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
1877 entry = NULL;
1878 result =
1879 qp_broker_alloc(new_handle, peer, flags, priv_flags,
1880 produce_size, consume_size, NULL, context,
1881 wakeup_cb, client_data, &entry, &swap);
1882 if (result == VMCI_SUCCESS) {
1883 if (swap) {
			/*
			 * The broker entry stores the queues from the guest
			 * point of view, so a host endpoint uses the peer's
			 * produce queue as its consume queue and vice versa.
			 */

1890 *produce_q = entry->consume_q;
1891 *consume_q = entry->produce_q;
1892 } else {
1893 *produce_q = entry->produce_q;
1894 *consume_q = entry->consume_q;
1895 }
1896
1897 *handle = vmci_resource_handle(&entry->resource);
1898 } else {
1899 *handle = VMCI_INVALID_HANDLE;
1900 pr_devel("queue pair broker failed to alloc (result=%d)\n",
1901 result);
1902 }
1903 vmci_ctx_put(context);
1904 return result;
1905}

/*
 * Allocates a VMCI queue pair. Only checks validity of the input
 * arguments. The real work is done in the host or guest
 * specific function.
 */
1912int vmci_qp_alloc(struct vmci_handle *handle,
1913 struct vmci_queue **produce_q,
1914 u64 produce_size,
1915 struct vmci_queue **consume_q,
1916 u64 consume_size,
1917 u32 peer,
1918 u32 flags,
1919 u32 priv_flags,
1920 bool guest_endpoint,
1921 vmci_event_release_cb wakeup_cb,
1922 void *client_data)
1923{
1924 if (!handle || !produce_q || !consume_q ||
1925 (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS))
1926 return VMCI_ERROR_INVALID_ARGS;
1927
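	/*
	 * Guest endpoints allocate the queue memory themselves and register
	 * it with the hypervisor; host endpoints go through the queue pair
	 * broker instead.
	 */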
1928 if (guest_endpoint) {
1929 return qp_alloc_guest_work(handle, produce_q,
1930 produce_size, consume_q,
1931 consume_size, peer,
1932 flags, priv_flags);
1933 } else {
1934 return qp_alloc_host_work(handle, produce_q,
1935 produce_size, consume_q,
1936 consume_size, peer, flags,
1937 priv_flags, wakeup_cb, client_data);
1938 }
1939}

/*
 * This function implements the host kernel API for detaching from
 * a queue pair.
 */
1945static int qp_detatch_host_work(struct vmci_handle handle)
1946{
1947 int result;
1948 struct vmci_ctx *context;
1949
1950 context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
1951
1952 result = vmci_qp_broker_detach(handle, context);
1953
1954 vmci_ctx_put(context);
1955 return result;
1956}

/*
 * Detaches from a VMCI queue pair. Only checks validity of the input
 * argument. The real work is done in the host or guest specific
 * function.
 */
1962static int qp_detatch(struct vmci_handle handle, bool guest_endpoint)
1963{
1964 if (vmci_handle_is_invalid(handle))
1965 return VMCI_ERROR_INVALID_ARGS;
1966
1967 if (guest_endpoint)
1968 return qp_detatch_guest_work(handle);
1969 else
1970 return qp_detatch_host_work(handle);
1971}

/*
 * Returns the entry from the head of the list. Assumes that the list is
 * locked.
 */
1977static struct qp_entry *qp_list_get_head(struct qp_list *qp_list)
1978{
1979 if (!list_empty(&qp_list->head)) {
1980 struct qp_entry *entry =
1981 list_first_entry(&qp_list->head, struct qp_entry,
1982 list_item);
1983 return entry;
1984 }
1985
1986 return NULL;
1987}
1988
1989void vmci_qp_broker_exit(void)
1990{
1991 struct qp_entry *entry;
1992 struct qp_broker_entry *be;
1993
1994 mutex_lock(&qp_broker_list.mutex);
1995
1996 while ((entry = qp_list_get_head(&qp_broker_list))) {
1997 be = (struct qp_broker_entry *)entry;
1998
1999 qp_list_remove_entry(&qp_broker_list, entry);
2000 kfree(be);
2001 }
2002
2003 mutex_unlock(&qp_broker_list.mutex);
2004}

/*
 * Requests that a queue pair be allocated with the VMCI queue
 * pair broker. Allocates a queue pair entry if one does not
 * exist, or attaches to an existing one.  Assumes that the caller
 * does not hold the queue pair broker lock.
 */
2013int vmci_qp_broker_alloc(struct vmci_handle handle,
2014 u32 peer,
2015 u32 flags,
2016 u32 priv_flags,
2017 u64 produce_size,
2018 u64 consume_size,
2019 struct vmci_qp_page_store *page_store,
2020 struct vmci_ctx *context)
2021{
2022 return qp_broker_alloc(handle, peer, flags, priv_flags,
2023 produce_size, consume_size,
2024 page_store, context, NULL, NULL, NULL, NULL);
2025}

/*
 * Older VMX'en set up the UVAs of the VMX mapping of a queue pair in a
 * separate step after create or attach.  This function provides that
 * backwards compatibility path: it registers the page store for a queue
 * pair previously allocated by the VMX and moves the entry from
 * VMCIQPB_CREATED_NO_MEM to VMCIQPB_CREATED_MEM or from
 * VMCIQPB_ATTACHED_NO_MEM to VMCIQPB_ATTACHED_MEM.  When moving to the
 * attached state with memory, the queue pair is ready to be used by the
 * host peer, and an attach event is generated.
 */
2043int vmci_qp_broker_set_page_store(struct vmci_handle handle,
2044 u64 produce_uva,
2045 u64 consume_uva,
2046 struct vmci_ctx *context)
2047{
2048 struct qp_broker_entry *entry;
2049 int result;
2050 const u32 context_id = vmci_ctx_get_id(context);
2051
2052 if (vmci_handle_is_invalid(handle) || !context ||
2053 context_id == VMCI_INVALID_ID)
2054 return VMCI_ERROR_INVALID_ARGS;

	/*
	 * We only support guest to host queue pairs, so the VMX must
	 * supply UVAs for the mapped page files.
	 */

2061 if (produce_uva == 0 || consume_uva == 0)
2062 return VMCI_ERROR_INVALID_ARGS;
2063
2064 mutex_lock(&qp_broker_list.mutex);
2065
2066 if (!vmci_ctx_qp_exists(context, handle)) {
2067 pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2068 context_id, handle.context, handle.resource);
2069 result = VMCI_ERROR_NOT_FOUND;
2070 goto out;
2071 }
2072
2073 entry = qp_broker_handle_to_entry(handle);
2074 if (!entry) {
2075 result = VMCI_ERROR_NOT_FOUND;
2076 goto out;
2077 }

	/*
	 * If I'm the owner then I can set the page store.
	 *
	 * Or, if a host created the queue pair and I'm the attached peer
	 * then I can set the page store.
	 */
2085 if (entry->create_id != context_id &&
2086 (entry->create_id != VMCI_HOST_CONTEXT_ID ||
2087 entry->attach_id != context_id)) {
2088 result = VMCI_ERROR_QUEUEPAIR_NOTOWNER;
2089 goto out;
2090 }
2091
2092 if (entry->state != VMCIQPB_CREATED_NO_MEM &&
2093 entry->state != VMCIQPB_ATTACHED_NO_MEM) {
2094 result = VMCI_ERROR_UNAVAILABLE;
2095 goto out;
2096 }
2097
2098 result = qp_host_get_user_memory(produce_uva, consume_uva,
2099 entry->produce_q, entry->consume_q);
2100 if (result < VMCI_SUCCESS)
2101 goto out;
2102
2103 result = qp_host_map_queues(entry->produce_q, entry->consume_q);
2104 if (result < VMCI_SUCCESS) {
2105 qp_host_unregister_user_memory(entry->produce_q,
2106 entry->consume_q);
2107 goto out;
2108 }
2109
2110 if (entry->state == VMCIQPB_CREATED_NO_MEM)
2111 entry->state = VMCIQPB_CREATED_MEM;
2112 else
2113 entry->state = VMCIQPB_ATTACHED_MEM;
2114
2115 entry->vmci_page_files = true;
2116
2117 if (entry->state == VMCIQPB_ATTACHED_MEM) {
2118 result =
2119 qp_notify_peer(true, handle, context_id, entry->create_id);
2120 if (result < VMCI_SUCCESS) {
2121 pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
2122 entry->create_id, entry->qp.handle.context,
2123 entry->qp.handle.resource);
2124 }
2125 }
2126
2127 result = VMCI_SUCCESS;
2128 out:
2129 mutex_unlock(&qp_broker_list.mutex);
2130 return result;
2131}

/*
 * Resets the saved queue headers for the given QP broker
 * entry. Should be used when guest memory becomes available
 * again, or the guest detaches.
 */
2138static void qp_reset_saved_headers(struct qp_broker_entry *entry)
2139{
2140 entry->produce_q->saved_header = NULL;
2141 entry->consume_q->saved_header = NULL;
2142}

/*
 * The main entry point for detaching from a queue pair registered with the
 * queue pair broker.  If more than one endpoint is attached to the queue
 * pair, the first endpoint will mainly decrement a reference count and
 * generate a notification to its peer.  The last endpoint will clean up
 * the queue pair state registered with the broker.
 *
 * When a guest endpoint detaches, it will unmap and unregister the guest
 * memory backing the queue pair.  If the host is still attached, it will
 * no longer be able to access the queue pair content.
 *
 * If the queue pair is already in a state where there is no memory
 * registered for it (any *_NO_MEM state), it will transition to the
 * VMCIQPB_SHUTDOWN_NO_MEM state.  This also happens if a guest endpoint
 * is the first of two endpoints to detach.  If the host endpoint is the
 * first of two to detach, the queue pair moves to the
 * VMCIQPB_SHUTDOWN_MEM state.
 */
2162int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context)
2163{
2164 struct qp_broker_entry *entry;
2165 const u32 context_id = vmci_ctx_get_id(context);
2166 u32 peer_id;
2167 bool is_local = false;
2168 int result;
2169
2170 if (vmci_handle_is_invalid(handle) || !context ||
2171 context_id == VMCI_INVALID_ID) {
2172 return VMCI_ERROR_INVALID_ARGS;
2173 }
2174
2175 mutex_lock(&qp_broker_list.mutex);
2176
2177 if (!vmci_ctx_qp_exists(context, handle)) {
2178 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2179 context_id, handle.context, handle.resource);
2180 result = VMCI_ERROR_NOT_FOUND;
2181 goto out;
2182 }
2183
2184 entry = qp_broker_handle_to_entry(handle);
2185 if (!entry) {
2186 pr_devel("Context (ID=0x%x) reports being attached to queue pair(handle=0x%x:0x%x) that isn't present in broker\n",
2187 context_id, handle.context, handle.resource);
2188 result = VMCI_ERROR_NOT_FOUND;
2189 goto out;
2190 }
2191
2192 if (context_id != entry->create_id && context_id != entry->attach_id) {
2193 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2194 goto out;
2195 }
2196
2197 if (context_id == entry->create_id) {
2198 peer_id = entry->attach_id;
2199 entry->create_id = VMCI_INVALID_ID;
2200 } else {
2201 peer_id = entry->create_id;
2202 entry->attach_id = VMCI_INVALID_ID;
2203 }
2204 entry->qp.ref_count--;
2205
2206 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
2207
2208 if (context_id != VMCI_HOST_CONTEXT_ID) {
2209 bool headers_mapped;

		/*
		 * Older VMX'en may detach from a queue pair before
		 * setting the page store, and in that case there is
		 * no user memory to detach from.  Also, more recent
		 * VMX'en may detach from a queue pair in the
		 * quiesced state.
		 */

2219 qp_acquire_queue_mutex(entry->produce_q);
2220 headers_mapped = entry->produce_q->q_header ||
2221 entry->consume_q->q_header;
2222 if (QPBROKERSTATE_HAS_MEM(entry)) {
2223 result =
2224 qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID,
2225 entry->produce_q,
2226 entry->consume_q);
2227 if (result < VMCI_SUCCESS)
2228 pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
2229 handle.context, handle.resource,
2230 result);
2231
			/*
			 * The entry may or may not have been backed by
			 * VMCI page files, but the user memory is
			 * unregistered the same way in both cases.
			 */
			qp_host_unregister_user_memory(entry->produce_q,
						       entry->consume_q);
2240
2241 }
2242
2243 if (!headers_mapped)
2244 qp_reset_saved_headers(entry);
2245
2246 qp_release_queue_mutex(entry->produce_q);
2247
2248 if (!headers_mapped && entry->wakeup_cb)
2249 entry->wakeup_cb(entry->client_data);
2250
2251 } else {
2252 if (entry->wakeup_cb) {
2253 entry->wakeup_cb = NULL;
2254 entry->client_data = NULL;
2255 }
2256 }
2257
2258 if (entry->qp.ref_count == 0) {
2259 qp_list_remove_entry(&qp_broker_list, &entry->qp);
2260
2261 if (is_local)
2262 kfree(entry->local_mem);
2263
2264 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
2265 qp_host_free_queue(entry->produce_q, entry->qp.produce_size);
2266 qp_host_free_queue(entry->consume_q, entry->qp.consume_size);
2267
2268 vmci_resource_remove(&entry->resource);
2269
2270 kfree(entry);
2271
2272 vmci_ctx_qp_destroy(context, handle);
2273 } else {
2274 qp_notify_peer(false, handle, context_id, peer_id);
2275 if (context_id == VMCI_HOST_CONTEXT_ID &&
2276 QPBROKERSTATE_HAS_MEM(entry)) {
2277 entry->state = VMCIQPB_SHUTDOWN_MEM;
2278 } else {
2279 entry->state = VMCIQPB_SHUTDOWN_NO_MEM;
2280 }
2281
2282 if (!is_local)
2283 vmci_ctx_qp_destroy(context, handle);
2284
2285 }
2286 result = VMCI_SUCCESS;
2287 out:
2288 mutex_unlock(&qp_broker_list.mutex);
2289 return result;
2290}

/*
 * Establishes the necessary mappings for a queue pair given a
 * reference to the queue pair guest memory. This is usually
 * called when a guest is unquiesced and the VMX is allowed to
 * map guest memory once again.
 */
2298int vmci_qp_broker_map(struct vmci_handle handle,
2299 struct vmci_ctx *context,
2300 u64 guest_mem)
2301{
2302 struct qp_broker_entry *entry;
2303 const u32 context_id = vmci_ctx_get_id(context);
2304 bool is_local = false;
2305 int result;
2306
2307 if (vmci_handle_is_invalid(handle) || !context ||
2308 context_id == VMCI_INVALID_ID)
2309 return VMCI_ERROR_INVALID_ARGS;
2310
2311 mutex_lock(&qp_broker_list.mutex);
2312
2313 if (!vmci_ctx_qp_exists(context, handle)) {
2314 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2315 context_id, handle.context, handle.resource);
2316 result = VMCI_ERROR_NOT_FOUND;
2317 goto out;
2318 }
2319
2320 entry = qp_broker_handle_to_entry(handle);
2321 if (!entry) {
2322 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
2323 context_id, handle.context, handle.resource);
2324 result = VMCI_ERROR_NOT_FOUND;
2325 goto out;
2326 }
2327
2328 if (context_id != entry->create_id && context_id != entry->attach_id) {
2329 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2330 goto out;
2331 }
2332
2333 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
2334 result = VMCI_SUCCESS;
2335
2336 if (context_id != VMCI_HOST_CONTEXT_ID) {
2337 struct vmci_qp_page_store page_store;
2338
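		/*
		 * guest_mem is the base user VA of one contiguous region
		 * holding the produce queue pages followed by the consume
		 * queue pages.
		 */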
2339 page_store.pages = guest_mem;
2340 page_store.len = QPE_NUM_PAGES(entry->qp);
2341
2342 qp_acquire_queue_mutex(entry->produce_q);
2343 qp_reset_saved_headers(entry);
2344 result =
2345 qp_host_register_user_memory(&page_store,
2346 entry->produce_q,
2347 entry->consume_q);
2348 qp_release_queue_mutex(entry->produce_q);
2349 if (result == VMCI_SUCCESS) {
			/* Move state from *_NO_MEM to *_MEM. */
2352 entry->state++;
2353
2354 if (entry->wakeup_cb)
2355 entry->wakeup_cb(entry->client_data);
2356 }
2357 }
2358
2359 out:
2360 mutex_unlock(&qp_broker_list.mutex);
2361 return result;
2362}

/*
 * Saves a snapshot of the queue headers for the given QP broker
 * entry. Should be used when guest memory is unmapped.
 * Returns VMCI_SUCCESS on success, or an appropriate error code if
 * the guest memory can't be accessed.
 */
2371static int qp_save_headers(struct qp_broker_entry *entry)
2372{
2373 int result;
2374
2375 if (entry->produce_q->saved_header != NULL &&
2376 entry->consume_q->saved_header != NULL) {
		/*
		 * If the headers have already been saved, we don't need to do
		 * it again, and we don't want to map in the headers
		 * unnecessarily.
		 */
2383 return VMCI_SUCCESS;
2384 }
2385
2386 if (NULL == entry->produce_q->q_header ||
2387 NULL == entry->consume_q->q_header) {
2388 result = qp_host_map_queues(entry->produce_q, entry->consume_q);
2389 if (result < VMCI_SUCCESS)
2390 return result;
2391 }
2392
2393 memcpy(&entry->saved_produce_q, entry->produce_q->q_header,
2394 sizeof(entry->saved_produce_q));
2395 entry->produce_q->saved_header = &entry->saved_produce_q;
2396 memcpy(&entry->saved_consume_q, entry->consume_q->q_header,
2397 sizeof(entry->saved_consume_q));
2398 entry->consume_q->saved_header = &entry->saved_consume_q;
2399
2400 return VMCI_SUCCESS;
2401}

/*
 * Removes all references to the guest memory of a given queue pair, and
 * moves the queue pair from a *_MEM state to the corresponding *_NO_MEM
 * state.  It is usually called when a VM is being quiesced and the VMX
 * is cleaning up all of its references to queue pair memory mapped into
 * the VMX address space.
 */
2409int vmci_qp_broker_unmap(struct vmci_handle handle,
2410 struct vmci_ctx *context,
2411 u32 gid)
2412{
2413 struct qp_broker_entry *entry;
2414 const u32 context_id = vmci_ctx_get_id(context);
2415 bool is_local = false;
2416 int result;
2417
2418 if (vmci_handle_is_invalid(handle) || !context ||
2419 context_id == VMCI_INVALID_ID)
2420 return VMCI_ERROR_INVALID_ARGS;
2421
2422 mutex_lock(&qp_broker_list.mutex);
2423
2424 if (!vmci_ctx_qp_exists(context, handle)) {
2425 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2426 context_id, handle.context, handle.resource);
2427 result = VMCI_ERROR_NOT_FOUND;
2428 goto out;
2429 }
2430
2431 entry = qp_broker_handle_to_entry(handle);
2432 if (!entry) {
2433 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
2434 context_id, handle.context, handle.resource);
2435 result = VMCI_ERROR_NOT_FOUND;
2436 goto out;
2437 }
2438
2439 if (context_id != entry->create_id && context_id != entry->attach_id) {
2440 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2441 goto out;
2442 }
2443
2444 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
2445
2446 if (context_id != VMCI_HOST_CONTEXT_ID) {
2447 qp_acquire_queue_mutex(entry->produce_q);
2448 result = qp_save_headers(entry);
2449 if (result < VMCI_SUCCESS)
2450 pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
2451 handle.context, handle.resource, result);
2452
2453 qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q);

		/*
		 * On hosted, when we unmap queue pairs, the VMX will also
		 * unmap the guest memory, so we invalidate the previously
		 * registered memory.  If the queue pair is mapped again at a
		 * later point in time, we will need to reregister the user
		 * memory with a possibly new user VA.
		 */
2462 qp_host_unregister_user_memory(entry->produce_q,
2463 entry->consume_q);

		/*
		 * Move state from *_MEM to *_NO_MEM.
		 */
2468 entry->state--;
2469
2470 qp_release_queue_mutex(entry->produce_q);
2471 }
2472
2473 result = VMCI_SUCCESS;
2474
2475 out:
2476 mutex_unlock(&qp_broker_list.mutex);
2477 return result;
2478}

/*
 * Destroys all guest queue pair endpoints. If active guest queue
 * pairs still exist, hypercalls to attempt detach from these
 * queue pairs will be made. Any failure to detach is silently
 * ignored.
 */
2486void vmci_qp_guest_endpoints_exit(void)
2487{
2488 struct qp_entry *entry;
2489 struct qp_guest_endpoint *ep;
2490
2491 mutex_lock(&qp_guest_endpoints.mutex);
2492
2493 while ((entry = qp_list_get_head(&qp_guest_endpoints))) {
2494 ep = (struct qp_guest_endpoint *)entry;

		/* Don't make a hypercall for local queue pairs. */
2497 if (!(entry->flags & VMCI_QPFLAG_LOCAL))
2498 qp_detatch_hypercall(entry->handle);

		/* We cannot fail the exit, so reset ref_count. */
2501 entry->ref_count = 0;
2502 qp_list_remove_entry(&qp_guest_endpoints, entry);
2503
2504 qp_guest_endpoint_destroy(ep);
2505 }
2506
2507 mutex_unlock(&qp_guest_endpoints.mutex);
2508}

/*
 * Helper routine that locks the queue pair before subsequent
 * operations.  Only the host side needs the mutex; guest queues
 * have no shared kernel state to protect.
 */
2518static void qp_lock(const struct vmci_qp *qpair)
2519{
2520 qp_acquire_queue_mutex(qpair->produce_q);
2521}

/*
 * Helper routine that unlocks the queue pair after calling
 * qp_lock.
 */
2527static void qp_unlock(const struct vmci_qp *qpair)
2528{
2529 qp_release_queue_mutex(qpair->produce_q);
2530}

/*
 * The queue headers may not be mapped at all times. If a queue is
 * currently not mapped, it will be attempted to be mapped.
 */
2536static int qp_map_queue_headers(struct vmci_queue *produce_q,
2537 struct vmci_queue *consume_q)
2538{
2539 int result;
2540
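	/*
	 * If either header is currently unmapped, try to map both; when
	 * that fails, report whether saved copies of the headers exist so
	 * callers can tell a temporarily unavailable pair from a detached
	 * one.
	 */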
2541 if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
2542 result = qp_host_map_queues(produce_q, consume_q);
2543 if (result < VMCI_SUCCESS)
2544 return (produce_q->saved_header &&
2545 consume_q->saved_header) ?
2546 VMCI_ERROR_QUEUEPAIR_NOT_READY :
2547 VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2548 }
2549
2550 return VMCI_SUCCESS;
2551}

/*
 * Helper routine that retrieves the produce and consume headers of a
 * given queue pair. If the guest memory of the queue pair is currently
 * not available, the saved queue headers are returned, if these are
 * available.
 */
2559static int qp_get_queue_headers(const struct vmci_qp *qpair,
2560 struct vmci_queue_header **produce_q_header,
2561 struct vmci_queue_header **consume_q_header)
2562{
2563 int result;
2564
2565 result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q);
2566 if (result == VMCI_SUCCESS) {
2567 *produce_q_header = qpair->produce_q->q_header;
2568 *consume_q_header = qpair->consume_q->q_header;
2569 } else if (qpair->produce_q->saved_header &&
2570 qpair->consume_q->saved_header) {
2571 *produce_q_header = qpair->produce_q->saved_header;
2572 *consume_q_header = qpair->consume_q->saved_header;
2573 result = VMCI_SUCCESS;
2574 }
2575
2576 return result;
2577}
2578
2579
2580
2581
2582
2583
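/*
 * Wakeup callback registered with the queue pair broker (see
 * vmci_qpair_alloc()).  Wakes up every thread blocked in
 * qp_wait_for_ready_queue() by advancing the generation counter.
 */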
2584static int qp_wakeup_cb(void *client_data)
2585{
2586 struct vmci_qp *qpair = (struct vmci_qp *)client_data;
2587
2588 qp_lock(qpair);
2589 while (qpair->blocked > 0) {
2590 qpair->blocked--;
2591 qpair->generation++;
2592 wake_up(&qpair->event);
2593 }
2594 qp_unlock(qpair);
2595
2596 return VMCI_SUCCESS;
2597}
2598
2599
2600
2601
2602
2603
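/*
 * Blocks until qp_wakeup_cb() signals that the queue pair may have become
 * ready again.  Called with the queue pair lock held; the lock is dropped
 * while sleeping and reacquired before returning.  Always returns true so
 * that callers keep retrying.
 */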
2604static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
2605{
2606 unsigned int generation;
2607
2608 qpair->blocked++;
2609 generation = qpair->generation;
2610 qp_unlock(qpair);
2611 wait_event(qpair->event, generation != qpair->generation);
2612 qp_lock(qpair);
2613
2614 return true;
2615}
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
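/*
 * Enqueues the given buffer onto the produce queue using the provided copy
 * function.  Assumes the queue pair lock is held.  Returns the number of
 * bytes written, VMCI_ERROR_QUEUEPAIR_NOSPACE when the queue is full, or a
 * negative error from mapping or copying.
 */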
2629static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
2630 struct vmci_queue *consume_q,
2631 const u64 produce_q_size,
2632 const void *buf,
2633 size_t buf_size,
2634 vmci_memcpy_to_queue_func memcpy_to_queue)
2635{
2636 s64 free_space;
2637 u64 tail;
2638 size_t written;
2639 ssize_t result;
2640
2641 result = qp_map_queue_headers(produce_q, consume_q);
2642 if (unlikely(result != VMCI_SUCCESS))
2643 return result;
2644
2645 free_space = vmci_q_header_free_space(produce_q->q_header,
2646 consume_q->q_header,
2647 produce_q_size);
2648 if (free_space == 0)
2649 return VMCI_ERROR_QUEUEPAIR_NOSPACE;
2650
2651 if (free_space < VMCI_SUCCESS)
2652 return (ssize_t) free_space;
2653
2654 written = (size_t) (free_space > buf_size ? buf_size : free_space);
2655 tail = vmci_q_header_producer_tail(produce_q->q_header);
2656 if (likely(tail + written < produce_q_size)) {
2657 result = memcpy_to_queue(produce_q, tail, buf, 0, written);
2658 } else {
2659
2660
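 /*
 * The write wraps around the end of the ring: copy the first chunk up
 * to produce_q_size and the remainder to the start of the queue.  For
 * example, with produce_q_size 16, tail 12 and 8 bytes to write, 4
 * bytes land at offset 12 and the remaining 4 at offset 0.
 */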
2661 const size_t tmp = (size_t) (produce_q_size - tail);
2662
2663 result = memcpy_to_queue(produce_q, tail, buf, 0, tmp);
2664 if (result >= VMCI_SUCCESS)
2665 result = memcpy_to_queue(produce_q, 0, buf, tmp,
2666 written - tmp);
2667 }
2668
2669 if (result < VMCI_SUCCESS)
2670 return result;
2671
2672 vmci_q_header_add_producer_tail(produce_q->q_header, written,
2673 produce_q_size);
2674 return written;
2675}
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
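/*
 * Dequeues (or, when update_consumer is false, peeks at) data from the
 * consume queue into the given buffer using the provided copy function.
 * Assumes the queue pair lock is held.  Returns the number of bytes read,
 * VMCI_ERROR_QUEUEPAIR_NODATA when the queue is empty, or a negative
 * error.
 */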
2690static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
2691 struct vmci_queue *consume_q,
2692 const u64 consume_q_size,
2693 void *buf,
2694 size_t buf_size,
2695 vmci_memcpy_from_queue_func memcpy_from_queue,
2696 bool update_consumer)
2697{
2698 s64 buf_ready;
2699 u64 head;
2700 size_t read;
2701 ssize_t result;
2702
2703 result = qp_map_queue_headers(produce_q, consume_q);
2704 if (unlikely(result != VMCI_SUCCESS))
2705 return result;
2706
2707 buf_ready = vmci_q_header_buf_ready(consume_q->q_header,
2708 produce_q->q_header,
2709 consume_q_size);
2710 if (buf_ready == 0)
2711 return VMCI_ERROR_QUEUEPAIR_NODATA;
2712
2713 if (buf_ready < VMCI_SUCCESS)
2714 return (ssize_t) buf_ready;
2715
2716 read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
2717 head = vmci_q_header_consumer_head(produce_q->q_header);
2718 if (likely(head + read < consume_q_size)) {
2719 result = memcpy_from_queue(buf, 0, consume_q, head, read);
2720 } else {
2721
2722
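 /*
 * The read wraps around the end of the ring: copy from head up to
 * consume_q_size, then the remainder from offset 0.
 */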
2723 const size_t tmp = (size_t) (consume_q_size - head);
2724
2725 result = memcpy_from_queue(buf, 0, consume_q, head, tmp);
2726 if (result >= VMCI_SUCCESS)
2727 result = memcpy_from_queue(buf, tmp, consume_q, 0,
2728 read - tmp);
2729
2730 }
2731
2732 if (result < VMCI_SUCCESS)
2733 return result;
2734
2735 if (update_consumer)
2736 vmci_q_header_add_consumer_head(produce_q->q_header,
2737 read, consume_q_size);
2738
2739 return read;
2740}
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
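/**
 * vmci_qpair_alloc() - Allocates a queue pair.
 * @qpair:          Pointer for the new vmci_qp struct.
 * @handle:         Handle to track the resource.
 * @produce_qsize:  Desired size of the producer queue.
 * @consume_qsize:  Desired size of the consumer queue.
 * @peer:           ContextID of the peer.
 * @flags:          VMCI flags.
 * @priv_flags:     VMCI privilege flags.
 *
 * This is the client interface for allocating the memory for a vmci_qp
 * structure and then attaching to the underlying queue pair.  Returns an
 * error or success.
 */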
2758int vmci_qpair_alloc(struct vmci_qp **qpair,
2759 struct vmci_handle *handle,
2760 u64 produce_qsize,
2761 u64 consume_qsize,
2762 u32 peer,
2763 u32 flags,
2764 u32 priv_flags)
2765{
2766 struct vmci_qp *my_qpair;
2767 int retval;
2768 struct vmci_handle src = VMCI_INVALID_HANDLE;
2769 struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID);
2770 enum vmci_route route;
2771 vmci_event_release_cb wakeup_cb;
2772 void *client_data;
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
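 /*
 * Reject sizes whose sum overflows a u64 or exceeds the per-pair
 * limit VMCI_MAX_GUEST_QP_MEMORY.
 */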
2791 if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) ||
2792 produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY)
2793 return VMCI_ERROR_NO_RESOURCES;
2794
2795 retval = vmci_route(&src, &dst, false, &route);
2796 if (retval < VMCI_SUCCESS)
2797 route = vmci_guest_code_active() ?
2798 VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;
2799
2800 if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) {
2801 pr_devel("NONBLOCK OR PINNED set\n");
2802 return VMCI_ERROR_INVALID_ARGS;
2803 }
2804
2805 my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
2806 if (!my_qpair)
2807 return VMCI_ERROR_NO_MEM;
2808
2809 my_qpair->produce_q_size = produce_qsize;
2810 my_qpair->consume_q_size = consume_qsize;
2811 my_qpair->peer = peer;
2812 my_qpair->flags = flags;
2813 my_qpair->priv_flags = priv_flags;
2814
2815 wakeup_cb = NULL;
2816 client_data = NULL;
2817
2818 if (VMCI_ROUTE_AS_HOST == route) {
2819 my_qpair->guest_endpoint = false;
2820 if (!(flags & VMCI_QPFLAG_LOCAL)) {
2821 my_qpair->blocked = 0;
2822 my_qpair->generation = 0;
2823 init_waitqueue_head(&my_qpair->event);
2824 wakeup_cb = qp_wakeup_cb;
2825 client_data = (void *)my_qpair;
2826 }
2827 } else {
2828 my_qpair->guest_endpoint = true;
2829 }
2830
2831 retval = vmci_qp_alloc(handle,
2832 &my_qpair->produce_q,
2833 my_qpair->produce_q_size,
2834 &my_qpair->consume_q,
2835 my_qpair->consume_q_size,
2836 my_qpair->peer,
2837 my_qpair->flags,
2838 my_qpair->priv_flags,
2839 my_qpair->guest_endpoint,
2840 wakeup_cb, client_data);
2841
2842 if (retval < VMCI_SUCCESS) {
2843 kfree(my_qpair);
2844 return retval;
2845 }
2846
2847 *qpair = my_qpair;
2848 my_qpair->handle = *handle;
2849
2850 return retval;
2851}
2852EXPORT_SYMBOL_GPL(vmci_qpair_alloc);
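/*
 * Example usage (illustrative sketch only, not part of this file; the peer
 * context id and sizes are placeholders and error handling is trimmed):
 *
 *	struct vmci_qp *qpair;
 *	struct vmci_handle handle = VMCI_INVALID_HANDLE;
 *	int err;
 *
 *	err = vmci_qpair_alloc(&qpair, &handle, 65536, 65536,
 *			       peer_cid, 0, 0);
 *	if (err < VMCI_SUCCESS)
 *		return err;
 *	...
 *	vmci_qpair_detach(&qpair);
 */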
2853
2854
2855
2856
2857
2858
2859
2860
2861
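/**
 * vmci_qpair_detach() - Detaches the client from a queue pair.
 * @qpair:      Reference of a pointer to the qpair struct.
 *
 * This is the client interface for detaching from a VMCIQPair.  Note that
 * this routine also frees the memory allocated for the vmci_qp structure.
 */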
2862int vmci_qpair_detach(struct vmci_qp **qpair)
2863{
2864 int result;
2865 struct vmci_qp *old_qpair;
2866
2867 if (!qpair || !(*qpair))
2868 return VMCI_ERROR_INVALID_ARGS;
2869
2870 old_qpair = *qpair;
2871 result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint);
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
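 /*
 * The qpair structure is freed regardless of the detach result; it is
 * cleared and its handle and peer are reset to invalid values first.
 */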
2883 memset(old_qpair, 0, sizeof(*old_qpair));
2884 old_qpair->handle = VMCI_INVALID_HANDLE;
2885 old_qpair->peer = VMCI_INVALID_ID;
2886 kfree(old_qpair);
2887 *qpair = NULL;
2888
2889 return result;
2890}
2891EXPORT_SYMBOL_GPL(vmci_qpair_detach);
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901
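/**
 * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer queue.
 * @qpair:          Pointer to the queue pair struct.
 * @producer_tail:  Reference used for storing the producer tail index.
 * @consumer_head:  Reference used for storing the consumer head index.
 *
 * This is the client interface for getting the current indexes of the
 * queue pair from the point of view of the caller as the producer.
 */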
2902int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair,
2903 u64 *producer_tail,
2904 u64 *consumer_head)
2905{
2906 struct vmci_queue_header *produce_q_header;
2907 struct vmci_queue_header *consume_q_header;
2908 int result;
2909
2910 if (!qpair)
2911 return VMCI_ERROR_INVALID_ARGS;
2912
2913 qp_lock(qpair);
2914 result =
2915 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2916 if (result == VMCI_SUCCESS)
2917 vmci_q_header_get_pointers(produce_q_header, consume_q_header,
2918 producer_tail, consumer_head);
2919 qp_unlock(qpair);
2920
2921 if (result == VMCI_SUCCESS &&
2922 ((producer_tail && *producer_tail >= qpair->produce_q_size) ||
2923 (consumer_head && *consumer_head >= qpair->produce_q_size)))
2924 return VMCI_ERROR_INVALID_SIZE;
2925
2926 return result;
2927}
2928EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes);
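/*
 * Note (informal): the number of bytes the local side has produced but the
 * peer has not yet consumed is (producer_tail - consumer_head) mod
 * produce_q_size; vmci_qpair_produce_free_space() below reports the
 * remaining room directly.
 */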
2929
2930
2931
2932
2933
2934
2935
2936
2937
2938
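/**
 * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer queue.
 * @qpair:          Pointer to the queue pair struct.
 * @consumer_tail:  Reference used for storing the consumer tail index.
 * @producer_head:  Reference used for storing the producer head index.
 *
 * This is the client interface for getting the current indexes of the
 * queue pair from the point of view of the caller as the consumer.
 */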
2939int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair,
2940 u64 *consumer_tail,
2941 u64 *producer_head)
2942{
2943 struct vmci_queue_header *produce_q_header;
2944 struct vmci_queue_header *consume_q_header;
2945 int result;
2946
2947 if (!qpair)
2948 return VMCI_ERROR_INVALID_ARGS;
2949
2950 qp_lock(qpair);
2951 result =
2952 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2953 if (result == VMCI_SUCCESS)
2954 vmci_q_header_get_pointers(consume_q_header, produce_q_header,
2955 consumer_tail, producer_head);
2956 qp_unlock(qpair);
2957
2958 if (result == VMCI_SUCCESS &&
2959 ((consumer_tail && *consumer_tail >= qpair->consume_q_size) ||
2960 (producer_head && *producer_head >= qpair->consume_q_size)))
2961 return VMCI_ERROR_INVALID_SIZE;
2962
2963 return result;
2964}
2965EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes);
2966
2967
2968
2969
2970
2971
2972
2973
2974
2975
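/**
 * vmci_qpair_produce_free_space() - Retrieves free space in producer queue.
 * @qpair:      Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of free space in the
 * queue pair from the point of view of the caller as the producer.
 * Returns the free space or a negative VMCI_ERROR_* value.
 */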
2976s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair)
2977{
2978 struct vmci_queue_header *produce_q_header;
2979 struct vmci_queue_header *consume_q_header;
2980 s64 result;
2981
2982 if (!qpair)
2983 return VMCI_ERROR_INVALID_ARGS;
2984
2985 qp_lock(qpair);
2986 result =
2987 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2988 if (result == VMCI_SUCCESS)
2989 result = vmci_q_header_free_space(produce_q_header,
2990 consume_q_header,
2991 qpair->produce_q_size);
2992 else
2993 result = 0;
2994
2995 qp_unlock(qpair);
2996
2997 return result;
2998}
2999EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space);
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
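/**
 * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue.
 * @qpair:      Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of free space in the
 * queue pair from the point of view of the caller as the consumer.
 * Returns the free space or a negative VMCI_ERROR_* value.
 */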
3010s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair)
3011{
3012 struct vmci_queue_header *produce_q_header;
3013 struct vmci_queue_header *consume_q_header;
3014 s64 result;
3015
3016 if (!qpair)
3017 return VMCI_ERROR_INVALID_ARGS;
3018
3019 qp_lock(qpair);
3020 result =
3021 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
3022 if (result == VMCI_SUCCESS)
3023 result = vmci_q_header_free_space(consume_q_header,
3024 produce_q_header,
3025 qpair->consume_q_size);
3026 else
3027 result = 0;
3028
3029 qp_unlock(qpair);
3030
3031 return result;
3032}
3033EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space);
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044
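/**
 * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from producer queue.
 * @qpair:      Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of enqueued data in
 * the queue pair from the point of view of the caller as the producer.
 * Returns the number of bytes or a negative VMCI_ERROR_* value.
 */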
3045s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair)
3046{
3047 struct vmci_queue_header *produce_q_header;
3048 struct vmci_queue_header *consume_q_header;
3049 s64 result;
3050
3051 if (!qpair)
3052 return VMCI_ERROR_INVALID_ARGS;
3053
3054 qp_lock(qpair);
3055 result =
3056 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
3057 if (result == VMCI_SUCCESS)
3058 result = vmci_q_header_buf_ready(produce_q_header,
3059 consume_q_header,
3060 qpair->produce_q_size);
3061 else
3062 result = 0;
3063
3064 qp_unlock(qpair);
3065
3066 return result;
3067}
3068EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready);
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
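/**
 * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from consumer queue.
 * @qpair:      Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of enqueued data in
 * the queue pair from the point of view of the caller as the consumer.
 * Returns the number of bytes or a negative VMCI_ERROR_* value.
 */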
3080s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair)
3081{
3082 struct vmci_queue_header *produce_q_header;
3083 struct vmci_queue_header *consume_q_header;
3084 s64 result;
3085
3086 if (!qpair)
3087 return VMCI_ERROR_INVALID_ARGS;
3088
3089 qp_lock(qpair);
3090 result =
3091 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
3092 if (result == VMCI_SUCCESS)
3093 result = vmci_q_header_buf_ready(consume_q_header,
3094 produce_q_header,
3095 qpair->consume_q_size);
3096 else
3097 result = 0;
3098
3099 qp_unlock(qpair);
3100
3101 return result;
3102}
3103EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready);
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113
3114
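/**
 * vmci_qpair_enqueue() - Throw data on the queue.
 * @qpair:      Pointer to the queue pair struct.
 * @buf:        Pointer to the buffer containing data.
 * @buf_size:   Length of the buffer.
 * @buf_type:   Buffer type (unused).
 *
 * This is the client interface for enqueueing data into the queue.
 * Returns the number of bytes enqueued or a negative error code.
 */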
3115ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
3116 const void *buf,
3117 size_t buf_size,
3118 int buf_type)
3119{
3120 ssize_t result;
3121
3122 if (!qpair || !buf)
3123 return VMCI_ERROR_INVALID_ARGS;
3124
3125 qp_lock(qpair);
3126
3127 do {
3128 result = qp_enqueue_locked(qpair->produce_q,
3129 qpair->consume_q,
3130 qpair->produce_q_size,
3131 buf, buf_size,
3132 qp_memcpy_to_queue);
3133
3134 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3135 !qp_wait_for_ready_queue(qpair))
3136 result = VMCI_ERROR_WOULD_BLOCK;
3137
3138 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3139
3140 qp_unlock(qpair);
3141
3142 return result;
3143}
3144EXPORT_SYMBOL_GPL(vmci_qpair_enqueue);
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154
3155
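/**
 * vmci_qpair_dequeue() - Get data from the queue.
 * @qpair:      Pointer to the queue pair struct.
 * @buf:        Pointer to the buffer for the data.
 * @buf_size:   Length of the buffer.
 * @buf_type:   Buffer type (unused).
 *
 * This is the client interface for dequeueing data from the queue.
 * Returns the number of bytes dequeued or a negative error code.
 */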
3156ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
3157 void *buf,
3158 size_t buf_size,
3159 int buf_type)
3160{
3161 ssize_t result;
3162
3163 if (!qpair || !buf)
3164 return VMCI_ERROR_INVALID_ARGS;
3165
3166 qp_lock(qpair);
3167
3168 do {
3169 result = qp_dequeue_locked(qpair->produce_q,
3170 qpair->consume_q,
3171 qpair->consume_q_size,
3172 buf, buf_size,
3173 qp_memcpy_from_queue, true);
3174
3175 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3176 !qp_wait_for_ready_queue(qpair))
3177 result = VMCI_ERROR_WOULD_BLOCK;
3178
3179 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3180
3181 qp_unlock(qpair);
3182
3183 return result;
3184}
3185EXPORT_SYMBOL_GPL(vmci_qpair_dequeue);
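/*
 * Example (illustrative only; qpair, data and len are placeholders): a
 * producer retries when the ring is full and a consumer when it is empty:
 *
 *	ssize_t n = vmci_qpair_enqueue(qpair, data, len, 0);
 *	if (n == VMCI_ERROR_QUEUEPAIR_NOSPACE)
 *		... wait for the peer to drain, then retry ...
 *
 *	n = vmci_qpair_dequeue(qpair, data, len, 0);
 *	if (n == VMCI_ERROR_QUEUEPAIR_NODATA)
 *		... nothing queued yet ...
 */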
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
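/**
 * vmci_qpair_peek() - Peek at the data in the queue.
 * @qpair:      Pointer to the queue pair struct.
 * @buf:        Pointer to the buffer for the data.
 * @buf_size:   Length of the buffer.
 * @buf_type:   Buffer type (unused).
 *
 * This is the client interface for peeking into a queue: the data is
 * copied out without advancing the consumer head.  Returns the number of
 * bytes peeked or a negative error code.
 */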
3198ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
3199 void *buf,
3200 size_t buf_size,
3201 int buf_type)
3202{
3203 ssize_t result;
3204
3205 if (!qpair || !buf)
3206 return VMCI_ERROR_INVALID_ARGS;
3207
3208 qp_lock(qpair);
3209
3210 do {
3211 result = qp_dequeue_locked(qpair->produce_q,
3212 qpair->consume_q,
3213 qpair->consume_q_size,
3214 buf, buf_size,
3215 qp_memcpy_from_queue, false);
3216
3217 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3218 !qp_wait_for_ready_queue(qpair))
3219 result = VMCI_ERROR_WOULD_BLOCK;
3220
3221 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3222
3223 qp_unlock(qpair);
3224
3225 return result;
3226}
3227EXPORT_SYMBOL_GPL(vmci_qpair_peek);
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
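/**
 * vmci_qpair_enquev() - Throw data on the queue using iov.
 * @qpair:      Pointer to the queue pair struct.
 * @msg:        Pointer to the msg containing the scatter/gather iov.
 * @iov_size:   Length of the iov data.
 * @buf_type:   Buffer type (unused).
 *
 * This is the client interface for enqueueing data into the queue.  The
 * only difference from vmci_qpair_enqueue() is that the data is gathered
 * from the iovec in @msg instead of a flat buffer.  Returns the number of
 * bytes enqueued or a negative error code.
 */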
3240ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
3241 struct msghdr *msg,
3242 size_t iov_size,
3243 int buf_type)
3244{
3245 ssize_t result;
3246
3247 if (!qpair)
3248 return VMCI_ERROR_INVALID_ARGS;
3249
3250 qp_lock(qpair);
3251
3252 do {
3253 result = qp_enqueue_locked(qpair->produce_q,
3254 qpair->consume_q,
3255 qpair->produce_q_size,
3256 msg, iov_size,
3257 qp_memcpy_to_queue_iov);
3258
3259 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3260 !qp_wait_for_ready_queue(qpair))
3261 result = VMCI_ERROR_WOULD_BLOCK;
3262
3263 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3264
3265 qp_unlock(qpair);
3266
3267 return result;
3268}
3269EXPORT_SYMBOL_GPL(vmci_qpair_enquev);
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
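/**
 * vmci_qpair_dequev() - Get data from the queue using iov.
 * @qpair:      Pointer to the queue pair struct.
 * @msg:        Pointer to the msg containing the scatter/gather iov.
 * @iov_size:   Length of the iov data.
 * @buf_type:   Buffer type (unused).
 *
 * This is the client interface for dequeueing data from the queue.  The
 * data is scattered into the iovec in @msg.  Returns the number of bytes
 * dequeued or a negative error code.
 */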
3282ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
3283 struct msghdr *msg,
3284 size_t iov_size,
3285 int buf_type)
3286{
3287 ssize_t result;
3288
3289 if (!qpair)
3290 return VMCI_ERROR_INVALID_ARGS;
3291
3292 qp_lock(qpair);
3293
3294 do {
3295 result = qp_dequeue_locked(qpair->produce_q,
3296 qpair->consume_q,
3297 qpair->consume_q_size,
3298 msg, iov_size,
3299 qp_memcpy_from_queue_iov,
3300 true);
3301
3302 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3303 !qp_wait_for_ready_queue(qpair))
3304 result = VMCI_ERROR_WOULD_BLOCK;
3305
3306 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3307
3308 qp_unlock(qpair);
3309
3310 return result;
3311}
3312EXPORT_SYMBOL_GPL(vmci_qpair_dequev);
3313
3314
3315
3316
3317
3318
3319
3320
3321
3322
3323
3324
3325
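/**
 * vmci_qpair_peekv() - Peek at the data in the queue using iov.
 * @qpair:      Pointer to the queue pair struct.
 * @msg:        Pointer to the msg containing the scatter/gather iov.
 * @iov_size:   Length of the iov data.
 * @buf_type:   Buffer type (unused).
 *
 * This is the client interface for peeking into a queue: the data is
 * copied into the iovec without advancing the consumer head.  Returns the
 * number of bytes peeked or a negative error code.
 */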
3326ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
3327 struct msghdr *msg,
3328 size_t iov_size,
3329 int buf_type)
3330{
3331 ssize_t result;
3332
3333 if (!qpair)
3334 return VMCI_ERROR_INVALID_ARGS;
3335
3336 qp_lock(qpair);
3337
3338 do {
3339 result = qp_dequeue_locked(qpair->produce_q,
3340 qpair->consume_q,
3341 qpair->consume_q_size,
3342 msg, iov_size,
3343 qp_memcpy_from_queue_iov,
3344 false);
3345
3346 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3347 !qp_wait_for_ready_queue(qpair))
3348 result = VMCI_ERROR_WOULD_BLOCK;
3349
3350 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3351
3352 qp_unlock(qpair);
3353 return result;
3354}
3355EXPORT_SYMBOL_GPL(vmci_qpair_peekv);
3356