/*
 * VMware VMCI driver - queue pair implementation.
 */
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_route.h"
typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue,
                                      u64 queue_offset, const void *src,
                                      size_t src_offset, size_t size);
typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
                                        const struct vmci_queue *queue,
                                        u64 queue_offset, size_t size);

/* The kernel implementation of a queue. */
struct vmci_queue_kern_if {
        struct mutex __mutex;   /* Protects the queue. */
        struct mutex *mutex;    /* Shared by producer and consumer queues. */
        size_t num_pages;       /* Number of pages incl. header. */
        bool host;              /* Host or guest? */
        union {
                struct {
                        dma_addr_t *pas;
                        void **vas;
                } g;            /* Used by the guest. */
                struct {
                        struct page **page;
                        struct page **header_page;
                } h;            /* Used by the host. */
        } u;
};

/* The in-kernel representation of an attached queue pair. */
struct vmci_qp {
        struct vmci_handle handle;
        struct vmci_queue *produce_q;
        struct vmci_queue *consume_q;
        u64 produce_q_size;
        u64 consume_q_size;
        u32 peer;
        u32 flags;
        u32 priv_flags;
        bool guest_endpoint;
        unsigned int blocked;
        unsigned int generation;
        wait_queue_head_t event;
};

enum qp_broker_state {
        VMCIQPB_NEW,
        VMCIQPB_CREATED_NO_MEM,
        VMCIQPB_CREATED_MEM,
        VMCIQPB_ATTACHED_NO_MEM,
        VMCIQPB_ATTACHED_MEM,
        VMCIQPB_SHUTDOWN_NO_MEM,
        VMCIQPB_SHUTDOWN_MEM,
        VMCIQPB_GONE
};

#define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \
                                     _qpb->state == VMCIQPB_ATTACHED_MEM || \
                                     _qpb->state == VMCIQPB_SHUTDOWN_MEM)
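
/*
 * Added note (not from the original source): the *_NO_MEM / *_MEM suffixes
 * of the broker states track whether guest memory is currently registered
 * with the broker entry; QPBROKERSTATE_HAS_MEM() is the canonical test used
 * throughout this file.
 */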

/*
 * Entry in one of the queue pair lists (broker list or guest endpoint
 * list).  Queue sizes are stored from the guest's point of view.
 */
struct qp_entry {
        struct list_head list_item;
        struct vmci_handle handle;
        u32 peer;
        u32 flags;
        u64 produce_size;
        u64 consume_size;
        u32 ref_count;
};

struct qp_broker_entry {
        struct vmci_resource resource;
        struct qp_entry qp;
        u32 create_id;
        u32 attach_id;
        enum qp_broker_state state;
        bool require_trusted_attach;
        bool created_by_trusted;
        bool vmci_page_files;   /* Set when the VMX supplies page files. */
        struct vmci_queue *produce_q;
        struct vmci_queue *consume_q;
        struct vmci_queue_header saved_produce_q;
        struct vmci_queue_header saved_consume_q;
        vmci_event_release_cb wakeup_cb;
        void *client_data;
        void *local_mem;        /* Kernel memory for a local queue pair. */
};

struct qp_guest_endpoint {
        struct vmci_resource resource;
        struct qp_entry qp;
        u64 num_ppns;
        void *produce_q;
        void *consume_q;
        struct ppn_set ppn_set;
};

struct qp_list {
        struct list_head head;
        struct mutex mutex;     /* Protects the list. */
};

static struct qp_list qp_broker_list = {
        .head = LIST_HEAD_INIT(qp_broker_list.head),
        .mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex),
};

static struct qp_list qp_guest_endpoints = {
        .head = LIST_HEAD_INIT(qp_guest_endpoints.head),
        .mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex),
};

#define INVALID_VMCI_GUEST_MEM_ID  0
#define QPE_NUM_PAGES(_QPE) ((u32) \
                             (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
                              DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))
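
/*
 * Worked example (illustrative, not from the original source): with 4 KiB
 * pages, a queue pair created with produce_size = 8 KiB and consume_size =
 * 4 KiB needs QPE_NUM_PAGES = 2 + 1 + 2 = 5 pages; the "+ 2" accounts for
 * the two queue header pages.
 */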

/*
 * Frees a guest queue allocated with qp_alloc_queue().
 */
static void qp_free_queue(void *q, u64 size)
{
        struct vmci_queue *queue = q;

        if (queue) {
                u64 i;

                /* Free the data pages plus the header page. */
                for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) {
                        dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE,
                                          queue->kernel_if->u.g.vas[i],
                                          queue->kernel_if->u.g.pas[i]);
                }

                vfree(queue);
        }
}

/*
 * Allocates kernel memory for the queue structure and the queue pages
 * of a guest queue.
 */
static void *qp_alloc_queue(u64 size, u32 flags)
{
        u64 i;
        struct vmci_queue *queue;
        size_t pas_size;
        size_t vas_size;
        size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
        const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;

        if (num_pages >
                 (SIZE_MAX - queue_size) /
                 (sizeof(*queue->kernel_if->u.g.pas) +
                  sizeof(*queue->kernel_if->u.g.vas)))
                return NULL;

        pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
        vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
        queue_size += pas_size + vas_size;

        queue = vmalloc(queue_size);
        if (!queue)
                return NULL;

        queue->q_header = NULL;
        queue->saved_header = NULL;
        queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
        queue->kernel_if->mutex = NULL;
        queue->kernel_if->num_pages = num_pages;
        queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1);
        queue->kernel_if->u.g.vas =
                (void **)((u8 *)queue->kernel_if->u.g.pas + pas_size);
        queue->kernel_if->host = false;

        for (i = 0; i < num_pages; i++) {
                queue->kernel_if->u.g.vas[i] =
                        dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE,
                                           &queue->kernel_if->u.g.pas[i],
                                           GFP_KERNEL);
                if (!queue->kernel_if->u.g.vas[i]) {
                        qp_free_queue(queue, i * PAGE_SIZE);
                        return NULL;
                }
        }

        /* The queue header occupies the first page. */
        queue->q_header = queue->kernel_if->u.g.vas[0];

        return queue;
}
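
/*
 * Layout sketch (illustrative, not from the original source): for a guest
 * queue allocated above, page 0 (u.g.vas[0]) holds the struct
 * vmci_queue_header and pages 1..num_pages-1 hold the payload.  This is why
 * the data-copy helpers below index the guest page array with
 * "page_index + 1".
 */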

/*
 * Copies from a given buffer or iovec to a VMCI queue.  The caller splits
 * copies that would wrap around the end of the queue, so each call stays
 * within the queue's data area.
 */
static int __qp_memcpy_to_queue(struct vmci_queue *queue,
                                u64 queue_offset,
                                const void *src,
                                size_t size,
                                bool is_iovec)
{
        struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
        size_t bytes_copied = 0;

        while (bytes_copied < size) {
                const u64 page_index =
                        (queue_offset + bytes_copied) / PAGE_SIZE;
                const size_t page_offset =
                        (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
                void *va;
                size_t to_copy;

                if (kernel_if->host)
                        va = kmap(kernel_if->u.h.page[page_index]);
                else
                        va = kernel_if->u.g.vas[page_index + 1];
                        /* Skip the header page. */

                if (size - bytes_copied > PAGE_SIZE - page_offset)
                        /* Fill up the rest of this page. */
                        to_copy = PAGE_SIZE - page_offset;
                else
                        to_copy = size - bytes_copied;

                if (is_iovec) {
                        struct iovec *iov = (struct iovec *)src;
                        int err;

                        /* The iovec maintains its own offset. */
                        err = memcpy_fromiovec((u8 *)va + page_offset,
                                               iov, to_copy);
                        if (err != 0) {
                                if (kernel_if->host)
                                        kunmap(kernel_if->u.h.page[page_index]);
                                return VMCI_ERROR_INVALID_ARGS;
                        }
                } else {
                        memcpy((u8 *)va + page_offset,
                               (u8 *)src + bytes_copied, to_copy);
                }

                bytes_copied += to_copy;
                if (kernel_if->host)
                        kunmap(kernel_if->u.h.page[page_index]);
        }

        return VMCI_SUCCESS;
}

/*
 * Copies to a given buffer or iovec from a VMCI queue.  As above, the
 * caller splits copies that would wrap around the end of the queue.
 */
static int __qp_memcpy_from_queue(void *dest,
                                  const struct vmci_queue *queue,
                                  u64 queue_offset,
                                  size_t size,
                                  bool is_iovec)
{
        struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
        size_t bytes_copied = 0;

        while (bytes_copied < size) {
                const u64 page_index =
                        (queue_offset + bytes_copied) / PAGE_SIZE;
                const size_t page_offset =
                        (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
                void *va;
                size_t to_copy;

                if (kernel_if->host)
                        va = kmap(kernel_if->u.h.page[page_index]);
                else
                        va = kernel_if->u.g.vas[page_index + 1];
                        /* Skip the header page. */

                if (size - bytes_copied > PAGE_SIZE - page_offset)
                        /* Read the rest of this page. */
                        to_copy = PAGE_SIZE - page_offset;
                else
                        to_copy = size - bytes_copied;

                if (is_iovec) {
                        struct iovec *iov = (struct iovec *)dest;
                        int err;

                        /* The iovec maintains its own offset. */
                        err = memcpy_toiovec(iov, (u8 *)va + page_offset,
                                             to_copy);
                        if (err != 0) {
                                if (kernel_if->host)
                                        kunmap(kernel_if->u.h.page[page_index]);
                                return VMCI_ERROR_INVALID_ARGS;
                        }
                } else {
                        memcpy((u8 *)dest + bytes_copied,
                               (u8 *)va + page_offset, to_copy);
                }

                bytes_copied += to_copy;
                if (kernel_if->host)
                        kunmap(kernel_if->u.h.page[page_index]);
        }

        return VMCI_SUCCESS;
}
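
/*
 * Offset arithmetic example (illustrative, not from the original source):
 * with PAGE_SIZE = 4096, a queue_offset of 5000 maps to page_index = 1 and
 * page_offset = 904, so a host-side copy kmap()s u.h.page[1] while a
 * guest-side copy reads u.g.vas[2] (payload page 1, after the header page).
 */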

/*
 * Allocates two lists of PPNs - one for the produce queue and one for the
 * consume queue - and populates them from the queues' DMA addresses.
 */
static int qp_alloc_ppn_set(void *prod_q,
                            u64 num_produce_pages,
                            void *cons_q,
                            u64 num_consume_pages, struct ppn_set *ppn_set)
{
        u32 *produce_ppns;
        u32 *consume_ppns;
        struct vmci_queue *produce_q = prod_q;
        struct vmci_queue *consume_q = cons_q;
        u64 i;

        if (!produce_q || !num_produce_pages || !consume_q ||
            !num_consume_pages || !ppn_set)
                return VMCI_ERROR_INVALID_ARGS;

        if (ppn_set->initialized)
                return VMCI_ERROR_ALREADY_EXISTS;

        produce_ppns =
            kmalloc(num_produce_pages * sizeof(*produce_ppns), GFP_KERNEL);
        if (!produce_ppns)
                return VMCI_ERROR_NO_MEM;

        consume_ppns =
            kmalloc(num_consume_pages * sizeof(*consume_ppns), GFP_KERNEL);
        if (!consume_ppns) {
                kfree(produce_ppns);
                return VMCI_ERROR_NO_MEM;
        }

        for (i = 0; i < num_produce_pages; i++) {
                unsigned long pfn;

                produce_ppns[i] =
                    produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
                pfn = produce_ppns[i];

                /* On 64-bit, fail if the PFN does not fit in 32 bits. */
                if (sizeof(pfn) > sizeof(*produce_ppns)
                    && pfn != produce_ppns[i])
                        goto ppn_error;
        }

        for (i = 0; i < num_consume_pages; i++) {
                unsigned long pfn;

                consume_ppns[i] =
                    consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
                pfn = consume_ppns[i];

                /* On 64-bit, fail if the PFN does not fit in 32 bits. */
                if (sizeof(pfn) > sizeof(*consume_ppns)
                    && pfn != consume_ppns[i])
                        goto ppn_error;
        }

        ppn_set->num_produce_pages = num_produce_pages;
        ppn_set->num_consume_pages = num_consume_pages;
        ppn_set->produce_ppns = produce_ppns;
        ppn_set->consume_ppns = consume_ppns;
        ppn_set->initialized = true;
        return VMCI_SUCCESS;

 ppn_error:
        kfree(produce_ppns);
        kfree(consume_ppns);
        return VMCI_ERROR_INVALID_ARGS;
}

static void qp_free_ppn_set(struct ppn_set *ppn_set)
{
        if (ppn_set->initialized) {
                kfree(ppn_set->produce_ppns);
                kfree(ppn_set->consume_ppns);
        }
        memset(ppn_set, 0, sizeof(*ppn_set));
}

static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
{
        memcpy(call_buf, ppn_set->produce_ppns,
               ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns));
        memcpy(call_buf +
               ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns),
               ppn_set->consume_ppns,
               ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns));

        return VMCI_SUCCESS;
}

static int qp_memcpy_to_queue(struct vmci_queue *queue,
                              u64 queue_offset,
                              const void *src, size_t src_offset, size_t size)
{
        return __qp_memcpy_to_queue(queue, queue_offset,
                                    (u8 *)src + src_offset, size, false);
}

static int qp_memcpy_from_queue(void *dest,
                                size_t dest_offset,
                                const struct vmci_queue *queue,
                                u64 queue_offset, size_t size)
{
        return __qp_memcpy_from_queue((u8 *)dest + dest_offset,
                                      queue, queue_offset, size, false);
}

static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
                                  u64 queue_offset,
                                  const void *src,
                                  size_t src_offset, size_t size)
{
        /*
         * src is really a struct iovec *; the iovec tracks its own offset,
         * so src_offset is ignored.
         */
        return __qp_memcpy_to_queue(queue, queue_offset, src, size, true);
}

static int qp_memcpy_from_queue_iov(void *dest,
                                    size_t dest_offset,
                                    const struct vmci_queue *queue,
                                    u64 queue_offset, size_t size)
{
        /*
         * dest is really a struct iovec *; the iovec tracks its own offset,
         * so dest_offset is ignored.
         */
        return __qp_memcpy_from_queue(dest, queue, queue_offset, size, true);
}

/*
 * Allocates bookkeeping for a host-side queue; the backing pages are
 * registered separately from user memory.
 */
static struct vmci_queue *qp_host_alloc_queue(u64 size)
{
        struct vmci_queue *queue;
        size_t queue_page_size;
        const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
        const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));

        if (num_pages > (SIZE_MAX - queue_size) /
                 sizeof(*queue->kernel_if->u.h.page))
                return NULL;

        queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);

        queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
        if (queue) {
                queue->q_header = NULL;
                queue->saved_header = NULL;
                queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
                queue->kernel_if->host = true;
                queue->kernel_if->mutex = NULL;
                queue->kernel_if->num_pages = num_pages;
                queue->kernel_if->u.h.header_page =
                    (struct page **)((u8 *)queue + queue_size);
                queue->kernel_if->u.h.page =
                        &queue->kernel_if->u.h.header_page[1];
        }

        return queue;
}

/*
 * Frees a queue allocated with qp_host_alloc_queue().
 */
static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size)
{
        kfree(queue);
}

/*
 * Only host-side queues need the mutex; guest queues leave it NULL.  The
 * produce and consume queues share a single mutex.
 */
static void qp_init_queue_mutex(struct vmci_queue *produce_q,
                                struct vmci_queue *consume_q)
{
        if (produce_q->kernel_if->host) {
                produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
                consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
                mutex_init(produce_q->kernel_if->mutex);
        }
}

static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q,
                                   struct vmci_queue *consume_q)
{
        if (produce_q->kernel_if->host) {
                produce_q->kernel_if->mutex = NULL;
                consume_q->kernel_if->mutex = NULL;
        }
}

static void qp_acquire_queue_mutex(struct vmci_queue *queue)
{
        if (queue->kernel_if->host)
                mutex_lock(queue->kernel_if->mutex);
}

static void qp_release_queue_mutex(struct vmci_queue *queue)
{
        if (queue->kernel_if->host)
                mutex_unlock(queue->kernel_if->mutex);
}

static void qp_release_pages(struct page **pages,
                             u64 num_pages, bool dirty)
{
        int i;

        for (i = 0; i < num_pages; i++) {
                if (dirty)
                        set_page_dirty(pages[i]);

                page_cache_release(pages[i]);
                pages[i] = NULL;
        }
}

static int qp_host_get_user_memory(u64 produce_uva,
                                   u64 consume_uva,
                                   struct vmci_queue *produce_q,
                                   struct vmci_queue *consume_q)
{
        int retval;
        int err = VMCI_SUCCESS;

        retval = get_user_pages_fast((uintptr_t) produce_uva,
                                     produce_q->kernel_if->num_pages, 1,
                                     produce_q->kernel_if->u.h.header_page);
        if (retval < produce_q->kernel_if->num_pages) {
                pr_warn("get_user_pages(produce) failed (retval=%d)", retval);
                qp_release_pages(produce_q->kernel_if->u.h.header_page,
                                 retval, false);
                err = VMCI_ERROR_NO_MEM;
                goto out;
        }

        retval = get_user_pages_fast((uintptr_t) consume_uva,
                                     consume_q->kernel_if->num_pages, 1,
                                     consume_q->kernel_if->u.h.header_page);
        if (retval < consume_q->kernel_if->num_pages) {
                pr_warn("get_user_pages(consume) failed (retval=%d)", retval);
                qp_release_pages(consume_q->kernel_if->u.h.header_page,
                                 retval, false);
                qp_release_pages(produce_q->kernel_if->u.h.header_page,
                                 produce_q->kernel_if->num_pages, false);
                err = VMCI_ERROR_NO_MEM;
        }

 out:
        return err;
}

/*
 * Registers user memory for a queue pair.  The produce pages come first in
 * the page store, followed by the consume pages.
 */
static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
                                        struct vmci_queue *produce_q,
                                        struct vmci_queue *consume_q)
{
        u64 produce_uva;
        u64 consume_uva;

        produce_uva = page_store->pages;
        consume_uva = page_store->pages +
            produce_q->kernel_if->num_pages * PAGE_SIZE;
        return qp_host_get_user_memory(produce_uva, consume_uva, produce_q,
                                       consume_q);
}

/*
 * Releases and clears the pinned user pages of both queues.
 */
static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
                                           struct vmci_queue *consume_q)
{
        qp_release_pages(produce_q->kernel_if->u.h.header_page,
                         produce_q->kernel_if->num_pages, true);
        memset(produce_q->kernel_if->u.h.header_page, 0,
               sizeof(*produce_q->kernel_if->u.h.header_page) *
               produce_q->kernel_if->num_pages);
        qp_release_pages(consume_q->kernel_if->u.h.header_page,
                         consume_q->kernel_if->num_pages, true);
        memset(consume_q->kernel_if->u.h.header_page, 0,
               sizeof(*consume_q->kernel_if->u.h.header_page) *
               consume_q->kernel_if->num_pages);
}

/*
 * Maps the produce and consume queue headers into the host kernel's
 * address space, if they are not already mapped.
 */
static int qp_host_map_queues(struct vmci_queue *produce_q,
                              struct vmci_queue *consume_q)
{
        int result;

        if (!produce_q->q_header || !consume_q->q_header) {
                struct page *headers[2];

                if (produce_q->q_header != consume_q->q_header)
                        return VMCI_ERROR_QUEUEPAIR_MISMATCH;

                if (produce_q->kernel_if->u.h.header_page == NULL ||
                    *produce_q->kernel_if->u.h.header_page == NULL)
                        return VMCI_ERROR_UNAVAILABLE;

                headers[0] = *produce_q->kernel_if->u.h.header_page;
                headers[1] = *consume_q->kernel_if->u.h.header_page;

                produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
                if (produce_q->q_header != NULL) {
                        consume_q->q_header =
                            (struct vmci_queue_header *)((u8 *)
                                                         produce_q->q_header +
                                                         PAGE_SIZE);
                        result = VMCI_SUCCESS;
                } else {
                        pr_warn("vmap failed\n");
                        result = VMCI_ERROR_NO_MEM;
                }
        } else {
                result = VMCI_SUCCESS;
        }

        return result;
}
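
/*
 * Note (added commentary, not from the original source): the mapping above
 * relies on vmap() placing the two header pages at consecutive virtual
 * addresses, so the consume queue header is simply
 * produce_q->q_header + PAGE_SIZE, and a single vunmap() of the lower
 * address tears both down.
 */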

static int qp_host_unmap_queues(u32 gid,
                                struct vmci_queue *produce_q,
                                struct vmci_queue *consume_q)
{
        if (produce_q->q_header) {
                if (produce_q->q_header < consume_q->q_header)
                        vunmap(produce_q->q_header);
                else
                        vunmap(consume_q->q_header);

                produce_q->q_header = NULL;
                consume_q->q_header = NULL;
        }

        return VMCI_SUCCESS;
}

static struct qp_entry *qp_list_find(struct qp_list *qp_list,
                                     struct vmci_handle handle)
{
        struct qp_entry *entry;

        if (vmci_handle_is_invalid(handle))
                return NULL;

        list_for_each_entry(entry, &qp_list->head, list_item) {
                if (vmci_handle_is_equal(entry->handle, handle))
                        return entry;
        }

        return NULL;
}

static struct qp_guest_endpoint *
qp_guest_handle_to_entry(struct vmci_handle handle)
{
        struct qp_guest_endpoint *entry;
        struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle);

        entry = qp ? container_of(
                qp, struct qp_guest_endpoint, qp) : NULL;
        return entry;
}

static struct qp_broker_entry *
qp_broker_handle_to_entry(struct vmci_handle handle)
{
        struct qp_broker_entry *entry;
        struct qp_entry *qp = qp_list_find(&qp_broker_list, handle);

        entry = qp ? container_of(
                qp, struct qp_broker_entry, qp) : NULL;
        return entry;
}

static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
{
        u32 context_id = vmci_get_context_id();
        struct vmci_event_qp ev;

        ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
        ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
                                          VMCI_CONTEXT_RESOURCE_ID);
        ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
        ev.msg.event_data.event =
            attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
        ev.payload.peer_id = context_id;
        ev.payload.handle = handle;

        return vmci_event_dispatch(&ev.msg.hdr);
}

/*
 * Allocates and initializes a qp_guest_endpoint structure and adds it as a
 * resource.
 */
static struct qp_guest_endpoint *
qp_guest_endpoint_create(struct vmci_handle handle,
                         u32 peer,
                         u32 flags,
                         u64 produce_size,
                         u64 consume_size,
                         void *produce_q,
                         void *consume_q)
{
        int result;
        struct qp_guest_endpoint *entry;
        /* One extra page each for the produce and consume queue headers. */
        const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) +
                                 DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;

        if (vmci_handle_is_invalid(handle)) {
                u32 context_id = vmci_get_context_id();

                handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
        }

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (entry) {
                entry->qp.peer = peer;
                entry->qp.flags = flags;
                entry->qp.produce_size = produce_size;
                entry->qp.consume_size = consume_size;
                entry->qp.ref_count = 0;
                entry->num_ppns = num_ppns;
                entry->produce_q = produce_q;
                entry->consume_q = consume_q;
                INIT_LIST_HEAD(&entry->qp.list_item);

                /* Add the resource object. */
                result = vmci_resource_add(&entry->resource,
                                           VMCI_RESOURCE_TYPE_QPAIR_GUEST,
                                           handle);
                entry->qp.handle = vmci_resource_handle(&entry->resource);
                if ((result != VMCI_SUCCESS) ||
                    qp_list_find(&qp_guest_endpoints, entry->qp.handle)) {
                        pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
                                handle.context, handle.resource, result);
                        kfree(entry);
                        entry = NULL;
                }
        }
        return entry;
}

static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
{
        qp_free_ppn_set(&entry->ppn_set);
        qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
        qp_free_queue(entry->produce_q, entry->qp.produce_size);
        qp_free_queue(entry->consume_q, entry->qp.consume_size);

        vmci_resource_remove(&entry->resource);

        kfree(entry);
}

static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
{
        struct vmci_qp_alloc_msg *alloc_msg;
        size_t msg_size;
        int result;

        if (!entry || entry->num_ppns <= 2)
                return VMCI_ERROR_INVALID_ARGS;

        msg_size = sizeof(*alloc_msg) +
            (size_t) entry->num_ppns * sizeof(u32);
        alloc_msg = kmalloc(msg_size, GFP_KERNEL);
        if (!alloc_msg)
                return VMCI_ERROR_NO_MEM;

        alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
                                              VMCI_QUEUEPAIR_ALLOC);
        alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
        alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
        alloc_msg->handle = entry->qp.handle;
        alloc_msg->peer = entry->qp.peer;
        alloc_msg->flags = entry->qp.flags;
        alloc_msg->produce_size = entry->qp.produce_size;
        alloc_msg->consume_size = entry->qp.consume_size;
        alloc_msg->num_ppns = entry->num_ppns;

        result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg),
                                     &entry->ppn_set);
        if (result == VMCI_SUCCESS)
                result = vmci_send_datagram(&alloc_msg->hdr);

        kfree(alloc_msg);

        return result;
}

static int qp_detatch_hypercall(struct vmci_handle handle)
{
        struct vmci_qp_detach_msg detach_msg;

        detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
                                              VMCI_QUEUEPAIR_DETACH);
        detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
        detach_msg.hdr.payload_size = sizeof(handle);
        detach_msg.handle = handle;

        return vmci_send_datagram(&detach_msg.hdr);
}

static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry)
{
        if (entry)
                list_add(&entry->list_item, &qp_list->head);
}

static void qp_list_remove_entry(struct qp_list *qp_list,
                                 struct qp_entry *entry)
{
        if (entry)
                list_del(&entry->list_item);
}

/*
 * Detaches a guest endpoint from a queue pair and releases its reference.
 */
static int qp_detatch_guest_work(struct vmci_handle handle)
{
        int result;
        struct qp_guest_endpoint *entry;
        u32 ref_count = ~0;

        mutex_lock(&qp_guest_endpoints.mutex);

        entry = qp_guest_handle_to_entry(handle);
        if (!entry) {
                mutex_unlock(&qp_guest_endpoints.mutex);
                return VMCI_ERROR_NOT_FOUND;
        }

        if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
                result = VMCI_SUCCESS;

                if (entry->qp.ref_count > 1) {
                        result = qp_notify_peer_local(false, handle);
                        /*
                         * The other endpoint may still be attached, so the
                         * entry is released below regardless of the
                         * notification result.
                         */
                }
        } else {
                result = qp_detatch_hypercall(handle);
                if (result < VMCI_SUCCESS) {
                        /*
                         * The detach hypercall failed, so the hypervisor may
                         * still reference the queues; keep the entry and its
                         * memory around.
                         */
                        mutex_unlock(&qp_guest_endpoints.mutex);
                        return result;
                }
        }

        /*
         * Drop our reference and, on the last reference, remove the entry
         * from the list.
         */
        entry->qp.ref_count--;
        if (entry->qp.ref_count == 0)
                qp_list_remove_entry(&qp_guest_endpoints, &entry->qp);

        /* Read the ref count while still holding the list mutex. */
        if (entry)
                ref_count = entry->qp.ref_count;

        mutex_unlock(&qp_guest_endpoints.mutex);

        if (ref_count == 0)
                qp_guest_endpoint_destroy(entry);

        return result;
}

/*
 * Allocates or attaches to a queue pair on behalf of a guest endpoint.
 */
static int qp_alloc_guest_work(struct vmci_handle *handle,
                               struct vmci_queue **produce_q,
                               u64 produce_size,
                               struct vmci_queue **consume_q,
                               u64 consume_size,
                               u32 peer,
                               u32 flags,
                               u32 priv_flags)
{
        const u64 num_produce_pages =
            DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
        const u64 num_consume_pages =
            DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
        void *my_produce_q = NULL;
        void *my_consume_q = NULL;
        int result;
        struct qp_guest_endpoint *queue_pair_entry = NULL;

        if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
                return VMCI_ERROR_NO_ACCESS;

        mutex_lock(&qp_guest_endpoints.mutex);

        queue_pair_entry = qp_guest_handle_to_entry(*handle);
        if (queue_pair_entry) {
                if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
                        /* Local attach case. */
                        if (queue_pair_entry->qp.ref_count > 1) {
                                pr_devel("Error attempting to attach more than once\n");
                                result = VMCI_ERROR_UNAVAILABLE;
                                goto error_keep_entry;
                        }

                        if (queue_pair_entry->qp.produce_size != consume_size ||
                            queue_pair_entry->qp.consume_size !=
                            produce_size ||
                            queue_pair_entry->qp.flags !=
                            (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
                                pr_devel("Error mismatched queue pair in local attach\n");
                                result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
                                goto error_keep_entry;
                        }

                        /*
                         * The attacher sees the creator's queues swapped:
                         * its produce queue is the creator's consume queue.
                         */
                        result = qp_notify_peer_local(true, *handle);
                        if (result < VMCI_SUCCESS)
                                goto error_keep_entry;

                        my_produce_q = queue_pair_entry->consume_q;
                        my_consume_q = queue_pair_entry->produce_q;
                        goto out;
                }

                result = VMCI_ERROR_ALREADY_EXISTS;
                goto error_keep_entry;
        }

        my_produce_q = qp_alloc_queue(produce_size, flags);
        if (!my_produce_q) {
                pr_warn("Error allocating pages for produce queue\n");
                result = VMCI_ERROR_NO_MEM;
                goto error;
        }

        my_consume_q = qp_alloc_queue(consume_size, flags);
        if (!my_consume_q) {
                pr_warn("Error allocating pages for consume queue\n");
                result = VMCI_ERROR_NO_MEM;
                goto error;
        }

        queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
                                                    produce_size, consume_size,
                                                    my_produce_q, my_consume_q);
        if (!queue_pair_entry) {
                pr_warn("Error allocating memory in %s\n", __func__);
                result = VMCI_ERROR_NO_MEM;
                goto error;
        }

        result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q,
                                  num_consume_pages,
                                  &queue_pair_entry->ppn_set);
        if (result < VMCI_SUCCESS) {
                pr_warn("qp_alloc_ppn_set failed\n");
                goto error;
        }

        if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
                /* Local create case; no hypercall is needed. */
                u32 context_id = vmci_get_context_id();

                /*
                 * A local queue pair's handle context and peer must both
                 * refer to the current context, and it cannot be created
                 * with the attach-only flag.
                 */
                if (queue_pair_entry->qp.handle.context != context_id ||
                    (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
                     queue_pair_entry->qp.peer != context_id)) {
                        result = VMCI_ERROR_NO_ACCESS;
                        goto error;
                }

                if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
                        result = VMCI_ERROR_NOT_FOUND;
                        goto error;
                }
        } else {
                result = qp_alloc_hypercall(queue_pair_entry);
                if (result < VMCI_SUCCESS) {
                        pr_warn("qp_alloc_hypercall result = %d\n", result);
                        goto error;
                }
        }

        qp_init_queue_mutex((struct vmci_queue *)my_produce_q,
                            (struct vmci_queue *)my_consume_q);

        qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);

 out:
        queue_pair_entry->qp.ref_count++;
        *handle = queue_pair_entry->qp.handle;
        *produce_q = (struct vmci_queue *)my_produce_q;
        *consume_q = (struct vmci_queue *)my_consume_q;

        /*
         * The queue headers of a local queue pair are initialized here on
         * first create; for non-local pairs the hypervisor does it.
         */
        if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
            queue_pair_entry->qp.ref_count == 1) {
                vmci_q_header_init((*produce_q)->q_header, *handle);
                vmci_q_header_init((*consume_q)->q_header, *handle);
        }

        mutex_unlock(&qp_guest_endpoints.mutex);

        return VMCI_SUCCESS;

 error:
        mutex_unlock(&qp_guest_endpoints.mutex);
        if (queue_pair_entry) {
                /* The queues are freed inside the destroy routine. */
                qp_guest_endpoint_destroy(queue_pair_entry);
        } else {
                qp_free_queue(my_produce_q, produce_size);
                qp_free_queue(my_consume_q, consume_size);
        }
        return result;

 error_keep_entry:
        /* Only used when an existing entry was found and must be kept. */
        mutex_unlock(&qp_guest_endpoints.mutex);
        return result;
}

/*
 * Creates a new broker entry for the given handle on behalf of the given
 * context.  Queue sizes are stored from the guest's point of view, so when
 * the host creates the pair, produce and consume sizes are swapped.
 */
static int qp_broker_create(struct vmci_handle handle,
                            u32 peer,
                            u32 flags,
                            u32 priv_flags,
                            u64 produce_size,
                            u64 consume_size,
                            struct vmci_qp_page_store *page_store,
                            struct vmci_ctx *context,
                            vmci_event_release_cb wakeup_cb,
                            void *client_data, struct qp_broker_entry **ent)
{
        struct qp_broker_entry *entry = NULL;
        const u32 context_id = vmci_ctx_get_id(context);
        bool is_local = flags & VMCI_QPFLAG_LOCAL;
        int result;
        u64 guest_produce_size;
        u64 guest_consume_size;

        /* Do not create if the caller asked not to. */
        if (flags & VMCI_QPFLAG_ATTACH_ONLY)
                return VMCI_ERROR_NOT_FOUND;

        /* The handle must name either the creator or the intended peer. */
        if (handle.context != context_id && handle.context != peer)
                return VMCI_ERROR_NO_ACCESS;

        if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer))
                return VMCI_ERROR_DST_UNREACHABLE;

        /* A local queue pair can only have the creator itself as peer. */
        if (is_local && peer != VMCI_INVALID_ID && context_id != peer)
                return VMCI_ERROR_NO_ACCESS;

        entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry)
                return VMCI_ERROR_NO_MEM;

        if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) {
                /*
                 * The host creator's produce queue is the guest's consume
                 * queue and vice versa, so swap the sizes to keep the
                 * stored values in the guest's frame of reference.
                 */
                guest_produce_size = consume_size;
                guest_consume_size = produce_size;
        } else {
                guest_produce_size = produce_size;
                guest_consume_size = consume_size;
        }

        entry->qp.handle = handle;
        entry->qp.peer = peer;
        entry->qp.flags = flags;
        entry->qp.produce_size = guest_produce_size;
        entry->qp.consume_size = guest_consume_size;
        entry->qp.ref_count = 1;
        entry->create_id = context_id;
        entry->attach_id = VMCI_INVALID_ID;
        entry->state = VMCIQPB_NEW;
        entry->require_trusted_attach =
            !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED);
        entry->created_by_trusted =
            !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED);
        entry->vmci_page_files = false;
        entry->wakeup_cb = wakeup_cb;
        entry->client_data = client_data;
        entry->produce_q = qp_host_alloc_queue(guest_produce_size);
        if (entry->produce_q == NULL) {
                result = VMCI_ERROR_NO_MEM;
                goto error;
        }
        entry->consume_q = qp_host_alloc_queue(guest_consume_size);
        if (entry->consume_q == NULL) {
                result = VMCI_ERROR_NO_MEM;
                goto error;
        }

        qp_init_queue_mutex(entry->produce_q, entry->consume_q);

        INIT_LIST_HEAD(&entry->qp.list_item);

        if (is_local) {
                u8 *tmp;

                entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp),
                                           PAGE_SIZE, GFP_KERNEL);
                if (entry->local_mem == NULL) {
                        result = VMCI_ERROR_NO_MEM;
                        goto error;
                }
                entry->state = VMCIQPB_CREATED_MEM;
                entry->produce_q->q_header = entry->local_mem;
                tmp = (u8 *)entry->local_mem + PAGE_SIZE *
                    (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1);
                entry->consume_q->q_header = (struct vmci_queue_header *)tmp;
        } else if (page_store) {
                /* The creator supplied its memory up front; register it. */
                result = qp_host_register_user_memory(page_store,
                                                      entry->produce_q,
                                                      entry->consume_q);
                if (result < VMCI_SUCCESS)
                        goto error;

                entry->state = VMCIQPB_CREATED_MEM;
        } else {
                /*
                 * No memory yet; it will be supplied later, e.g. via
                 * vmci_qp_broker_set_page_store() or vmci_qp_broker_map().
                 */
                entry->state = VMCIQPB_CREATED_NO_MEM;
        }

        qp_list_add_entry(&qp_broker_list, &entry->qp);
        if (ent != NULL)
                *ent = entry;

        /* Add the broker entry as a resource. */
        result = vmci_resource_add(&entry->resource,
                                   VMCI_RESOURCE_TYPE_QPAIR_HOST,
                                   handle);
        if (result != VMCI_SUCCESS) {
                pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
                        handle.context, handle.resource, result);
                goto error;
        }

        entry->qp.handle = vmci_resource_handle(&entry->resource);
        if (is_local) {
                vmci_q_header_init(entry->produce_q->q_header,
                                   entry->qp.handle);
                vmci_q_header_init(entry->consume_q->q_header,
                                   entry->qp.handle);
        }

        vmci_ctx_qp_create(context, entry->qp.handle);

        return VMCI_SUCCESS;

 error:
        if (entry != NULL) {
                qp_host_free_queue(entry->produce_q, guest_produce_size);
                qp_host_free_queue(entry->consume_q, guest_consume_size);
                kfree(entry);
        }

        return result;
}

static int qp_notify_peer(bool attach,
                          struct vmci_handle handle,
                          u32 my_id,
                          u32 peer_id)
{
        int rv;
        struct vmci_event_qp ev;

        if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID ||
            peer_id == VMCI_INVALID_ID)
                return VMCI_ERROR_INVALID_ARGS;

        ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
        ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
                                          VMCI_CONTEXT_RESOURCE_ID);
        ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
        ev.msg.event_data.event = attach ?
            VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
        ev.payload.handle = handle;
        ev.payload.peer_id = my_id;

        rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
                                    &ev.msg.hdr, false);
        if (rv < VMCI_SUCCESS)
                pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n",
                        attach ? "ATTACH" : "DETACH", peer_id);

        return rv;
}

/*
 * Attaches a second endpoint (the given context) to an existing broker
 * entry, performing access checks and registering guest memory if supplied.
 */
static int qp_broker_attach(struct qp_broker_entry *entry,
                            u32 peer,
                            u32 flags,
                            u32 priv_flags,
                            u64 produce_size,
                            u64 consume_size,
                            struct vmci_qp_page_store *page_store,
                            struct vmci_ctx *context,
                            vmci_event_release_cb wakeup_cb,
                            void *client_data,
                            struct qp_broker_entry **ent)
{
        const u32 context_id = vmci_ctx_get_id(context);
        bool is_local = flags & VMCI_QPFLAG_LOCAL;
        int result;

        if (entry->state != VMCIQPB_CREATED_NO_MEM &&
            entry->state != VMCIQPB_CREATED_MEM)
                return VMCI_ERROR_UNAVAILABLE;

        if (is_local) {
                if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) ||
                    context_id != entry->create_id) {
                        return VMCI_ERROR_INVALID_ARGS;
                }
        } else if (context_id == entry->create_id ||
                   context_id == entry->attach_id) {
                return VMCI_ERROR_ALREADY_EXISTS;
        }

        if (VMCI_CONTEXT_IS_VM(context_id) &&
            VMCI_CONTEXT_IS_VM(entry->create_id))
                return VMCI_ERROR_DST_UNREACHABLE;

        /* A restricted context may only attach to a trusted creator. */
        if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
            !entry->created_by_trusted)
                return VMCI_ERROR_NO_ACCESS;

        /* The creator may require the attacher to be trusted. */
        if (entry->require_trusted_attach &&
            (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED)))
                return VMCI_ERROR_NO_ACCESS;

        /* The creator may have named a specific peer. */
        if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
                return VMCI_ERROR_NO_ACCESS;

        if (entry->create_id == VMCI_HOST_CONTEXT_ID) {
                /*
                 * Attaching to a host-created pair requires a context that
                 * supports host queue pairs.
                 */
                if (!vmci_ctx_supports_host_qp(context))
                        return VMCI_ERROR_INVALID_RESOURCE;

        } else if (context_id == VMCI_HOST_CONTEXT_ID) {
                struct vmci_ctx *create_context;
                bool supports_host_qp;

                /*
                 * The host is attaching, so verify the creating context
                 * supports host queue pairs.
                 */
                create_context = vmci_ctx_get(entry->create_id);
                supports_host_qp = vmci_ctx_supports_host_qp(create_context);
                vmci_ctx_put(create_context);

                if (!supports_host_qp)
                        return VMCI_ERROR_INVALID_RESOURCE;
        }

        if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER))
                return VMCI_ERROR_QUEUEPAIR_MISMATCH;

        if (context_id != VMCI_HOST_CONTEXT_ID) {
                /*
                 * The stored sizes are from the guest's point of view, so a
                 * guest attacher must match them directly, while the host
                 * attacher sees them swapped.
                 */
                if (entry->qp.produce_size != produce_size ||
                    entry->qp.consume_size != consume_size) {
                        return VMCI_ERROR_QUEUEPAIR_MISMATCH;
                }
        } else if (entry->qp.produce_size != consume_size ||
                   entry->qp.consume_size != produce_size) {
                return VMCI_ERROR_QUEUEPAIR_MISMATCH;
        }

        if (context_id != VMCI_HOST_CONTEXT_ID) {
                /*
                 * A guest can only attach while the pair is in the
                 * CREATED_NO_MEM state; if it supplied a page store, the
                 * guest memory is registered now.
                 */
                if (entry->state != VMCIQPB_CREATED_NO_MEM)
                        return VMCI_ERROR_INVALID_ARGS;

                if (page_store != NULL) {
                        result = qp_host_register_user_memory(page_store,
                                                              entry->produce_q,
                                                              entry->consume_q);
                        if (result < VMCI_SUCCESS)
                                return result;

                        entry->state = VMCIQPB_ATTACHED_MEM;
                } else {
                        entry->state = VMCIQPB_ATTACHED_NO_MEM;
                }
        } else if (entry->state == VMCIQPB_CREATED_NO_MEM) {
                /*
                 * The host cannot attach until the guest memory backing a
                 * guest-created pair is available.
                 */
                return VMCI_ERROR_UNAVAILABLE;
        } else {
                /* The host side is attaching to a guest-created pair. */
                entry->state = VMCIQPB_ATTACHED_MEM;
        }

        if (entry->state == VMCIQPB_ATTACHED_MEM) {
                result =
                    qp_notify_peer(true, entry->qp.handle, context_id,
                                   entry->create_id);
                if (result < VMCI_SUCCESS)
                        pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
                                entry->create_id, entry->qp.handle.context,
                                entry->qp.handle.resource);
        }

        entry->attach_id = context_id;
        entry->qp.ref_count++;
        if (wakeup_cb) {
                entry->wakeup_cb = wakeup_cb;
                entry->client_data = client_data;
        }

        /*
         * Local queue pairs are already tracked by the creating context, so
         * only register non-local attaches.
         */
        if (!is_local)
                vmci_ctx_qp_create(context, entry->qp.handle);

        if (ent != NULL)
                *ent = entry;

        return VMCI_SUCCESS;
}

static int qp_broker_alloc(struct vmci_handle handle,
                           u32 peer,
                           u32 flags,
                           u32 priv_flags,
                           u64 produce_size,
                           u64 consume_size,
                           struct vmci_qp_page_store *page_store,
                           struct vmci_ctx *context,
                           vmci_event_release_cb wakeup_cb,
                           void *client_data,
                           struct qp_broker_entry **ent,
                           bool *swap)
{
        const u32 context_id = vmci_ctx_get_id(context);
        bool create;
        struct qp_broker_entry *entry = NULL;
        bool is_local = flags & VMCI_QPFLAG_LOCAL;
        int result;

        if (vmci_handle_is_invalid(handle) ||
            (flags & ~VMCI_QP_ALL_FLAGS) || is_local ||
            !(produce_size || consume_size) ||
            !context || context_id == VMCI_INVALID_ID ||
            handle.context == VMCI_INVALID_ID) {
                return VMCI_ERROR_INVALID_ARGS;
        }

        if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store))
                return VMCI_ERROR_INVALID_ARGS;

        mutex_lock(&qp_broker_list.mutex);

        if (!is_local && vmci_ctx_qp_exists(context, handle)) {
                pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n",
                         context_id, handle.context, handle.resource);
                mutex_unlock(&qp_broker_list.mutex);
                return VMCI_ERROR_ALREADY_EXISTS;
        }

        if (handle.resource != VMCI_INVALID_ID)
                entry = qp_broker_handle_to_entry(handle);

        if (!entry) {
                create = true;
                result =
                    qp_broker_create(handle, peer, flags, priv_flags,
                                     produce_size, consume_size, page_store,
                                     context, wakeup_cb, client_data, ent);
        } else {
                create = false;
                result =
                    qp_broker_attach(entry, peer, flags, priv_flags,
                                     produce_size, consume_size, page_store,
                                     context, wakeup_cb, client_data, ent);
        }

        mutex_unlock(&qp_broker_list.mutex);

        if (swap)
                *swap = (context_id == VMCI_HOST_CONTEXT_ID) &&
                    !(create && is_local);

        return result;
}

static int qp_alloc_host_work(struct vmci_handle *handle,
                              struct vmci_queue **produce_q,
                              u64 produce_size,
                              struct vmci_queue **consume_q,
                              u64 consume_size,
                              u32 peer,
                              u32 flags,
                              u32 priv_flags,
                              vmci_event_release_cb wakeup_cb,
                              void *client_data)
{
        struct vmci_handle new_handle;
        struct vmci_ctx *context;
        struct qp_broker_entry *entry;
        int result;
        bool swap;

        if (vmci_handle_is_invalid(*handle)) {
                new_handle = vmci_make_handle(
                        VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID);
        } else
                new_handle = *handle;

        context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
        entry = NULL;
        result =
            qp_broker_alloc(new_handle, peer, flags, priv_flags,
                            produce_size, consume_size, NULL, context,
                            wakeup_cb, client_data, &entry, &swap);
        if (result == VMCI_SUCCESS) {
                if (swap) {
                        *produce_q = entry->consume_q;
                        *consume_q = entry->produce_q;
                } else {
                        *produce_q = entry->produce_q;
                        *consume_q = entry->consume_q;
                }

                *handle = vmci_resource_handle(&entry->resource);
        } else {
                *handle = VMCI_INVALID_HANDLE;
                pr_devel("queue pair broker failed to alloc (result=%d)\n",
                         result);
        }
        vmci_ctx_put(context);
        return result;
}

/*
 * Allocates a VMCI queue pair for either a guest or a host endpoint.
 */
int vmci_qp_alloc(struct vmci_handle *handle,
                  struct vmci_queue **produce_q,
                  u64 produce_size,
                  struct vmci_queue **consume_q,
                  u64 consume_size,
                  u32 peer,
                  u32 flags,
                  u32 priv_flags,
                  bool guest_endpoint,
                  vmci_event_release_cb wakeup_cb,
                  void *client_data)
{
        if (!handle || !produce_q || !consume_q ||
            (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS))
                return VMCI_ERROR_INVALID_ARGS;

        if (guest_endpoint) {
                return qp_alloc_guest_work(handle, produce_q,
                                           produce_size, consume_q,
                                           consume_size, peer,
                                           flags, priv_flags);
        } else {
                return qp_alloc_host_work(handle, produce_q,
                                          produce_size, consume_q,
                                          consume_size, peer, flags,
                                          priv_flags, wakeup_cb, client_data);
        }
}
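
/*
 * Usage sketch (hypothetical caller, not part of this file): a guest
 * endpoint could allocate a 4 KiB/4 KiB pair roughly as follows.
 *
 *	struct vmci_handle h = VMCI_INVALID_HANDLE;
 *	struct vmci_queue *prod, *cons;
 *	int rv = vmci_qp_alloc(&h, &prod, PAGE_SIZE, &cons, PAGE_SIZE,
 *			       VMCI_INVALID_ID, 0, VMCI_NO_PRIVILEGE_FLAGS,
 *			       true, NULL, NULL);
 *	if (rv < VMCI_SUCCESS)
 *		return rv;
 */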

static int qp_detatch_host_work(struct vmci_handle handle)
{
        int result;
        struct vmci_ctx *context;

        context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);

        result = vmci_qp_broker_detach(handle, context);

        vmci_ctx_put(context);
        return result;
}

static int qp_detatch(struct vmci_handle handle, bool guest_endpoint)
{
        if (vmci_handle_is_invalid(handle))
                return VMCI_ERROR_INVALID_ARGS;

        if (guest_endpoint)
                return qp_detatch_guest_work(handle);
        else
                return qp_detatch_host_work(handle);
}

static struct qp_entry *qp_list_get_head(struct qp_list *qp_list)
{
        if (!list_empty(&qp_list->head)) {
                struct qp_entry *entry =
                    list_first_entry(&qp_list->head, struct qp_entry,
                                     list_item);
                return entry;
        }

        return NULL;
}

void vmci_qp_broker_exit(void)
{
        struct qp_entry *entry;
        struct qp_broker_entry *be;

        mutex_lock(&qp_broker_list.mutex);

        while ((entry = qp_list_get_head(&qp_broker_list))) {
                be = (struct qp_broker_entry *)entry;

                qp_list_remove_entry(&qp_broker_list, entry);
                kfree(be);
        }

        mutex_unlock(&qp_broker_list.mutex);
}

int vmci_qp_broker_alloc(struct vmci_handle handle,
                         u32 peer,
                         u32 flags,
                         u32 priv_flags,
                         u64 produce_size,
                         u64 consume_size,
                         struct vmci_qp_page_store *page_store,
                         struct vmci_ctx *context)
{
        return qp_broker_alloc(handle, peer, flags, priv_flags,
                               produce_size, consume_size,
                               page_store, context, NULL, NULL, NULL, NULL);
}

/*
 * Registers the user-space page store (produce/consume UVAs supplied by
 * the VMX) with an existing broker entry and maps the queue headers.
 */
int vmci_qp_broker_set_page_store(struct vmci_handle handle,
                                  u64 produce_uva,
                                  u64 consume_uva,
                                  struct vmci_ctx *context)
{
        struct qp_broker_entry *entry;
        int result;
        const u32 context_id = vmci_ctx_get_id(context);

        if (vmci_handle_is_invalid(handle) || !context ||
            context_id == VMCI_INVALID_ID)
                return VMCI_ERROR_INVALID_ARGS;

        /* A UVA of zero is not a valid page store address. */
        if (produce_uva == 0 || consume_uva == 0)
                return VMCI_ERROR_INVALID_ARGS;

        mutex_lock(&qp_broker_list.mutex);

        if (!vmci_ctx_qp_exists(context, handle)) {
                pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
                        context_id, handle.context, handle.resource);
                result = VMCI_ERROR_NOT_FOUND;
                goto out;
        }

        entry = qp_broker_handle_to_entry(handle);
        if (!entry) {
                result = VMCI_ERROR_NOT_FOUND;
                goto out;
        }

        /*
         * Only the creator, or the attacher when the host created the
         * pair, may set the page store.
         */
        if (entry->create_id != context_id &&
            (entry->create_id != VMCI_HOST_CONTEXT_ID ||
             entry->attach_id != context_id)) {
                result = VMCI_ERROR_QUEUEPAIR_NOTOWNER;
                goto out;
        }

        if (entry->state != VMCIQPB_CREATED_NO_MEM &&
            entry->state != VMCIQPB_ATTACHED_NO_MEM) {
                result = VMCI_ERROR_UNAVAILABLE;
                goto out;
        }

        result = qp_host_get_user_memory(produce_uva, consume_uva,
                                         entry->produce_q, entry->consume_q);
        if (result < VMCI_SUCCESS)
                goto out;

        result = qp_host_map_queues(entry->produce_q, entry->consume_q);
        if (result < VMCI_SUCCESS) {
                qp_host_unregister_user_memory(entry->produce_q,
                                               entry->consume_q);
                goto out;
        }

        if (entry->state == VMCIQPB_CREATED_NO_MEM)
                entry->state = VMCIQPB_CREATED_MEM;
        else
                entry->state = VMCIQPB_ATTACHED_MEM;

        entry->vmci_page_files = true;

        if (entry->state == VMCIQPB_ATTACHED_MEM) {
                result =
                    qp_notify_peer(true, handle, context_id, entry->create_id);
                if (result < VMCI_SUCCESS) {
                        pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
                                entry->create_id, entry->qp.handle.context,
                                entry->qp.handle.resource);
                }
        }

        result = VMCI_SUCCESS;
 out:
        mutex_unlock(&qp_broker_list.mutex);
        return result;
}

static void qp_reset_saved_headers(struct qp_broker_entry *entry)
{
        entry->produce_q->saved_header = NULL;
        entry->consume_q->saved_header = NULL;
}

/*
 * Detaches the given context from the queue pair in the broker, releasing
 * memory and notifying the peer as needed.
 */
int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context)
{
        struct qp_broker_entry *entry;
        const u32 context_id = vmci_ctx_get_id(context);
        u32 peer_id;
        bool is_local = false;
        int result;

        if (vmci_handle_is_invalid(handle) || !context ||
            context_id == VMCI_INVALID_ID) {
                return VMCI_ERROR_INVALID_ARGS;
        }

        mutex_lock(&qp_broker_list.mutex);

        if (!vmci_ctx_qp_exists(context, handle)) {
                pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
                         context_id, handle.context, handle.resource);
                result = VMCI_ERROR_NOT_FOUND;
                goto out;
        }

        entry = qp_broker_handle_to_entry(handle);
        if (!entry) {
                pr_devel("Context (ID=0x%x) reports being attached to queue pair(handle=0x%x:0x%x) that isn't present in broker\n",
                         context_id, handle.context, handle.resource);
                result = VMCI_ERROR_NOT_FOUND;
                goto out;
        }

        if (context_id != entry->create_id && context_id != entry->attach_id) {
                result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
                goto out;
        }

        if (context_id == entry->create_id) {
                peer_id = entry->attach_id;
                entry->create_id = VMCI_INVALID_ID;
        } else {
                peer_id = entry->create_id;
                entry->attach_id = VMCI_INVALID_ID;
        }
        entry->qp.ref_count--;

        is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;

        if (context_id != VMCI_HOST_CONTEXT_ID) {
                bool headers_mapped;

                /*
                 * A guest endpoint is detaching: unmap the queue headers
                 * and release any registered guest memory.
                 */
                qp_acquire_queue_mutex(entry->produce_q);
                headers_mapped = entry->produce_q->q_header ||
                    entry->consume_q->q_header;
                if (QPBROKERSTATE_HAS_MEM(entry)) {
                        result =
                            qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID,
                                                 entry->produce_q,
                                                 entry->consume_q);
                        if (result < VMCI_SUCCESS)
                                pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
                                        handle.context, handle.resource,
                                        result);

                        qp_host_unregister_user_memory(entry->produce_q,
                                                       entry->consume_q);
                }

                if (!headers_mapped)
                        qp_reset_saved_headers(entry);

                qp_release_queue_mutex(entry->produce_q);

                if (!headers_mapped && entry->wakeup_cb)
                        entry->wakeup_cb(entry->client_data);

        } else {
                if (entry->wakeup_cb) {
                        entry->wakeup_cb = NULL;
                        entry->client_data = NULL;
                }
        }

        if (entry->qp.ref_count == 0) {
                qp_list_remove_entry(&qp_broker_list, &entry->qp);

                if (is_local)
                        kfree(entry->local_mem);

                qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
                qp_host_free_queue(entry->produce_q, entry->qp.produce_size);
                qp_host_free_queue(entry->consume_q, entry->qp.consume_size);

                vmci_resource_remove(&entry->resource);

                kfree(entry);

                vmci_ctx_qp_destroy(context, handle);
        } else {
                qp_notify_peer(false, handle, context_id, peer_id);
                if (context_id == VMCI_HOST_CONTEXT_ID &&
                    QPBROKERSTATE_HAS_MEM(entry)) {
                        entry->state = VMCIQPB_SHUTDOWN_MEM;
                } else {
                        entry->state = VMCIQPB_SHUTDOWN_NO_MEM;
                }

                if (!is_local)
                        vmci_ctx_qp_destroy(context, handle);

        }
        result = VMCI_SUCCESS;
 out:
        mutex_unlock(&qp_broker_list.mutex);
        return result;
}

/*
 * Makes guest memory for a queue pair available to the broker and registers
 * it as the entry's page store.
 */
int vmci_qp_broker_map(struct vmci_handle handle,
                       struct vmci_ctx *context,
                       u64 guest_mem)
{
        struct qp_broker_entry *entry;
        const u32 context_id = vmci_ctx_get_id(context);
        bool is_local = false;
        int result;

        if (vmci_handle_is_invalid(handle) || !context ||
            context_id == VMCI_INVALID_ID)
                return VMCI_ERROR_INVALID_ARGS;

        mutex_lock(&qp_broker_list.mutex);

        if (!vmci_ctx_qp_exists(context, handle)) {
                pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
                         context_id, handle.context, handle.resource);
                result = VMCI_ERROR_NOT_FOUND;
                goto out;
        }

        entry = qp_broker_handle_to_entry(handle);
        if (!entry) {
                pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
                         context_id, handle.context, handle.resource);
                result = VMCI_ERROR_NOT_FOUND;
                goto out;
        }

        if (context_id != entry->create_id && context_id != entry->attach_id) {
                result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
                goto out;
        }

        is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
        result = VMCI_SUCCESS;

        if (context_id != VMCI_HOST_CONTEXT_ID) {
                struct vmci_qp_page_store page_store;

                page_store.pages = guest_mem;
                page_store.len = QPE_NUM_PAGES(entry->qp);

                qp_acquire_queue_mutex(entry->produce_q);
                qp_reset_saved_headers(entry);
                result =
                    qp_host_register_user_memory(&page_store,
                                                 entry->produce_q,
                                                 entry->consume_q);
                qp_release_queue_mutex(entry->produce_q);
                if (result == VMCI_SUCCESS) {
                        /* Move state from *_NO_MEM to *_MEM. */
                        entry->state++;

                        if (entry->wakeup_cb)
                                entry->wakeup_cb(entry->client_data);
                }
        }

 out:
        mutex_unlock(&qp_broker_list.mutex);
        return result;
}

/*
 * Saves a copy of the queue headers so that peers can continue to inspect
 * head/tail pointers after the guest memory is gone.
 */
static int qp_save_headers(struct qp_broker_entry *entry)
{
        int result;

        if (entry->produce_q->saved_header != NULL &&
            entry->consume_q->saved_header != NULL) {
                /* The header pages have already been saved. */
                return VMCI_SUCCESS;
        }

        if (NULL == entry->produce_q->q_header ||
            NULL == entry->consume_q->q_header) {
                result = qp_host_map_queues(entry->produce_q, entry->consume_q);
                if (result < VMCI_SUCCESS)
                        return result;
        }

        memcpy(&entry->saved_produce_q, entry->produce_q->q_header,
               sizeof(entry->saved_produce_q));
        entry->produce_q->saved_header = &entry->saved_produce_q;
        memcpy(&entry->saved_consume_q, entry->consume_q->q_header,
               sizeof(entry->saved_consume_q));
        entry->consume_q->saved_header = &entry->saved_consume_q;

        return VMCI_SUCCESS;
}

/*
 * Removes the guest memory backing a queue pair: saves the queue headers,
 * unmaps them, and unregisters the user pages.
 */
int vmci_qp_broker_unmap(struct vmci_handle handle,
                         struct vmci_ctx *context,
                         u32 gid)
{
        struct qp_broker_entry *entry;
        const u32 context_id = vmci_ctx_get_id(context);
        bool is_local = false;
        int result;

        if (vmci_handle_is_invalid(handle) || !context ||
            context_id == VMCI_INVALID_ID)
                return VMCI_ERROR_INVALID_ARGS;

        mutex_lock(&qp_broker_list.mutex);

        if (!vmci_ctx_qp_exists(context, handle)) {
                pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
                         context_id, handle.context, handle.resource);
                result = VMCI_ERROR_NOT_FOUND;
                goto out;
        }

        entry = qp_broker_handle_to_entry(handle);
        if (!entry) {
                pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
                         context_id, handle.context, handle.resource);
                result = VMCI_ERROR_NOT_FOUND;
                goto out;
        }

        if (context_id != entry->create_id && context_id != entry->attach_id) {
                result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
                goto out;
        }

        is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;

        if (context_id != VMCI_HOST_CONTEXT_ID) {
                qp_acquire_queue_mutex(entry->produce_q);
                result = qp_save_headers(entry);
                if (result < VMCI_SUCCESS)
                        pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
                                handle.context, handle.resource, result);

                qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q);

                /* The guest memory is being revoked; drop the user pages. */
                qp_host_unregister_user_memory(entry->produce_q,
                                               entry->consume_q);

                /* Move state from *_MEM to *_NO_MEM. */
                entry->state--;

                qp_release_queue_mutex(entry->produce_q);
        }

        result = VMCI_SUCCESS;

 out:
        mutex_unlock(&qp_broker_list.mutex);
        return result;
}

void vmci_qp_guest_endpoints_exit(void)
{
        struct qp_entry *entry;
        struct qp_guest_endpoint *ep;

        mutex_lock(&qp_guest_endpoints.mutex);

        while ((entry = qp_list_get_head(&qp_guest_endpoints))) {
                ep = (struct qp_guest_endpoint *)entry;

                /* Don't make a hypercall for local queue pairs. */
                if (!(entry->flags & VMCI_QPFLAG_LOCAL))
                        qp_detatch_hypercall(entry->handle);

                /* We cannot fail the exit, so reset ref_count. */
                entry->ref_count = 0;
                qp_list_remove_entry(&qp_guest_endpoints, entry);

                qp_guest_endpoint_destroy(ep);
        }

        mutex_unlock(&qp_guest_endpoints.mutex);
}

static void qp_lock(const struct vmci_qp *qpair)
{
        qp_acquire_queue_mutex(qpair->produce_q);
}

static void qp_unlock(const struct vmci_qp *qpair)
{
        qp_release_queue_mutex(qpair->produce_q);
}

static int qp_map_queue_headers(struct vmci_queue *produce_q,
                                struct vmci_queue *consume_q)
{
        int result;

        if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
                result = qp_host_map_queues(produce_q, consume_q);
                if (result < VMCI_SUCCESS)
                        return (produce_q->saved_header &&
                                consume_q->saved_header) ?
                            VMCI_ERROR_QUEUEPAIR_NOT_READY :
                            VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
        }

        return VMCI_SUCCESS;
}

static int qp_get_queue_headers(const struct vmci_qp *qpair,
                                struct vmci_queue_header **produce_q_header,
                                struct vmci_queue_header **consume_q_header)
{
        int result;

        result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q);
        if (result == VMCI_SUCCESS) {
                *produce_q_header = qpair->produce_q->q_header;
                *consume_q_header = qpair->consume_q->q_header;
        } else if (qpair->produce_q->saved_header &&
                   qpair->consume_q->saved_header) {
                *produce_q_header = qpair->produce_q->saved_header;
                *consume_q_header = qpair->consume_q->saved_header;
                result = VMCI_SUCCESS;
        }

        return result;
}
/*
 * Callback from the VMCI queue pair broker indicating that a queue pair
 * that was previously not ready now either is ready or is gone forever.
 * Wakes every thread currently blocked in qp_wait_for_ready_queue().
 */
static int qp_wakeup_cb(void *client_data)
{
	struct vmci_qp *qpair = (struct vmci_qp *)client_data;

	qp_lock(qpair);
	while (qpair->blocked > 0) {
		qpair->blocked--;
		qpair->generation++;
		wake_up(&qpair->event);
	}
	qp_unlock(qpair);

	return VMCI_SUCCESS;
}
/*
 * Makes the calling thread wait for the queue pair to become ready for
 * host side access.  The caller registers itself by bumping "blocked"
 * and snapshotting "generation" while holding the queue pair lock, then
 * drops the lock and sleeps until qp_wakeup_cb() advances the generation.
 * Returns true when the thread is woken up after a queue pair state
 * change, false otherwise.
 */
static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
{
	unsigned int generation;

	qpair->blocked++;
	generation = qpair->generation;
	qp_unlock(qpair);
	wait_event(qpair->event, generation != qpair->generation);
	qp_lock(qpair);

	return true;
}
/*
 * Enqueues a given buffer to the produce queue using the provided copy
 * function.  As many bytes as possible (space available in the queue) are
 * enqueued.  Assumes the queue pair mutex has been acquired.  Returns
 * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue data,
 * VMCI_ERROR_INVALID_SIZE if a queue pointer is outside the queue,
 * VMCI_ERROR_QUEUEPAIR_NOT_READY or VMCI_ERROR_QUEUEPAIR_NOTATTACHED if
 * the queue pair memory isn't available, an error from the copy function
 * if accessing the buffer fails, or otherwise the number of bytes
 * written.
 */
static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
				 struct vmci_queue *consume_q,
				 const u64 produce_q_size,
				 const void *buf,
				 size_t buf_size,
				 vmci_memcpy_to_queue_func memcpy_to_queue)
{
	s64 free_space;
	u64 tail;
	size_t written;
	ssize_t result;

	result = qp_map_queue_headers(produce_q, consume_q);
	if (unlikely(result != VMCI_SUCCESS))
		return result;

	free_space = vmci_q_header_free_space(produce_q->q_header,
					      consume_q->q_header,
					      produce_q_size);
	if (free_space == 0)
		return VMCI_ERROR_QUEUEPAIR_NOSPACE;

	if (free_space < VMCI_SUCCESS)
		return (ssize_t) free_space;

	written = (size_t) (free_space > buf_size ? buf_size : free_space);
	tail = vmci_q_header_producer_tail(produce_q->q_header);
	if (likely(tail + written < produce_q_size)) {
		result = memcpy_to_queue(produce_q, tail, buf, 0, written);
	} else {
		/* Tail pointer wraps around; copy in two pieces. */

		const size_t tmp = (size_t) (produce_q_size - tail);

		result = memcpy_to_queue(produce_q, tail, buf, 0, tmp);
		if (result >= VMCI_SUCCESS)
			result = memcpy_to_queue(produce_q, 0, buf, tmp,
						 written - tmp);
	}

	if (result < VMCI_SUCCESS)
		return result;

	vmci_q_header_add_producer_tail(produce_q->q_header, written,
					produce_q_size);
	return written;
}
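
/*
 * Worked example of the wrap-around path above (illustrative only; the
 * numbers are made up): with produce_q_size == 4096, tail == 4000 and
 * written == 200, tmp == 4096 - 4000 == 96, so the first copy places
 * source bytes [0, 96) at queue offset 4000 and the second copy places
 * the remaining bytes [96, 200) at queue offset 0.  The producer tail
 * then advances to (4000 + 200) % 4096 == 104.
 */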
/*
 * Dequeues data (if available) from the given consume queue and writes it
 * to the user provided buffer using the provided copy function.  Assumes
 * the queue pair mutex has been acquired.  Returns
 * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue,
 * VMCI_ERROR_INVALID_SIZE if a queue pointer is outside the queue,
 * VMCI_ERROR_QUEUEPAIR_NOT_READY or VMCI_ERROR_QUEUEPAIR_NOTATTACHED if
 * the queue pair memory isn't available, an error from the copy function
 * if accessing the buffer fails, or otherwise the number of bytes read.
 * If update_consumer is false the consumer head is left unchanged, which
 * implements a non-destructive peek.
 */
static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
				 struct vmci_queue *consume_q,
				 const u64 consume_q_size,
				 void *buf,
				 size_t buf_size,
				 vmci_memcpy_from_queue_func memcpy_from_queue,
				 bool update_consumer)
{
	s64 buf_ready;
	u64 head;
	size_t read;
	ssize_t result;

	result = qp_map_queue_headers(produce_q, consume_q);
	if (unlikely(result != VMCI_SUCCESS))
		return result;

	buf_ready = vmci_q_header_buf_ready(consume_q->q_header,
					    produce_q->q_header,
					    consume_q_size);
	if (buf_ready == 0)
		return VMCI_ERROR_QUEUEPAIR_NODATA;

	if (buf_ready < VMCI_SUCCESS)
		return (ssize_t) buf_ready;

	read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
	head = vmci_q_header_consumer_head(produce_q->q_header);
	if (likely(head + read < consume_q_size)) {
		result = memcpy_from_queue(buf, 0, consume_q, head, read);
	} else {
		/* Head pointer wraps around; copy in two pieces. */

		const size_t tmp = (size_t) (consume_q_size - head);

		result = memcpy_from_queue(buf, 0, consume_q, head, tmp);
		if (result >= VMCI_SUCCESS)
			result = memcpy_from_queue(buf, tmp, consume_q, 0,
						   read - tmp);
	}

	if (result < VMCI_SUCCESS)
		return result;

	if (update_consumer)
		vmci_q_header_add_consumer_head(produce_q->q_header,
						read, consume_q_size);

	return read;
}

/**
 * vmci_qpair_alloc() - Allocates a queue pair.
 * @qpair:      Pointer for the new vmci_qp struct.
 * @handle:     Handle to track the resource.
 * @produce_qsize:      Desired size of the producer queue.
 * @consume_qsize:      Desired size of the consumer queue.
 * @peer:       Context ID of the peer.
 * @flags:      VMCI flags.
 * @priv_flags: VMCI privilege flags.
 *
 * This is the client interface for allocating the memory for a vmci_qp
 * structure and then attaching to the underlying queue.  If an error
 * occurs allocating the memory for the vmci_qp structure, no attempt is
 * made to attach.  If an error occurs attaching, the structure is freed.
 */
int vmci_qpair_alloc(struct vmci_qp **qpair,
		     struct vmci_handle *handle,
		     u64 produce_qsize,
		     u64 consume_qsize,
		     u32 peer,
		     u32 flags,
		     u32 priv_flags)
{
	struct vmci_qp *my_qpair;
	int retval;
	struct vmci_handle src = VMCI_INVALID_HANDLE;
	struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID);
	enum vmci_route route;
	vmci_event_release_cb wakeup_cb;
	void *client_data;

	/*
	 * Restrict the size of an individual queue pair.  The first
	 * clause rejects a u64 overflow of the sum; the second caps the
	 * combined size at VMCI_MAX_GUEST_QP_MEMORY.  The device only
	 * enforces a limit on the total queue pair memory of a guest and
	 * reports NO_RESOURCES when it is exceeded, so use the same
	 * error here.
	 */
	if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) ||
	    produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY)
		return VMCI_ERROR_NO_RESOURCES;

	retval = vmci_route(&src, &dst, false, &route);
	if (retval < VMCI_SUCCESS)
		route = vmci_guest_code_active() ?
		    VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;

	if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) {
		pr_devel("NONBLOCK OR PINNED set");
		return VMCI_ERROR_INVALID_ARGS;
	}

	my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
	if (!my_qpair)
		return VMCI_ERROR_NO_MEM;

	my_qpair->produce_q_size = produce_qsize;
	my_qpair->consume_q_size = consume_qsize;
	my_qpair->peer = peer;
	my_qpair->flags = flags;
	my_qpair->priv_flags = priv_flags;

	wakeup_cb = NULL;
	client_data = NULL;

	if (VMCI_ROUTE_AS_HOST == route) {
		my_qpair->guest_endpoint = false;
		if (!(flags & VMCI_QPFLAG_LOCAL)) {
			my_qpair->blocked = 0;
			my_qpair->generation = 0;
			init_waitqueue_head(&my_qpair->event);
			wakeup_cb = qp_wakeup_cb;
			client_data = (void *)my_qpair;
		}
	} else {
		my_qpair->guest_endpoint = true;
	}

	retval = vmci_qp_alloc(handle,
			       &my_qpair->produce_q,
			       my_qpair->produce_q_size,
			       &my_qpair->consume_q,
			       my_qpair->consume_q_size,
			       my_qpair->peer,
			       my_qpair->flags,
			       my_qpair->priv_flags,
			       my_qpair->guest_endpoint,
			       wakeup_cb, client_data);

	if (retval < VMCI_SUCCESS) {
		kfree(my_qpair);
		return retval;
	}

	*qpair = my_qpair;
	my_qpair->handle = *handle;

	return retval;
}
EXPORT_SYMBOL_GPL(vmci_qpair_alloc);
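
/*
 * Illustrative sketch (not part of the driver): a minimal guest-side
 * kernel client could create and tear down a queue pair as shown below.
 * The peer context ID, the 64 KiB queue sizes and the function name are
 * made up; error handling is reduced to the bare minimum.
 */
#if 0
static int example_create_qpair(u32 peer_cid)
{
	struct vmci_qp *qpair = NULL;
	struct vmci_handle handle = VMCI_INVALID_HANDLE;
	int rv;

	/* 64 KiB in each direction; no special flags or privileges. */
	rv = vmci_qpair_alloc(&qpair, &handle, 65536, 65536,
			      peer_cid, 0, VMCI_NO_PRIVILEGE_FLAGS);
	if (rv < VMCI_SUCCESS)
		return rv;

	/* ... exchange data via vmci_qpair_enqueue()/dequeue() ... */

	return vmci_qpair_detach(&qpair);
}
#endif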

/**
 * vmci_qpair_detach() - Detaches the client from a queue pair.
 * @qpair:      Reference of a pointer to the qpair struct.
 *
 * This is the client interface for detaching from a queue pair.
 * Note that this routine also frees the memory allocated for the
 * vmci_qp structure.
 */
int vmci_qpair_detach(struct vmci_qp **qpair)
{
	int result;
	struct vmci_qp *old_qpair;

	if (!qpair || !(*qpair))
		return VMCI_ERROR_INVALID_ARGS;

	old_qpair = *qpair;
	result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint);

	/*
	 * The detach can fail on both the guest and the host side, but
	 * in either case there is little the caller can do about it, so
	 * the qpair struct is released here regardless to avoid a leak.
	 * Poison the struct before freeing it as a debugging aid against
	 * use after detach.
	 */
	memset(old_qpair, 0, sizeof(*old_qpair));
	old_qpair->handle = VMCI_INVALID_HANDLE;
	old_qpair->peer = VMCI_INVALID_ID;
	kfree(old_qpair);
	*qpair = NULL;

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_detach);

/**
 * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer.
 * @qpair:      Pointer to the queue pair struct.
 * @producer_tail:      Reference used for storing the producer tail index.
 * @consumer_head:      Reference used for storing the consumer head index.
 *
 * This is the client interface for getting the current indexes of the
 * queue pair from the point of view of the caller as the producer.
 */
int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair,
				   u64 *producer_tail,
				   u64 *consumer_head)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	int result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		vmci_q_header_get_pointers(produce_q_header, consume_q_header,
					   producer_tail, consumer_head);
	qp_unlock(qpair);

	if (result == VMCI_SUCCESS &&
	    ((producer_tail && *producer_tail >= qpair->produce_q_size) ||
	     (consumer_head && *consumer_head >= qpair->produce_q_size)))
		return VMCI_ERROR_INVALID_SIZE;

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes);

/**
 * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer.
 * @qpair:      Pointer to the queue pair struct.
 * @consumer_tail:      Reference used for storing the consumer tail index.
 * @producer_head:      Reference used for storing the producer head index.
 *
 * This is the client interface for getting the current indexes of the
 * queue pair from the point of view of the caller as the consumer.
 */
int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair,
				   u64 *consumer_tail,
				   u64 *producer_head)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	int result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		vmci_q_header_get_pointers(consume_q_header, produce_q_header,
					   consumer_tail, producer_head);
	qp_unlock(qpair);

	if (result == VMCI_SUCCESS &&
	    ((consumer_tail && *consumer_tail >= qpair->consume_q_size) ||
	     (producer_head && *producer_head >= qpair->consume_q_size)))
		return VMCI_ERROR_INVALID_SIZE;

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes);
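
/*
 * Illustrative sketch (not part of the driver): the index getters above
 * are mostly useful for diagnostics.  A hypothetical caller could
 * snapshot both index pairs for a trace message as below; the function
 * name and log text are made up.
 */
#if 0
static void example_dump_qpair_indexes(const struct vmci_qp *qpair)
{
	u64 p_tail = 0, c_head = 0, c_tail = 0, p_head = 0;

	if (vmci_qpair_get_produce_indexes(qpair, &p_tail, &c_head) == VMCI_SUCCESS &&
	    vmci_qpair_get_consume_indexes(qpair, &c_tail, &p_head) == VMCI_SUCCESS)
		pr_debug("qpair indexes: produce %llu/%llu consume %llu/%llu\n",
			 (unsigned long long)p_tail, (unsigned long long)c_head,
			 (unsigned long long)c_tail, (unsigned long long)p_head);
}
#endif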

/**
 * vmci_qpair_produce_free_space() - Retrieves free space in producer queue.
 * @qpair:      Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of free space in
 * the queue pair from the point of view of the caller as the producer,
 * which is the common case.  Returns < 0 on error, else the number of
 * bytes into which data can be enqueued.
 */
s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	s64 result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		result = vmci_q_header_free_space(produce_q_header,
						  consume_q_header,
						  qpair->produce_q_size);
	else
		result = 0;

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space);

/**
 * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue.
 * @qpair:      Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of free space in
 * the queue pair from the point of view of the caller as the consumer,
 * which is not the common case.  Returns < 0 on error, else the number
 * of bytes into which data can be enqueued.
 */
s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	s64 result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		result = vmci_q_header_free_space(consume_q_header,
						  produce_q_header,
						  qpair->consume_q_size);
	else
		result = 0;

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space);

/**
 * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from producer queue.
 * @qpair:      Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of enqueued data
 * in the queue pair from the point of view of the caller as the
 * producer, which is not the common case.  Returns < 0 on error, else
 * the number of bytes that may be read.
 */
s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	s64 result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		result = vmci_q_header_buf_ready(produce_q_header,
						 consume_q_header,
						 qpair->produce_q_size);
	else
		result = 0;

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready);

/**
 * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from consumer queue.
 * @qpair:      Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of enqueued data
 * in the queue pair from the point of view of the caller as the
 * consumer, which is the common case.  Returns < 0 on error, else the
 * number of bytes that may be read.
 */
s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	s64 result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		result = vmci_q_header_buf_ready(consume_q_header,
						 produce_q_header,
						 qpair->consume_q_size);
	else
		result = 0;

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready);
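
/*
 * Illustrative sketch (not part of the driver): a consumer that wants to
 * avoid the VMCI_ERROR_QUEUEPAIR_NODATA round trip can first ask how many
 * bytes are ready and only then dequeue.  The function and variable names
 * below are made up.
 */
#if 0
static ssize_t example_try_dequeue(struct vmci_qp *qpair, void *buf, size_t len)
{
	s64 ready = vmci_qpair_consume_buf_ready(qpair);

	if (ready < 0)		/* error while inspecting the queue */
		return ready;
	if (ready == 0)		/* nothing to read right now */
		return 0;

	return vmci_qpair_dequeue(qpair, buf, min_t(size_t, len, ready), 0);
}
#endif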

/**
 * vmci_qpair_enqueue() - Throw data on the queue.
 * @qpair:      Pointer to the queue pair struct.
 * @buf:        Pointer to the buffer containing data.
 * @buf_size:   Length of the buffer.
 * @buf_type:   Buffer type (unused).
 *
 * This is the client interface for enqueueing data into the queue.
 * Returns the number of bytes enqueued or < 0 on error.
 */
ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
			   const void *buf,
			   size_t buf_size,
			   int buf_type)
{
	ssize_t result;

	if (!qpair || !buf)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_enqueue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->produce_q_size,
					   buf, buf_size,
					   qp_memcpy_to_queue);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_enqueue);
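
/*
 * Illustrative sketch (not part of the driver): sending a buffer with
 * vmci_qpair_enqueue().  A short write (fewer bytes than requested) is a
 * normal outcome when the queue is nearly full, so a caller that must
 * deliver everything typically loops.  The function name and back-off are
 * made up.
 */
#if 0
static int example_send_all(struct vmci_qp *qpair, const u8 *buf, size_t len)
{
	while (len > 0) {
		ssize_t n = vmci_qpair_enqueue(qpair, buf, len, 0);

		if (n == VMCI_ERROR_QUEUEPAIR_NOSPACE) {
			/* Peer hasn't consumed anything yet; back off. */
			msleep(1);
			continue;
		}
		if (n < 0)
			return n;

		buf += n;
		len -= n;
	}
	return VMCI_SUCCESS;
}
#endif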

/**
 * vmci_qpair_dequeue() - Get data from the queue.
 * @qpair:      Pointer to the queue pair struct.
 * @buf:        Pointer to the buffer for the data.
 * @buf_size:   Length of the buffer.
 * @buf_type:   Buffer type (unused).
 *
 * This is the client interface for dequeueing data from the queue.
 * Returns the number of bytes dequeued or < 0 on error.
 */
ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
			   void *buf,
			   size_t buf_size,
			   int buf_type)
{
	ssize_t result;

	if (!qpair || !buf)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_dequeue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->consume_q_size,
					   buf, buf_size,
					   qp_memcpy_from_queue, true);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_dequeue);

/**
 * vmci_qpair_peek() - Peek at the data in the queue.
 * @qpair:      Pointer to the queue pair struct.
 * @buf:        Pointer to the buffer for the data.
 * @buf_size:   Length of the buffer.
 * @buf_type:   Buffer type (unused).
 *
 * This is the client interface for peeking into a queue, i.e. copying
 * data from the queue without updating the consumer head pointer.
 * Returns the number of bytes peeked or < 0 on error.
 */
ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
			void *buf,
			size_t buf_size,
			int buf_type)
{
	ssize_t result;

	if (!qpair || !buf)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_dequeue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->consume_q_size,
					   buf, buf_size,
					   qp_memcpy_from_queue, false);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_peek);
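
/*
 * Illustrative sketch (not part of the driver): because vmci_qpair_peek()
 * does not advance the consumer head, a caller can inspect a fixed-size
 * record header before committing to consume the record.  The record
 * layout and names below are made up.
 */
#if 0
struct example_record_hdr {
	u32 len;	/* payload bytes that follow the header */
};

static ssize_t example_consume_record(struct vmci_qp *qpair, void *payload,
				      size_t payload_size)
{
	struct example_record_hdr hdr;
	ssize_t n;

	n = vmci_qpair_peek(qpair, &hdr, sizeof(hdr), 0);
	if (n < (ssize_t)sizeof(hdr))
		return n < 0 ? n : VMCI_ERROR_QUEUEPAIR_NODATA;

	if (hdr.len > payload_size)
		return VMCI_ERROR_INVALID_ARGS;	/* caller's buffer too small */

	/* Now consume the header for real, then the payload. */
	n = vmci_qpair_dequeue(qpair, &hdr, sizeof(hdr), 0);
	if (n < 0)
		return n;

	return vmci_qpair_dequeue(qpair, payload, hdr.len, 0);
}
#endif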

/**
 * vmci_qpair_enquev() - Throw data on the queue using iov.
 * @qpair:      Pointer to the queue pair struct.
 * @iov:        Pointer to the buffer containing data.
 * @iov_size:   Length of the buffer.
 * @buf_type:   Buffer type (unused).
 *
 * This is the client interface for enqueueing data into the queue.
 * This function uses IO vectors to handle the work.  Returns the number
 * of bytes enqueued or < 0 on error.
 */
ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
			  void *iov,
			  size_t iov_size,
			  int buf_type)
{
	ssize_t result;

	if (!qpair || !iov)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_enqueue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->produce_q_size,
					   iov, iov_size,
					   qp_memcpy_to_queue_iov);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_enquev);
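
/*
 * Illustrative sketch (not part of the driver): the *v variants take an
 * opaque iov pointer which the qp_memcpy_*_iov helpers (defined earlier
 * in this file) interpret.  Assuming, as in older in-kernel callers such
 * as the vSockets transport, that this is an array of struct iovec
 * covering iov_size bytes, a scatter enqueue could look like this.  That
 * layout is an assumption of the example, not something guaranteed here.
 */
#if 0
static ssize_t example_send_two_parts(struct vmci_qp *qpair,
				      void *hdr, size_t hdr_len,
				      void *body, size_t body_len)
{
	struct iovec iov[2] = {
		{ .iov_base = hdr,  .iov_len = hdr_len },
		{ .iov_base = body, .iov_len = body_len },
	};

	return vmci_qpair_enquev(qpair, iov, hdr_len + body_len, 0);
}
#endif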

/**
 * vmci_qpair_dequev() - Get data from the queue using iov.
 * @qpair:      Pointer to the queue pair struct.
 * @iov:        Pointer to the buffer for the data.
 * @iov_size:   Length of the buffer.
 * @buf_type:   Buffer type (unused).
 *
 * This is the client interface for dequeueing data from the queue.
 * This function uses IO vectors to handle the work.  Returns the number
 * of bytes dequeued or < 0 on error.
 */
ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
			  void *iov,
			  size_t iov_size,
			  int buf_type)
{
	ssize_t result;

	if (!qpair || !iov)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_dequeue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->consume_q_size,
					   iov, iov_size,
					   qp_memcpy_from_queue_iov,
					   true);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_dequev);

/**
 * vmci_qpair_peekv() - Peek at the data in the queue using iov.
 * @qpair:      Pointer to the queue pair struct.
 * @iov:        Pointer to the buffer for the data.
 * @iov_size:   Length of the buffer.
 * @buf_type:   Buffer type (unused).
 *
 * This is the client interface for peeking into a queue, i.e. copying
 * data from the queue without updating the consumer head pointer.
 * This function uses IO vectors to handle the work.  Returns the number
 * of bytes peeked or < 0 on error.
 */
ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
			 void *iov,
			 size_t iov_size,
			 int buf_type)
{
	ssize_t result;

	if (!qpair || !iov)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_dequeue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->consume_q_size,
					   iov, iov_size,
					   qp_memcpy_from_queue_iov,
					   false);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_peekv);