/*
 * VMware VMCI driver - queue pair implementation.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_route.h"

/*
 * A queue pair is a pair of ring buffers (a produce queue and a consume
 * queue) shared between two endpoints.  Each queue consists of a queue
 * header page followed by the data pages.  Guest endpoints allocate their
 * queue memory locally and hand the page frame numbers to the hypervisor
 * via a hypercall; host endpoints are managed by the queue pair broker
 * below and are backed by pinned user pages supplied by the VMX.
 *
 * The broker tracks each pair in a qp_broker_entry and moves it through
 * the states of enum qp_broker_state:
 *
 *   VMCIQPB_NEW             - entry allocated but not yet usable
 *   VMCIQPB_CREATED_NO_MEM  - created by one peer, no backing memory yet
 *   VMCIQPB_CREATED_MEM     - created by one peer with backing memory
 *   VMCIQPB_ATTACHED_NO_MEM - both peers present, memory not yet supplied
 *   VMCIQPB_ATTACHED_MEM    - both peers present, memory mapped
 *   VMCIQPB_SHUTDOWN_*      - one peer has detached again
 *   VMCIQPB_GONE            - entry is being destroyed
 *
 * The *_NO_MEM states exist because older VMX versions (and host side
 * creators) register the guest memory in a separate step, either through
 * vmci_qp_broker_set_page_store() or when the guest peer attaches.
 *
 * Guest endpoints are tracked separately in the qp_guest_endpoints list,
 * keyed by the same vmci_handle, and carry the PPN set that is passed to
 * the hypervisor when the pair is allocated.
 */

typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue,
				      u64 queue_offset, const void *src,
				      size_t src_offset, size_t size);
typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
					const struct vmci_queue *queue,
					u64 queue_offset, size_t size);

/* The Kernel specific component of the struct vmci_queue structure. */
struct vmci_queue_kern_if {
	struct mutex __mutex;	/* Protects the queue. */
	struct mutex *mutex;	/* Shared by produce and consume queues. */
	size_t num_pages;	/* Number of pages incl. header. */
	bool host;		/* Host or guest? */
	union {
		struct {
			dma_addr_t *pas;	/* DMA addresses of the pages. */
			void **vas;		/* Kernel VAs of the pages. */
		} g;		/* Used by the guest. */
		struct {
			struct page **page;		/* First data page. */
			struct page **header_page;	/* Queue header page. */
		} h;		/* Used by the host. */
	} u;
};
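
/*
 * A guest queue (host == false) is backed by DMA-coherent pages whose
 * addresses are kept in u.g; the first page always holds the struct
 * vmci_queue_header, so data copies index the page arrays at
 * page_index + 1.  A host queue (host == true) is backed by pinned user
 * pages kept in u.h, where u.h.header_page points at the header page and
 * u.h.page at the first data page.
 */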

/*
 * This structure is opaque to the clients.
 */
169struct vmci_qp {
170 struct vmci_handle handle;
171 struct vmci_queue *produce_q;
172 struct vmci_queue *consume_q;
173 u64 produce_q_size;
174 u64 consume_q_size;
175 u32 peer;
176 u32 flags;
177 u32 priv_flags;
178 bool guest_endpoint;
179 unsigned int blocked;
180 unsigned int generation;
181 wait_queue_head_t event;
182};
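
/*
 * blocked, generation and event implement the wakeup protocol used by
 * qp_wakeup_cb() and qp_wait_for_ready_queue() below: a waiter records the
 * current generation and sleeps on the wait queue; the wakeup callback
 * bumps the generation and wakes all blocked waiters.
 */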
183
184enum qp_broker_state {
185 VMCIQPB_NEW,
186 VMCIQPB_CREATED_NO_MEM,
187 VMCIQPB_CREATED_MEM,
188 VMCIQPB_ATTACHED_NO_MEM,
189 VMCIQPB_ATTACHED_MEM,
190 VMCIQPB_SHUTDOWN_NO_MEM,
191 VMCIQPB_SHUTDOWN_MEM,
192 VMCIQPB_GONE
193};
194
195#define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \
196 _qpb->state == VMCIQPB_ATTACHED_MEM || \
197 _qpb->state == VMCIQPB_SHUTDOWN_MEM)
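
/*
 * QPBROKERSTATE_HAS_MEM() is true exactly for the *_MEM states above,
 * i.e. whenever guest memory is currently registered for the pair and
 * therefore has to be unmapped and unregistered on detach or unmap.
 */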

/*
 * In the queue pair broker, produce and consume queue values are always
 * stored from the guest's point of view: entry->qp.produce_size is the
 * guest's produce queue size.  A host endpoint creating or attaching to a
 * pair therefore swaps the produce and consume values (see
 * qp_broker_create() and qp_alloc_host_work()).
 */

208struct qp_entry {
209 struct list_head list_item;
210 struct vmci_handle handle;
211 u32 peer;
212 u32 flags;
213 u64 produce_size;
214 u64 consume_size;
215 u32 ref_count;
216};
217
218struct qp_broker_entry {
219 struct vmci_resource resource;
220 struct qp_entry qp;
221 u32 create_id;
222 u32 attach_id;
223 enum qp_broker_state state;
224 bool require_trusted_attach;
225 bool created_by_trusted;
226 bool vmci_page_files;
227 struct vmci_queue *produce_q;
228 struct vmci_queue *consume_q;
229 struct vmci_queue_header saved_produce_q;
230 struct vmci_queue_header saved_consume_q;
231 vmci_event_release_cb wakeup_cb;
232 void *client_data;
233 void *local_mem;
234};
235
236struct qp_guest_endpoint {
237 struct vmci_resource resource;
238 struct qp_entry qp;
239 u64 num_ppns;
240 void *produce_q;
241 void *consume_q;
242 struct ppn_set ppn_set;
243};
244
245struct qp_list {
246 struct list_head head;
247 struct mutex mutex;
248};
249
250static struct qp_list qp_broker_list = {
251 .head = LIST_HEAD_INIT(qp_broker_list.head),
252 .mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex),
253};
254
255static struct qp_list qp_guest_endpoints = {
256 .head = LIST_HEAD_INIT(qp_guest_endpoints.head),
257 .mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex),
258};
259
260#define INVALID_VMCI_GUEST_MEM_ID 0
261#define QPE_NUM_PAGES(_QPE) ((u32) \
262 (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
263 DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))
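
/*
 * QPE_NUM_PAGES() counts the data pages of both queues plus one header
 * page per queue, hence the "+ 2".
 */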

/*
 * Frees kernel VA space for a given queue and its queue header, and
 * frees physical data pages.
 */
270static void qp_free_queue(void *q, u64 size)
271{
272 struct vmci_queue *queue = q;
273
274 if (queue) {
275 u64 i;

		/* Given size does not include header, so add in a page here. */
278 for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) {
279 dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE,
280 queue->kernel_if->u.g.vas[i],
281 queue->kernel_if->u.g.pas[i]);
282 }
283
284 vfree(queue);
285 }
286}
287

/*
 * Allocates kernel VA space for the queue structure and its kernel
 * interface, plus DMA-coherent pages for the queue header and data.
 */
293static void *qp_alloc_queue(u64 size, u32 flags)
294{
295 u64 i;
296 struct vmci_queue *queue;
297 const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
298 const size_t pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
299 const size_t vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
300 const size_t queue_size =
301 sizeof(*queue) + sizeof(*queue->kernel_if) +
302 pas_size + vas_size;
303
304 queue = vmalloc(queue_size);
305 if (!queue)
306 return NULL;
307
308 queue->q_header = NULL;
309 queue->saved_header = NULL;
310 queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
311 queue->kernel_if->mutex = NULL;
312 queue->kernel_if->num_pages = num_pages;
313 queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1);
314 queue->kernel_if->u.g.vas =
315 (void **)((u8 *)queue->kernel_if->u.g.pas + pas_size);
316 queue->kernel_if->host = false;
317
318 for (i = 0; i < num_pages; i++) {
319 queue->kernel_if->u.g.vas[i] =
320 dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE,
321 &queue->kernel_if->u.g.pas[i],
322 GFP_KERNEL);
		if (!queue->kernel_if->u.g.vas[i]) {
			/* Free only the pages allocated so far. */
			while (i--)
				dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE,
						  queue->kernel_if->u.g.vas[i],
						  queue->kernel_if->u.g.pas[i]);
			vfree(queue);
			return NULL;
		}
328 }
329
	/* Queue header is the first page; data pages follow at index 1. */
331 queue->q_header = queue->kernel_if->u.g.vas[0];
332
333 return queue;
334}
335

/*
 * Copies from a given buffer or iovector to a VMCI Queue.  Uses
 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
 * by traversing the offset -> page translation structure for the queue.
 * Assumes that offset + size does not wrap around in the queue.
 */
342static int __qp_memcpy_to_queue(struct vmci_queue *queue,
343 u64 queue_offset,
344 const void *src,
345 size_t size,
346 bool is_iovec)
347{
348 struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
349 size_t bytes_copied = 0;
350
351 while (bytes_copied < size) {
352 const u64 page_index =
353 (queue_offset + bytes_copied) / PAGE_SIZE;
354 const size_t page_offset =
355 (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
356 void *va;
357 size_t to_copy;
358
359 if (kernel_if->host)
360 va = kmap(kernel_if->u.h.page[page_index]);
361 else
362 va = kernel_if->u.g.vas[page_index + 1];
363
364
365 if (size - bytes_copied > PAGE_SIZE - page_offset)
366
367 to_copy = PAGE_SIZE - page_offset;
368 else
369 to_copy = size - bytes_copied;
370
371 if (is_iovec) {
372 struct iovec *iov = (struct iovec *)src;
373 int err;
374
375
376 err = memcpy_fromiovec((u8 *)va + page_offset,
377 iov, to_copy);
378 if (err != 0) {
379 if (kernel_if->host)
380 kunmap(kernel_if->u.h.page[page_index]);
381 return VMCI_ERROR_INVALID_ARGS;
382 }
383 } else {
384 memcpy((u8 *)va + page_offset,
385 (u8 *)src + bytes_copied, to_copy);
386 }
387
388 bytes_copied += to_copy;
389 if (kernel_if->host)
390 kunmap(kernel_if->u.h.page[page_index]);
391 }
392
393 return VMCI_SUCCESS;
394}

/*
 * Copies to a given buffer or iovector from a VMCI Queue.  Uses
 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
 * by traversing the offset -> page translation structure for the queue.
 * Assumes that offset + size does not wrap around in the queue.
 */
402static int __qp_memcpy_from_queue(void *dest,
403 const struct vmci_queue *queue,
404 u64 queue_offset,
405 size_t size,
406 bool is_iovec)
407{
408 struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
409 size_t bytes_copied = 0;
410
411 while (bytes_copied < size) {
412 const u64 page_index =
413 (queue_offset + bytes_copied) / PAGE_SIZE;
414 const size_t page_offset =
415 (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
416 void *va;
417 size_t to_copy;
418
419 if (kernel_if->host)
420 va = kmap(kernel_if->u.h.page[page_index]);
421 else
422 va = kernel_if->u.g.vas[page_index + 1];
423
424
425 if (size - bytes_copied > PAGE_SIZE - page_offset)
426
427 to_copy = PAGE_SIZE - page_offset;
428 else
429 to_copy = size - bytes_copied;
430
431 if (is_iovec) {
432 struct iovec *iov = (struct iovec *)dest;
433 int err;
434
435
436 err = memcpy_toiovec(iov, (u8 *)va + page_offset,
437 to_copy);
438 if (err != 0) {
439 if (kernel_if->host)
440 kunmap(kernel_if->u.h.page[page_index]);
441 return VMCI_ERROR_INVALID_ARGS;
442 }
443 } else {
444 memcpy((u8 *)dest + bytes_copied,
445 (u8 *)va + page_offset, to_copy);
446 }
447
448 bytes_copied += to_copy;
449 if (kernel_if->host)
450 kunmap(kernel_if->u.h.page[page_index]);
451 }
452
453 return VMCI_SUCCESS;
454}
455
/*
 * Allocates two lists of PPNs --- one for the pages backing the produce
 * queue and one for the pages backing the consume queue --- and fills
 * them in from the guest queue pages (including the header pages).
 */
462static int qp_alloc_ppn_set(void *prod_q,
463 u64 num_produce_pages,
464 void *cons_q,
465 u64 num_consume_pages, struct ppn_set *ppn_set)
466{
467 u32 *produce_ppns;
468 u32 *consume_ppns;
469 struct vmci_queue *produce_q = prod_q;
470 struct vmci_queue *consume_q = cons_q;
471 u64 i;
472
473 if (!produce_q || !num_produce_pages || !consume_q ||
474 !num_consume_pages || !ppn_set)
475 return VMCI_ERROR_INVALID_ARGS;
476
477 if (ppn_set->initialized)
478 return VMCI_ERROR_ALREADY_EXISTS;
479
480 produce_ppns =
481 kmalloc(num_produce_pages * sizeof(*produce_ppns), GFP_KERNEL);
482 if (!produce_ppns)
483 return VMCI_ERROR_NO_MEM;
484
485 consume_ppns =
486 kmalloc(num_consume_pages * sizeof(*consume_ppns), GFP_KERNEL);
487 if (!consume_ppns) {
488 kfree(produce_ppns);
489 return VMCI_ERROR_NO_MEM;
490 }
491
492 for (i = 0; i < num_produce_pages; i++) {
493 unsigned long pfn;
494
495 produce_ppns[i] =
496 produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
497 pfn = produce_ppns[i];
498
499
500 if (sizeof(pfn) > sizeof(*produce_ppns)
501 && pfn != produce_ppns[i])
502 goto ppn_error;
503 }
504
505 for (i = 0; i < num_consume_pages; i++) {
506 unsigned long pfn;
507
508 consume_ppns[i] =
509 consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
510 pfn = consume_ppns[i];
511
512
513 if (sizeof(pfn) > sizeof(*consume_ppns)
514 && pfn != consume_ppns[i])
515 goto ppn_error;
516 }
517
518 ppn_set->num_produce_pages = num_produce_pages;
519 ppn_set->num_consume_pages = num_consume_pages;
520 ppn_set->produce_ppns = produce_ppns;
521 ppn_set->consume_ppns = consume_ppns;
522 ppn_set->initialized = true;
523 return VMCI_SUCCESS;
524
525 ppn_error:
526 kfree(produce_ppns);
527 kfree(consume_ppns);
528 return VMCI_ERROR_INVALID_ARGS;
529}
530
531
532
533
534static void qp_free_ppn_set(struct ppn_set *ppn_set)
535{
536 if (ppn_set->initialized) {
537
538 kfree(ppn_set->produce_ppns);
539 kfree(ppn_set->consume_ppns);
540 }
541 memset(ppn_set, 0, sizeof(*ppn_set));
542}
543
544
545
546
547
548static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
549{
550 memcpy(call_buf, ppn_set->produce_ppns,
551 ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns));
552 memcpy(call_buf +
553 ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns),
554 ppn_set->consume_ppns,
555 ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns));
556
557 return VMCI_SUCCESS;
558}
559
560static int qp_memcpy_to_queue(struct vmci_queue *queue,
561 u64 queue_offset,
562 const void *src, size_t src_offset, size_t size)
563{
564 return __qp_memcpy_to_queue(queue, queue_offset,
565 (u8 *)src + src_offset, size, false);
566}
567
568static int qp_memcpy_from_queue(void *dest,
569 size_t dest_offset,
570 const struct vmci_queue *queue,
571 u64 queue_offset, size_t size)
572{
573 return __qp_memcpy_from_queue((u8 *)dest + dest_offset,
574 queue, queue_offset, size, false);
575}
576
577
578
579
580static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
581 u64 queue_offset,
582 const void *src,
583 size_t src_offset, size_t size)
{
	/*
	 * We ignore src_offset because src is really a struct iovec * and will
	 * maintain offset internally.
	 */
	return __qp_memcpy_to_queue(queue, queue_offset, src, size, true);
591}
592
593
594
595
596static int qp_memcpy_from_queue_iov(void *dest,
597 size_t dest_offset,
598 const struct vmci_queue *queue,
599 u64 queue_offset, size_t size)
{
	/*
	 * We ignore dest_offset because dest is really a struct iovec * and
	 * will maintain offset internally.
	 */
	return __qp_memcpy_from_queue(dest, queue, queue_offset, size, true);
606}
607
/*
 * Allocates kernel VA space of specified size plus space for the queue
 * structure/kernel interface and the queue header.  The host queue is
 * backed by user pages, so only the page pointer arrays are allocated
 * here; the pages themselves are registered later.
 */
614static struct vmci_queue *qp_host_alloc_queue(u64 size)
615{
616 struct vmci_queue *queue;
617 const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
618 const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
619 const size_t queue_page_size =
620 num_pages * sizeof(*queue->kernel_if->u.h.page);
621
622 queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
623 if (queue) {
624 queue->q_header = NULL;
625 queue->saved_header = NULL;
626 queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
627 queue->kernel_if->host = true;
628 queue->kernel_if->mutex = NULL;
629 queue->kernel_if->num_pages = num_pages;
630 queue->kernel_if->u.h.header_page =
631 (struct page **)((u8 *)queue + queue_size);
632 queue->kernel_if->u.h.page =
633 &queue->kernel_if->u.h.header_page[1];
634 }
635
636 return queue;
637}
638
639
640
641
642
643static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size)
644{
645 kfree(queue);
646}
647
/*
 * Initialize the mutex for the pair of queues.  This mutex is used to
 * protect the q_header and the buffer from changing out from under any
 * users of either queue.  Of course, it's only any good if the mutexes
 * are actually acquired.  Queue structure must lie on non-paged memory
 * or we cannot guarantee access to the mutex.
 */
655static void qp_init_queue_mutex(struct vmci_queue *produce_q,
656 struct vmci_queue *consume_q)
657{
658
659
660
661
662
663 if (produce_q->kernel_if->host) {
664 produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
665 consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
666 mutex_init(produce_q->kernel_if->mutex);
667 }
668}
669
670
671
672
673static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q,
674 struct vmci_queue *consume_q)
675{
676 if (produce_q->kernel_if->host) {
677 produce_q->kernel_if->mutex = NULL;
678 consume_q->kernel_if->mutex = NULL;
679 }
680}
681
682
683
684
685
686
687static void qp_acquire_queue_mutex(struct vmci_queue *queue)
688{
689 if (queue->kernel_if->host)
690 mutex_lock(queue->kernel_if->mutex);
691}
692
693
694
695
696
697
698static void qp_release_queue_mutex(struct vmci_queue *queue)
699{
700 if (queue->kernel_if->host)
701 mutex_unlock(queue->kernel_if->mutex);
702}
703
704
705
706
707
708static void qp_release_pages(struct page **pages,
709 u64 num_pages, bool dirty)
710{
711 int i;
712
713 for (i = 0; i < num_pages; i++) {
714 if (dirty)
715 set_page_dirty(pages[i]);
716
717 page_cache_release(pages[i]);
718 pages[i] = NULL;
719 }
720}
721
/*
 * Lock the user pages referenced by the {produce,consume}_uva pointers and
 * pin them; the page pointers are stored in the queues' kernel interface
 * header_page arrays.
 */
727static int qp_host_get_user_memory(u64 produce_uva,
728 u64 consume_uva,
729 struct vmci_queue *produce_q,
730 struct vmci_queue *consume_q)
731{
732 int retval;
733 int err = VMCI_SUCCESS;
734
735 retval = get_user_pages_fast((uintptr_t) produce_uva,
736 produce_q->kernel_if->num_pages, 1,
737 produce_q->kernel_if->u.h.header_page);
	if (retval < (int)produce_q->kernel_if->num_pages) {
		pr_warn("get_user_pages(produce) failed (retval=%d)", retval);
		if (retval > 0)
			qp_release_pages(produce_q->kernel_if->u.h.header_page,
					 retval, false);
		err = VMCI_ERROR_NO_MEM;
		goto out;
	}
745
746 retval = get_user_pages_fast((uintptr_t) consume_uva,
747 consume_q->kernel_if->num_pages, 1,
748 consume_q->kernel_if->u.h.header_page);
	if (retval < (int)consume_q->kernel_if->num_pages) {
		pr_warn("get_user_pages(consume) failed (retval=%d)", retval);
		if (retval > 0)
			qp_release_pages(consume_q->kernel_if->u.h.header_page,
					 retval, false);
		qp_release_pages(produce_q->kernel_if->u.h.header_page,
				 produce_q->kernel_if->num_pages, false);
		err = VMCI_ERROR_NO_MEM;
	}
757
758 out:
759 return err;
760}
761
/*
 * Registers the specification of the user pages used for backing a queue
 * pair.  Enough information to map in pages is stored in the OS specific
 * part of the struct vmci_queue structure.
 */
767static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
768 struct vmci_queue *produce_q,
769 struct vmci_queue *consume_q)
770{
771 u64 produce_uva;
772 u64 consume_uva;
773
774
775
776
777
778
779 produce_uva = page_store->pages;
780 consume_uva = page_store->pages +
781 produce_q->kernel_if->num_pages * PAGE_SIZE;
782 return qp_host_get_user_memory(produce_uva, consume_uva, produce_q,
783 consume_q);
784}
785
786
787
788
789
790
791static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
792 struct vmci_queue *consume_q)
793{
794 qp_release_pages(produce_q->kernel_if->u.h.header_page,
795 produce_q->kernel_if->num_pages, true);
796 memset(produce_q->kernel_if->u.h.header_page, 0,
797 sizeof(*produce_q->kernel_if->u.h.header_page) *
798 produce_q->kernel_if->num_pages);
799 qp_release_pages(consume_q->kernel_if->u.h.header_page,
800 consume_q->kernel_if->num_pages, true);
801 memset(consume_q->kernel_if->u.h.header_page, 0,
802 sizeof(*consume_q->kernel_if->u.h.header_page) *
803 consume_q->kernel_if->num_pages);
804}
805
/*
 * Once qp_host_register_user_memory has been performed on a queue, the
 * queue pair headers can be mapped into the kernel address space with this
 * function.  Once mapped, they must be unmapped with qp_host_unmap_queues
 * prior to the kernel memory being freed.
 */
814static int qp_host_map_queues(struct vmci_queue *produce_q,
815 struct vmci_queue *consume_q)
816{
817 int result;
818
819 if (!produce_q->q_header || !consume_q->q_header) {
820 struct page *headers[2];
821
822 if (produce_q->q_header != consume_q->q_header)
823 return VMCI_ERROR_QUEUEPAIR_MISMATCH;
824
825 if (produce_q->kernel_if->u.h.header_page == NULL ||
826 *produce_q->kernel_if->u.h.header_page == NULL)
827 return VMCI_ERROR_UNAVAILABLE;
828
829 headers[0] = *produce_q->kernel_if->u.h.header_page;
830 headers[1] = *consume_q->kernel_if->u.h.header_page;
831
832 produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
833 if (produce_q->q_header != NULL) {
834 consume_q->q_header =
835 (struct vmci_queue_header *)((u8 *)
836 produce_q->q_header +
837 PAGE_SIZE);
838 result = VMCI_SUCCESS;
839 } else {
840 pr_warn("vmap failed\n");
841 result = VMCI_ERROR_NO_MEM;
842 }
843 } else {
844 result = VMCI_SUCCESS;
845 }
846
847 return result;
848}
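
/*
 * Note that qp_host_map_queues() above vmaps the two header pages as one
 * contiguous two-page region, which is why the consume queue header is
 * simply addressed at produce_q->q_header + PAGE_SIZE.
 */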

/*
 * Unmaps previously mapped queue pair headers from the kernel address
 * space.
 */
854static int qp_host_unmap_queues(u32 gid,
855 struct vmci_queue *produce_q,
856 struct vmci_queue *consume_q)
857{
858 if (produce_q->q_header) {
859 if (produce_q->q_header < consume_q->q_header)
860 vunmap(produce_q->q_header);
861 else
862 vunmap(consume_q->q_header);
863
864 produce_q->q_header = NULL;
865 consume_q->q_header = NULL;
866 }
867
868 return VMCI_SUCCESS;
869}
870
871
872
873
874
875static struct qp_entry *qp_list_find(struct qp_list *qp_list,
876 struct vmci_handle handle)
877{
878 struct qp_entry *entry;
879
880 if (vmci_handle_is_invalid(handle))
881 return NULL;
882
883 list_for_each_entry(entry, &qp_list->head, list_item) {
884 if (vmci_handle_is_equal(entry->handle, handle))
885 return entry;
886 }
887
888 return NULL;
889}
890
891
892
893
894static struct qp_guest_endpoint *
895qp_guest_handle_to_entry(struct vmci_handle handle)
896{
897 struct qp_guest_endpoint *entry;
898 struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle);
899
900 entry = qp ? container_of(
901 qp, struct qp_guest_endpoint, qp) : NULL;
902 return entry;
903}
904
905
906
907
908static struct qp_broker_entry *
909qp_broker_handle_to_entry(struct vmci_handle handle)
910{
911 struct qp_broker_entry *entry;
912 struct qp_entry *qp = qp_list_find(&qp_broker_list, handle);
913
914 entry = qp ? container_of(
915 qp, struct qp_broker_entry, qp) : NULL;
916 return entry;
917}
918
919
920
921
922
923static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
924{
925 u32 context_id = vmci_get_context_id();
926 struct vmci_event_qp ev;
927
928 ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
929 ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
930 VMCI_CONTEXT_RESOURCE_ID);
931 ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
932 ev.msg.event_data.event =
933 attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
934 ev.payload.peer_id = context_id;
935 ev.payload.handle = handle;
936
937 return vmci_event_dispatch(&ev.msg.hdr);
938}
939
940
941
942
943
944
945
946
947static struct qp_guest_endpoint *
948qp_guest_endpoint_create(struct vmci_handle handle,
949 u32 peer,
950 u32 flags,
951 u64 produce_size,
952 u64 consume_size,
953 void *produce_q,
954 void *consume_q)
955{
956 int result;
957 struct qp_guest_endpoint *entry;
958
959 const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) +
960 DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;
961
962 if (vmci_handle_is_invalid(handle)) {
963 u32 context_id = vmci_get_context_id();
964
965 handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
966 }
967
968 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
969 if (entry) {
970 entry->qp.peer = peer;
971 entry->qp.flags = flags;
972 entry->qp.produce_size = produce_size;
973 entry->qp.consume_size = consume_size;
974 entry->qp.ref_count = 0;
975 entry->num_ppns = num_ppns;
976 entry->produce_q = produce_q;
977 entry->consume_q = consume_q;
978 INIT_LIST_HEAD(&entry->qp.list_item);
979
980
981 result = vmci_resource_add(&entry->resource,
982 VMCI_RESOURCE_TYPE_QPAIR_GUEST,
983 handle);
984 entry->qp.handle = vmci_resource_handle(&entry->resource);
985 if ((result != VMCI_SUCCESS) ||
986 qp_list_find(&qp_guest_endpoints, entry->qp.handle)) {
987 pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
988 handle.context, handle.resource, result);
989 kfree(entry);
990 entry = NULL;
991 }
992 }
993 return entry;
994}
995
996
997
998
999static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
1000{
1001 qp_free_ppn_set(&entry->ppn_set);
1002 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
1003 qp_free_queue(entry->produce_q, entry->qp.produce_size);
1004 qp_free_queue(entry->consume_q, entry->qp.consume_size);
1005
1006 vmci_resource_remove(&entry->resource);
1007
1008 kfree(entry);
1009}
1010
1011
1012
1013
1014
1015static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
1016{
1017 struct vmci_qp_alloc_msg *alloc_msg;
1018 size_t msg_size;
1019 int result;
1020
1021 if (!entry || entry->num_ppns <= 2)
1022 return VMCI_ERROR_INVALID_ARGS;
1023
1024 msg_size = sizeof(*alloc_msg) +
1025 (size_t) entry->num_ppns * sizeof(u32);
1026 alloc_msg = kmalloc(msg_size, GFP_KERNEL);
1027 if (!alloc_msg)
1028 return VMCI_ERROR_NO_MEM;
1029
1030 alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
1031 VMCI_QUEUEPAIR_ALLOC);
1032 alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
1033 alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
1034 alloc_msg->handle = entry->qp.handle;
1035 alloc_msg->peer = entry->qp.peer;
1036 alloc_msg->flags = entry->qp.flags;
1037 alloc_msg->produce_size = entry->qp.produce_size;
1038 alloc_msg->consume_size = entry->qp.consume_size;
1039 alloc_msg->num_ppns = entry->num_ppns;
1040
1041 result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg),
1042 &entry->ppn_set);
1043 if (result == VMCI_SUCCESS)
1044 result = vmci_send_datagram(&alloc_msg->hdr);
1045
1046 kfree(alloc_msg);
1047
1048 return result;
1049}
1050
1051
1052
1053
1054
1055static int qp_detatch_hypercall(struct vmci_handle handle)
1056{
1057 struct vmci_qp_detach_msg detach_msg;
1058
1059 detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
1060 VMCI_QUEUEPAIR_DETACH);
1061 detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
1062 detach_msg.hdr.payload_size = sizeof(handle);
1063 detach_msg.handle = handle;
1064
1065 return vmci_send_datagram(&detach_msg.hdr);
1066}
1067
1068
1069
1070
1071static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry)
1072{
1073 if (entry)
1074 list_add(&entry->list_item, &qp_list->head);
1075}
1076
1077
1078
1079
1080static void qp_list_remove_entry(struct qp_list *qp_list,
1081 struct qp_entry *entry)
1082{
1083 if (entry)
1084 list_del(&entry->list_item);
1085}
1086
1087
1088
1089
1090
1091static int qp_detatch_guest_work(struct vmci_handle handle)
1092{
1093 int result;
1094 struct qp_guest_endpoint *entry;
1095 u32 ref_count = ~0;
1096
1097 mutex_lock(&qp_guest_endpoints.mutex);
1098
1099 entry = qp_guest_handle_to_entry(handle);
1100 if (!entry) {
1101 mutex_unlock(&qp_guest_endpoints.mutex);
1102 return VMCI_ERROR_NOT_FOUND;
1103 }
1104
1105 if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
1106 result = VMCI_SUCCESS;
1107
1108 if (entry->qp.ref_count > 1) {
1109 result = qp_notify_peer_local(false, handle);

			/*
			 * We can fail to notify a local queuepair
			 * because we can't allocate.  We still want
			 * to release the entry if that happens, so
			 * don't bail out yet.
			 */
1116 }
1117 } else {
1118 result = qp_detatch_hypercall(handle);
1119 if (result < VMCI_SUCCESS) {
			/*
			 * We failed to notify a non-local queuepair.
			 * That other queuepair might still be
			 * accessing the shared memory, so don't
			 * release the entry yet.  It will get cleaned
			 * up by vmci_qp_guest_endpoints_exit() if
			 * necessary (assuming we are going away,
			 * otherwise why did this fail?).
			 */

1130 mutex_unlock(&qp_guest_endpoints.mutex);
1131 return result;
1132 }
1133 }

	/*
	 * If we get here then we either failed to notify a local queuepair,
	 * or we succeeded in all cases.  Release the entry if required.
	 */

1140 entry->qp.ref_count--;
1141 if (entry->qp.ref_count == 0)
1142 qp_list_remove_entry(&qp_guest_endpoints, &entry->qp);
1143
1144
1145 if (entry)
1146 ref_count = entry->qp.ref_count;
1147
1148 mutex_unlock(&qp_guest_endpoints.mutex);
1149
1150 if (ref_count == 0)
1151 qp_guest_endpoint_destroy(entry);
1152
1153 return result;
1154}
1155
1156
1157
1158
1159
1160
1161static int qp_alloc_guest_work(struct vmci_handle *handle,
1162 struct vmci_queue **produce_q,
1163 u64 produce_size,
1164 struct vmci_queue **consume_q,
1165 u64 consume_size,
1166 u32 peer,
1167 u32 flags,
1168 u32 priv_flags)
1169{
1170 const u64 num_produce_pages =
1171 DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
1172 const u64 num_consume_pages =
1173 DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
1174 void *my_produce_q = NULL;
1175 void *my_consume_q = NULL;
1176 int result;
1177 struct qp_guest_endpoint *queue_pair_entry = NULL;
1178
1179 if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
1180 return VMCI_ERROR_NO_ACCESS;
1181
1182 mutex_lock(&qp_guest_endpoints.mutex);
1183
1184 queue_pair_entry = qp_guest_handle_to_entry(*handle);
1185 if (queue_pair_entry) {
1186 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
1187
1188 if (queue_pair_entry->qp.ref_count > 1) {
1189 pr_devel("Error attempting to attach more than once\n");
1190 result = VMCI_ERROR_UNAVAILABLE;
1191 goto error_keep_entry;
1192 }
1193
1194 if (queue_pair_entry->qp.produce_size != consume_size ||
1195 queue_pair_entry->qp.consume_size !=
1196 produce_size ||
1197 queue_pair_entry->qp.flags !=
1198 (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
1199 pr_devel("Error mismatched queue pair in local attach\n");
1200 result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
1201 goto error_keep_entry;
1202 }
1203
			/*
			 * Do a local attach.  We swap the consume and
			 * produce queues for the attacher and deposit
			 * them in *produce_q and *consume_q.
			 */
1209 result = qp_notify_peer_local(true, *handle);
1210 if (result < VMCI_SUCCESS)
1211 goto error_keep_entry;
1212
1213 my_produce_q = queue_pair_entry->consume_q;
1214 my_consume_q = queue_pair_entry->produce_q;
1215 goto out;
1216 }
1217
1218 result = VMCI_ERROR_ALREADY_EXISTS;
1219 goto error_keep_entry;
1220 }
1221
1222 my_produce_q = qp_alloc_queue(produce_size, flags);
1223 if (!my_produce_q) {
1224 pr_warn("Error allocating pages for produce queue\n");
1225 result = VMCI_ERROR_NO_MEM;
1226 goto error;
1227 }
1228
1229 my_consume_q = qp_alloc_queue(consume_size, flags);
1230 if (!my_consume_q) {
1231 pr_warn("Error allocating pages for consume queue\n");
1232 result = VMCI_ERROR_NO_MEM;
1233 goto error;
1234 }
1235
1236 queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
1237 produce_size, consume_size,
1238 my_produce_q, my_consume_q);
1239 if (!queue_pair_entry) {
1240 pr_warn("Error allocating memory in %s\n", __func__);
1241 result = VMCI_ERROR_NO_MEM;
1242 goto error;
1243 }
1244
1245 result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q,
1246 num_consume_pages,
1247 &queue_pair_entry->ppn_set);
1248 if (result < VMCI_SUCCESS) {
1249 pr_warn("qp_alloc_ppn_set failed\n");
1250 goto error;
1251 }
1252
1253
1254
1255
1256
1257 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
1258
1259 u32 context_id = vmci_get_context_id();
1260

		/*
		 * Enforce similar checks on local queue pairs as we
		 * do for regular ones.  The handle's context must
		 * match the creator or attacher context id (here they
		 * are both the current context id), and the
		 * attach-only flag cannot exist during create.  We
		 * also ensure the specified peer is this context or
		 * an invalid one.
		 */
1270 if (queue_pair_entry->qp.handle.context != context_id ||
1271 (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
1272 queue_pair_entry->qp.peer != context_id)) {
1273 result = VMCI_ERROR_NO_ACCESS;
1274 goto error;
1275 }
1276
1277 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
1278 result = VMCI_ERROR_NOT_FOUND;
1279 goto error;
1280 }
1281 } else {
1282 result = qp_alloc_hypercall(queue_pair_entry);
1283 if (result < VMCI_SUCCESS) {
1284 pr_warn("qp_alloc_hypercall result = %d\n", result);
1285 goto error;
1286 }
1287 }
1288
1289 qp_init_queue_mutex((struct vmci_queue *)my_produce_q,
1290 (struct vmci_queue *)my_consume_q);
1291
1292 qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);
1293
1294 out:
1295 queue_pair_entry->qp.ref_count++;
1296 *handle = queue_pair_entry->qp.handle;
1297 *produce_q = (struct vmci_queue *)my_produce_q;
1298 *consume_q = (struct vmci_queue *)my_consume_q;
1299

	/*
	 * We should initialize the queue pair header pages on a local
	 * queue pair create, for both endpoints of the queue pair.
	 * For non-local queue pairs, the hypervisor initializes the
	 * header pages in the create step.
	 */
1305 if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
1306 queue_pair_entry->qp.ref_count == 1) {
1307 vmci_q_header_init((*produce_q)->q_header, *handle);
1308 vmci_q_header_init((*consume_q)->q_header, *handle);
1309 }
1310
1311 mutex_unlock(&qp_guest_endpoints.mutex);
1312
1313 return VMCI_SUCCESS;
1314
1315 error:
1316 mutex_unlock(&qp_guest_endpoints.mutex);
1317 if (queue_pair_entry) {
1318
1319 qp_guest_endpoint_destroy(queue_pair_entry);
1320 } else {
1321 qp_free_queue(my_produce_q, produce_size);
1322 qp_free_queue(my_consume_q, consume_size);
1323 }
1324 return result;
1325
1326 error_keep_entry:
1327
1328 mutex_unlock(&qp_guest_endpoints.mutex);
1329 return result;
1330}
1331
/*
 * The first endpoint issuing a queue pair allocation will create the state
 * of the queue pair in the queue pair broker.
 *
 * If the creator is a guest, it will associate a VMX virtual address range
 * with the queue pair as specified by the page_store.  For compatibility
 * with older VMX'en, that would use a separate step to set the VMX virtual
 * address range, the virtual address range can be registered later using
 * vmci_qp_broker_set_page_store.  In that case, a page_store of NULL
 * should be used.
 *
 * If the creator is the host, a page_store of NULL should be used as well,
 * since the host is not able to supply a page store for the queue pair.
 *
 * For older VMX and host callers, the queue pair will be created in the
 * VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be
 * created in the VMCIQPB_CREATED_MEM state.
 */
1350static int qp_broker_create(struct vmci_handle handle,
1351 u32 peer,
1352 u32 flags,
1353 u32 priv_flags,
1354 u64 produce_size,
1355 u64 consume_size,
1356 struct vmci_qp_page_store *page_store,
1357 struct vmci_ctx *context,
1358 vmci_event_release_cb wakeup_cb,
1359 void *client_data, struct qp_broker_entry **ent)
1360{
1361 struct qp_broker_entry *entry = NULL;
1362 const u32 context_id = vmci_ctx_get_id(context);
1363 bool is_local = flags & VMCI_QPFLAG_LOCAL;
1364 int result;
1365 u64 guest_produce_size;
1366 u64 guest_consume_size;
1367
1368
1369 if (flags & VMCI_QPFLAG_ATTACH_ONLY)
1370 return VMCI_ERROR_NOT_FOUND;
1371
1372
1373
1374
1375
1376 if (handle.context != context_id && handle.context != peer)
1377 return VMCI_ERROR_NO_ACCESS;
1378
1379 if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer))
1380 return VMCI_ERROR_DST_UNREACHABLE;
1381
1382
1383
1384
1385
1386 if (is_local && peer != VMCI_INVALID_ID && context_id != peer)
1387 return VMCI_ERROR_NO_ACCESS;
1388
1389 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1390 if (!entry)
1391 return VMCI_ERROR_NO_MEM;
1392
1393 if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) {
1394
1395
1396
1397
1398
1399
1400
1401
1402 guest_produce_size = consume_size;
1403 guest_consume_size = produce_size;
1404 } else {
1405 guest_produce_size = produce_size;
1406 guest_consume_size = consume_size;
1407 }
1408
1409 entry->qp.handle = handle;
1410 entry->qp.peer = peer;
1411 entry->qp.flags = flags;
1412 entry->qp.produce_size = guest_produce_size;
1413 entry->qp.consume_size = guest_consume_size;
1414 entry->qp.ref_count = 1;
1415 entry->create_id = context_id;
1416 entry->attach_id = VMCI_INVALID_ID;
1417 entry->state = VMCIQPB_NEW;
1418 entry->require_trusted_attach =
1419 !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED);
1420 entry->created_by_trusted =
1421 !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED);
1422 entry->vmci_page_files = false;
1423 entry->wakeup_cb = wakeup_cb;
1424 entry->client_data = client_data;
1425 entry->produce_q = qp_host_alloc_queue(guest_produce_size);
1426 if (entry->produce_q == NULL) {
1427 result = VMCI_ERROR_NO_MEM;
1428 goto error;
1429 }
1430 entry->consume_q = qp_host_alloc_queue(guest_consume_size);
1431 if (entry->consume_q == NULL) {
1432 result = VMCI_ERROR_NO_MEM;
1433 goto error;
1434 }
1435
1436 qp_init_queue_mutex(entry->produce_q, entry->consume_q);
1437
1438 INIT_LIST_HEAD(&entry->qp.list_item);
1439
1440 if (is_local) {
1441 u8 *tmp;
1442
1443 entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp),
1444 PAGE_SIZE, GFP_KERNEL);
1445 if (entry->local_mem == NULL) {
1446 result = VMCI_ERROR_NO_MEM;
1447 goto error;
1448 }
1449 entry->state = VMCIQPB_CREATED_MEM;
1450 entry->produce_q->q_header = entry->local_mem;
1451 tmp = (u8 *)entry->local_mem + PAGE_SIZE *
1452 (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1);
1453 entry->consume_q->q_header = (struct vmci_queue_header *)tmp;
1454 } else if (page_store) {
1455
1456
1457
1458
1459 result = qp_host_register_user_memory(page_store,
1460 entry->produce_q,
1461 entry->consume_q);
1462 if (result < VMCI_SUCCESS)
1463 goto error;
1464
1465 entry->state = VMCIQPB_CREATED_MEM;
1466 } else {
		/*
		 * A create without a page_store may be either a host
		 * side create (in which case we are waiting for the
		 * guest side to supply the memory) or an old style
		 * queue pair create.  In either case the memory is
		 * supplied later, so the queue pair starts out
		 * without backing memory.
		 */
1474 entry->state = VMCIQPB_CREATED_NO_MEM;
1475 }
1476
1477 qp_list_add_entry(&qp_broker_list, &entry->qp);
1478 if (ent != NULL)
1479 *ent = entry;
1480
1481
1482 result = vmci_resource_add(&entry->resource,
1483 VMCI_RESOURCE_TYPE_QPAIR_HOST,
1484 handle);
1485 if (result != VMCI_SUCCESS) {
1486 pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
1487 handle.context, handle.resource, result);
1488 goto error;
1489 }
1490
1491 entry->qp.handle = vmci_resource_handle(&entry->resource);
1492 if (is_local) {
1493 vmci_q_header_init(entry->produce_q->q_header,
1494 entry->qp.handle);
1495 vmci_q_header_init(entry->consume_q->q_header,
1496 entry->qp.handle);
1497 }
1498
1499 vmci_ctx_qp_create(context, entry->qp.handle);
1500
1501 return VMCI_SUCCESS;
1502
1503 error:
1504 if (entry != NULL) {
1505 qp_host_free_queue(entry->produce_q, guest_produce_size);
1506 qp_host_free_queue(entry->consume_q, guest_consume_size);
1507 kfree(entry);
1508 }
1509
1510 return result;
1511}
1512
/*
 * Enqueues an event datagram to notify the peer VM attached to the given
 * queue pair handle about an attach/detach event by the given VM.  Returns
 * the payload size of the datagram enqueued on success, error code
 * otherwise.
 */
1519static int qp_notify_peer(bool attach,
1520 struct vmci_handle handle,
1521 u32 my_id,
1522 u32 peer_id)
1523{
1524 int rv;
1525 struct vmci_event_qp ev;
1526
1527 if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID ||
1528 peer_id == VMCI_INVALID_ID)
1529 return VMCI_ERROR_INVALID_ARGS;
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539 ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
1540 ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
1541 VMCI_CONTEXT_RESOURCE_ID);
1542 ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
1543 ev.msg.event_data.event = attach ?
1544 VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
1545 ev.payload.handle = handle;
1546 ev.payload.peer_id = my_id;
1547
1548 rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
1549 &ev.msg.hdr, false);
1550 if (rv < VMCI_SUCCESS)
1551 pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n",
1552 attach ? "ATTACH" : "DETACH", peer_id);
1553
1554 return rv;
1555}
1556
/*
 * The second endpoint issuing a queue pair allocation will attach to
 * the queue pair registered with the queue pair broker.
 *
 * If the attacher is a guest, it will associate a VMX virtual address
 * range with the queue pair as specified by the page_store.  At this
 * point, the already attached host endpoint may start using the queue
 * pair, and an attach event is sent to it.  For compatibility with older
 * VMX'en, that used a separate step to set the VMX virtual address range,
 * the virtual address range can be registered later using
 * vmci_qp_broker_set_page_store.  In that case, a page_store of NULL
 * should be used, and the attach event will be generated once the actual
 * page store has been set.
 *
 * If the attacher is the host, a page_store of NULL should be used as
 * well, since the page store information is already set by the guest.
 *
 * For new VMX and host callers, the queue pair will be moved to the
 * VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be
 * moved to the VMCIQPB_ATTACHED_NO_MEM state.
 */
1578static int qp_broker_attach(struct qp_broker_entry *entry,
1579 u32 peer,
1580 u32 flags,
1581 u32 priv_flags,
1582 u64 produce_size,
1583 u64 consume_size,
1584 struct vmci_qp_page_store *page_store,
1585 struct vmci_ctx *context,
1586 vmci_event_release_cb wakeup_cb,
1587 void *client_data,
1588 struct qp_broker_entry **ent)
1589{
1590 const u32 context_id = vmci_ctx_get_id(context);
1591 bool is_local = flags & VMCI_QPFLAG_LOCAL;
1592 int result;
1593
1594 if (entry->state != VMCIQPB_CREATED_NO_MEM &&
1595 entry->state != VMCIQPB_CREATED_MEM)
1596 return VMCI_ERROR_UNAVAILABLE;
1597
1598 if (is_local) {
1599 if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) ||
1600 context_id != entry->create_id) {
1601 return VMCI_ERROR_INVALID_ARGS;
1602 }
1603 } else if (context_id == entry->create_id ||
1604 context_id == entry->attach_id) {
1605 return VMCI_ERROR_ALREADY_EXISTS;
1606 }
1607
1608 if (VMCI_CONTEXT_IS_VM(context_id) &&
1609 VMCI_CONTEXT_IS_VM(entry->create_id))
1610 return VMCI_ERROR_DST_UNREACHABLE;
1611
1612
1613
1614
1615
1616 if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
1617 !entry->created_by_trusted)
1618 return VMCI_ERROR_NO_ACCESS;
1619
1620
1621
1622
1623
1624 if (entry->require_trusted_attach &&
1625 (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED)))
1626 return VMCI_ERROR_NO_ACCESS;
1627
1628
1629
1630
1631
1632 if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
1633 return VMCI_ERROR_NO_ACCESS;
1634
1635 if (entry->create_id == VMCI_HOST_CONTEXT_ID) {
1636
1637
1638
1639
1640
1641 if (!vmci_ctx_supports_host_qp(context))
1642 return VMCI_ERROR_INVALID_RESOURCE;
1643
1644 } else if (context_id == VMCI_HOST_CONTEXT_ID) {
1645 struct vmci_ctx *create_context;
1646 bool supports_host_qp;
1647
1648
1649
1650
1651
1652
1653 create_context = vmci_ctx_get(entry->create_id);
1654 supports_host_qp = vmci_ctx_supports_host_qp(create_context);
1655 vmci_ctx_put(create_context);
1656
1657 if (!supports_host_qp)
1658 return VMCI_ERROR_INVALID_RESOURCE;
1659 }
1660
1661 if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER))
1662 return VMCI_ERROR_QUEUEPAIR_MISMATCH;
1663
1664 if (context_id != VMCI_HOST_CONTEXT_ID) {
1665
1666
1667
1668
1669
1670
1671 if (entry->qp.produce_size != produce_size ||
1672 entry->qp.consume_size != consume_size) {
1673 return VMCI_ERROR_QUEUEPAIR_MISMATCH;
1674 }
1675 } else if (entry->qp.produce_size != consume_size ||
1676 entry->qp.consume_size != produce_size) {
1677 return VMCI_ERROR_QUEUEPAIR_MISMATCH;
1678 }
1679
1680 if (context_id != VMCI_HOST_CONTEXT_ID) {
		/*
		 * If a guest attaches to a queue pair, it will supply
		 * the backing memory.  If this is a pre NOVMVM vmx,
		 * the backing memory will be supplied by calling
		 * vmci_qp_broker_set_page_store() following the
		 * return of the vmci_qp_broker_alloc() call.  If it
		 * is a vmx of version NOVMVM or later, the page store
		 * must be supplied as part of the
		 * vmci_qp_broker_alloc call.  Under all circumstances
		 * the initially created queue pair must not have any
		 * memory associated with it already.
		 */

1694 if (entry->state != VMCIQPB_CREATED_NO_MEM)
1695 return VMCI_ERROR_INVALID_ARGS;
1696
1697 if (page_store != NULL) {
1698
1699
1700
1701
1702
1703
1704
1705 result = qp_host_register_user_memory(page_store,
1706 entry->produce_q,
1707 entry->consume_q);
1708 if (result < VMCI_SUCCESS)
1709 return result;
1710
1711 entry->state = VMCIQPB_ATTACHED_MEM;
1712 } else {
1713 entry->state = VMCIQPB_ATTACHED_NO_MEM;
1714 }
1715 } else if (entry->state == VMCIQPB_CREATED_NO_MEM) {
1716
1717
1718
1719
1720
1721
1722
1723 return VMCI_ERROR_UNAVAILABLE;
1724 } else {
1725
1726 entry->state = VMCIQPB_ATTACHED_MEM;
1727 }
1728
1729 if (entry->state == VMCIQPB_ATTACHED_MEM) {
1730 result =
1731 qp_notify_peer(true, entry->qp.handle, context_id,
1732 entry->create_id);
1733 if (result < VMCI_SUCCESS)
1734 pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
1735 entry->create_id, entry->qp.handle.context,
1736 entry->qp.handle.resource);
1737 }
1738
1739 entry->attach_id = context_id;
1740 entry->qp.ref_count++;
1741 if (wakeup_cb) {
1742 entry->wakeup_cb = wakeup_cb;
1743 entry->client_data = client_data;
1744 }
1745
1746
1747
1748
1749
1750 if (!is_local)
1751 vmci_ctx_qp_create(context, entry->qp.handle);
1752
1753 if (ent != NULL)
1754 *ent = entry;
1755
1756 return VMCI_SUCCESS;
1757}
1758
/*
 * queue_pair_Alloc for use when setting up queue pair endpoints
 * on the host.
 */
1763static int qp_broker_alloc(struct vmci_handle handle,
1764 u32 peer,
1765 u32 flags,
1766 u32 priv_flags,
1767 u64 produce_size,
1768 u64 consume_size,
1769 struct vmci_qp_page_store *page_store,
1770 struct vmci_ctx *context,
1771 vmci_event_release_cb wakeup_cb,
1772 void *client_data,
1773 struct qp_broker_entry **ent,
1774 bool *swap)
1775{
1776 const u32 context_id = vmci_ctx_get_id(context);
1777 bool create;
1778 struct qp_broker_entry *entry = NULL;
1779 bool is_local = flags & VMCI_QPFLAG_LOCAL;
1780 int result;
1781
1782 if (vmci_handle_is_invalid(handle) ||
1783 (flags & ~VMCI_QP_ALL_FLAGS) || is_local ||
1784 !(produce_size || consume_size) ||
1785 !context || context_id == VMCI_INVALID_ID ||
1786 handle.context == VMCI_INVALID_ID) {
1787 return VMCI_ERROR_INVALID_ARGS;
1788 }
1789
1790 if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store))
1791 return VMCI_ERROR_INVALID_ARGS;
1792
1793
1794
1795
1796
1797
1798 mutex_lock(&qp_broker_list.mutex);
1799
1800 if (!is_local && vmci_ctx_qp_exists(context, handle)) {
1801 pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n",
1802 context_id, handle.context, handle.resource);
1803 mutex_unlock(&qp_broker_list.mutex);
1804 return VMCI_ERROR_ALREADY_EXISTS;
1805 }
1806
1807 if (handle.resource != VMCI_INVALID_ID)
1808 entry = qp_broker_handle_to_entry(handle);
1809
1810 if (!entry) {
1811 create = true;
1812 result =
1813 qp_broker_create(handle, peer, flags, priv_flags,
1814 produce_size, consume_size, page_store,
1815 context, wakeup_cb, client_data, ent);
1816 } else {
1817 create = false;
1818 result =
1819 qp_broker_attach(entry, peer, flags, priv_flags,
1820 produce_size, consume_size, page_store,
1821 context, wakeup_cb, client_data, ent);
1822 }
1823
1824 mutex_unlock(&qp_broker_list.mutex);
1825
1826 if (swap)
1827 *swap = (context_id == VMCI_HOST_CONTEXT_ID) &&
1828 !(create && is_local);
1829
1830 return result;
1831}
1832
1833
1834
1835
1836
1837static int qp_alloc_host_work(struct vmci_handle *handle,
1838 struct vmci_queue **produce_q,
1839 u64 produce_size,
1840 struct vmci_queue **consume_q,
1841 u64 consume_size,
1842 u32 peer,
1843 u32 flags,
1844 u32 priv_flags,
1845 vmci_event_release_cb wakeup_cb,
1846 void *client_data)
1847{
1848 struct vmci_handle new_handle;
1849 struct vmci_ctx *context;
1850 struct qp_broker_entry *entry;
1851 int result;
1852 bool swap;
1853
1854 if (vmci_handle_is_invalid(*handle)) {
1855 new_handle = vmci_make_handle(
1856 VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID);
1857 } else
1858 new_handle = *handle;
1859
1860 context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
1861 entry = NULL;
1862 result =
1863 qp_broker_alloc(new_handle, peer, flags, priv_flags,
1864 produce_size, consume_size, NULL, context,
1865 wakeup_cb, client_data, &entry, &swap);
1866 if (result == VMCI_SUCCESS) {
1867 if (swap) {
1868
1869
1870
1871
1872
1873
1874 *produce_q = entry->consume_q;
1875 *consume_q = entry->produce_q;
1876 } else {
1877 *produce_q = entry->produce_q;
1878 *consume_q = entry->consume_q;
1879 }
1880
1881 *handle = vmci_resource_handle(&entry->resource);
1882 } else {
1883 *handle = VMCI_INVALID_HANDLE;
1884 pr_devel("queue pair broker failed to alloc (result=%d)\n",
1885 result);
1886 }
1887 vmci_ctx_put(context);
1888 return result;
1889}
1890
/*
 * Allocates a VMCI queue_pair.  Only checks validity of input arguments.
 * The real work is done in the host or guest specific function.
 */
1896int vmci_qp_alloc(struct vmci_handle *handle,
1897 struct vmci_queue **produce_q,
1898 u64 produce_size,
1899 struct vmci_queue **consume_q,
1900 u64 consume_size,
1901 u32 peer,
1902 u32 flags,
1903 u32 priv_flags,
1904 bool guest_endpoint,
1905 vmci_event_release_cb wakeup_cb,
1906 void *client_data)
1907{
1908 if (!handle || !produce_q || !consume_q ||
1909 (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS))
1910 return VMCI_ERROR_INVALID_ARGS;
1911
1912 if (guest_endpoint) {
1913 return qp_alloc_guest_work(handle, produce_q,
1914 produce_size, consume_q,
1915 consume_size, peer,
1916 flags, priv_flags);
1917 } else {
1918 return qp_alloc_host_work(handle, produce_q,
1919 produce_size, consume_q,
1920 consume_size, peer, flags,
1921 priv_flags, wakeup_cb, client_data);
1922 }
1923}
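
/*
 * Note for callers of vmci_qp_alloc(): on the guest path
 * (guest_endpoint == true) the wakeup_cb and client_data arguments are not
 * used by qp_alloc_guest_work(), so NULL may be passed for both.
 */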
1924
1925
1926
1927
1928
1929static int qp_detatch_host_work(struct vmci_handle handle)
1930{
1931 int result;
1932 struct vmci_ctx *context;
1933
1934 context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
1935
1936 result = vmci_qp_broker_detach(handle, context);
1937
1938 vmci_ctx_put(context);
1939 return result;
1940}
1941
1942
1943
1944
1945
1946static int qp_detatch(struct vmci_handle handle, bool guest_endpoint)
1947{
1948 if (vmci_handle_is_invalid(handle))
1949 return VMCI_ERROR_INVALID_ARGS;
1950
1951 if (guest_endpoint)
1952 return qp_detatch_guest_work(handle);
1953 else
1954 return qp_detatch_host_work(handle);
1955}
1956
1957
1958
1959
1960
1961static struct qp_entry *qp_list_get_head(struct qp_list *qp_list)
1962{
1963 if (!list_empty(&qp_list->head)) {
1964 struct qp_entry *entry =
1965 list_first_entry(&qp_list->head, struct qp_entry,
1966 list_item);
1967 return entry;
1968 }
1969
1970 return NULL;
1971}
1972
1973void vmci_qp_broker_exit(void)
1974{
1975 struct qp_entry *entry;
1976 struct qp_broker_entry *be;
1977
1978 mutex_lock(&qp_broker_list.mutex);
1979
1980 while ((entry = qp_list_get_head(&qp_broker_list))) {
		be = container_of(entry, struct qp_broker_entry, qp);

1983 qp_list_remove_entry(&qp_broker_list, entry);
1984 kfree(be);
1985 }
1986
1987 mutex_unlock(&qp_broker_list.mutex);
1988}
1989
/*
 * Requests that a queue pair be allocated with the VMCI queue pair broker.
 * Allocates a queue pair entry if one does not exist.  Attaches to one if
 * it exists, and retrieves the page files backing that queue_pair.
 * Assumes that the queue pair broker lock is held.
 */
1997int vmci_qp_broker_alloc(struct vmci_handle handle,
1998 u32 peer,
1999 u32 flags,
2000 u32 priv_flags,
2001 u64 produce_size,
2002 u64 consume_size,
2003 struct vmci_qp_page_store *page_store,
2004 struct vmci_ctx *context)
2005{
2006 return qp_broker_alloc(handle, peer, flags, priv_flags,
2007 produce_size, consume_size,
2008 page_store, context, NULL, NULL, NULL, NULL);
2009}
2010

/*
 * VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate step
 * to register the user virtual addresses of the queue pair memory with the
 * broker.  This function provides that backwards-compatible path: it pins
 * the produce and consume UVAs supplied by the VMX, maps the queue pair
 * headers into the kernel, and moves the broker entry from the
 * corresponding *_NO_MEM state to the *_MEM state.  If the pair becomes
 * fully attached as a result, an attach event is sent to the creating
 * peer.
 */
2027int vmci_qp_broker_set_page_store(struct vmci_handle handle,
2028 u64 produce_uva,
2029 u64 consume_uva,
2030 struct vmci_ctx *context)
2031{
2032 struct qp_broker_entry *entry;
2033 int result;
2034 const u32 context_id = vmci_ctx_get_id(context);
2035
2036 if (vmci_handle_is_invalid(handle) || !context ||
2037 context_id == VMCI_INVALID_ID)
2038 return VMCI_ERROR_INVALID_ARGS;
2039
	/*
	 * We only support guest to host queue pairs, so the VMX must
	 * supply UVAs for the mapped page files.
	 */
2045 if (produce_uva == 0 || consume_uva == 0)
2046 return VMCI_ERROR_INVALID_ARGS;
2047
2048 mutex_lock(&qp_broker_list.mutex);
2049
2050 if (!vmci_ctx_qp_exists(context, handle)) {
2051 pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2052 context_id, handle.context, handle.resource);
2053 result = VMCI_ERROR_NOT_FOUND;
2054 goto out;
2055 }
2056
2057 entry = qp_broker_handle_to_entry(handle);
2058 if (!entry) {
2059 result = VMCI_ERROR_NOT_FOUND;
2060 goto out;
2061 }

	/*
	 * If I'm the owner then I can set the page store.
	 *
	 * Or, if a host created the queue_pair and I'm the attached peer
	 * then I can set the page store.
	 */
2069 if (entry->create_id != context_id &&
2070 (entry->create_id != VMCI_HOST_CONTEXT_ID ||
2071 entry->attach_id != context_id)) {
2072 result = VMCI_ERROR_QUEUEPAIR_NOTOWNER;
2073 goto out;
2074 }
2075
2076 if (entry->state != VMCIQPB_CREATED_NO_MEM &&
2077 entry->state != VMCIQPB_ATTACHED_NO_MEM) {
2078 result = VMCI_ERROR_UNAVAILABLE;
2079 goto out;
2080 }
2081
2082 result = qp_host_get_user_memory(produce_uva, consume_uva,
2083 entry->produce_q, entry->consume_q);
2084 if (result < VMCI_SUCCESS)
2085 goto out;
2086
2087 result = qp_host_map_queues(entry->produce_q, entry->consume_q);
2088 if (result < VMCI_SUCCESS) {
2089 qp_host_unregister_user_memory(entry->produce_q,
2090 entry->consume_q);
2091 goto out;
2092 }
2093
2094 if (entry->state == VMCIQPB_CREATED_NO_MEM)
2095 entry->state = VMCIQPB_CREATED_MEM;
2096 else
2097 entry->state = VMCIQPB_ATTACHED_MEM;
2098
2099 entry->vmci_page_files = true;
2100
2101 if (entry->state == VMCIQPB_ATTACHED_MEM) {
2102 result =
2103 qp_notify_peer(true, handle, context_id, entry->create_id);
2104 if (result < VMCI_SUCCESS) {
2105 pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
2106 entry->create_id, entry->qp.handle.context,
2107 entry->qp.handle.resource);
2108 }
2109 }
2110
2111 result = VMCI_SUCCESS;
2112 out:
2113 mutex_unlock(&qp_broker_list.mutex);
2114 return result;
2115}
2116
2117
2118
2119
2120
2121
2122static void qp_reset_saved_headers(struct qp_broker_entry *entry)
2123{
2124 entry->produce_q->saved_header = NULL;
2125 entry->consume_q->saved_header = NULL;
2126}
2127
/*
 * The main entry point for detaching from a queue pair registered with the
 * queue pair broker.  If more than one endpoint is attached to the queue
 * pair, the first endpoint will mainly decrement a reference count and
 * generate a notification to its peer.  The last endpoint will clean up
 * the queue pair state registered with the broker.
 *
 * When a guest endpoint detaches, it will unmap and unregister the guest
 * memory backing the queue pair.  If the host is still attached, it will
 * no longer be able to access the queue pair content.
 *
 * If the queue pair is already in a state where there is no memory
 * registered for the queue pair (any *_NO_MEM state), it will transition
 * to the VMCIQPB_SHUTDOWN_NO_MEM state.  This will also happen if a guest
 * endpoint is the first of two endpoints to detach.  If the host endpoint
 * is the first out of two to detach, the queue pair will move to the
 * VMCIQPB_SHUTDOWN_MEM state.
 */
2146int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context)
2147{
2148 struct qp_broker_entry *entry;
2149 const u32 context_id = vmci_ctx_get_id(context);
2150 u32 peer_id;
2151 bool is_local = false;
2152 int result;
2153
2154 if (vmci_handle_is_invalid(handle) || !context ||
2155 context_id == VMCI_INVALID_ID) {
2156 return VMCI_ERROR_INVALID_ARGS;
2157 }
2158
2159 mutex_lock(&qp_broker_list.mutex);
2160
2161 if (!vmci_ctx_qp_exists(context, handle)) {
2162 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2163 context_id, handle.context, handle.resource);
2164 result = VMCI_ERROR_NOT_FOUND;
2165 goto out;
2166 }
2167
2168 entry = qp_broker_handle_to_entry(handle);
2169 if (!entry) {
2170 pr_devel("Context (ID=0x%x) reports being attached to queue pair(handle=0x%x:0x%x) that isn't present in broker\n",
2171 context_id, handle.context, handle.resource);
2172 result = VMCI_ERROR_NOT_FOUND;
2173 goto out;
2174 }
2175
2176 if (context_id != entry->create_id && context_id != entry->attach_id) {
2177 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2178 goto out;
2179 }
2180
2181 if (context_id == entry->create_id) {
2182 peer_id = entry->attach_id;
2183 entry->create_id = VMCI_INVALID_ID;
2184 } else {
2185 peer_id = entry->create_id;
2186 entry->attach_id = VMCI_INVALID_ID;
2187 }
2188 entry->qp.ref_count--;
2189
2190 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
2191
2192 if (context_id != VMCI_HOST_CONTEXT_ID) {
2193 bool headers_mapped;

		/*
		 * Pre NOVMVM vmx'en may detach from a queue pair
		 * before setting the page store, and in that case
		 * there is no user memory to detach from.  Also, more
		 * recent VMX'en may detach from a queue pair in the
		 * quiesced state.
		 */

2203 qp_acquire_queue_mutex(entry->produce_q);
2204 headers_mapped = entry->produce_q->q_header ||
2205 entry->consume_q->q_header;
2206 if (QPBROKERSTATE_HAS_MEM(entry)) {
2207 result =
2208 qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID,
2209 entry->produce_q,
2210 entry->consume_q);
2211 if (result < VMCI_SUCCESS)
2212 pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
2213 handle.context, handle.resource,
2214 result);
2215
			/*
			 * Both the VMX-supplied page-file case and the
			 * direct guest memory case unregister the user
			 * memory here.
			 */
			qp_host_unregister_user_memory(entry->produce_q,
						       entry->consume_q);
2224
2225 }
2226
2227 if (!headers_mapped)
2228 qp_reset_saved_headers(entry);
2229
2230 qp_release_queue_mutex(entry->produce_q);
2231
2232 if (!headers_mapped && entry->wakeup_cb)
2233 entry->wakeup_cb(entry->client_data);
2234
2235 } else {
2236 if (entry->wakeup_cb) {
2237 entry->wakeup_cb = NULL;
2238 entry->client_data = NULL;
2239 }
2240 }
2241
2242 if (entry->qp.ref_count == 0) {
2243 qp_list_remove_entry(&qp_broker_list, &entry->qp);
2244
2245 if (is_local)
2246 kfree(entry->local_mem);
2247
2248 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
2249 qp_host_free_queue(entry->produce_q, entry->qp.produce_size);
2250 qp_host_free_queue(entry->consume_q, entry->qp.consume_size);
2251
2252 vmci_resource_remove(&entry->resource);
2253
2254 kfree(entry);
2255
2256 vmci_ctx_qp_destroy(context, handle);
2257 } else {
2258 qp_notify_peer(false, handle, context_id, peer_id);
2259 if (context_id == VMCI_HOST_CONTEXT_ID &&
2260 QPBROKERSTATE_HAS_MEM(entry)) {
2261 entry->state = VMCIQPB_SHUTDOWN_MEM;
2262 } else {
2263 entry->state = VMCIQPB_SHUTDOWN_NO_MEM;
2264 }
2265
2266 if (!is_local)
2267 vmci_ctx_qp_destroy(context, handle);
2268
2269 }
2270 result = VMCI_SUCCESS;
2271 out:
2272 mutex_unlock(&qp_broker_list.mutex);
2273 return result;
2274}
2275

/*
 * Establishes the necessary mappings for a queue pair given a reference
 * to the queue pair guest memory.  This is usually called when a guest
 * is unquiesced and the VMX is allowed to map guest memory once again.
 */
2282int vmci_qp_broker_map(struct vmci_handle handle,
2283 struct vmci_ctx *context,
2284 u64 guest_mem)
2285{
2286 struct qp_broker_entry *entry;
2287 const u32 context_id = vmci_ctx_get_id(context);
2288 bool is_local = false;
2289 int result;
2290
2291 if (vmci_handle_is_invalid(handle) || !context ||
2292 context_id == VMCI_INVALID_ID)
2293 return VMCI_ERROR_INVALID_ARGS;
2294
2295 mutex_lock(&qp_broker_list.mutex);
2296
2297 if (!vmci_ctx_qp_exists(context, handle)) {
2298 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2299 context_id, handle.context, handle.resource);
2300 result = VMCI_ERROR_NOT_FOUND;
2301 goto out;
2302 }
2303
2304 entry = qp_broker_handle_to_entry(handle);
2305 if (!entry) {
2306 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
2307 context_id, handle.context, handle.resource);
2308 result = VMCI_ERROR_NOT_FOUND;
2309 goto out;
2310 }
2311
2312 if (context_id != entry->create_id && context_id != entry->attach_id) {
2313 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2314 goto out;
2315 }
2316
2317 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
2318 result = VMCI_SUCCESS;
2319
2320 if (context_id != VMCI_HOST_CONTEXT_ID) {
2321 struct vmci_qp_page_store page_store;
2322
2323 page_store.pages = guest_mem;
2324 page_store.len = QPE_NUM_PAGES(entry->qp);
2325
2326 qp_acquire_queue_mutex(entry->produce_q);
2327 qp_reset_saved_headers(entry);
2328 result =
2329 qp_host_register_user_memory(&page_store,
2330 entry->produce_q,
2331 entry->consume_q);
2332 qp_release_queue_mutex(entry->produce_q);
2333 if (result == VMCI_SUCCESS) {

			/* Move state from *_NO_MEM to *_MEM. */
			entry->state++;
2337
2338 if (entry->wakeup_cb)
2339 entry->wakeup_cb(entry->client_data);
2340 }
2341 }
2342
2343 out:
2344 mutex_unlock(&qp_broker_list.mutex);
2345 return result;
2346}
2347
2348
/*
 * Saves a snapshot of the queue headers for the given QP broker entry.
 * Should be used when guest memory is unmapped.  Returns VMCI_SUCCESS on
 * success, or an appropriate error code if the guest memory can't be
 * accessed.
 */
2355static int qp_save_headers(struct qp_broker_entry *entry)
2356{
2357 int result;
2358
2359 if (entry->produce_q->saved_header != NULL &&
2360 entry->consume_q->saved_header != NULL) {
2361
2362
2363
2364
2365
2366
2367 return VMCI_SUCCESS;
2368 }
2369
2370 if (NULL == entry->produce_q->q_header ||
2371 NULL == entry->consume_q->q_header) {
2372 result = qp_host_map_queues(entry->produce_q, entry->consume_q);
2373 if (result < VMCI_SUCCESS)
2374 return result;
2375 }
2376
2377 memcpy(&entry->saved_produce_q, entry->produce_q->q_header,
2378 sizeof(entry->saved_produce_q));
2379 entry->produce_q->saved_header = &entry->saved_produce_q;
2380 memcpy(&entry->saved_consume_q, entry->consume_q->q_header,
2381 sizeof(entry->saved_consume_q));
2382 entry->consume_q->saved_header = &entry->saved_consume_q;
2383
2384 return VMCI_SUCCESS;
2385}
2386
2387
/*
 * Removes all references to the guest memory of a given queue pair, and
 * will move the queue pair from state *_MEM to *_NO_MEM.  It is usually
 * called when a VM is being quiesced where access to guest memory should
 * be avoided.
 */
2393int vmci_qp_broker_unmap(struct vmci_handle handle,
2394 struct vmci_ctx *context,
2395 u32 gid)
2396{
2397 struct qp_broker_entry *entry;
2398 const u32 context_id = vmci_ctx_get_id(context);
2399 bool is_local = false;
2400 int result;
2401
2402 if (vmci_handle_is_invalid(handle) || !context ||
2403 context_id == VMCI_INVALID_ID)
2404 return VMCI_ERROR_INVALID_ARGS;
2405
2406 mutex_lock(&qp_broker_list.mutex);
2407
2408 if (!vmci_ctx_qp_exists(context, handle)) {
2409 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2410 context_id, handle.context, handle.resource);
2411 result = VMCI_ERROR_NOT_FOUND;
2412 goto out;
2413 }
2414
2415 entry = qp_broker_handle_to_entry(handle);
2416 if (!entry) {
2417 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
2418 context_id, handle.context, handle.resource);
2419 result = VMCI_ERROR_NOT_FOUND;
2420 goto out;
2421 }
2422
2423 if (context_id != entry->create_id && context_id != entry->attach_id) {
2424 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2425 goto out;
2426 }
2427
2428 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
2429
2430 if (context_id != VMCI_HOST_CONTEXT_ID) {
2431 qp_acquire_queue_mutex(entry->produce_q);
2432 result = qp_save_headers(entry);
2433 if (result < VMCI_SUCCESS)
2434 pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
2435 handle.context, handle.resource, result);
2436
2437 qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q);

		/*
		 * On hosted, when we unmap queue pairs, the VMX will also
		 * unmap the guest memory, so we invalidate the previously
		 * registered memory here.
		 */
2446 qp_host_unregister_user_memory(entry->produce_q,
2447 entry->consume_q);
2448

		/* Move state from *_MEM to *_NO_MEM. */
		entry->state--;
2453
2454 qp_release_queue_mutex(entry->produce_q);
2455 }
2456
2457 result = VMCI_SUCCESS;
2458
2459 out:
2460 mutex_unlock(&qp_broker_list.mutex);
2461 return result;
2462}
2463
/*
 * Destroys all guest queue pair endpoints.  If active guest queue pairs
 * still exist, hypercalls to attempt detach from these queue pairs will be
 * made.  Any failure to detach is silently ignored.
 */
2470void vmci_qp_guest_endpoints_exit(void)
2471{
2472 struct qp_entry *entry;
2473 struct qp_guest_endpoint *ep;
2474
2475 mutex_lock(&qp_guest_endpoints.mutex);
2476
2477 while ((entry = qp_list_get_head(&qp_guest_endpoints))) {
		ep = container_of(entry, struct qp_guest_endpoint, qp);

		/* Don't make a hypercall for local queue_pairs. */
		if (!(entry->flags & VMCI_QPFLAG_LOCAL))
			qp_detatch_hypercall(entry->handle);

		/* We cannot fail the exit, so let's reset ref_count. */
		entry->ref_count = 0;
		qp_list_remove_entry(&qp_guest_endpoints, entry);
2487
2488 qp_guest_endpoint_destroy(ep);
2489 }
2490
2491 mutex_unlock(&qp_guest_endpoints.mutex);
2492}
2493
2494
/*
 * Helper routine that will lock the queue pair before subsequent
 * operations.  Note that the produce and consume queues share a mutex, so
 * locking via the produce queue is sufficient for both.
 */
2502static void qp_lock(const struct vmci_qp *qpair)
2503{
2504 qp_acquire_queue_mutex(qpair->produce_q);
2505}
2506
2507
2508
2509
2510
2511static void qp_unlock(const struct vmci_qp *qpair)
2512{
2513 qp_release_queue_mutex(qpair->produce_q);
2514}
2515
/*
 * The queue headers may not be mapped at all times.  If a queue is
 * currently not mapped, it will be attempted to be mapped.
 */
2520static int qp_map_queue_headers(struct vmci_queue *produce_q,
2521 struct vmci_queue *consume_q)
2522{
2523 int result;
2524
2525 if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
2526 result = qp_host_map_queues(produce_q, consume_q);
2527 if (result < VMCI_SUCCESS)
2528 return (produce_q->saved_header &&
2529 consume_q->saved_header) ?
2530 VMCI_ERROR_QUEUEPAIR_NOT_READY :
2531 VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2532 }
2533
2534 return VMCI_SUCCESS;
2535}
2536
/*
 * Helper routine that will retrieve the produce and consume headers of a
 * given queue pair.  If the guest memory of the queue pair is currently
 * not available, the saved queue headers will be returned, if these are
 * available.
 */
static int qp_get_queue_headers(const struct vmci_qp *qpair,
				struct vmci_queue_header **produce_q_header,
				struct vmci_queue_header **consume_q_header)
{
	int result;

	result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q);
	if (result == VMCI_SUCCESS) {
		*produce_q_header = qpair->produce_q->q_header;
		*consume_q_header = qpair->consume_q->q_header;
	} else if (qpair->produce_q->saved_header &&
		   qpair->consume_q->saved_header) {
		*produce_q_header = qpair->produce_q->saved_header;
		*consume_q_header = qpair->consume_q->saved_header;
		result = VMCI_SUCCESS;
	}

	return result;
}

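/*
 * Callback registered with the queue pair broker, invoked when a
 * queue pair that was previously not ready becomes ready again.
 * Wakes up all threads blocked in qp_wait_for_ready_queue() on this
 * queue pair.
 */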
static int qp_wakeup_cb(void *client_data)
{
	struct vmci_qp *qpair = (struct vmci_qp *)client_data;

	qp_lock(qpair);
	while (qpair->blocked > 0) {
		qpair->blocked--;
		qpair->generation++;
		wake_up(&qpair->event);
	}
	qp_unlock(qpair);

	return VMCI_SUCCESS;
}

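/*
 * Makes the calling thread wait for the queue pair to become ready:
 * drops the queue pair lock, sleeps until qp_wakeup_cb() bumps the
 * generation counter, then reacquires the lock.  Always returns true.
 */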
static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
{
	unsigned int generation;

	qpair->blocked++;
	generation = qpair->generation;
	qp_unlock(qpair);
	wait_event(qpair->event, generation != qpair->generation);
	qp_lock(qpair);

	return true;
}

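/*
 * Enqueues a given buffer to the produce queue using the provided
 * copy function.  Assumes the queue pair lock is held.  Copies as
 * many bytes as fit in the free space, wrapping around the end of the
 * queue when needed, and then advances the producer tail.  Returns
 * the number of bytes enqueued or an error code < 0.
 */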
static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
				 struct vmci_queue *consume_q,
				 const u64 produce_q_size,
				 const void *buf,
				 size_t buf_size,
				 vmci_memcpy_to_queue_func memcpy_to_queue)
{
	s64 free_space;
	u64 tail;
	size_t written;
	ssize_t result;

	result = qp_map_queue_headers(produce_q, consume_q);
	if (unlikely(result != VMCI_SUCCESS))
		return result;

	free_space = vmci_q_header_free_space(produce_q->q_header,
					      consume_q->q_header,
					      produce_q_size);
	if (free_space == 0)
		return VMCI_ERROR_QUEUEPAIR_NOSPACE;

	if (free_space < VMCI_SUCCESS)
		return (ssize_t) free_space;

	written = (size_t) (free_space > buf_size ? buf_size : free_space);
	tail = vmci_q_header_producer_tail(produce_q->q_header);
	if (likely(tail + written < produce_q_size)) {
		result = memcpy_to_queue(produce_q, tail, buf, 0, written);
	} else {
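		/* Tail pointer wraps around; copy in two chunks. */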
		const size_t tmp = (size_t) (produce_q_size - tail);

		result = memcpy_to_queue(produce_q, tail, buf, 0, tmp);
		if (result >= VMCI_SUCCESS)
			result = memcpy_to_queue(produce_q, 0, buf, tmp,
						 written - tmp);
	}

	if (result < VMCI_SUCCESS)
		return result;

	vmci_q_header_add_producer_tail(produce_q->q_header, written,
					produce_q_size);
	return written;
}

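/*
 * Dequeues data (if available) from the given consume queue using the
 * provided copy function.  Assumes the queue pair lock is held.
 * Handles wrap-around of the consume queue and advances the consumer
 * head only when update_consumer is true, so the same routine also
 * implements peeking.  Returns the number of bytes read or an error
 * code < 0.
 */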
static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
				 struct vmci_queue *consume_q,
				 const u64 consume_q_size,
				 void *buf,
				 size_t buf_size,
				 vmci_memcpy_from_queue_func memcpy_from_queue,
				 bool update_consumer)
{
	s64 buf_ready;
	u64 head;
	size_t read;
	ssize_t result;

	result = qp_map_queue_headers(produce_q, consume_q);
	if (unlikely(result != VMCI_SUCCESS))
		return result;

	buf_ready = vmci_q_header_buf_ready(consume_q->q_header,
					    produce_q->q_header,
					    consume_q_size);
	if (buf_ready == 0)
		return VMCI_ERROR_QUEUEPAIR_NODATA;

	if (buf_ready < VMCI_SUCCESS)
		return (ssize_t) buf_ready;

	read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
	head = vmci_q_header_consumer_head(produce_q->q_header);
	if (likely(head + read < consume_q_size)) {
		result = memcpy_from_queue(buf, 0, consume_q, head, read);
	} else {
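		/* Head pointer wraps around; copy out in two chunks. */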
		const size_t tmp = (size_t) (consume_q_size - head);

		result = memcpy_from_queue(buf, 0, consume_q, head, tmp);
		if (result >= VMCI_SUCCESS)
			result = memcpy_from_queue(buf, tmp, consume_q, 0,
						   read - tmp);
	}

	if (result < VMCI_SUCCESS)
		return result;

	if (update_consumer)
		vmci_q_header_add_consumer_head(produce_q->q_header,
						read, consume_q_size);

	return read;
}

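/*
 * vmci_qpair_alloc() - Allocates a queue pair.
 * @qpair:          Pointer for the new vmci_qp struct.
 * @handle:         Handle to track the resource.
 * @produce_qsize:  Desired size of the producer queue.
 * @consume_qsize:  Desired size of the consumer queue.
 * @peer:           Participant ID of the peer.
 * @flags:          VMCI flags.
 * @priv_flags:     VMCI privilege flags.
 *
 * This is the client interface for allocating the memory for a
 * vmci_qp structure and then attaching to the underlying queue pair.
 * Returns an error code < 0 if the queue pair cannot be allocated or
 * attached.
 */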
int vmci_qpair_alloc(struct vmci_qp **qpair,
		     struct vmci_handle *handle,
		     u64 produce_qsize,
		     u64 consume_qsize,
		     u32 peer,
		     u32 flags,
		     u32 priv_flags)
{
	struct vmci_qp *my_qpair;
	int retval;
	struct vmci_handle src = VMCI_INVALID_HANDLE;
	struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID);
	enum vmci_route route;
	vmci_event_release_cb wakeup_cb;
	void *client_data;

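	/*
	 * Restrict the size of an individual queue pair: reject size
	 * combinations whose sum overflows or exceeds
	 * VMCI_MAX_GUEST_QP_MEMORY, rather than attempting an
	 * oversized allocation.
	 */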
	if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) ||
	    produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY)
		return VMCI_ERROR_NO_RESOURCES;

	retval = vmci_route(&src, &dst, false, &route);
	if (retval < VMCI_SUCCESS)
		route = vmci_guest_code_active() ?
		    VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;

	if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) {
		pr_devel("NONBLOCK OR PINNED set\n");
		return VMCI_ERROR_INVALID_ARGS;
	}

	my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
	if (!my_qpair)
		return VMCI_ERROR_NO_MEM;

	my_qpair->produce_q_size = produce_qsize;
	my_qpair->consume_q_size = consume_qsize;
	my_qpair->peer = peer;
	my_qpair->flags = flags;
	my_qpair->priv_flags = priv_flags;

	wakeup_cb = NULL;
	client_data = NULL;

	if (route == VMCI_ROUTE_AS_HOST) {
		my_qpair->guest_endpoint = false;
		if (!(flags & VMCI_QPFLAG_LOCAL)) {
			my_qpair->blocked = 0;
			my_qpair->generation = 0;
			init_waitqueue_head(&my_qpair->event);
			wakeup_cb = qp_wakeup_cb;
			client_data = (void *)my_qpair;
		}
	} else {
		my_qpair->guest_endpoint = true;
	}

	retval = vmci_qp_alloc(handle,
			       &my_qpair->produce_q,
			       my_qpair->produce_q_size,
			       &my_qpair->consume_q,
			       my_qpair->consume_q_size,
			       my_qpair->peer,
			       my_qpair->flags,
			       my_qpair->priv_flags,
			       my_qpair->guest_endpoint,
			       wakeup_cb, client_data);

	if (retval < VMCI_SUCCESS) {
		kfree(my_qpair);
		return retval;
	}

	*qpair = my_qpair;
	my_qpair->handle = *handle;

	return retval;
}
EXPORT_SYMBOL_GPL(vmci_qpair_alloc);

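/*
 * vmci_qpair_detach() - Detaches the client from a queue pair.
 * @qpair:      Reference of a pointer to the qpair struct.
 *
 * This is the client interface for detaching from a VMCIQPair.  Note
 * that this routine frees the memory allocated for the vmci_qp
 * structure regardless of whether the detach itself succeeded.
 */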
int vmci_qpair_detach(struct vmci_qp **qpair)
{
	int result;
	struct vmci_qp *old_qpair;

	if (!qpair || !(*qpair))
		return VMCI_ERROR_INVALID_ARGS;

	old_qpair = *qpair;
	result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint);

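	/*
	 * The detach may fail, but there is little the caller could do
	 * with the qpair in that case, so clear and free it
	 * unconditionally rather than leak it.
	 */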
	memset(old_qpair, 0, sizeof(*old_qpair));
	old_qpair->handle = VMCI_INVALID_HANDLE;
	old_qpair->peer = VMCI_INVALID_ID;
	kfree(old_qpair);
	*qpair = NULL;

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_detach);

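/*
 * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer.
 * @qpair:          Pointer to the queue pair struct.
 * @producer_tail:  Reference used to store the producer tail index.
 * @consumer_head:  Reference used to store the consumer head index.
 *
 * This is the client interface for getting the current indexes of the
 * QPair from the point of view of the producer.  Either pointer may
 * be NULL if the caller does not need that index.  Returns a VMCI
 * error code on failure.
 */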
int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair,
				   u64 *producer_tail,
				   u64 *consumer_head)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	int result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		vmci_q_header_get_pointers(produce_q_header, consume_q_header,
					   producer_tail, consumer_head);
	qp_unlock(qpair);

	if (result == VMCI_SUCCESS &&
	    ((producer_tail && *producer_tail >= qpair->produce_q_size) ||
	     (consumer_head && *consumer_head >= qpair->produce_q_size)))
		return VMCI_ERROR_INVALID_SIZE;

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes);

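/*
 * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer.
 * @qpair:          Pointer to the queue pair struct.
 * @consumer_tail:  Reference used to store the consumer tail index.
 * @producer_head:  Reference used to store the producer head index.
 *
 * This is the client interface for getting the current indexes of the
 * QPair from the point of view of the consumer.  Either pointer may
 * be NULL if the caller does not need that index.  Returns a VMCI
 * error code on failure.
 */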
int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair,
				   u64 *consumer_tail,
				   u64 *producer_head)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	int result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		vmci_q_header_get_pointers(consume_q_header, produce_q_header,
					   consumer_tail, producer_head);
	qp_unlock(qpair);

	if (result == VMCI_SUCCESS &&
	    ((consumer_tail && *consumer_tail >= qpair->consume_q_size) ||
	     (producer_head && *producer_head >= qpair->consume_q_size)))
		return VMCI_ERROR_INVALID_SIZE;

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes);

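/*
 * vmci_qpair_produce_free_space() - Retrieves free space in producer queue.
 * @qpair:      Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of free space
 * in the QPair from the point of view of the producer.  Returns the
 * number of free bytes, 0 if the queue pair memory is currently
 * unavailable, or an error code < 0.
 */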
s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	s64 result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		result = vmci_q_header_free_space(produce_q_header,
						  consume_q_header,
						  qpair->produce_q_size);
	else
		result = 0;

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space);

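/*
 * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue.
 * @qpair:      Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of free space
 * in the QPair from the point of view of the consumer.  Returns the
 * number of free bytes, 0 if the queue pair memory is currently
 * unavailable, or an error code < 0.
 */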
s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	s64 result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		result = vmci_q_header_free_space(consume_q_header,
						  produce_q_header,
						  qpair->consume_q_size);
	else
		result = 0;

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space);

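/*
 * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from producer queue.
 * @qpair:      Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of enqueued
 * data in the QPair from the point of view of the producer.  Returns
 * the number of bytes ready to be read, 0 if the queue pair memory is
 * currently unavailable, or an error code < 0.
 */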
s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	s64 result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		result = vmci_q_header_buf_ready(produce_q_header,
						 consume_q_header,
						 qpair->produce_q_size);
	else
		result = 0;

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready);

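/*
 * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from consumer queue.
 * @qpair:      Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of enqueued
 * data in the QPair from the point of view of the consumer.  Returns
 * the number of bytes ready to be read, 0 if the queue pair memory is
 * currently unavailable, or an error code < 0.
 */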
s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	s64 result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		result = vmci_q_header_buf_ready(consume_q_header,
						 produce_q_header,
						 qpair->consume_q_size);
	else
		result = 0;

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready);

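/*
 * vmci_qpair_enqueue() - Throw data on the queue.
 * @qpair:      Pointer to the queue pair struct.
 * @buf:        Pointer to buffer containing data.
 * @buf_size:   Length of buffer.
 * @buf_type:   Buffer type (currently unused).
 *
 * This is the client interface for enqueueing data into the queue.
 * If the queue pair memory is temporarily unavailable, the call
 * blocks until it becomes ready again.  Returns the number of bytes
 * enqueued or an error code < 0.
 */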
ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
			   const void *buf,
			   size_t buf_size,
			   int buf_type)
{
	ssize_t result;

	if (!qpair || !buf)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_enqueue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->produce_q_size,
					   buf, buf_size,
					   qp_memcpy_to_queue);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_enqueue);

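/*
 * vmci_qpair_dequeue() - Get data from the queue.
 * @qpair:      Pointer to the queue pair struct.
 * @buf:        Pointer to buffer for the data.
 * @buf_size:   Length of buffer.
 * @buf_type:   Buffer type (currently unused).
 *
 * This is the client interface for dequeueing data from the queue.
 * If the queue pair memory is temporarily unavailable, the call
 * blocks until it becomes ready again.  Returns the number of bytes
 * dequeued or an error code < 0.
 */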
ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
			   void *buf,
			   size_t buf_size,
			   int buf_type)
{
	ssize_t result;

	if (!qpair || !buf)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_dequeue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->consume_q_size,
					   buf, buf_size,
					   qp_memcpy_from_queue, true);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_dequeue);

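/*
 * vmci_qpair_peek() - Peek at the data in the queue.
 * @qpair:      Pointer to the queue pair struct.
 * @buf:        Pointer to buffer for the data.
 * @buf_size:   Length of buffer.
 * @buf_type:   Buffer type (currently unused).
 *
 * This is the client interface for peeking into a queue: data is
 * copied out without updating the consumer head, so the same data can
 * be dequeued later.  Returns the number of bytes peeked or an error
 * code < 0.
 */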
ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
			void *buf,
			size_t buf_size,
			int buf_type)
{
	ssize_t result;

	if (!qpair || !buf)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_dequeue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->consume_q_size,
					   buf, buf_size,
					   qp_memcpy_from_queue, false);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_peek);

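/*
 * vmci_qpair_enquev() - Throw data on the queue using iov.
 * @qpair:      Pointer to the queue pair struct.
 * @iov:        Pointer to the iovec describing the data.
 * @iov_size:   Length of the iovec data.
 * @buf_type:   Buffer type (currently unused).
 *
 * This is the client interface for enqueueing data into the queue
 * when the data is described by an iovec rather than a linear buffer.
 * Returns the number of bytes enqueued or an error code < 0.
 */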
ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
			  void *iov,
			  size_t iov_size,
			  int buf_type)
{
	ssize_t result;

	if (!qpair || !iov)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_enqueue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->produce_q_size,
					   iov, iov_size,
					   qp_memcpy_to_queue_iov);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_enquev);

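/*
 * vmci_qpair_dequev() - Get data from the queue using iov.
 * @qpair:      Pointer to the queue pair struct.
 * @iov:        Pointer to the iovec receiving the data.
 * @iov_size:   Length of the iovec data.
 * @buf_type:   Buffer type (currently unused).
 *
 * This is the client interface for dequeueing data from the queue
 * into an iovec.  Returns the number of bytes dequeued or an error
 * code < 0.
 */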
ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
			  void *iov,
			  size_t iov_size,
			  int buf_type)
{
	ssize_t result;

	if (!qpair || !iov)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_dequeue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->consume_q_size,
					   iov, iov_size,
					   qp_memcpy_from_queue_iov,
					   true);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_dequev);

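/*
 * vmci_qpair_peekv() - Peek at the data in the queue using iov.
 * @qpair:      Pointer to the queue pair struct.
 * @iov:        Pointer to the iovec receiving the data.
 * @iov_size:   Length of the iovec data.
 * @buf_type:   Buffer type (currently unused).
 *
 * This is the client interface for peeking into a queue using an
 * iovec: data is copied out without updating the consumer head.
 * Returns the number of bytes peeked or an error code < 0.
 */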
ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
			 void *iov,
			 size_t iov_size,
			 int buf_type)
{
	ssize_t result;

	if (!qpair || !iov)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_dequeue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->consume_q_size,
					   iov, iov_size,
					   qp_memcpy_from_queue_iov,
					   false);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);
	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_peekv);