1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#ifndef _HYPERV_H
26#define _HYPERV_H
27
28#include <uapi/linux/hyperv.h>
29#include <uapi/asm/hyperv.h>
30
31#include <linux/types.h>
32#include <linux/scatterlist.h>
33#include <linux/list.h>
34#include <linux/timer.h>
35#include <linux/completion.h>
36#include <linux/device.h>
37#include <linux/mod_devicetable.h>
38#include <linux/interrupt.h>
39
40#define MAX_PAGE_BUFFER_COUNT 32
41#define MAX_MULTIPAGE_BUFFER_COUNT 32
42
43#pragma pack(push, 1)
44
45
/* Single-page buffer: a byte range (offset/len) within one guest page. */
struct hv_page_buffer {
	u32 len;
	u32 offset;
	u64 pfn;	/* guest page frame number */
};
51
52
/* Multiple-page buffer: a byte range spanning up to MAX_MULTIPAGE_BUFFER_COUNT pages. */
struct hv_multipage_buffer {
	/* Length and offset of the range, in bytes */
	u32 len;
	u32 offset;
	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};
59
60
61
62
63
64
/*
 * Variable-length multi-page buffer: same header as hv_multipage_buffer
 * but the PFN array is sized by the sender (flexible array member).
 */
struct hv_mpb_array {
	/* Length and offset of the range, in bytes */
	u32 len;
	u32 offset;
	u64 pfn_array[];
};
71
72
73#define MAX_PAGE_BUFFER_PACKET (0x18 + \
74 (sizeof(struct hv_page_buffer) * \
75 MAX_PAGE_BUFFER_COUNT))
76#define MAX_MULTIPAGE_BUFFER_PACKET (0x18 + \
77 sizeof(struct hv_multipage_buffer))
78
79
80#pragma pack(pop)
81
82struct hv_ring_buffer {
83
84 u32 write_index;
85
86
87 u32 read_index;
88
89 u32 interrupt_mask;
90
91
92
93
94
95
96
97
98
99
100 u32 pending_send_sz;
101
102 u32 reserved1[12];
103
104 union {
105 struct {
106 u32 feat_pending_send_sz:1;
107 };
108 u32 value;
109 } feature_bits;
110
111
112 u8 reserved2[4028];
113
114
115
116
117
118 u8 buffer[0];
119} __packed;
120
/* Guest-side bookkeeping wrapped around one shared hv_ring_buffer. */
struct hv_ring_buffer_info {
	struct hv_ring_buffer *ring_buffer;	/* the shared ring itself */
	u32 ring_size;			/* total ring size, in bytes */
	spinlock_t ring_lock;

	u32 ring_datasize;		/* size of the data area only */
	u32 ring_data_startoffset;
	u32 priv_write_index;		/* local, not yet host-visible; raw API */
	u32 priv_read_index;		/* local read cursor, see get_next_pkt_raw() */
	u32 cached_read_index;		/* snapshot, see init_cached_read_index() */
};
132
133
134
135
136
137
138
139
140static inline void
141hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
142 u32 *read, u32 *write)
143{
144 u32 read_loc, write_loc, dsize;
145
146
147 read_loc = rbi->ring_buffer->read_index;
148 write_loc = rbi->ring_buffer->write_index;
149 dsize = rbi->ring_datasize;
150
151 *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
152 read_loc - write_loc;
153 *read = dsize - *write;
154}
155
156static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
157{
158 u32 read_loc, write_loc, dsize, read;
159
160 dsize = rbi->ring_datasize;
161 read_loc = rbi->ring_buffer->read_index;
162 write_loc = READ_ONCE(rbi->ring_buffer->write_index);
163
164 read = write_loc >= read_loc ? (write_loc - read_loc) :
165 (dsize - read_loc) + write_loc;
166
167 return read;
168}
169
170static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
171{
172 u32 read_loc, write_loc, dsize, write;
173
174 dsize = rbi->ring_datasize;
175 read_loc = READ_ONCE(rbi->ring_buffer->read_index);
176 write_loc = rbi->ring_buffer->write_index;
177
178 write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
179 read_loc - write_loc;
180 return write;
181}
182
183static inline u32 hv_get_cached_bytes_to_write(
184 const struct hv_ring_buffer_info *rbi)
185{
186 u32 read_loc, write_loc, dsize, write;
187
188 dsize = rbi->ring_datasize;
189 read_loc = rbi->cached_read_index;
190 write_loc = rbi->ring_buffer->write_index;
191
192 write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
193 read_loc - write_loc;
194 return write;
195}
196
197
198
199
200
201
202
203
204
205
206
207#define VERSION_WS2008 ((0 << 16) | (13))
208#define VERSION_WIN7 ((1 << 16) | (1))
209#define VERSION_WIN8 ((2 << 16) | (4))
210#define VERSION_WIN8_1 ((3 << 16) | (0))
211#define VERSION_WIN10 ((4 << 16) | (0))
212
213#define VERSION_INVAL -1
214
215#define VERSION_CURRENT VERSION_WIN10
216
217
218#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
219
220
221#define VMBUS_PIPE_TYPE_BYTE 0x00000000
222#define VMBUS_PIPE_TYPE_MESSAGE 0x00000004
223
224
225#define MAX_USER_DEFINED_BYTES 120
226
227
228#define MAX_PIPE_USER_DEFINED_BYTES 116
229
230
231
232
233
/* Body of a channel offer sent by the host. */
struct vmbus_channel_offer {
	uuid_le if_type;	/* device/interface type GUID */
	uuid_le if_instance;	/* instance GUID for this particular offer */

	/* These two fields are not currently used */
	u64 reserved1;
	u64 reserved2;

	u16 chn_flags;		/* VMBUS_CHANNEL_* flags below */
	u16 mmio_megabytes;	/* MMIO space requested (per the field name) */

	union {
		/* Non-pipe offers: 120 bytes of user-defined data. */
		struct {
			unsigned char user_def[MAX_USER_DEFINED_BYTES];
		} std;

		/*
		 * Pipe offers (VMBUS_CHANNEL_NAMED_PIPE_MODE): the first
		 * 4 bytes select the pipe mode (VMBUS_PIPE_TYPE_*),
		 * leaving 116 user-defined bytes.
		 */
		struct {
			u32 pipe_mode;
			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
		} pipe;
	} u;

	/* sub_channel_index: 0 for the primary channel (presumably; matches
	 * its use with sub-channels elsewhere — verify in channel_mgmt.c). */
	u16 sub_channel_index;
	u16 reserved3;
} __packed;
271
272
273#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE 1
274#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES 2
275#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS 4
276#define VMBUS_CHANNEL_NAMED_PIPE_MODE 0x10
277#define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100
278#define VMBUS_CHANNEL_PARENT_OFFER 0x200
279#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400
280#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER 0x2000
281
/*
 * Header common to every VMBus packet.  offset8 and len8 are expressed
 * in 8-byte units (see the `<< 3` / `* 8` conversions in this file).
 */
struct vmpacket_descriptor {
	u16 type;	/* enum vmbus_packet_type */
	u16 offset8;	/* offset to the payload, in 8-byte units */
	u16 len8;	/* total packet length, in 8-byte units */
	u16 flags;	/* e.g. VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED */
	u64 trans_id;	/* opaque transaction id */
} __packed;
289
290struct vmpacket_header {
291 u32 prev_pkt_start_offset;
292 struct vmpacket_descriptor descriptor;
293} __packed;
294
295struct vmtransfer_page_range {
296 u32 byte_count;
297 u32 byte_offset;
298} __packed;
299
300struct vmtransfer_page_packet_header {
301 struct vmpacket_descriptor d;
302 u16 xfer_pageset_id;
303 u8 sender_owns_set;
304 u8 reserved;
305 u32 range_cnt;
306 struct vmtransfer_page_range ranges[1];
307} __packed;
308
309struct vmgpadl_packet_header {
310 struct vmpacket_descriptor d;
311 u32 gpadl;
312 u32 reserved;
313} __packed;
314
315struct vmadd_remove_transfer_page_set {
316 struct vmpacket_descriptor d;
317 u32 gpadl;
318 u16 xfer_pageset_id;
319 u16 reserved;
320} __packed;
321
322
323
324
325
326struct gpa_range {
327 u32 byte_count;
328 u32 byte_offset;
329 u64 pfn_array[0];
330};
331
332
333
334
335
336
337
338
339struct vmestablish_gpadl {
340 struct vmpacket_descriptor d;
341 u32 gpadl;
342 u32 range_cnt;
343 struct gpa_range range[1];
344} __packed;
345
346
347
348
349
350struct vmteardown_gpadl {
351 struct vmpacket_descriptor d;
352 u32 gpadl;
353 u32 reserved;
354} __packed;
355
356
357
358
359
360struct vmdata_gpa_direct {
361 struct vmpacket_descriptor d;
362 u32 reserved;
363 u32 range_cnt;
364 struct gpa_range range[1];
365} __packed;
366
367
368struct vmadditional_data {
369 struct vmpacket_descriptor d;
370 u64 total_bytes;
371 u32 offset;
372 u32 byte_cnt;
373 unsigned char data[1];
374} __packed;
375
376union vmpacket_largest_possible_header {
377 struct vmpacket_descriptor simple_hdr;
378 struct vmtransfer_page_packet_header xfer_page_hdr;
379 struct vmgpadl_packet_header gpadl_hdr;
380 struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
381 struct vmestablish_gpadl establish_gpadl_hdr;
382 struct vmteardown_gpadl teardown_gpadl_hdr;
383 struct vmdata_gpa_direct data_gpa_direct_hdr;
384};
385
/*
 * Accessors for a received packet.  The original forms cast __packet to
 * a struct *value* and then applied "->" (a compile error), and
 * VMPACKET_TRANSFER_MODE referenced the nonexistent type
 * "struct IMPACT"; both are fixed to proper pointer casts.
 * offset8/len8 are in 8-byte units.
 */
#define VMPACKET_DATA_START_ADDRESS(__packet)	\
	(void *)(((unsigned char *)(__packet)) +	\
	 ((const struct vmpacket_descriptor *)(__packet))->offset8 * 8)

#define VMPACKET_DATA_LENGTH(__packet)		\
	((((const struct vmpacket_descriptor *)(__packet))->len8 -	\
	  ((const struct vmpacket_descriptor *)(__packet))->offset8) * 8)

#define VMPACKET_TRANSFER_MODE(__packet)	\
	(((const struct vmpacket_descriptor *)(__packet))->type)
396
397enum vmbus_packet_type {
398 VM_PKT_INVALID = 0x0,
399 VM_PKT_SYNCH = 0x1,
400 VM_PKT_ADD_XFER_PAGESET = 0x2,
401 VM_PKT_RM_XFER_PAGESET = 0x3,
402 VM_PKT_ESTABLISH_GPADL = 0x4,
403 VM_PKT_TEARDOWN_GPADL = 0x5,
404 VM_PKT_DATA_INBAND = 0x6,
405 VM_PKT_DATA_USING_XFER_PAGES = 0x7,
406 VM_PKT_DATA_USING_GPADL = 0x8,
407 VM_PKT_DATA_USING_GPA_DIRECT = 0x9,
408 VM_PKT_CANCEL_REQUEST = 0xa,
409 VM_PKT_COMP = 0xb,
410 VM_PKT_DATA_USING_ADDITIONAL_PKT = 0xc,
411 VM_PKT_ADDITIONAL_DATA = 0xd
412};
413
414#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED 1
415
416
417
418enum vmbus_channel_message_type {
419 CHANNELMSG_INVALID = 0,
420 CHANNELMSG_OFFERCHANNEL = 1,
421 CHANNELMSG_RESCIND_CHANNELOFFER = 2,
422 CHANNELMSG_REQUESTOFFERS = 3,
423 CHANNELMSG_ALLOFFERS_DELIVERED = 4,
424 CHANNELMSG_OPENCHANNEL = 5,
425 CHANNELMSG_OPENCHANNEL_RESULT = 6,
426 CHANNELMSG_CLOSECHANNEL = 7,
427 CHANNELMSG_GPADL_HEADER = 8,
428 CHANNELMSG_GPADL_BODY = 9,
429 CHANNELMSG_GPADL_CREATED = 10,
430 CHANNELMSG_GPADL_TEARDOWN = 11,
431 CHANNELMSG_GPADL_TORNDOWN = 12,
432 CHANNELMSG_RELID_RELEASED = 13,
433 CHANNELMSG_INITIATE_CONTACT = 14,
434 CHANNELMSG_VERSION_RESPONSE = 15,
435 CHANNELMSG_UNLOAD = 16,
436 CHANNELMSG_UNLOAD_RESPONSE = 17,
437 CHANNELMSG_18 = 18,
438 CHANNELMSG_19 = 19,
439 CHANNELMSG_20 = 20,
440 CHANNELMSG_TL_CONNECT_REQUEST = 21,
441 CHANNELMSG_COUNT
442};
443
444struct vmbus_channel_message_header {
445 enum vmbus_channel_message_type msgtype;
446 u32 padding;
447} __packed;
448
449
450struct vmbus_channel_query_vmbus_version {
451 struct vmbus_channel_message_header header;
452 u32 version;
453} __packed;
454
455
456struct vmbus_channel_version_supported {
457 struct vmbus_channel_message_header header;
458 u8 version_supported;
459} __packed;
460
461
462struct vmbus_channel_offer_channel {
463 struct vmbus_channel_message_header header;
464 struct vmbus_channel_offer offer;
465 u32 child_relid;
466 u8 monitorid;
467
468
469
470 u8 monitor_allocated:1;
471 u8 reserved:7;
472
473
474
475
476
477
478
479
480
481
482
483 u16 is_dedicated_interrupt:1;
484 u16 reserved1:15;
485 u32 connection_id;
486} __packed;
487
488
489struct vmbus_channel_rescind_offer {
490 struct vmbus_channel_message_header header;
491 u32 child_relid;
492} __packed;
493
494
495
496
497
498
499
500
501
502
503
/* Open Channel parameters (guest -> host). */
struct vmbus_channel_open_channel {
	struct vmbus_channel_message_header header;

	/* Identifies the specific VMBus channel that is being opened. */
	u32 child_relid;

	/* ID making a particular open request at a channel offer unique. */
	u32 openid;

	/* GPADL handle for the ring buffer memory. */
	u32 ringbuffer_gpadlhandle;

	/*
	 * Target virtual processor on which to deliver host-to-guest
	 * interrupts for this channel (0 preserves the pre-win8
	 * behavior of delivering on cpu 0 — presumably; verify against
	 * the VMBus protocol version in use).
	 */
	u32 target_vp;

	/*
	 * The downstream (host-to-guest) ring buffer begins at this
	 * page offset within the GPADL; the upstream ring starts at
	 * offset zero.
	 */
	u32 downstream_ringbuffer_pageoffset;

	/* User-specific data to be passed along to the server endpoint. */
	unsigned char userdata[MAX_USER_DEFINED_BYTES];
} __packed;
536
537
538struct vmbus_channel_open_result {
539 struct vmbus_channel_message_header header;
540 u32 child_relid;
541 u32 openid;
542 u32 status;
543} __packed;
544
545
546struct vmbus_channel_close_channel {
547 struct vmbus_channel_message_header header;
548 u32 child_relid;
549} __packed;
550
551
552#define GPADL_TYPE_RING_BUFFER 1
553#define GPADL_TYPE_SERVER_SAVE_AREA 2
554#define GPADL_TYPE_TRANSACTION 8
555
556
557
558
559
560
561
562struct vmbus_channel_gpadl_header {
563 struct vmbus_channel_message_header header;
564 u32 child_relid;
565 u32 gpadl;
566 u16 range_buflen;
567 u16 rangecount;
568 struct gpa_range range[0];
569} __packed;
570
571
572struct vmbus_channel_gpadl_body {
573 struct vmbus_channel_message_header header;
574 u32 msgnumber;
575 u32 gpadl;
576 u64 pfn[0];
577} __packed;
578
579struct vmbus_channel_gpadl_created {
580 struct vmbus_channel_message_header header;
581 u32 child_relid;
582 u32 gpadl;
583 u32 creation_status;
584} __packed;
585
586struct vmbus_channel_gpadl_teardown {
587 struct vmbus_channel_message_header header;
588 u32 child_relid;
589 u32 gpadl;
590} __packed;
591
592struct vmbus_channel_gpadl_torndown {
593 struct vmbus_channel_message_header header;
594 u32 gpadl;
595} __packed;
596
597struct vmbus_channel_relid_released {
598 struct vmbus_channel_message_header header;
599 u32 child_relid;
600} __packed;
601
602struct vmbus_channel_initiate_contact {
603 struct vmbus_channel_message_header header;
604 u32 vmbus_version_requested;
605 u32 target_vcpu;
606 u64 interrupt_page;
607 u64 monitor_page1;
608 u64 monitor_page2;
609} __packed;
610
611
612struct vmbus_channel_tl_connect_request {
613 struct vmbus_channel_message_header header;
614 uuid_le guest_endpoint_id;
615 uuid_le host_service_id;
616} __packed;
617
618struct vmbus_channel_version_response {
619 struct vmbus_channel_message_header header;
620 u8 version_supported;
621} __packed;
622
623enum vmbus_channel_state {
624 CHANNEL_OFFER_STATE,
625 CHANNEL_OPENING_STATE,
626 CHANNEL_OPEN_STATE,
627 CHANNEL_OPENED_STATE,
628};
629
630
631
632
633
/* Bookkeeping wrapper around an in-flight channel management message. */
struct vmbus_channel_msginfo {
	/* Link in the pending-message list. */
	struct list_head msglistentry;

	/* List of sub-messages (used by the GPADL body logic). */
	struct list_head submsglist;

	/* Synchronizes the request with its response, when needed. */
	struct completion waitevent;
	struct vmbus_channel *waiting_channel;
	union {
		struct vmbus_channel_version_supported version_supported;
		struct vmbus_channel_open_result open_result;
		struct vmbus_channel_gpadl_torndown gpadl_torndown;
		struct vmbus_channel_gpadl_created gpadl_created;
		struct vmbus_channel_version_response version_response;
	} response;

	u32 msgsize;
	/*
	 * The message that goes out on the "wire"; contains at minimum a
	 * struct vmbus_channel_message_header.  Kept as [0] (not a true
	 * flexible array member) because this struct is embedded as a
	 * non-last member in struct vmbus_close_msg.
	 */
	unsigned char msg[0];
};
659
660struct vmbus_close_msg {
661 struct vmbus_channel_msginfo info;
662 struct vmbus_channel_close_channel msg;
663};
664
665
666union hv_connection_id {
667 u32 asu32;
668 struct {
669 u32 id:24;
670 u32 reserved:8;
671 } u;
672};
673
674
675struct hv_input_signal_event {
676 union hv_connection_id connectionid;
677 u16 flag_number;
678 u16 rsvdz;
679};
680
681struct hv_input_signal_event_buffer {
682 u64 align8;
683 struct hv_input_signal_event event;
684};
685
686enum hv_numa_policy {
687 HV_BALANCED = 0,
688 HV_LOCALIZED,
689};
690
691enum vmbus_device_type {
692 HV_IDE = 0,
693 HV_SCSI,
694 HV_FC,
695 HV_NIC,
696 HV_ND,
697 HV_PCIE,
698 HV_FB,
699 HV_KBD,
700 HV_MOUSE,
701 HV_KVP,
702 HV_TS,
703 HV_HB,
704 HV_SHUTDOWN,
705 HV_FCOPY,
706 HV_BACKUP,
707 HV_DM,
708 HV_UNKNOWN,
709};
710
711struct vmbus_device {
712 u16 dev_type;
713 uuid_le guid;
714 bool perf_device;
715};
716
/* Per-channel state maintained by the VMBus core. */
struct vmbus_channel {
	/* Link in the global channel list. */
	struct list_head listentry;

	struct hv_device *device_obj;

	enum vmbus_channel_state state;

	/* The host's offer message that created this channel. */
	struct vmbus_channel_offer_channel offermsg;

	/*
	 * Monitor group/bit for this channel — presumably derived from
	 * offermsg.monitorid (verify in channel_mgmt.c).
	 */
	u8 monitor_grp;
	u8 monitor_bit;

	bool rescind;	/* got a rescind message from the host */

	u32 ringbuffer_gpadlhandle;

	/* Allocated memory for the ring buffers. */
	void *ringbuffer_pages;
	u32 ringbuffer_pagecount;
	struct hv_ring_buffer_info outbound;	/* guest -> host */
	struct hv_ring_buffer_info inbound;	/* host -> guest */
	spinlock_t inbound_lock;

	struct vmbus_close_msg close_msg;

	/* Channel callback; the tasklet runs it in softirq context. */
	struct tasklet_struct callback_event;
	void (*onchannel_callback)(void *context);
	void *channel_callback_context;

	/*
	 * How incoming data is handed to the channel owner:
	 * batched via the tasklet, called directly, or handled in the
	 * ISR path (see set_channel_read_mode()).
	 */
	enum hv_callback_mode {
		HV_CALL_BATCHED,
		HV_CALL_DIRECT,
		HV_CALL_ISR
	} callback_mode;

	bool is_dedicated_interrupt;
	struct hv_input_signal_event_buffer sig_buf;
	struct hv_input_signal_event *sig_event;

	/* Target virtual processor for host-to-guest interrupts. */
	u32 target_vp;
	/* The corresponding CPU id. */
	u32 target_cpu;
	/* CPUs in this channel's NUMA node already hosting channels. */
	struct cpumask alloced_cpus_in_node;
	int numa_node;

	/* Invoked when the host offers a sub-channel of this channel. */
	void (*sc_creation_callback)(struct vmbus_channel *new_sc);

	/* Invoked when the host rescinds the channel. */
	void (*chn_rescind_callback)(struct vmbus_channel *channel);

	/* Protects the sub-channel state below — presumably; verify callers. */
	spinlock_t lock;

	/* Sub-channels of this (primary) channel. */
	struct list_head sc_list;

	/* Current number of sub-channels. */
	int num_sc;

	/* Cursor used by vmbus_get_outgoing_channel() — NOTE(review):
	 * "oc" looks like "outgoing channel"; confirm in channel_mgmt.c. */
	int next_oc;

	/* NULL for a primary channel; otherwise points at the primary. */
	struct vmbus_channel *primary_channel;

	/* Opaque state owned by the channel's driver. */
	void *per_channel_state;

	/* Link in the per-CPU channel list. */
	struct list_head percpu_list;

	/* RCU-deferred freeing of this structure. */
	struct rcu_head rcu;

	/* Opt this channel into low-latency handling (see set_low_latency_mode()). */
	bool low_latency;

	/* Policy used when binding channels to CPUs (see set_channel_affinity_state()). */
	enum hv_numa_policy affinity_policy;

};
894
895static inline bool is_hvsock_channel(const struct vmbus_channel *c)
896{
897 return !!(c->offermsg.offer.chn_flags &
898 VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
899}
900
/* Select the NUMA/CPU placement policy used for this channel. */
static inline void set_channel_affinity_state(struct vmbus_channel *c,
					      enum hv_numa_policy policy)
{
	c->affinity_policy = policy;
}
906
/* Choose how incoming data callbacks are delivered (batched/direct/ISR). */
static inline void set_channel_read_mode(struct vmbus_channel *c,
					 enum hv_callback_mode mode)
{
	c->callback_mode = mode;
}
912
/* Attach opaque driver-owned state to the channel. */
static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
{
	c->per_channel_state = s;
}
917
/* Retrieve the driver-owned state set by set_per_channel_state(). */
static inline void *get_per_channel_state(struct vmbus_channel *c)
{
	return c->per_channel_state;
}
922
/*
 * Ask the reader (the host) to signal us once at least @size bytes of
 * write space become free in the outbound ring; 0 disables the request.
 * See pending_send_sz in struct hv_ring_buffer.
 */
static inline void set_channel_pending_send_size(struct vmbus_channel *c,
						 u32 size)
{
	c->outbound.ring_buffer->pending_send_sz = size;
}
928
/* Opt the channel into low-latency handling. */
static inline void set_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = true;
}
933
/* Revert the channel to normal (non-low-latency) handling. */
static inline void clear_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = false;
}
938
939void vmbus_onmessage(void *context);
940
941int vmbus_request_offers(void);
942
943
944
945
946
947void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
948 void (*sc_cr_cb)(struct vmbus_channel *new_sc));
949
950void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
951 void (*chn_rescind_cb)(struct vmbus_channel *));
952
953
954
955
956
957
958
959struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary);
960
961
962
963
964
965
966
967
968
969
970
971
972bool vmbus_are_subchannels_present(struct vmbus_channel *primary);
973
974
975struct vmbus_channel_packet_page_buffer {
976 u16 type;
977 u16 dataoffset8;
978 u16 length8;
979 u16 flags;
980 u64 transactionid;
981 u32 reserved;
982 u32 rangecount;
983 struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
984} __packed;
985
986
987struct vmbus_channel_packet_multipage_buffer {
988 u16 type;
989 u16 dataoffset8;
990 u16 length8;
991 u16 flags;
992 u64 transactionid;
993 u32 reserved;
994 u32 rangecount;
995 struct hv_multipage_buffer range;
996} __packed;
997
998
999struct vmbus_packet_mpb_array {
1000 u16 type;
1001 u16 dataoffset8;
1002 u16 length8;
1003 u16 flags;
1004 u64 transactionid;
1005 u32 reserved;
1006 u32 rangecount;
1007 struct hv_mpb_array range;
1008} __packed;
1009
1010
1011extern int vmbus_open(struct vmbus_channel *channel,
1012 u32 send_ringbuffersize,
1013 u32 recv_ringbuffersize,
1014 void *userdata,
1015 u32 userdatalen,
1016 void(*onchannel_callback)(void *context),
1017 void *context);
1018
1019extern void vmbus_close(struct vmbus_channel *channel);
1020
1021extern int vmbus_sendpacket(struct vmbus_channel *channel,
1022 void *buffer,
1023 u32 bufferLen,
1024 u64 requestid,
1025 enum vmbus_packet_type type,
1026 u32 flags);
1027
1028extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel,
1029 void *buffer,
1030 u32 bufferLen,
1031 u64 requestid,
1032 enum vmbus_packet_type type,
1033 u32 flags);
1034
1035extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
1036 struct hv_page_buffer pagebuffers[],
1037 u32 pagecount,
1038 void *buffer,
1039 u32 bufferlen,
1040 u64 requestid);
1041
1042extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
1043 struct hv_page_buffer pagebuffers[],
1044 u32 pagecount,
1045 void *buffer,
1046 u32 bufferlen,
1047 u64 requestid,
1048 u32 flags);
1049
1050extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
1051 struct hv_multipage_buffer *mpb,
1052 void *buffer,
1053 u32 bufferlen,
1054 u64 requestid);
1055
1056extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
1057 struct vmbus_packet_mpb_array *mpb,
1058 u32 desc_size,
1059 void *buffer,
1060 u32 bufferlen,
1061 u64 requestid);
1062
1063extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
1064 void *kbuffer,
1065 u32 size,
1066 u32 *gpadl_handle);
1067
1068extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
1069 u32 gpadl_handle);
1070
1071extern int vmbus_recvpacket(struct vmbus_channel *channel,
1072 void *buffer,
1073 u32 bufferlen,
1074 u32 *buffer_actual_len,
1075 u64 *requestid);
1076
1077extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
1078 void *buffer,
1079 u32 bufferlen,
1080 u32 *buffer_actual_len,
1081 u64 *requestid);
1082
1083
1084extern void vmbus_ontimer(unsigned long data);
1085
1086
/* Driver-model driver object for a VMBus device driver. */
struct hv_driver {
	const char *name;

	/*
	 * Set when this driver handles hv_sock offers — presumably
	 * those are matched specially rather than via id_table
	 * (verify in vmbus_drv.c).
	 */
	bool hvsock;

	/* The device type GUID this driver supports. */
	uuid_le dev_type;
	const struct hv_vmbus_device_id *id_table;

	struct device_driver driver;

	/* Dynamically added device IDs, protected by the lock. */
	struct {
		spinlock_t lock;
		struct list_head list;
	} dynids;

	int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
	int (*remove)(struct hv_device *);
	void (*shutdown)(struct hv_device *);

};
1121
1122
/* Base device object for a device on the VMBus. */
struct hv_device {
	/* The device type GUID of this device. */
	uuid_le dev_type;

	/* The instance GUID of this device. */
	uuid_le dev_instance;
	u16 vendor_id;
	u16 device_id;

	struct device device;

	struct vmbus_channel *channel;
};
1136
1137
/* Upcast from the embedded struct device to its hv_device. */
static inline struct hv_device *device_to_hv_device(struct device *d)
{
	return container_of(d, struct hv_device, device);
}
1142
/* Upcast from the embedded struct device_driver to its hv_driver. */
static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
{
	return container_of(d, struct hv_driver, driver);
}
1147
/* Store driver-private data on the underlying struct device. */
static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
	dev_set_drvdata(&dev->device, data);
}
1152
/* Retrieve driver-private data set by hv_set_drvdata(). */
static inline void *hv_get_drvdata(struct hv_device *dev)
{
	return dev_get_drvdata(&dev->device);
}
1157
1158
1159#define vmbus_driver_register(driver) \
1160 __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
1161int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
1162 struct module *owner,
1163 const char *mod_name);
1164void vmbus_driver_unregister(struct hv_driver *hv_driver);
1165
1166void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
1167
1168int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
1169 resource_size_t min, resource_size_t max,
1170 resource_size_t size, resource_size_t align,
1171 bool fb_overlap_ok);
1172void vmbus_free_mmio(resource_size_t start, resource_size_t size);
1173int vmbus_cpu_number_to_vp_number(int cpu_number);
1174u64 hv_do_hypercall(u64 control, void *input, void *output);
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184#define HV_NIC_GUID \
1185 .guid = UUID_LE(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
1186 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)
1187
1188
1189
1190
1191
1192#define HV_IDE_GUID \
1193 .guid = UUID_LE(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
1194 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
1195
1196
1197
1198
1199
1200#define HV_SCSI_GUID \
1201 .guid = UUID_LE(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
1202 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
1203
1204
1205
1206
1207
1208#define HV_SHUTDOWN_GUID \
1209 .guid = UUID_LE(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
1210 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)
1211
1212
1213
1214
1215
1216#define HV_TS_GUID \
1217 .guid = UUID_LE(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
1218 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
1219
1220
1221
1222
1223
1224#define HV_HEART_BEAT_GUID \
1225 .guid = UUID_LE(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
1226 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
1227
1228
1229
1230
1231
1232#define HV_KVP_GUID \
1233 .guid = UUID_LE(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
1234 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)
1235
1236
1237
1238
1239
1240#define HV_DM_GUID \
1241 .guid = UUID_LE(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
1242 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
1243
1244
1245
1246
1247
1248#define HV_MOUSE_GUID \
1249 .guid = UUID_LE(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
1250 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)
1251
1252
1253
1254
1255
1256#define HV_KBD_GUID \
1257 .guid = UUID_LE(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
1258 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)
1259
1260
1261
1262
1263#define HV_VSS_GUID \
1264 .guid = UUID_LE(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
1265 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
1266
1267
1268
1269
1270#define HV_SYNTHVID_GUID \
1271 .guid = UUID_LE(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
1272 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)
1273
1274
1275
1276
1277
1278#define HV_SYNTHFC_GUID \
1279 .guid = UUID_LE(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
1280 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)
1281
1282
1283
1284
1285
1286
1287#define HV_FCOPY_GUID \
1288 .guid = UUID_LE(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
1289 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)
1290
1291
1292
1293
1294
1295#define HV_ND_GUID \
1296 .guid = UUID_LE(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
1297 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)
1298
1299
1300
1301
1302
1303
1304#define HV_PCIE_GUID \
1305 .guid = UUID_LE(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
1306 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317#define HV_AVMA1_GUID \
1318 .guid = UUID_LE(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
1319 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)
1320
1321#define HV_AVMA2_GUID \
1322 .guid = UUID_LE(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
1323 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)
1324
1325#define HV_RDV_GUID \
1326 .guid = UUID_LE(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
1327 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
1328
1329
1330
1331
1332
1333#define ICMSGTYPE_NEGOTIATE 0
1334#define ICMSGTYPE_HEARTBEAT 1
1335#define ICMSGTYPE_KVPEXCHANGE 2
1336#define ICMSGTYPE_SHUTDOWN 3
1337#define ICMSGTYPE_TIMESYNC 4
1338#define ICMSGTYPE_VSS 5
1339
1340#define ICMSGHDRFLAG_TRANSACTION 1
1341#define ICMSGHDRFLAG_REQUEST 2
1342#define ICMSGHDRFLAG_RESPONSE 4
1343
1344
1345
1346
1347
1348
1349
1350
/* State for one hv_utils integration-service (KVP, shutdown, ...). */
struct hv_util_service {
	u8 *recv_buffer;
	void *channel;
	void (*util_cb)(void *);			/* per-message callback */
	int (*util_init)(struct hv_util_service *);
	void (*util_deinit)(void);
};
1358
1359struct vmbuspipe_hdr {
1360 u32 flags;
1361 u32 msgsize;
1362} __packed;
1363
1364struct ic_version {
1365 u16 major;
1366 u16 minor;
1367} __packed;
1368
/* Header of an integration-component message (ICMSGTYPE_*). */
struct icmsg_hdr {
	struct ic_version icverframe;
	u16 icmsgtype;			/* ICMSGTYPE_* */
	struct ic_version icvermsg;
	u16 icmsgsize;
	u32 status;
	u8 ictransaction_id;
	u8 icflags;			/* ICMSGHDRFLAG_* */
	u8 reserved[2];
} __packed;
1379
1380struct icmsg_negotiate {
1381 u16 icframe_vercnt;
1382 u16 icmsg_vercnt;
1383 u32 reserved;
1384 struct ic_version icversion_data[1];
1385} __packed;
1386
1387struct shutdown_msg_data {
1388 u32 reason_code;
1389 u32 timeout_seconds;
1390 u32 flags;
1391 u8 display_message[2048];
1392} __packed;
1393
1394struct heartbeat_msg_data {
1395 u64 seq_num;
1396 u32 reserved[8];
1397} __packed;
1398
1399
1400#define ICTIMESYNCFLAG_PROBE 0
1401#define ICTIMESYNCFLAG_SYNC 1
1402#define ICTIMESYNCFLAG_SAMPLE 2
1403
1404#ifdef __x86_64__
1405#define WLTIMEDELTA 116444736000000000L
1406#else
1407#define WLTIMEDELTA 116444736000000000LL
1408#endif
1409
1410struct ictimesync_data {
1411 u64 parenttime;
1412 u64 childtime;
1413 u64 roundtriptime;
1414 u8 flags;
1415} __packed;
1416
1417struct ictimesync_ref_data {
1418 u64 parenttime;
1419 u64 vmreferencetime;
1420 u8 flags;
1421 char leapflags;
1422 char stratum;
1423 u8 reserved[3];
1424} __packed;
1425
1426struct hyperv_service_callback {
1427 u8 msg_type;
1428 char *log_msg;
1429 uuid_le data;
1430 struct vmbus_channel *channel;
1431 void (*callback) (void *context);
1432};
1433
1434#define MAX_SRV_VER 0x7ffffff
1435extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
1436 const int *fw_version, int fw_vercnt,
1437 const int *srv_version, int srv_vercnt,
1438 int *nego_fw_version, int *nego_srv_version);
1439
1440void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
1441
1442void vmbus_setevent(struct vmbus_channel *channel);
1443
1444
1445
1446
1447extern __u32 vmbus_proto_version;
1448
1449int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
1450 const uuid_le *shv_host_servie_id);
1451void vmbus_set_event(struct vmbus_channel *channel);
1452
1453
/* Start of the data area of the ring buffer. */
static inline void *
hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_buffer->buffer;
}
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
/*
 * After the caller has advanced the inbound read_index, decide whether
 * the host (the writer) is blocked waiting for write space and, if our
 * read just created enough room, signal it.
 */
static inline void hv_signal_on_read(struct vmbus_channel *channel)
{
	u32 cur_write_sz, cached_write_sz;
	u32 pending_sz;
	struct hv_ring_buffer_info *rbi = &channel->inbound;

	/*
	 * Full barrier: order the caller's read_index update before our
	 * read of pending_send_sz below, so that we and the writer
	 * cannot both miss the transition and leave the writer blocked.
	 */
	virt_mb();

	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);

	/* Writer has not asked to be signaled. */
	if (pending_sz == 0)
		return;

	cur_write_sz = hv_get_bytes_to_write(rbi);

	/* Still not enough room; the writer would remain blocked. */
	if (cur_write_sz < pending_sz)
		return;

	/*
	 * Signal only on the transition: before this read pass (per the
	 * cached read index) there was not enough room, and now there is.
	 */
	cached_write_sz = hv_get_cached_bytes_to_write(rbi);
	if (cached_write_sz < pending_sz)
		vmbus_setevent(channel);

	return;
}
1510
/*
 * Snapshot the committed inbound read_index before a read pass; used by
 * hv_get_cached_bytes_to_write()/hv_signal_on_read() to detect the
 * "not enough room" -> "enough room" transition.
 */
static inline void
init_cached_read_index(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;

	rbi->cached_read_index = rbi->ring_buffer->read_index;
}
1518
1519
1520
1521
/* Mask "data available" interrupts from the host while we drain the ring. */
static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;

	/* Make sure the mask is visible before we start reading. */
	virt_mb();
}
1529
1530
1531
1532
/* Re-enable host interrupts; returns bytes that arrived in the race window. */
static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	/* Unmask "data available" interrupts. */
	rbi->ring_buffer->interrupt_mask = 0;

	/* Order the unmask before the emptiness re-check below. */
	virt_mb();

	/*
	 * The host may have written more data after our last read but
	 * before the unmask became visible (and thus not interrupted).
	 * A non-zero return tells the caller to process the ring again.
	 */
	return hv_get_bytes_to_read(rbi);
}
1548
1549
1550
1551
1552#define VMBUS_PKT_TRAILER 8
1553
/*
 * Peek at the next packet in the inbound ring without consuming it.
 * Returns NULL when no complete descriptor lies past the private
 * (uncommitted) read cursor.  The caller advances past the packet with
 * put_pkt_raw() and publishes progress with commit_rd_index().
 */
static inline struct vmpacket_descriptor *
get_next_pkt_raw(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *ring_info = &channel->inbound;
	u32 priv_read_loc = ring_info->priv_read_index;
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 dsize = ring_info->ring_datasize;

	/*
	 * delta = bytes already consumed past the committed read_index
	 * but not yet committed, accounting for wrap-around.
	 */
	u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ?
		priv_read_loc - ring_info->ring_buffer->read_index :
		(dsize - ring_info->ring_buffer->read_index) + priv_read_loc;
	/* Unconsumed bytes remaining beyond the private cursor. */
	u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta);

	if (bytes_avail_toread < sizeof(struct vmpacket_descriptor))
		return NULL;

	return ring_buffer + priv_read_loc;
}
1576
1577
1578
1579
1580
1581
1582static inline void put_pkt_raw(struct vmbus_channel *channel,
1583 struct vmpacket_descriptor *desc)
1584{
1585 struct hv_ring_buffer_info *ring_info = &channel->inbound;
1586 u32 packetlen = desc->len8 << 3;
1587 u32 dsize = ring_info->ring_datasize;
1588
1589
1590
1591
1592 ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
1593 ring_info->priv_read_index %= dsize;
1594}
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
/*
 * Publish the private read cursor as the committed read_index, making
 * the freed space visible to the host, then signal the host if it was
 * blocked waiting for room (see hv_signal_on_read()).
 */
static inline void commit_rd_index(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *ring_info = &channel->inbound;

	/*
	 * Make sure all reads of packet payload complete before the new
	 * read_index becomes visible: the host may overwrite the space
	 * as soon as it observes the update.
	 */
	virt_rmb();
	ring_info->ring_buffer->read_index = ring_info->priv_read_index;

	hv_signal_on_read(channel);
}
1622
1623
1624#endif
1625