1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#ifndef _HYPERV_H
26#define _HYPERV_H
27
28#include <uapi/linux/hyperv.h>
29#include <uapi/asm/hyperv.h>
30
31#include <linux/types.h>
32#include <linux/scatterlist.h>
33#include <linux/list.h>
34#include <linux/timer.h>
35#include <linux/workqueue.h>
36#include <linux/completion.h>
37#include <linux/device.h>
38#include <linux/mod_devicetable.h>
39
40
/* Maximum number of page-buffer / multi-page ranges in one VMBus packet */
#define MAX_PAGE_BUFFER_COUNT				32
#define MAX_MULTIPAGE_BUFFER_COUNT			32 /* 128K */
43
44#pragma pack(push, 1)
45
46
/* Single-page buffer: a byte range inside one guest page frame */
struct hv_page_buffer {
	u32 len;	/* length of the range, in bytes */
	u32 offset;	/* offset of the range within the page */
	u64 pfn;	/* guest page frame number */
};
52
53
/* Multiple-page buffer: a byte range spanning a fixed-size PFN list */
struct hv_multipage_buffer {
	/* Length and Offset determines the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};
60
61
62
63
64
65
/*
 * Multiple-page buffer array; the pfn array is variable size:
 * the number of entries in the PFN array is determined by
 * "len" and "offset".
 */
struct hv_mpb_array {
	/* Length and Offset determines the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[];
};
72
73
/* 0x18 == sizeof(struct vmbus_channel_packet_page_buffer) header fields */
#define MAX_PAGE_BUFFER_PACKET		(0x18 +			\
					(sizeof(struct hv_page_buffer) * \
					 MAX_PAGE_BUFFER_COUNT))
#define MAX_MULTIPAGE_BUFFER_PACKET	(0x18 +			\
					 sizeof(struct hv_multipage_buffer))
79
80
81#pragma pack(pop)
82
/* Ring buffer shared with the Hyper-V host; header is exactly one 4K page. */
struct hv_ring_buffer {
	/* Offset in bytes from the start of ring data below */
	u32 write_index;

	/* Offset in bytes from the start of ring data below */
	u32 read_index;

	u32 interrupt_mask;

	/*
	 * Win8 uses some of the reserved bits to implement
	 * interrupt driven flow management. On the send side we can
	 * request that the receiver interrupt the sender when the ring
	 * transitions from being full to being able to handle a message
	 * of size "pending_send_sz".
	 */
	u32 pending_send_sz;

	u32 reserved1[12];

	union {
		struct {
			u32 feat_pending_send_sz:1;
		};
		u32 value;
	} feature_bits;

	/*
	 * Pad it to PAGE_SIZE so that data starts on page boundary:
	 * 4096 - (5*4 + 12*4 + 4) = 4028 bytes.
	 */
	u8	reserved2[4028];

	/*
	 * Ring data starts here + RingDataStartOffset
	 * !!! DO NOT place any fields below this !!!
	 */
	u8 buffer[0];
} __packed;
121
/* Guest-side bookkeeping for one direction of a channel's ring buffer */
struct hv_ring_buffer_info {
	struct hv_ring_buffer *ring_buffer;	/* the shared ring itself */
	u32 ring_size;			/* Include the shared header */
	spinlock_t ring_lock;

	u32 ring_datasize;		/* < ring_size; data area only */
	u32 ring_data_startoffset;
	u32 priv_write_index;		/* uncommitted, guest-private indices */
	u32 priv_read_index;
};
132
133
134
135
136
137
138
139
140static inline void
141hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
142 u32 *read, u32 *write)
143{
144 u32 read_loc, write_loc, dsize;
145
146
147 read_loc = rbi->ring_buffer->read_index;
148 write_loc = rbi->ring_buffer->write_index;
149 dsize = rbi->ring_datasize;
150
151 *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
152 read_loc - write_loc;
153 *read = dsize - *write;
154}
155
156static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi)
157{
158 u32 read_loc, write_loc, dsize, read;
159
160 dsize = rbi->ring_datasize;
161 read_loc = rbi->ring_buffer->read_index;
162 write_loc = READ_ONCE(rbi->ring_buffer->write_index);
163
164 read = write_loc >= read_loc ? (write_loc - read_loc) :
165 (dsize - read_loc) + write_loc;
166
167 return read;
168}
169
170static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi)
171{
172 u32 read_loc, write_loc, dsize, write;
173
174 dsize = rbi->ring_datasize;
175 read_loc = READ_ONCE(rbi->ring_buffer->read_index);
176 write_loc = rbi->ring_buffer->write_index;
177
178 write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
179 read_loc - write_loc;
180 return write;
181}
182
183
184
185
186
187
188
189
190
191
192
193
/*
 * VMBus protocol versions, encoded as (major << 16) | minor,
 * one per Windows host generation.
 */
#define VERSION_WS2008  ((0 << 16) | (13))
#define VERSION_WIN7    ((1 << 16) | (1))
#define VERSION_WIN8    ((2 << 16) | (4))
#define VERSION_WIN8_1    ((3 << 16) | (0))
#define VERSION_WIN10	((4 << 16) | (0))

#define VERSION_INVAL -1

#define VERSION_CURRENT VERSION_WIN10

/* Make maximum size of pipe payload of 16K */
#define MAX_PIPE_DATA_PAYLOAD		(sizeof(u8) * 16384)

/* Define PipeMode values. */
#define VMBUS_PIPE_TYPE_BYTE		0x00000000
#define VMBUS_PIPE_TYPE_MESSAGE		0x00000004

/* The size of the user defined data buffer for non-pipe offers. */
#define MAX_USER_DEFINED_BYTES		120

/* The size of the user defined data buffer for pipe offers. */
#define MAX_PIPE_USER_DEFINED_BYTES	116
216
217
218
219
220
/*
 * At the center of the Channel Management library is the Channel Offer.
 * This struct contains the fundamental information about an offer.
 */
struct vmbus_channel_offer {
	uuid_le if_type;	/* device/interface type GUID */
	uuid_le if_instance;	/* per-instance GUID */

	/*
	 * These two fields are not currently used.
	 */
	u64 reserved1;
	u64 reserved2;

	u16 chn_flags;		/* VMBUS_CHANNEL_* flag bits */
	u16 mmio_megabytes;	/* in bytes * 1024 * 1024 */

	union {
		/* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
		struct {
			unsigned char user_def[MAX_USER_DEFINED_BYTES];
		} std;

		/*
		 * Pipes:
		 * The following sructure is an integrated pipe protocol, which
		 * is implemented on top of standard user-defined data. Pipe
		 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
		 * use.
		 */
		struct {
			u32  pipe_mode;
			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
		} pipe;
	} u;
	/*
	 * The sub_channel_index is defined in win8.
	 */
	u16 sub_channel_index;
	u16 reserved3;
} __packed;
258
259
/* Server Flags (vmbus_channel_offer.chn_flags bits) */
#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE	1
#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES	2
#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS		4
#define VMBUS_CHANNEL_NAMED_PIPE_MODE			0x10
#define VMBUS_CHANNEL_LOOPBACK_OFFER			0x100
#define VMBUS_CHANNEL_PARENT_OFFER			0x200
#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION	0x400
#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER		0x2000
268
/* Common header for every packet placed in a ring buffer. */
struct vmpacket_descriptor {
	u16 type;	/* enum vmbus_packet_type */
	u16 offset8;	/* payload offset from start of packet, in 8-byte units */
	u16 len8;	/* total packet length, in 8-byte units */
	u16 flags;	/* e.g. VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED */
	u64 trans_id;	/* caller-chosen id echoed back in the completion */
} __packed;

struct vmpacket_header {
	u32 prev_pkt_start_offset;	/* ring offset of the previous packet */
	struct vmpacket_descriptor descriptor;
} __packed;

/* One byte range within a transfer page set. */
struct vmtransfer_page_range {
	u32 byte_count;
	u32 byte_offset;
} __packed;

/* Header for VM_PKT_DATA_USING_XFER_PAGES packets. */
struct vmtransfer_page_packet_header {
	struct vmpacket_descriptor d;
	u16 xfer_pageset_id;
	u8  sender_owns_set;
	u8 reserved;
	u32 range_cnt;
	struct vmtransfer_page_range ranges[1];	/* really range_cnt entries */
} __packed;

struct vmgpadl_packet_header {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;
} __packed;

struct vmadd_remove_transfer_page_set {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u16 xfer_pageset_id;
	u16 reserved;
} __packed;
308
309
310
311
312
/*
 * This structure defines a range in guest physical space that can be made to
 * look virtually contiguous.
 */
struct gpa_range {
	u32 byte_count;
	u32 byte_offset;
	u64 pfn_array[0];	/* count derived from byte_count/byte_offset */
};

/*
 * This is the format for an Establish Gpadl packet, which contains a handle by
 * which this GPADL will be known and a set of GPA ranges associated with it.
 * This can be converted to a MDL by the guest OS. If there are multiple GPA
 * ranges, then the resulting MDL will be "chained," representing multiple VA
 * ranges.
 */
struct vmestablish_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;
332
333
334
335
336
/*
 * This is the format for a Teardown Gpadl packet, which indicates that the
 * GPADL handle in the Establish Gpadl packet will never be referenced again.
 */
struct vmteardown_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;	/* for alignment to a 8-byte boundary */
} __packed;

/*
 * This is the format for a GPA-Direct packet, which contains a set of GPA
 * ranges, in addition to commands and/or data.
 */
struct vmdata_gpa_direct {
	struct vmpacket_descriptor d;
	u32 reserved;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/* This is the format for a Additional Data Packet. */
struct vmadditional_data {
	struct vmpacket_descriptor d;
	u64 total_bytes;
	u32 offset;
	u32 byte_cnt;
	unsigned char data[1];
} __packed;
362
/* Union of all fixed packet headers; sizes a worst-case header buffer. */
union vmpacket_largest_possible_header {
	struct vmpacket_descriptor simple_hdr;
	struct vmtransfer_page_packet_header xfer_page_hdr;
	struct vmgpadl_packet_header gpadl_hdr;
	struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
	struct vmestablish_gpadl establish_gpadl_hdr;
	struct vmteardown_gpadl teardown_gpadl_hdr;
	struct vmdata_gpa_direct data_gpa_direct_hdr;
};
372
/*
 * Accessors for a raw VMBus packet. The payload begins offset8 * 8 bytes
 * from the descriptor; len8 is the total packet length in 8-byte units.
 *
 * Fixed: the original casts converted __packet to a struct *value*
 * ("(struct vmpacket_descriptor)__packet"), which is ill-formed C and
 * would fail to compile if the macro were ever expanded; likewise
 * "struct IMPACT" was an undeclared type. Cast to a pointer instead.
 */
#define VMPACKET_DATA_START_ADDRESS(__packet)	\
	(void *)(((unsigned char *)__packet) +	\
	 ((struct vmpacket_descriptor *)__packet)->offset8 * 8)

#define VMPACKET_DATA_LENGTH(__packet)		\
	((((struct vmpacket_descriptor *)__packet)->len8 -	\
	  ((struct vmpacket_descriptor *)__packet)->offset8) * 8)

#define VMPACKET_TRANSFER_MODE(__packet)	\
	(((struct vmpacket_descriptor *)__packet)->type)
383
/* Type field of vmpacket_descriptor: what kind of payload follows. */
enum vmbus_packet_type {
	VM_PKT_INVALID				= 0x0,
	VM_PKT_SYNCH				= 0x1,
	VM_PKT_ADD_XFER_PAGESET			= 0x2,
	VM_PKT_RM_XFER_PAGESET			= 0x3,
	VM_PKT_ESTABLISH_GPADL			= 0x4,
	VM_PKT_TEARDOWN_GPADL			= 0x5,
	VM_PKT_DATA_INBAND			= 0x6,
	VM_PKT_DATA_USING_XFER_PAGES		= 0x7,
	VM_PKT_DATA_USING_GPADL			= 0x8,
	VM_PKT_DATA_USING_GPA_DIRECT		= 0x9,
	VM_PKT_CANCEL_REQUEST			= 0xa,
	VM_PKT_COMP				= 0xb,
	VM_PKT_DATA_USING_ADDITIONAL_PKT	= 0xc,
	VM_PKT_ADDITIONAL_DATA			= 0xd
};

/* Sender requests a VM_PKT_COMP completion for this packet. */
#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED	1
402
403
404
/* Version 1 messages exchanged on the VMBus control path. */
enum vmbus_channel_message_type {
	CHANNELMSG_INVALID			=  0,
	CHANNELMSG_OFFERCHANNEL		=  1,
	CHANNELMSG_RESCIND_CHANNELOFFER	=  2,
	CHANNELMSG_REQUESTOFFERS		=  3,
	CHANNELMSG_ALLOFFERS_DELIVERED	=  4,
	CHANNELMSG_OPENCHANNEL		=  5,
	CHANNELMSG_OPENCHANNEL_RESULT		=  6,
	CHANNELMSG_CLOSECHANNEL		=  7,
	CHANNELMSG_GPADL_HEADER		=  8,
	CHANNELMSG_GPADL_BODY			=  9,
	CHANNELMSG_GPADL_CREATED		= 10,
	CHANNELMSG_GPADL_TEARDOWN		= 11,
	CHANNELMSG_GPADL_TORNDOWN		= 12,
	CHANNELMSG_RELID_RELEASED		= 13,
	CHANNELMSG_INITIATE_CONTACT		= 14,
	CHANNELMSG_VERSION_RESPONSE		= 15,
	CHANNELMSG_UNLOAD			= 16,
	CHANNELMSG_UNLOAD_RESPONSE		= 17,
	CHANNELMSG_18				= 18,	/* reserved/unused */
	CHANNELMSG_19				= 19,
	CHANNELMSG_20				= 20,
	CHANNELMSG_TL_CONNECT_REQUEST		= 21,
	CHANNELMSG_COUNT
};
430
/* Common header for all control-path channel messages. */
struct vmbus_channel_message_header {
	enum vmbus_channel_message_type msgtype;
	u32 padding;
} __packed;

/* Query VMBus Version parameters */
struct vmbus_channel_query_vmbus_version {
	struct vmbus_channel_message_header header;
	u32 version;
} __packed;

/* VMBus Version Supported parameters */
struct vmbus_channel_version_supported {
	struct vmbus_channel_message_header header;
	u8 version_supported;
} __packed;
447
448
/* Offer Channel parameters */
struct vmbus_channel_offer_channel {
	struct vmbus_channel_message_header header;
	struct vmbus_channel_offer offer;
	u32 child_relid;
	u8 monitorid;
	/*
	 * win7 and beyond splits this field into a bit field.
	 */
	u8 monitor_allocated:1;
	u8 reserved:7;
	/*
	 * These are new fields added in win7 and later.
	 * Do not access these fields without checking the
	 * negotiated protocol.
	 *
	 * If "is_dedicated_interrupt" is set, we must not set the
	 * associated bit in the channel bitmap while sending the
	 * interrupt to the host.
	 *
	 * connection_id is to be used in signaling the host.
	 */
	u16 is_dedicated_interrupt:1;
	u16 reserved1:15;
	u32 connection_id;
} __packed;

/* Rescind Offer parameters */
struct vmbus_channel_rescind_offer {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;
480
481
482
483
484
485
486
487
488
489
490
/*
 * Open Channel parameters.
 * A single GPADL covers both ring buffers; the downstream (incoming) ring
 * starts at the page offset given below.
 */
struct vmbus_channel_open_channel {
	struct vmbus_channel_message_header header;

	/* Identifies the specific VMBus channel that is being opened. */
	u32 child_relid;

	/* ID making a particular open request at a channel offer unique. */
	u32 openid;

	/* GPADL for the channel's ring buffer. */
	u32 ringbuffer_gpadlhandle;

	/*
	 * Starting with win8, this field will be used to specify
	 * the target virtual processor on which to deliver the interrupt for
	 * the host to guest communication.
	 * Before win8, all incoming channel interrupts are only
	 * delivered on cpu 0. Setting this value to 0 would
	 * preserve the earlier behavior.
	 */
	u32 target_vp;

	/*
	 * The upstream ring buffer begins at offset zero in the memory
	 * described by ringbuffer_gpadlhandle. The downstream ring buffer
	 * follows at this offset (in pages).
	 */
	u32 downstream_ringbuffer_pageoffset;

	/* User-specific data to be passed along to the server endpoint. */
	unsigned char userdata[MAX_USER_DEFINED_BYTES];
} __packed;

/* Open Channel Result parameters */
struct vmbus_channel_open_result {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 openid;
	u32 status;
} __packed;

/* Close channel parameters; */
struct vmbus_channel_close_channel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

/* Channel Message GPADL */
#define GPADL_TYPE_RING_BUFFER		1
#define GPADL_TYPE_SERVER_SAVE_AREA	2
#define GPADL_TYPE_TRANSACTION		8
542
543
544
545
546
547
548
/*
 * The number of PFNs in a GPADL message is defined by the number of
 * pages that would be spanned by byte_count and byte_offset.  If the
 * implied number of PFNs won't fit in this packet, there will be a
 * follow-up packet (vmbus_channel_gpadl_body) that contains more.
 */
struct vmbus_channel_gpadl_header {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u16 range_buflen;
	u16 rangecount;
	struct gpa_range range[0];
} __packed;

/* This is the followup packet that contains more PFNs. */
struct vmbus_channel_gpadl_body {
	struct vmbus_channel_message_header header;
	u32 msgnumber;
	u32 gpadl;
	u64 pfn[0];
} __packed;

struct vmbus_channel_gpadl_created {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u32 creation_status;
} __packed;

struct vmbus_channel_gpadl_teardown {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
} __packed;

struct vmbus_channel_gpadl_torndown {
	struct vmbus_channel_message_header header;
	u32 gpadl;
} __packed;

struct vmbus_channel_relid_released {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

struct vmbus_channel_initiate_contact {
	struct vmbus_channel_message_header header;
	u32 vmbus_version_requested;
	u32 target_vcpu; /* The VCPU the host should respond to */
	u64 interrupt_page;
	u64 monitor_page1;
	u64 monitor_page2;
} __packed;

/* Hyper-V socket: guest's connect()-ing to host */
struct vmbus_channel_tl_connect_request {
	struct vmbus_channel_message_header header;
	uuid_le guest_endpoint_id;
	uuid_le host_service_id;
} __packed;

struct vmbus_channel_version_response {
	struct vmbus_channel_message_header header;
	u8 version_supported;
} __packed;
609
/* Lifecycle state of a VMBus channel. */
enum vmbus_channel_state {
	CHANNEL_OFFER_STATE,
	CHANNEL_OPENING_STATE,
	CHANNEL_OPEN_STATE,
	CHANNEL_OPENED_STATE,
};
616
617
618
619
620
/*
 * Represents each channel msg on the vmbus connection This is a
 * variable-size data structure depending on the msg type itself.
 */
struct vmbus_channel_msginfo {
	/* Bookkeeping stuff */
	struct list_head msglistentry;

	/* So far, this is only used to handle gpadl body message */
	struct list_head submsglist;

	/* Synchronize the request/response if needed */
	struct completion  waitevent;
	union {
		struct vmbus_channel_version_supported version_supported;
		struct vmbus_channel_open_result open_result;
		struct vmbus_channel_gpadl_torndown gpadl_torndown;
		struct vmbus_channel_gpadl_created gpadl_created;
		struct vmbus_channel_version_response version_response;
	} response;

	u32 msgsize;
	/*
	 * The channel message that goes out on the "wire".
	 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
	 */
	unsigned char msg[0];
};

struct vmbus_close_msg {
	struct vmbus_channel_msginfo info;
	struct vmbus_channel_close_channel msg;
};
650
651
/* Define connection identifier type. */
union hv_connection_id {
	u32 asu32;
	struct {
		u32 id:24;
		u32 reserved:8;
	} u;
};

/* Definition of the hv_signal_event hypercall input structure. */
struct hv_input_signal_event {
	union hv_connection_id connectionid;
	u16 flag_number;
	u16 rsvdz;
};

struct hv_input_signal_event_buffer {
	u64 align8;	/* pad so that "event" is 8-byte aligned */
	struct hv_input_signal_event event;
};

/* How the guest signals the host when posting data to a channel. */
enum hv_signal_policy {
	HV_SIGNAL_POLICY_DEFAULT = 0,
	HV_SIGNAL_POLICY_EXPLICIT,
};
676
/*
 * Known VMBus device classes.
 * Note: HV_UNKOWN is a long-standing misspelling of "unknown" that is
 * part of the public name and cannot be changed without breaking users.
 */
enum vmbus_device_type {
	HV_IDE = 0,
	HV_SCSI,
	HV_FC,
	HV_NIC,
	HV_ND,
	HV_PCIE,
	HV_FB,
	HV_KBD,
	HV_MOUSE,
	HV_KVP,
	HV_TS,
	HV_HB,
	HV_SHUTDOWN,
	HV_FCOPY,
	HV_BACKUP,
	HV_DM,
	HV_UNKOWN,
};

struct vmbus_device {
	u16  dev_type;		/* enum vmbus_device_type value */
	uuid_le guid;
	bool perf_device;	/* performance critical: gets CPU affinity */
};
702
/* Per-channel state maintained by the guest VMBus driver. */
struct vmbus_channel {
	/* Unique channel id */
	int id;

	struct list_head listentry;

	struct hv_device *device_obj;

	enum vmbus_channel_state state;

	struct vmbus_channel_offer_channel offermsg;
	/*
	 * These are based on the OfferMsg.MonitorId.
	 * Save it here for easy access.
	 */
	u8 monitor_grp;
	u8 monitor_bit;

	bool rescind; /* got rescind msg */

	u32 ringbuffer_gpadlhandle;

	/* Allocated memory for ring buffer */
	void *ringbuffer_pages;
	u32 ringbuffer_pagecount;
	struct hv_ring_buffer_info outbound;	/* send to parent */
	struct hv_ring_buffer_info inbound;	/* receive from parent */
	spinlock_t inbound_lock;

	struct vmbus_close_msg close_msg;

	/* Invoked when the channel's inbound ring has data. */
	void (*onchannel_callback)(void *context);
	void *channel_callback_context;

	/*
	 * A channel can be marked for efficient (batched) reading:
	 * when set, the read index is only committed (and the host
	 * potentially signaled) after a batch of packets is consumed.
	 */
	bool batched_reading;

	bool is_dedicated_interrupt;
	struct hv_input_signal_event_buffer sig_buf;
	struct hv_input_signal_event *sig_event;

	/*
	 * Starting with win8, this field will be used to specify
	 * the target virtual processor on which to deliver the interrupt for
	 * the host to guest communication.
	 */
	u32 target_vp;
	/* The corresponding CPUID in the guest */
	u32 target_cpu;
	/*
	 * State to manage the CPU affiliation of channels.
	 */
	struct cpumask alloced_cpus_in_node;
	int numa_node;

	/*
	 * Support for sub-channels. For high performance devices,
	 * it will be useful to have multiple sub-channels to support
	 * a scalable communication infrastructure with the host.
	 * The support for sub-channels is implemented as an extension
	 * to the current infrastructure.
	 * The initial offer is considered the primary channel and this
	 * offer message will indicate if the host supports sub-channels.
	 * The guest is free to ask for sub-channels to be offered and can
	 * open these sub-channels as a normal "primary" channel. However,
	 * all sub-channels will have the same type and instance guids as the
	 * primary channel. Requests sent on a given channel will result in a
	 * response on the same channel.
	 *
	 * The function to be invoked for dealing with newly created
	 * sub-channels.
	 */
	void (*sc_creation_callback)(struct vmbus_channel *new_sc);

	/*
	 * Channel rescind callback. Some channels (the hvsock ones), need to
	 * register a callback which is invoked in vmbus_onoffer_rescind().
	 */
	void (*chn_rescind_callback)(struct vmbus_channel *channel);

	/*
	 * The spinlock to protect the structure. It is being used to protect
	 * test-and-set access to various attributes of the structure as well
	 * as all sc_list operations.
	 */
	spinlock_t lock;
	/*
	 * All Sub-channels of a primary channel are linked here.
	 */
	struct list_head sc_list;
	/*
	 * Current number of sub-channels.
	 */
	int num_sc;
	/*
	 * Number of a sub-channel (position within sc_list) which is supposed
	 * to be used as the next outgoing channel.
	 */
	int next_oc;
	/*
	 * The primary channel is returned for use by a sub-channel.
	 */
	struct vmbus_channel *primary_channel;
	/*
	 * Support per-channel state for use by vmbus drivers.
	 */
	void *per_channel_state;
	/*
	 * To support per-cpu lookup mapping of relid to channel,
	 * link up channels based on their CPU affinity.
	 */
	struct list_head percpu_list;
	/*
	 * Host signaling policy for this channel: default lets the
	 * core decide when to signal; explicit leaves it to the driver.
	 */
	enum hv_signal_policy  signal_policy;
	/*
	 * On the channel send side, many of the VMBUS
	 * device drivers explicitly serialize access to the
	 * outgoing ring buffer. When set, the ring lock is taken
	 * internally; drivers that serialize themselves clear it.
	 */
	bool acquire_ring_lock;

};
855
/* Select whether ring-buffer operations take the internal ring lock. */
static inline void set_channel_lock_state(struct vmbus_channel *c, bool state)
{
	c->acquire_ring_lock = state;
}
860
861static inline bool is_hvsock_channel(const struct vmbus_channel *c)
862{
863 return !!(c->offermsg.offer.chn_flags &
864 VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
865}
866
/* Set the host-signaling policy for this channel. */
static inline void set_channel_signal_state(struct vmbus_channel *c,
					    enum hv_signal_policy policy)
{
	c->signal_policy = policy;
}

/* Enable/disable batched reading (see vmbus_channel.batched_reading). */
static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
{
	c->batched_reading = state;
}

/* Stash driver-private per-channel state. */
static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
{
	c->per_channel_state = s;
}

static inline void *get_per_channel_state(struct vmbus_channel *c)
{
	return c->per_channel_state;
}

/*
 * Ask the host to interrupt us when at least "size" bytes of the
 * outbound ring become free (see hv_ring_buffer.pending_send_sz).
 */
static inline void set_channel_pending_send_size(struct vmbus_channel *c,
						 u32 size)
{
	c->outbound.ring_buffer->pending_send_sz = size;
}
893
/* Dispatch an incoming control-path channel message. */
void vmbus_onmessage(void *context);

int vmbus_request_offers(void);

/*
 * APIs for managing sub-channels.
 */
void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
			void (*sc_cr_cb)(struct vmbus_channel *new_sc));

void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
		void (*chn_rescind_cb)(struct vmbus_channel *));

/*
 * Retrieve the (sub-) channel on which to send an outgoing request.
 * When a primary channel has multiple sub-channels, we choose a
 * channel whose VCPU binding is closest to the VCPU we will
 * interact with.
 */
struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary);

/*
 * Check if sub-channels have already been offerred. This API will be useful
 * when the driver is unloaded after establishing sub-channels. In this case,
 * when the driver is re-loaded, the driver would have to check if the
 * subchannels have already been established before attempting to request
 * the creation of sub-channels.
 * This function returns TRUE to indicate that subchannels have already been
 * created.
 * This function should be invoked after setting the callback function for
 * sub-channel creation.
 */
bool vmbus_are_subchannels_present(struct vmbus_channel *primary);
928
929
/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;
	struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_multipage_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_multipage_buffer range;
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_packet_mpb_array {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;         /* Always 1 in this case */
	struct hv_mpb_array range;
} __packed;
964
965
/* Open the channel: allocate ring buffers and register the data callback. */
extern int vmbus_open(struct vmbus_channel *channel,
			    u32 send_ringbuffersize,
			    u32 recv_ringbuffersize,
			    void *userdata,
			    u32 userdatalen,
			    void(*onchannel_callback)(void *context),
			    void *context);

extern void vmbus_close(struct vmbus_channel *channel);

extern int vmbus_sendpacket(struct vmbus_channel *channel,
				  void *buffer,
				  u32 bufferLen,
				  u64 requestid,
				  enum vmbus_packet_type type,
				  u32 flags);

/* As vmbus_sendpacket(), with explicit control of host signaling (kick_q). */
extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel,
				  void *buffer,
				  u32 bufferLen,
				  u64 requestid,
				  enum vmbus_packet_type type,
				  u32 flags,
				  bool kick_q);

extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
					    struct hv_page_buffer pagebuffers[],
					    u32 pagecount,
					    void *buffer,
					    u32 bufferlen,
					    u64 requestid);

extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
					   struct hv_page_buffer pagebuffers[],
					   u32 pagecount,
					   void *buffer,
					   u32 bufferlen,
					   u64 requestid,
					   u32 flags,
					   bool kick_q);

extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
					struct hv_multipage_buffer *mpb,
					void *buffer,
					u32 bufferlen,
					u64 requestid);

extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
				     struct vmbus_packet_mpb_array *mpb,
				     u32 desc_size,
				     void *buffer,
				     u32 bufferlen,
				     u64 requestid);

/* Describe "kbuffer" (size bytes) to the host; returns a GPADL handle. */
extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
				      void *kbuffer,
				      u32 size,
				      u32 *gpadl_handle);

extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
				     u32 gpadl_handle);

extern int vmbus_recvpacket(struct vmbus_channel *channel,
				  void *buffer,
				  u32 bufferlen,
				  u32 *buffer_actual_len,
				  u64 *requestid);

/* As vmbus_recvpacket(), but including the packet descriptor/trailer. */
extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
				     void *buffer,
				     u32 bufferlen,
				     u32 *buffer_actual_len,
				     u64 *requestid);


extern void vmbus_ontimer(unsigned long data);
1042
1043
/* Base driver object for a VMBus client driver. */
struct hv_driver {
	const char *name;

	/*
	 * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
	 * channel flag, actually doesn't mean a synthetic device because the
	 * offer's if_type/if_instance can change for every new hvsock
	 * connection.
	 *
	 * However, to facilitate the notification of new-offer/rescind-offer
	 * from vmbus driver to hvsock driver, we can handle hvsock offer as
	 * a special vmbus device, and hence we need the below flag to
	 * indicate if the driver is the hvsock driver or not: we need to
	 * specially treat the hvosck offer & driver in vmbus_match().
	 */
	bool hvsock;

	/* the device type supported by this driver */
	uuid_le dev_type;
	const struct hv_vmbus_device_id *id_table;

	struct device_driver driver;

	int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
	int (*remove)(struct hv_device *);
	void (*shutdown)(struct hv_device *);

};

/* Base device object for a device on the VMBus. */
struct hv_device {
	/* the device type id of this device */
	uuid_le dev_type;

	/* the device instance id of this device */
	uuid_le dev_instance;
	u16 vendor_id;
	u16 device_id;

	struct device device;

	struct vmbus_channel *channel;
};
1087
1088
/* Convert an embedded struct device back to its hv_device container. */
static inline struct hv_device *device_to_hv_device(struct device *d)
{
	return container_of(d, struct hv_device, device);
}

/* Convert an embedded struct device_driver back to its hv_driver. */
static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
{
	return container_of(d, struct hv_driver, driver);
}

/* Per-device driver data, stored on the embedded struct device. */
static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
	dev_set_drvdata(&dev->device, data);
}

static inline void *hv_get_drvdata(struct hv_device *dev)
{
	return dev_get_drvdata(&dev->device);
}
1108
1109
/* Vmbus interface */
#define vmbus_driver_register(driver)	\
	__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
					 struct module *owner,
					 const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);

void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);

/* Claim a region of MMIO space for a VMBus device (e.g. framebuffer). */
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok);
void vmbus_free_mmio(resource_size_t start, resource_size_t size);
int vmbus_cpu_number_to_vp_number(int cpu_number);
u64 hv_do_hypercall(u64 control, void *input, void *output);
1126
1127
1128
1129
1130
1131
1132
1133
1134
/*
 * Define the interface GUIDs for the synthetic devices.
 */

/*
 * Network GUID
 * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
 */
#define HV_NIC_GUID \
	.guid = UUID_LE(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
			0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)

/*
 * IDE GUID
 * {32412632-86cb-44a2-9b5c-50d1417354f5}
 */
#define HV_IDE_GUID \
	.guid = UUID_LE(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
			0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)

/*
 * SCSI GUID
 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
 */
#define HV_SCSI_GUID \
	.guid = UUID_LE(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
			0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)

/*
 * Shutdown GUID
 * {0e0b6031-5213-4934-818b-38d90ced39db}
 */
#define HV_SHUTDOWN_GUID \
	.guid = UUID_LE(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
			0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)

/*
 * Time Synch GUID
 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
 */
#define HV_TS_GUID \
	.guid = UUID_LE(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
			0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)

/*
 * Heartbeat GUID
 * {57164f39-9115-4e78-ab55-382f3bd5422d}
 */
#define HV_HEART_BEAT_GUID \
	.guid = UUID_LE(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
			0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)

/*
 * KVP GUID
 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
 */
#define HV_KVP_GUID \
	.guid = UUID_LE(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
			0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)

/*
 * Dynamic memory GUID
 * {525074dc-8985-46e2-8057-a307dc18a502}
 */
#define HV_DM_GUID \
	.guid = UUID_LE(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
			0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)

/*
 * Mouse GUID
 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
 */
#define HV_MOUSE_GUID \
	.guid = UUID_LE(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
			0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)

/*
 * Keyboard GUID
 * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
 */
#define HV_KBD_GUID \
	.guid = UUID_LE(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
			0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)

/*
 * VSS (Backup/Restore) GUID
 * {35fa2e29-ea23-4236-96ae-3a6ebacba440}
 */
#define HV_VSS_GUID \
	.guid = UUID_LE(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
			0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
/*
 * Synthetic Video GUID
 * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
 */
#define HV_SYNTHVID_GUID \
	.guid = UUID_LE(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
			0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)

/*
 * Synthetic FC GUID
 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
 */
#define HV_SYNTHFC_GUID \
	.guid = UUID_LE(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
			0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)

/*
 * Guest File Copy Service
 * {34D14BE3-DEE4-41c8-9AE7-6B174977C192}
 */
#define HV_FCOPY_GUID \
	.guid = UUID_LE(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
			0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)

/*
 * NetworkDirect. This is the guest RDMA service.
 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
 */
#define HV_ND_GUID \
	.guid = UUID_LE(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
			0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)

/*
 * PCI Express Pass Through
 * {44C4F61D-4444-4400-9D52-802E27EDE19F}
 */
#define HV_PCIE_GUID \
	.guid = UUID_LE(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
			0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
1258
1259
1260
1261
1262
/* Integration Components (IC) framework: message types and header flags. */
#define ICMSGTYPE_NEGOTIATE		0
#define ICMSGTYPE_HEARTBEAT		1
#define ICMSGTYPE_KVPEXCHANGE		2
#define ICMSGTYPE_SHUTDOWN		3
#define ICMSGTYPE_TIMESYNC		4
#define ICMSGTYPE_VSS			5

#define ICMSGHDRFLAG_TRANSACTION	1
#define ICMSGHDRFLAG_REQUEST		2
#define ICMSGHDRFLAG_RESPONSE		4
1273
1274
1275
1276
1277
1278
1279
1280
/*
 * While we want to handle util services as regular devices,
 * there is only one instance of each of these services; so
 * we statically allocate the service specific state.
 */
struct hv_util_service {
	u8 *recv_buffer;
	void *channel;
	void (*util_cb)(void *);
	int (*util_init)(struct hv_util_service *);
	void (*util_deinit)(void);
};

struct vmbuspipe_hdr {
	u32 flags;
	u32 msgsize;
} __packed;

struct ic_version {
	u16 major;
	u16 minor;
} __packed;

/* Header common to all IC framework messages. */
struct icmsg_hdr {
	struct ic_version icverframe;
	u16 icmsgtype;
	struct ic_version icvermsg;
	u16 icmsgsize;
	u32 status;
	u8 ictransaction_id;
	u8 icflags;		/* ICMSGHDRFLAG_* */
	u8 reserved[2];
} __packed;

struct icmsg_negotiate {
	u16 icframe_vercnt;
	u16 icmsg_vercnt;
	u32 reserved;
	struct ic_version icversion_data[1]; /* any size array */
} __packed;

struct shutdown_msg_data {
	u32 reason_code;
	u32 timeout_seconds;
	u32 flags;
	u8  display_message[2048];
} __packed;

struct heartbeat_msg_data {
	u64 seq_num;
	u32 reserved[8];
} __packed;
1328
1329
/* Time Sync IC defs */
#define ICTIMESYNCFLAG_PROBE	0
#define ICTIMESYNCFLAG_SYNC	1
#define ICTIMESYNCFLAG_SAMPLE	2

/*
 * Offset between the Windows epoch (1601-01-01) and the Unix epoch
 * (1970-01-01), in 100ns units: 11644473600 seconds.
 */
#ifdef __x86_64__
#define WLTIMEDELTA 116444736000000000L /* in 100ns unit */
#else
#define WLTIMEDELTA 116444736000000000LL
#endif

struct ictimesync_data {
	u64 parenttime;		/* host wall-clock time, 100ns units */
	u64 childtime;
	u64 roundtriptime;
	u8 flags;		/* ICTIMESYNCFLAG_* */
} __packed;
1346
/* Maps a util-service GUID to its channel and callback. */
struct hyperv_service_callback {
	u8 msg_type;
	char *log_msg;
	uuid_le data;
	struct vmbus_channel *channel;
	void (*callback) (void *context);
};

#define MAX_SRV_VER	0x7ffffff
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
					struct icmsg_negotiate *, u8 *, int,
					int);

void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);

/*
 * Negotiated version with the Host.
 */
extern __u32 vmbus_proto_version;

int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
				  const uuid_le *shv_host_servie_id);
void vmbus_set_event(struct vmbus_channel *channel);
1371
1372
/* Get the start of the ring buffer's data area. */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
	return (void *)ring_info->ring_buffer->buffer;
}
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
/*
 * To optimize the flow management on the send-side,
 * when the sender is blocked because of lack of
 * sufficient space in the ring buffer, potential the
 * consumer of the ring buffer can signal the producer.
 * This is controlled by the following parameters:
 *
 * 1. pending_send_sz: This is the size in bytes that the
 *    producer is trying to send.
 * 2. The feature bit feat_pending_send_sz set to indicate if
 *    the consumer of the ring will signal when the ring
 *    state transitions from being full to a state where
 *    there is room for the producer to send the pending packet.
 */
static inline  bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
{
	u32 cur_write_sz;
	u32 pending_sz;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * Here is the reason for having this barrier:
	 * If the reading of the pend_sz (in this function)
	 * were to be reordered and read before we commit the new read
	 * index (in the calling function)  we could
	 * have a problem. If the host were to set the pending_sz after we
	 * have sampled pending_sz and go to sleep before we commit the
	 * read index, we could miss sending the interrupt to the host.
	 * Issue a full memory barrier to address this.
	 */
	virt_mb();

	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);

	/* If the other end is not blocked on write don't bother. */
	if (pending_sz == 0)
		return false;

	cur_write_sz = hv_get_bytes_to_write(rbi);

	if (cur_write_sz >= pending_sz)
		return true;

	return false;
}
1424
1425
1426
1427
/* Every ring packet is followed by an 8-byte trailer (packet index). */
#define VMBUS_PKT_TRAILER	8

/*
 * An API to support in-place processing of incoming VMBUS packets.
 * Returns the next packet's descriptor without copying or advancing
 * the committed read index; NULL if no complete, non-wrapping packet
 * is available. Pair with put_pkt_raw() and commit_rd_index().
 */
static inline struct vmpacket_descriptor *
get_next_pkt_raw(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *ring_info = &channel->inbound;
	u32 read_loc = ring_info->priv_read_index;
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	struct vmpacket_descriptor *cur_desc;
	u32 packetlen;
	u32 dsize = ring_info->ring_datasize;
	/* Bytes already consumed privately but not yet committed. */
	u32 delta = read_loc - ring_info->ring_buffer->read_index;
	u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta);

	if (bytes_avail_toread < sizeof(struct vmpacket_descriptor))
		return NULL;

	/* The descriptor itself must not wrap around the ring end. */
	if ((read_loc + sizeof(*cur_desc)) > dsize)
		return NULL;

	cur_desc = ring_buffer + read_loc;
	packetlen = cur_desc->len8 << 3;

	/*
	 * If the packet under consideration is wrapping around,
	 * return failure.
	 */
	if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1))
		return NULL;

	return cur_desc;
}
1460
1461
1462
1463
1464
1465
/*
 * A helper function to step through packets "in-place"
 * consumes the packet returned by get_next_pkt_raw() by advancing the
 * private (uncommitted) read index past it and its trailer.
 */
static inline void put_pkt_raw(struct vmbus_channel *channel,
				struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *ring_info = &channel->inbound;
	u32 read_loc = ring_info->priv_read_index;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = ring_info->ring_datasize;

	if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize)
		BUG();
	/*
	 * Include the packet trailer.
	 */
	ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
}
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
/*
 * This call commits the read index and potentially signals the host.
 * Here is the pattern for using the "in-place" consumption APIs:
 *
 * while (get_next_pkt_raw() {
 *	process the packet "in-place";
 *	put_pkt_raw();
 * }
 * if (packets processed in place)
 *	commit_rd_index();
 */
static inline void commit_rd_index(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *ring_info = &channel->inbound;
	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	ring_info->ring_buffer->read_index = ring_info->priv_read_index;

	if (hv_need_to_signal_on_read(ring_info))
		vmbus_set_event(channel);
}
1507
1508
1509#endif
1510