/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#ifndef _HYPERV_H
#define _HYPERV_H

#include <uapi/linux/hyperv.h>

#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/reciprocal_div.h>

#define MAX_PAGE_BUFFER_COUNT		32
#define MAX_MULTIPAGE_BUFFER_COUNT	32

#pragma pack(push, 1)

/* Single-page buffer */
struct hv_page_buffer {
	u32 len;
	u32 offset;
	u64 pfn;
};

/* Multiple-page buffer */
struct hv_multipage_buffer {
	/* Length and offset in this buffer are in bytes */
	u32 len;
	u32 offset;
	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};

/*
 * Multiple-page buffer array; the number of entries in the PFN array is
 * determined by "len" and "offset".
 */
struct hv_mpb_array {
	/* Length and offset in this buffer are in bytes */
	u32 len;
	u32 offset;
	u64 pfn_array[];
};

/* 0x18 includes the proprietary packet header */
#define MAX_PAGE_BUFFER_PACKET		(0x18 +			\
					(sizeof(struct hv_page_buffer) * \
					 MAX_PAGE_BUFFER_COUNT))
#define MAX_MULTIPAGE_BUFFER_PACKET	(0x18 +			\
					 sizeof(struct hv_multipage_buffer))

#pragma pack(pop)

struct hv_ring_buffer {
	/* Offset in bytes from the start of ring data below */
	u32 write_index;

	/* Offset in bytes from the start of ring data below */
	u32 read_index;

	u32 interrupt_mask;

	/*
	 * Win8 and later versions of Hyper-V implement interrupt driven flow
	 * management. The feature bit feat_pending_send_sz is set by the host
	 * on the host->guest ring buffer, and by the guest on the guest->host
	 * ring buffer.
	 *
	 * The meaning of the feature bit is a bit complex in that it has
	 * semantics that apply to both ring buffers. If the guest sets the
	 * feature bit in the guest->host ring buffer, the guest is telling the
	 * host that:
	 * 1) It will set the pending_send_sz field in the guest->host ring
	 *    buffer when it is waiting for space to become available, and
	 * 2) It will read the pending_send_sz field in the host->guest ring
	 *    buffer and interrupt the host when it frees enough space.
	 *
	 * Similarly, if the host sets the feature bit in the host->guest ring
	 * buffer, the host is telling the guest that it will set
	 * pending_send_sz when waiting for space, and that it will read the
	 * guest->host pending_send_sz and interrupt the guest when it frees
	 * enough space.
	 *
	 * If either endpoint does not set the feature bit that it owns, that
	 * endpoint must poll if it encounters a full ring buffer, and must not
	 * signal the other end with an interrupt.
	 */
	u32 pending_send_sz;
	u32 reserved1[12];
	union {
		struct {
			u32 feat_pending_send_sz:1;
		};
		u32 value;
	} feature_bits;

	/* Pad it to PAGE_SIZE so that data starts on a page boundary */
	u8 reserved2[4028];

	/*
	 * Ring data starts here + RingDataStartOffset
	 * !!! DO NOT place any fields below this !!!
	 */
	u8 buffer[0];
} __packed;

struct hv_ring_buffer_info {
	struct hv_ring_buffer *ring_buffer;
	u32 ring_size;			/* Include the shared header */
	struct reciprocal_value ring_size_div10_reciprocal;
	spinlock_t ring_lock;

	u32 ring_datasize;		/* < ring_size */
	u32 priv_read_index;
	/*
	 * The ring buffer mutex lock. This lock prevents the ring buffer from
	 * being freed while the ring buffer is being accessed.
	 */
	struct mutex ring_buffer_mutex;
};

static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, read;

	dsize = rbi->ring_datasize;
	read_loc = rbi->ring_buffer->read_index;
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	read = write_loc >= read_loc ? (write_loc - read_loc) :
		(dsize - read_loc) + write_loc;

	return read;
}

static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, write;

	dsize = rbi->ring_datasize;
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = rbi->ring_buffer->write_index;

	write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	return write;
}

static inline u32 hv_get_avail_to_write_percent(
		const struct hv_ring_buffer_info *rbi)
{
	u32 avail_write = hv_get_bytes_to_write(rbi);

	return reciprocal_divide(
			(avail_write << 3) + (avail_write << 1),
			rbi->ring_size_div10_reciprocal);
}
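
/*
 * Note on the arithmetic above: (x << 3) + (x << 1) computes x * 10 without
 * a multiply, and reciprocal_divide() by the precomputed ring_size / 10
 * reciprocal then yields a percentage. Illustrative sketch (not part of this
 * header's API), assuming a 64 KiB ring with 16 KiB writable:
 *
 *	avail_write * 10 / (ring_size / 10) = 163840 / 6553 = ~25 (percent)
 */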

/*
 * The VMBus protocol version is a 32-bit value split into two 16-bit
 * quantities: major.minor.
 *
 * 0 . 13 (Windows Server 2008)
 * 1 . 1  (Windows 7)
 * 2 . 4  (Windows 8)
 * 3 . 0  (Windows 8.1)
 * 4 . 0  (Windows 10)
 * 4 . 1  (Windows 10 RS3)
 * 5 . 0  (Newer Windows 10)
 * 5 . 1  (Windows 10 RS4)
 * 5 . 2  (Windows Server 2019, RS5)
 */

#define VERSION_WS2008		((0 << 16) | (13))
#define VERSION_WIN7		((1 << 16) | (1))
#define VERSION_WIN8		((2 << 16) | (4))
#define VERSION_WIN8_1		((3 << 16) | (0))
#define VERSION_WIN10		((4 << 16) | (0))
#define VERSION_WIN10_V4_1	((4 << 16) | (1))
#define VERSION_WIN10_V5	((5 << 16) | (0))
#define VERSION_WIN10_V5_1	((5 << 16) | (1))
#define VERSION_WIN10_V5_2	((5 << 16) | (2))
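
/*
 * Example (illustrative only): a negotiated version value splits into its
 * major/minor halves as encoded above.
 *
 *	u16 major = VERSION_WIN10_V5_2 >> 16;		// 5
 *	u16 minor = VERSION_WIN10_V5_2 & 0xffff;	// 2
 */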

/* Make maximum size of pipe payload of 16K */
#define MAX_PIPE_DATA_PAYLOAD		(sizeof(u8) * 16384)

/* Define PipeMode */
#define VMBUS_PIPE_TYPE_BYTE		0x00000000
#define VMBUS_PIPE_TYPE_MESSAGE		0x00000004

/* The size of the user defined data buffer for non-pipe offers */
#define MAX_USER_DEFINED_BYTES		120

/* The size of the user defined data buffer for pipe offers */
#define MAX_PIPE_USER_DEFINED_BYTES	116

/*
 * At the center of the Channel Management library is the Channel Offer. This
 * struct contains the fundamental information about an offer.
 */
struct vmbus_channel_offer {
	guid_t if_type;
	guid_t if_instance;

	/*
	 * These two fields are not currently used.
	 */
	u64 reserved1;
	u64 reserved2;

	u16 chn_flags;
	u16 mmio_megabytes;		/* in bytes * 1024 * 1024 */

	union {
		/* Non-pipes: the user has MAX_USER_DEFINED_BYTES bytes. */
		struct {
			unsigned char user_def[MAX_USER_DEFINED_BYTES];
		} std;

		/*
		 * Pipes:
		 * The following structure is an integrated pipe protocol,
		 * which is implemented on top of standard user-defined data.
		 * Pipe clients have MAX_PIPE_USER_DEFINED_BYTES left for
		 * their own use.
		 */
		struct {
			u32 pipe_mode;
			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
		} pipe;
	} u;
	/*
	 * The sub_channel_index is defined in Win8: a value of zero means a
	 * primary channel and a value of non-zero means a sub-channel.
	 *
	 * Before Win8, the field is reserved, meaning it's always zero.
	 */
	u16 sub_channel_index;
	u16 reserved3;
} __packed;

/* Server Flags */
#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE	1
#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES	2
#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS		4
#define VMBUS_CHANNEL_NAMED_PIPE_MODE			0x10
#define VMBUS_CHANNEL_LOOPBACK_OFFER			0x100
#define VMBUS_CHANNEL_PARENT_OFFER			0x200
#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION	0x400
#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER		0x2000

struct vmpacket_descriptor {
	u16 type;
	u16 offset8;
	u16 len8;
	u16 flags;
	u64 trans_id;
} __packed;

struct vmpacket_header {
	u32 prev_pkt_start_offset;
	struct vmpacket_descriptor descriptor;
} __packed;

struct vmtransfer_page_range {
	u32 byte_count;
	u32 byte_offset;
} __packed;

struct vmtransfer_page_packet_header {
	struct vmpacket_descriptor d;
	u16 xfer_pageset_id;
	u8 sender_owns_set;
	u8 reserved;
	u32 range_cnt;
	struct vmtransfer_page_range ranges[1];
} __packed;

struct vmgpadl_packet_header {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;
} __packed;

struct vmadd_remove_transfer_page_set {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u16 xfer_pageset_id;
	u16 reserved;
} __packed;

/*
 * This structure defines a range in guest physical space that can be made to
 * look virtually contiguous.
 */
struct gpa_range {
	u32 byte_count;
	u32 byte_offset;
	u64 pfn_array[0];
};

/*
 * This is the format for an Establish Gpadl packet, which contains a handle
 * by which this GPADL will be known and a set of GPA ranges associated with
 * it. This can be converted to an MDL by the guest OS. If there are multiple
 * GPA ranges, then the resulting MDL will be "chained", representing multiple
 * VA ranges.
 */
struct vmestablish_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/*
 * This is the format for a Teardown Gpadl packet, which indicates that the
 * GPADL handle in the Establish Gpadl packet will never be referenced again.
 */
struct vmteardown_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;	/* for alignment to a 8-byte boundary */
} __packed;

/*
 * This is the format for a GPA-Direct packet, which contains a set of GPA
 * ranges, in addition to commands and/or data.
 */
struct vmdata_gpa_direct {
	struct vmpacket_descriptor d;
	u32 reserved;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/* This is the format for an Additional Data Packet. */
struct vmadditional_data {
	struct vmpacket_descriptor d;
	u64 total_bytes;
	u32 offset;
	u32 byte_cnt;
	unsigned char data[1];
} __packed;

union vmpacket_largest_possible_header {
	struct vmpacket_descriptor simple_hdr;
	struct vmtransfer_page_packet_header xfer_page_hdr;
	struct vmgpadl_packet_header gpadl_hdr;
	struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
	struct vmestablish_gpadl establish_gpadl_hdr;
	struct vmteardown_gpadl teardown_gpadl_hdr;
	struct vmdata_gpa_direct data_gpa_direct_hdr;
};

/* Accessors for a raw packet pointer; offset8/len8 are in 8-byte units. */
#define VMPACKET_DATA_START_ADDRESS(__packet)	\
	(void *)(((unsigned char *)__packet) +	\
	 ((struct vmpacket_descriptor *)__packet)->offset8 * 8)

#define VMPACKET_DATA_LENGTH(__packet)		\
	((((struct vmpacket_descriptor *)__packet)->len8 -	\
	  ((struct vmpacket_descriptor *)__packet)->offset8) * 8)

#define VMPACKET_TRANSFER_MODE(__packet)	\
	(((struct vmpacket_descriptor *)__packet)->type)

enum vmbus_packet_type {
	VM_PKT_INVALID				= 0x0,
	VM_PKT_SYNCH				= 0x1,
	VM_PKT_ADD_XFER_PAGESET			= 0x2,
	VM_PKT_RM_XFER_PAGESET			= 0x3,
	VM_PKT_ESTABLISH_GPADL			= 0x4,
	VM_PKT_TEARDOWN_GPADL			= 0x5,
	VM_PKT_DATA_INBAND			= 0x6,
	VM_PKT_DATA_USING_XFER_PAGES		= 0x7,
	VM_PKT_DATA_USING_GPADL			= 0x8,
	VM_PKT_DATA_USING_GPA_DIRECT		= 0x9,
	VM_PKT_CANCEL_REQUEST			= 0xa,
	VM_PKT_COMP				= 0xb,
	VM_PKT_DATA_USING_ADDITIONAL_PKT	= 0xc,
	VM_PKT_ADDITIONAL_DATA			= 0xd
};

#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED	1

/* Version 1 messages */
enum vmbus_channel_message_type {
	CHANNELMSG_INVALID			=  0,
	CHANNELMSG_OFFERCHANNEL			=  1,
	CHANNELMSG_RESCIND_CHANNELOFFER		=  2,
	CHANNELMSG_REQUESTOFFERS		=  3,
	CHANNELMSG_ALLOFFERS_DELIVERED		=  4,
	CHANNELMSG_OPENCHANNEL			=  5,
	CHANNELMSG_OPENCHANNEL_RESULT		=  6,
	CHANNELMSG_CLOSECHANNEL			=  7,
	CHANNELMSG_GPADL_HEADER			=  8,
	CHANNELMSG_GPADL_BODY			=  9,
	CHANNELMSG_GPADL_CREATED		= 10,
	CHANNELMSG_GPADL_TEARDOWN		= 11,
	CHANNELMSG_GPADL_TORNDOWN		= 12,
	CHANNELMSG_RELID_RELEASED		= 13,
	CHANNELMSG_INITIATE_CONTACT		= 14,
	CHANNELMSG_VERSION_RESPONSE		= 15,
	CHANNELMSG_UNLOAD			= 16,
	CHANNELMSG_UNLOAD_RESPONSE		= 17,
	CHANNELMSG_18				= 18,
	CHANNELMSG_19				= 19,
	CHANNELMSG_20				= 20,
	CHANNELMSG_TL_CONNECT_REQUEST		= 21,
	CHANNELMSG_22				= 22,
	CHANNELMSG_TL_CONNECT_RESULT		= 23,
	CHANNELMSG_COUNT
};

/* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */
#define INVALID_RELID	U32_MAX

struct vmbus_channel_message_header {
	enum vmbus_channel_message_type msgtype;
	u32 padding;
} __packed;

/* Query VMBus Version parameters */
struct vmbus_channel_query_vmbus_version {
	struct vmbus_channel_message_header header;
	u32 version;
} __packed;

/* VMBus Version Supported parameters */
struct vmbus_channel_version_supported {
	struct vmbus_channel_message_header header;
	u8 version_supported;
} __packed;

/* Offer Channel parameters */
struct vmbus_channel_offer_channel {
	struct vmbus_channel_message_header header;
	struct vmbus_channel_offer offer;
	u32 child_relid;
	u8 monitorid;
	/*
	 * win7 and beyond splits this field into a bit field.
	 */
	u8 monitor_allocated:1;
	u8 reserved:7;
	/*
	 * These are new fields added in win7 and later.
	 * Do not access these fields without checking the
	 * negotiated protocol.
	 *
	 * If "is_dedicated_interrupt" is set, we must not set the
	 * associated bit in the channel bitmap while sending the
	 * interrupt to the host.
	 *
	 * connection_id is to be used in signaling the host.
	 */
	u16 is_dedicated_interrupt:1;
	u16 reserved1:15;
	u32 connection_id;
} __packed;

/* Rescind Offer parameters */
struct vmbus_channel_rescind_offer {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

static inline u32
hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
{
	return rbi->ring_buffer->pending_send_sz;
}

/*
 * Open Channel parameters
 */
struct vmbus_channel_open_channel {
	struct vmbus_channel_message_header header;

	/* Identifies the specific VMBus channel that is being opened. */
	u32 child_relid;

	/* ID making a particular open request at a channel offer unique. */
	u32 openid;

	/* GPADL for the channel's ring buffer. */
	u32 ringbuffer_gpadlhandle;

	/*
	 * Starting with win8, this field will be used to specify the target
	 * virtual processor on which to deliver the interrupt for the host
	 * to guest communication.
	 * Prior to win8, incoming channel interrupts would only be delivered
	 * on cpu 0. Setting this value to 0 would preserve the earlier
	 * behavior.
	 */
	u32 target_vp;

	/*
	 * The upstream ring buffer begins at offset zero in the memory
	 * described by ringbuffer_gpadlhandle. The downstream ring buffer
	 * follows it at this offset (in pages).
	 */
	u32 downstream_ringbuffer_pageoffset;

	/* User-specific data to be passed along to the server endpoint. */
	unsigned char userdata[MAX_USER_DEFINED_BYTES];
} __packed;

/* Open Channel Result parameters */
struct vmbus_channel_open_result {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 openid;
	u32 status;
} __packed;

/* Close channel parameters */
struct vmbus_channel_close_channel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

/* Channel Message GPADL */
#define GPADL_TYPE_RING_BUFFER		1
#define GPADL_TYPE_SERVER_SAVE_AREA	2
#define GPADL_TYPE_TRANSACTION		8

/*
 * The number of PFNs in a GPADL message is defined by the number of pages
 * that would be spanned by byte_count and byte_offset. If the implied number
 * of PFNs won't fit in this packet, there will be a follow-up packet that
 * contains more.
 */
struct vmbus_channel_gpadl_header {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u16 range_buflen;
	u16 rangecount;
	struct gpa_range range[0];
} __packed;

/* This is the followup packet that contains more PFNs. */
struct vmbus_channel_gpadl_body {
	struct vmbus_channel_message_header header;
	u32 msgnumber;
	u32 gpadl;
	u64 pfn[0];
} __packed;

struct vmbus_channel_gpadl_created {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u32 creation_status;
} __packed;

struct vmbus_channel_gpadl_teardown {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
} __packed;

struct vmbus_channel_gpadl_torndown {
	struct vmbus_channel_message_header header;
	u32 gpadl;
} __packed;

struct vmbus_channel_relid_released {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

struct vmbus_channel_initiate_contact {
	struct vmbus_channel_message_header header;
	u32 vmbus_version_requested;
	u32 target_vcpu; /* The VP on which this version response will be delivered */
	union {
		u64 interrupt_page;
		struct {
			u8	msg_sint;
			u8	padding1[3];
			u32	padding2;
		};
	};
	u64 monitor_page1;
	u64 monitor_page2;
} __packed;

/* Hyper-V socket: guest's connect()-ing to host */
struct vmbus_channel_tl_connect_request {
	struct vmbus_channel_message_header header;
	guid_t guest_endpoint_id;
	guid_t host_service_id;
} __packed;

struct vmbus_channel_version_response {
	struct vmbus_channel_message_header header;
	u8 version_supported;

	u8 connection_state;
	u16 padding;

	/*
	 * On new hosts that support VMBus protocol 5.0, we must use
	 * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
	 * and for subsequent messages, we must use the Message Connection ID
	 * field in the host-returned Version Response Message.
	 *
	 * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
	 */
	u32 msg_conn_id;
} __packed;

enum vmbus_channel_state {
	CHANNEL_OFFER_STATE,
	CHANNEL_OPENING_STATE,
	CHANNEL_OPEN_STATE,
	CHANNEL_OPENED_STATE,
};

/*
 * Represents each channel msg on the vmbus connection. This is a
 * variable-size data structure depending on the msg type itself.
 */
struct vmbus_channel_msginfo {
	/* Bookkeeping stuff */
	struct list_head msglistentry;

	/* So far, this is only used to handle gpadl body message */
	struct list_head submsglist;

	/* Synchronize the request/response if needed */
	struct completion waitevent;
	struct vmbus_channel *waiting_channel;
	union {
		struct vmbus_channel_version_supported version_supported;
		struct vmbus_channel_open_result open_result;
		struct vmbus_channel_gpadl_torndown gpadl_torndown;
		struct vmbus_channel_gpadl_created gpadl_created;
		struct vmbus_channel_version_response version_response;
	} response;

	u32 msgsize;
	/*
	 * The channel message that goes out on the "wire". It will contain at
	 * minimum the VMBUS_CHANNEL_MESSAGE_HEADER header.
	 */
	unsigned char msg[0];
};

struct vmbus_close_msg {
	struct vmbus_channel_msginfo info;
	struct vmbus_channel_close_channel msg;
};

/* Define connection identifier type. */
union hv_connection_id {
	u32 asu32;
	struct {
		u32 id:24;
		u32 reserved:8;
	} u;
};

enum hv_numa_policy {
	HV_BALANCED = 0,
	HV_LOCALIZED,
};

enum vmbus_device_type {
	HV_IDE = 0,
	HV_SCSI,
	HV_FC,
	HV_NIC,
	HV_ND,
	HV_PCIE,
	HV_FB,
	HV_KBD,
	HV_MOUSE,
	HV_KVP,
	HV_TS,
	HV_HB,
	HV_SHUTDOWN,
	HV_FCOPY,
	HV_BACKUP,
	HV_DM,
	HV_UNKNOWN,
};

struct vmbus_device {
	u16 dev_type;
	guid_t guid;
	bool perf_device;
};

struct vmbus_channel {
	struct list_head listentry;

	struct hv_device *device_obj;

	enum vmbus_channel_state state;

	struct vmbus_channel_offer_channel offermsg;
	/*
	 * These are based on the offermsg.monitorid.
	 * Save it here for easy access.
	 */
	u8 monitor_grp;
	u8 monitor_bit;

	bool rescind; /* got rescind msg */
	struct completion rescind_event;

	u32 ringbuffer_gpadlhandle;

	/* Allocated memory for ring buffer */
	struct page *ringbuffer_page;
	u32 ringbuffer_pagecount;
	u32 ringbuffer_send_offset;
	struct hv_ring_buffer_info outbound;	/* send to parent */
	struct hv_ring_buffer_info inbound;	/* receive from parent */

	struct vmbus_close_msg close_msg;

	/* Statistics */
	u64 interrupts;		/* Host to Guest interrupts */
	u64 sig_events;		/* Guest to Host events */

	/*
	 * Guest to host interrupts caused by the outbound ring buffer changing
	 * from empty to not empty.
	 */
	u64 intr_out_empty;

	/*
	 * Indicates that a full outbound ring buffer was encountered. The flag
	 * is set to true when a full outbound ring buffer is encountered and
	 * set to false when a write to the outbound ring buffer is completed.
	 */
	bool out_full_flag;

	/* Channel callback are invoked in this workqueue context */
	struct tasklet_struct callback_event;
	void (*onchannel_callback)(void *context);
	void *channel_callback_context;

	/*
	 * A channel can be marked for one of three modes of reading:
	 *   BATCHED - callback called from tasklet and should read
	 *            channel until empty. Interrupts from the host
	 *            are masked while read is in process (default).
	 *   DIRECT - callback called from tasklet (softirq).
	 *   ISR - callback called in interrupt context and must
	 *         invoke its own deferred processing.
	 *         Host interrupts are disabled and must be re-enabled
	 *         when ring is empty.
	 */
	enum hv_callback_mode {
		HV_CALL_BATCHED,
		HV_CALL_DIRECT,
		HV_CALL_ISR
	} callback_mode;

	bool is_dedicated_interrupt;
	u64 sig_event;

	/*
	 * Starting with win8, this field will be used to specify the target
	 * virtual processor on which to deliver the interrupt for the host
	 * to guest communication.
	 * Prior to win8, incoming channel interrupts would only be delivered
	 * on cpu 0. Setting this value to 0 would preserve the earlier
	 * behavior.
	 */
	u32 target_vp;
	/* The corresponding CPUID in the guest */
	u32 target_cpu;
	/*
	 * State to manage the CPU affiliation of channels.
	 */
	struct cpumask alloced_cpus_in_node;
	int numa_node;
	/*
	 * Support for sub-channels. For high performance devices, it is
	 * useful to have multiple sub-channels to support a scalable
	 * communication infrastructure with the host. The initial offer is
	 * considered the primary channel and this offer message indicates if
	 * the host supports sub-channels. The guest is free to ask for
	 * sub-channels to be offered and can open these sub-channels as a
	 * normal "primary" channel. However, all sub-channels will have the
	 * same type and instance guids as the primary channel. Requests sent
	 * on a given channel will result in a response on the same channel.
	 */

	/*
	 * Sub-channel creation callback. This callback will be called in
	 * process context when a sub-channel offer is received from the host.
	 * The guest can open the sub-channel in the context of this callback.
	 */
	void (*sc_creation_callback)(struct vmbus_channel *new_sc);

	/*
	 * Channel rescind callback. Some channels (the hvsock ones), need to
	 * register a callback which is invoked in vmbus_onoffer_rescind().
	 */
	void (*chn_rescind_callback)(struct vmbus_channel *channel);

	/*
	 * The spinlock to protect the structure. It is being used to protect
	 * test-and-set access to various attributes of the structure as well
	 * as all sc_list operations.
	 */
	spinlock_t lock;
	/*
	 * All Sub-channels of a primary channel are linked here.
	 */
	struct list_head sc_list;
	/*
	 * The primary channel this sub-channel belongs to.
	 * This will be NULL for the primary channel.
	 */
	struct vmbus_channel *primary_channel;
	/*
	 * Support per-channel state for use by vmbus drivers.
	 */
	void *per_channel_state;
	/*
	 * To support per-cpu lookup mapping of relid to channel,
	 * link up channels based on their CPU affinity.
	 */
	struct list_head percpu_list;

	/*
	 * Defer freeing channel until after all cpu's have
	 * gone through grace period.
	 */
	struct rcu_head rcu;

	/*
	 * For sysfs per-channel properties.
	 */
	struct kobject kobj;

	/*
	 * For performance critical channels (storage, networking etc.),
	 * Hyper-V has a mechanism to enhance the throughput at the expense
	 * of latency: when the host is to be signaled, we just set a bit in
	 * a shared page and this bit will be inspected by the hypervisor
	 * within a certain window; if the bit is set, the host will be
	 * signaled. This improves throughput by allowing a batch to build
	 * up and by minimizing signaling hypercalls, at the expense of
	 * latency. Tagging the channel as "low latency" bypasses the monitor
	 * mechanism.
	 */
	bool low_latency;

	/*
	 * NUMA distribution policy:
	 * We support two policies:
	 * 1) Balanced: Here all performance critical channels are
	 *    distributed evenly amongst all the NUMA nodes.
	 *    This policy will be the default policy.
	 * 2) Localized: All channels of a given instance of a
	 *    performance critical service will be assigned CPUs
	 *    within a selected NUMA node.
	 */
	enum hv_numa_policy affinity_policy;

	bool probe_done;

	/*
	 * We must offload the handling of the primary/sub channels
	 * from the single-threaded vmbus_connection.work_queue to
	 * different workqueues, otherwise we can block
	 * vmbus_connection.work_queue and hang: see vmbus_process_offer().
	 */
	struct work_struct add_channel_work;

	/*
	 * Guest to host interrupts caused by the inbound ring buffer changing
	 * from full to not full while a packet is waiting.
	 */
	u64 intr_in_full;

	/*
	 * The total number of write operations that encountered a full
	 * outbound ring buffer.
	 */
	u64 out_full_total;

	/*
	 * The number of write operations that were the first to encounter a
	 * full outbound ring buffer.
	 */
	u64 out_full_first;

	/* enabling/disabling fuzz testing on the channel (default is false)*/
	bool fuzz_testing_state;

	/*
	 * Interrupt delay will delay the guest from emptying the ring buffer
	 * for a specific amount of time. The delay is in microseconds and will
	 * be between 1 to a maximum of 1000, its default is 0 (no delay).
	 * The Message delay will delay guest reading on a per message basis
	 * in microseconds between 1 to 1000 with the default being 0
	 * (no delay).
	 */
	u32 fuzz_testing_interrupt_delay;
	u32 fuzz_testing_message_delay;

};

static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{
	return !!(c->offermsg.offer.chn_flags &
		  VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
}

static inline bool is_sub_channel(const struct vmbus_channel *c)
{
	return c->offermsg.offer.sub_channel_index != 0;
}

static inline void set_channel_affinity_state(struct vmbus_channel *c,
					      enum hv_numa_policy policy)
{
	c->affinity_policy = policy;
}

static inline void set_channel_read_mode(struct vmbus_channel *c,
					 enum hv_callback_mode mode)
{
	c->callback_mode = mode;
}

static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
{
	c->per_channel_state = s;
}

static inline void *get_per_channel_state(struct vmbus_channel *c)
{
	return c->per_channel_state;
}

static inline void set_channel_pending_send_size(struct vmbus_channel *c,
						 u32 size)
{
	unsigned long flags;

	if (size) {
		spin_lock_irqsave(&c->outbound.ring_lock, flags);
		++c->out_full_total;

		if (!c->out_full_flag) {
			++c->out_full_first;
			c->out_full_flag = true;
		}
		spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
	} else {
		c->out_full_flag = false;
	}

	c->outbound.ring_buffer->pending_send_sz = size;
}
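
/*
 * Illustrative flow-control sketch (hypothetical names, not part of this
 * header): a sender that finds the outbound ring too full can ask the host
 * for an interrupt once "needed" bytes have been freed, instead of polling:
 *
 *	if (hv_get_bytes_to_write(&chan->outbound) < needed)
 *		set_channel_pending_send_size(chan, needed);
 *	else
 *		set_channel_pending_send_size(chan, 0);
 */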

static inline void set_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = true;
}

static inline void clear_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = false;
}

void vmbus_onmessage(void *context);

int vmbus_request_offers(void);

/*
 * APIs for managing sub-channels.
 */

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
			void (*sc_cr_cb)(struct vmbus_channel *new_sc));

void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
		void (*chn_rescind_cb)(struct vmbus_channel *));

/*
 * Check if sub-channels have already been offered. This API will be useful
 * when the driver is unloaded after establishing sub-channels. In this case,
 * the driver would have to check if the subchannels have already been
 * established before attempting to request the creation of sub-channels.
 * This function returns TRUE to indicate if subchannels have already been
 * created.
 * This function should be invoked after setting the callback function for
 * sub-channel creation.
 */
bool vmbus_are_subchannels_present(struct vmbus_channel *primary);

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;
	struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_multipage_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_multipage_buffer range;
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_packet_mpb_array {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_mpb_array range;
} __packed;

int vmbus_alloc_ring(struct vmbus_channel *channel,
		     u32 send_size, u32 recv_size);
void vmbus_free_ring(struct vmbus_channel *channel);

int vmbus_connect_ring(struct vmbus_channel *channel,
		       void (*onchannel_callback)(void *context),
		       void *context);
int vmbus_disconnect_ring(struct vmbus_channel *channel);

extern int vmbus_open(struct vmbus_channel *channel,
		      u32 send_ringbuffersize,
		      u32 recv_ringbuffersize,
		      void *userdata,
		      u32 userdatalen,
		      void (*onchannel_callback)(void *context),
		      void *context);

extern void vmbus_close(struct vmbus_channel *channel);
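
/*
 * Illustrative probe-time usage (a sketch; "my_onchannel" and the 16 KiB
 * ring sizes are hypothetical):
 *
 *	ret = vmbus_open(dev->channel, 16 * 1024, 16 * 1024, NULL, 0,
 *			 my_onchannel, dev->channel);
 *	if (ret)
 *		return ret;
 *	...
 *	vmbus_close(dev->channel);
 */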

extern int vmbus_sendpacket(struct vmbus_channel *channel,
			    void *buffer,
			    u32 bufferLen,
			    u64 requestid,
			    enum vmbus_packet_type type,
			    u32 flags);

extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
				       struct hv_page_buffer pagebuffers[],
				       u32 pagecount,
				       void *buffer,
				       u32 bufferlen,
				       u64 requestid);

extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
				     struct vmbus_packet_mpb_array *mpb,
				     u32 desc_size,
				     void *buffer,
				     u32 bufferlen,
				     u64 requestid);

extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
				 void *kbuffer,
				 u32 size,
				 u32 *gpadl_handle);

extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
				u32 gpadl_handle);
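
/*
 * Illustrative GPADL lifetime (a sketch; "buf"/"buf_size" are hypothetical):
 * establish a handle over a guest allocation, pass the handle to the host in
 * a channel message, and tear it down before freeing the memory.
 *
 *	u32 gpadl;
 *
 *	ret = vmbus_establish_gpadl(channel, buf, buf_size, &gpadl);
 *	...
 *	vmbus_teardown_gpadl(channel, gpadl);
 */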

void vmbus_reset_channel_cb(struct vmbus_channel *channel);

extern int vmbus_recvpacket(struct vmbus_channel *channel,
			    void *buffer,
			    u32 bufferlen,
			    u32 *buffer_actual_len,
			    u64 *requestid);

extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
				void *buffer,
				u32 bufferlen,
				u32 *buffer_actual_len,
				u64 *requestid);


extern void vmbus_ontimer(unsigned long data);

/* Base driver object */
struct hv_driver {
	const char *name;

	/*
	 * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
	 * channel flag, actually doesn't mean a synthetic device because the
	 * offer's if_type/if_instance can change for every new hvsock
	 * connection.
	 *
	 * However, to facilitate the notification of new-coming hvsock
	 * offers, we can pretend we can treat hvsock offer as a device and
	 * then we can use vmbus_match() to find a proper driver.
	 */
	bool hvsock;

	/* the device type supported by this driver */
	guid_t dev_type;
	const struct hv_vmbus_device_id *id_table;

	struct device_driver driver;

	/* dynamic device GUID's */
	struct {
		spinlock_t lock;
		struct list_head list;
	} dynids;

	int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
	int (*remove)(struct hv_device *);
	void (*shutdown)(struct hv_device *);

	int (*suspend)(struct hv_device *);
	int (*resume)(struct hv_device *);

};

/* Base device object */
struct hv_device {
	/* the device type id of this device */
	guid_t dev_type;

	/* the device instance id of this device */
	guid_t dev_instance;
	u16 vendor_id;
	u16 device_id;

	struct device device;
	char *driver_override; /* Driver name to force a match */

	struct vmbus_channel *channel;
	struct kset *channels_kset;

	/* place holder to keep track of the dir for hv device in debugfs */
	struct dentry *debug_dir;

};


static inline struct hv_device *device_to_hv_device(struct device *d)
{
	return container_of(d, struct hv_device, device);
}

static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
{
	return container_of(d, struct hv_driver, driver);
}

static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
	dev_set_drvdata(&dev->device, data);
}

static inline void *hv_get_drvdata(struct hv_device *dev)
{
	return dev_get_drvdata(&dev->device);
}

struct hv_ring_buffer_debug_info {
	u32 current_interrupt_mask;
	u32 current_read_index;
	u32 current_write_index;
	u32 bytes_avail_toread;
	u32 bytes_avail_towrite;
};


int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info);

/* Vmbus interface */
#define vmbus_driver_register(driver)	\
	__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
					 struct module *owner,
					 const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
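
/*
 * Illustrative registration skeleton (a sketch; every "my_*" name is
 * hypothetical):
 *
 *	static const struct hv_vmbus_device_id my_id_table[] = {
 *		{ HV_NIC_GUID, },
 *		{ },
 *	};
 *
 *	static struct hv_driver my_drv = {
 *		.name		= "my_drv",
 *		.id_table	= my_id_table,
 *		.probe		= my_probe,
 *		.remove		= my_remove,
 *	};
 *
 *	ret = vmbus_driver_register(&my_drv); // expands with THIS_MODULE
 *	...
 *	vmbus_driver_unregister(&my_drv);
 */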

void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);

int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok);
void vmbus_free_mmio(resource_size_t start, resource_size_t size);

/*
 * GUID definitions of various offer types - services offered to the guest.
 */

/*
 * Network GUID
 * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
 */
#define HV_NIC_GUID \
	.guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
			  0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)

/*
 * IDE GUID
 * {32412632-86cb-44a2-9b5c-50d1417354f5}
 */
#define HV_IDE_GUID \
	.guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
			  0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)

/*
 * SCSI GUID
 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
 */
#define HV_SCSI_GUID \
	.guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
			  0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)

/*
 * Shutdown GUID
 * {0e0b6031-5213-4934-818b-38d90ced39db}
 */
#define HV_SHUTDOWN_GUID \
	.guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
			  0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)

/*
 * Time Synch GUID
 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
 */
#define HV_TS_GUID \
	.guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
			  0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)

/*
 * Heartbeat GUID
 * {57164f39-9115-4e78-ab55-382f3bd5422d}
 */
#define HV_HEART_BEAT_GUID \
	.guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
			  0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)

/*
 * KVP GUID
 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
 */
#define HV_KVP_GUID \
	.guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
			  0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)

/*
 * Dynamic memory GUID
 * {525074dc-8985-46e2-8057-a307dc18a502}
 */
#define HV_DM_GUID \
	.guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
			  0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)

/*
 * Mouse GUID
 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
 */
#define HV_MOUSE_GUID \
	.guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
			  0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)

/*
 * Keyboard GUID
 * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
 */
#define HV_KBD_GUID \
	.guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
			  0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)

/*
 * VSS (Backup/Restore) GUID
 * {35fa2e29-ea23-4236-96ae-3a6ebacba440}
 */
#define HV_VSS_GUID \
	.guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
			  0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)

/*
 * Synthetic Video GUID
 * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
 */
#define HV_SYNTHVID_GUID \
	.guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
			  0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)

/*
 * Synthetic FC GUID
 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
 */
#define HV_SYNTHFC_GUID \
	.guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
			  0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)

/*
 * Guest File Copy Service
 * {34D14BE3-DEE4-41c8-9AE7-6B174977C192}
 */
#define HV_FCOPY_GUID \
	.guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
			  0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)

/*
 * NetworkDirect. This is the guest RDMA service.
 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
 */
#define HV_ND_GUID \
	.guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
			  0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)

/*
 * PCI Express Pass Through
 * {44C4F61D-4444-4400-9D52-802E27EDE19F}
 */
#define HV_PCIE_GUID \
	.guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
			  0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)

/*
 * Linux doesn't support these 3 devices: the first two are for
 * Automatic Virtual Machine Activation, and the third is for
 * Remote Desktop Virtualization.
 * {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
 * {3375baf4-9e15-4b30-b765-67acb10d607b}
 * {276aacf4-ac15-426c-98dd-7521ad3f01fe}
 */

#define HV_AVMA1_GUID \
	.guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
			  0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)

#define HV_AVMA2_GUID \
	.guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
			  0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)

#define HV_RDV_GUID \
	.guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
			  0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)

/*
 * Common header for Hyper-V ICs
 */

#define ICMSGTYPE_NEGOTIATE		0
#define ICMSGTYPE_HEARTBEAT		1
#define ICMSGTYPE_KVPEXCHANGE		2
#define ICMSGTYPE_SHUTDOWN		3
#define ICMSGTYPE_TIMESYNC		4
#define ICMSGTYPE_VSS			5

#define ICMSGHDRFLAG_TRANSACTION	1
#define ICMSGHDRFLAG_REQUEST		2
#define ICMSGHDRFLAG_RESPONSE		4


/*
 * While we want to handle util services as regular devices,
 * there is only one instance of each of these services; so
 * we statically allocate the service specific state.
 */

struct hv_util_service {
	u8 *recv_buffer;
	void *channel;
	void (*util_cb)(void *);
	int (*util_init)(struct hv_util_service *);
	void (*util_deinit)(void);
	int (*util_pre_suspend)(void);
	int (*util_pre_resume)(void);
};

struct vmbuspipe_hdr {
	u32 flags;
	u32 msgsize;
} __packed;

struct ic_version {
	u16 major;
	u16 minor;
} __packed;

struct icmsg_hdr {
	struct ic_version icverframe;
	u16 icmsgtype;
	struct ic_version icvermsg;
	u16 icmsgsize;
	u32 status;
	u8 ictransaction_id;
	u8 icflags;
	u8 reserved[2];
} __packed;

struct icmsg_negotiate {
	u16 icframe_vercnt;
	u16 icmsg_vercnt;
	u32 reserved;
	struct ic_version icversion_data[1]; /* any size array */
} __packed;

struct shutdown_msg_data {
	u32 reason_code;
	u32 timeout_seconds;
	u32 flags;
	u8  display_message[2048];
} __packed;

struct heartbeat_msg_data {
	u64 seq_num;
	u32 reserved[8];
} __packed;

/* Time Sync IC defs */
#define ICTIMESYNCFLAG_PROBE	0
#define ICTIMESYNCFLAG_SYNC	1
#define ICTIMESYNCFLAG_SAMPLE	2

#ifdef __x86_64__
#define WLTIMEDELTA	116444736000000000L	/* in 100ns unit */
#else
#define WLTIMEDELTA	116444736000000000LL
#endif

struct ictimesync_data {
	u64 parenttime;
	u64 childtime;
	u64 roundtriptime;
	u8 flags;
} __packed;

struct ictimesync_ref_data {
	u64 parenttime;
	u64 vmreferencetime;
	u8 flags;
	char leapflags;
	char stratum;
	u8 reserved[3];
} __packed;

struct hyperv_service_callback {
	u8 msg_type;
	char *log_msg;
	guid_t data;
	struct vmbus_channel *channel;
	void (*callback)(void *context);
};

#define MAX_SRV_VER	0x7ffffff
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
				const int *fw_version, int fw_vercnt,
				const int *srv_version, int srv_vercnt,
				int *nego_fw_version, int *nego_srv_version);
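
/*
 * Illustrative negotiation sketch (the version arrays and counts are
 * hypothetical): a util driver answering an ICMSGTYPE_NEGOTIATE request
 * might fill in the response with its supported framework/service versions:
 *
 *	static const int fw_versions[] = { my_fw_version };
 *	static const int srv_versions[] = { my_srv_version };
 *	int nego_fw, nego_srv;
 *
 *	vmbus_prep_negotiate_resp(icmsghdrp, recv_buf,
 *				  fw_versions, ARRAY_SIZE(fw_versions),
 *				  srv_versions, ARRAY_SIZE(srv_versions),
 *				  &nego_fw, &nego_srv);
 */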

void hv_process_channel_removal(struct vmbus_channel *channel);

void vmbus_setevent(struct vmbus_channel *channel);

/*
 * Negotiated version with the Host.
 */
extern __u32 vmbus_proto_version;

int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
				  const guid_t *shv_host_servie_id);
void vmbus_set_event(struct vmbus_channel *channel);

/* Get the start of the ring buffer. */
static inline void *
hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_buffer->buffer;
}

/*
 * Mask off host interrupt callback notifications
 */
static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;

	/* make sure mask update is not reordered */
	virt_mb();
}

/*
 * Re-enable host callback and return number of outstanding bytes
 */
static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{

	rbi->ring_buffer->interrupt_mask = 0;

	/* make sure mask update is not reordered */
	virt_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	return hv_get_bytes_to_read(rbi);
}
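
/*
 * Illustrative masked-read pattern (a sketch): mask host notifications,
 * drain the ring, then re-enable and re-check to close the race with the
 * host posting one more packet:
 *
 *	hv_begin_read(rbi);
 *	... drain ring ...
 *	if (hv_end_read(rbi))
 *		... more data arrived; drain again ...
 */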

/*
 * An API to support in-place processing of incoming VMBUS packets.
 */

/* Get data payload associated with descriptor */
static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
{
	return (void *)((unsigned long)desc + (desc->offset8 << 3));
}

/* Get data size associated with descriptor */
static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
{
	return (desc->len8 << 3) - (desc->offset8 << 3);
}


struct vmpacket_descriptor *
hv_pkt_iter_first(struct vmbus_channel *channel);

struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *pkt);

void hv_pkt_iter_close(struct vmbus_channel *channel);

/*
 * Get next packet descriptor from iterator.
 * If at end of list, return NULL and update host.
 */
static inline struct vmpacket_descriptor *
hv_pkt_iter_next(struct vmbus_channel *channel,
		 const struct vmpacket_descriptor *pkt)
{
	struct vmpacket_descriptor *nxt;

	nxt = __hv_pkt_iter_next(channel, pkt);
	if (!nxt)
		hv_pkt_iter_close(channel);

	return nxt;
}

#define foreach_vmbus_pkt(pkt, channel) \
	for (pkt = hv_pkt_iter_first(channel); pkt; \
	    pkt = hv_pkt_iter_next(channel, pkt))
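
/*
 * Illustrative in-place receive loop (a sketch; "chan" is the channel whose
 * callback is running and consume() is a hypothetical consumer):
 *
 *	struct vmpacket_descriptor *pkt;
 *
 *	foreach_vmbus_pkt(pkt, chan) {
 *		void *data = hv_pkt_data(pkt);
 *		u32 len = hv_pkt_datalen(pkt);
 *
 *		consume(data, len);
 *	}
 */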

/*
 * Interface for passing data between SR-IOV PF and VF drivers. The VF driver
 * sends requests to read and write blocks. Each block must be 128 bytes or
 * smaller. Optionally, the VF driver can register a callback function which
 * will be invoked when the host says that one or more of the first 64 block
 * IDs is "invalid" which means that the VF driver should reread them.
 */
#define HV_CONFIG_BLOCK_SIZE_MAX 128

int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len,
			unsigned int block_id, unsigned int *bytes_returned);
int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len,
			 unsigned int block_id);
int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context,
				void (*block_invalidate)(void *context,
							 u64 block_mask));
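
/*
 * Illustrative config-block read (a sketch; "MY_BLOCK_ID" is hypothetical):
 *
 *	u8 buf[HV_CONFIG_BLOCK_SIZE_MAX];
 *	unsigned int bytes_returned;
 *
 *	ret = hyperv_read_cfg_blk(pdev, buf, sizeof(buf), MY_BLOCK_ID,
 *				  &bytes_returned);
 */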

struct hyperv_pci_block_ops {
	int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len,
			  unsigned int block_id, unsigned int *bytes_returned);
	int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len,
			   unsigned int block_id);
	int (*reg_blk_invalidate)(struct pci_dev *dev, void *context,
				  void (*block_invalidate)(void *context,
							   u64 block_mask));
};

extern struct hyperv_pci_block_ops hvpci_block_ops;

#endif /* _HYPERV_H */