1
2
3
4
5
6
7
8
9
10
11
12#ifndef _HYPERV_H
13#define _HYPERV_H
14
15#include <uapi/linux/hyperv.h>
16
17#include <linux/types.h>
18#include <linux/scatterlist.h>
19#include <linux/list.h>
20#include <linux/timer.h>
21#include <linux/completion.h>
22#include <linux/device.h>
23#include <linux/mod_devicetable.h>
24#include <linux/interrupt.h>
25#include <linux/reciprocal_div.h>
26
#define MAX_PAGE_BUFFER_COUNT 32
#define MAX_MULTIPAGE_BUFFER_COUNT 32

#pragma pack(push, 1)

/* Single page buffer: one guest page frame plus offset/length within it. */
struct hv_page_buffer {
	u32 len;
	u32 offset;
	u64 pfn;
};

/* Multiple page buffer: a fixed-size run of up to 32 page frames. */
struct hv_multipage_buffer {
	/* Length and offset in this range. */
	u32 len;
	u32 offset;
	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};

/*
 * Multiple-page buffer array; the number of entries in the PFN array is
 * implied by "len" and "offset" (flexible array member).
 */
struct hv_mpb_array {
	/* Length and offset in this range. */
	u32 len;
	u32 offset;
	u64 pfn_array[];
};

/* 0x18 is the size of the fixed packet descriptor/header portion. */
#define MAX_PAGE_BUFFER_PACKET (0x18 + \
	(sizeof(struct hv_page_buffer) * \
	 MAX_PAGE_BUFFER_COUNT))
#define MAX_MULTIPAGE_BUFFER_PACKET (0x18 + \
	sizeof(struct hv_multipage_buffer))

/* Structures above are byte-packed: they are part of the host ABI. */
#pragma pack(pop)
68
/*
 * Control page of a VMBus ring buffer, shared between guest and host.
 * The layout is fixed ABI: the control region is padded to exactly one
 * 4 KiB page (3*4 + 4 + 48 + 4 + 4028 = 4096) and the data area begins
 * immediately after it at "buffer".
 */
struct hv_ring_buffer {
	/* Offset in bytes from the start of ring data below. */
	u32 write_index;

	/* Offset in bytes from the start of ring data below. */
	u32 read_index;

	/*
	 * When nonzero, the reader has asked the other side not to send
	 * ring-change interrupts while it drains the ring.
	 */
	u32 interrupt_mask;

	/*
	 * If set by the reader, the writer should interrupt only once at
	 * least this many bytes become free in the ring.
	 * NOTE(review): flow-control semantics inferred from the
	 * feat_pending_send_sz feature bit below and from
	 * set_channel_pending_send_size() — confirm against the Hyper-V TLFS.
	 */
	u32 pending_send_sz;
	u32 reserved1[12];
	union {
		struct {
			/* Other endpoint supports pending_send_sz. */
			u32 feat_pending_send_sz:1;
		};
		u32 value;
	} feature_bits;

	/* Pad it to PAGE_SIZE so that data starts on a page boundary. */
	u8 reserved2[4028];

	/*
	 * Ring data starts here.
	 * !!! DO NOT place any fields below this !!!
	 */
	u8 buffer[0];
} __packed;
122
/* Guest-side bookkeeping for one mapped ring buffer. */
struct hv_ring_buffer_info {
	struct hv_ring_buffer *ring_buffer;
	u32 ring_size;			/* Includes the shared control page */
	/* Precomputed reciprocal of ring_size/10, for percentage math. */
	struct reciprocal_value ring_size_div10_reciprocal;
	spinlock_t ring_lock;

	u32 ring_datasize;		/* data area only; < ring_size */
	/* Reader-private copy of read_index used during batched reads. */
	u32 priv_read_index;
	/*
	 * The ring buffer mutex lock. This lock prevents the ring buffer from
	 * being freed while the ring buffer is being accessed.
	 */
	struct mutex ring_buffer_mutex;
};
137
138
139static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
140{
141 u32 read_loc, write_loc, dsize, read;
142
143 dsize = rbi->ring_datasize;
144 read_loc = rbi->ring_buffer->read_index;
145 write_loc = READ_ONCE(rbi->ring_buffer->write_index);
146
147 read = write_loc >= read_loc ? (write_loc - read_loc) :
148 (dsize - read_loc) + write_loc;
149
150 return read;
151}
152
153static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
154{
155 u32 read_loc, write_loc, dsize, write;
156
157 dsize = rbi->ring_datasize;
158 read_loc = READ_ONCE(rbi->ring_buffer->read_index);
159 write_loc = rbi->ring_buffer->write_index;
160
161 write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
162 read_loc - write_loc;
163 return write;
164}
165
166static inline u32 hv_get_avail_to_write_percent(
167 const struct hv_ring_buffer_info *rbi)
168{
169 u32 avail_write = hv_get_bytes_to_write(rbi);
170
171 return reciprocal_divide(
172 (avail_write << 3) + (avail_write << 1),
173 rbi->ring_size_div10_reciprocal);
174}
175
176
177
178
179
180
181
182
183
184
185
186
187
/*
 * VMBus protocol version numbers negotiated with the host,
 * encoded as ((major << 16) | minor).
 */
#define VERSION_WS2008  ((0 << 16) | (13))
#define VERSION_WIN7    ((1 << 16) | (1))
#define VERSION_WIN8    ((2 << 16) | (4))
#define VERSION_WIN8_1    ((3 << 16) | (0))
#define VERSION_WIN10	((4 << 16) | (0))
#define VERSION_WIN10_V5 ((5 << 16) | (0))

/* Parenthesized so the macro expands safely inside larger expressions. */
#define VERSION_INVAL (-1)

#define VERSION_CURRENT VERSION_WIN10_V5
198
199
/* Maximum size of a pipe payload: 16K. */
#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)

/* PipeMode values (see vmbus_channel_offer.u.pipe.pipe_mode). */
#define VMBUS_PIPE_TYPE_BYTE 0x00000000
#define VMBUS_PIPE_TYPE_MESSAGE 0x00000004

/* Size of the user-defined data buffer for non-pipe offers. */
#define MAX_USER_DEFINED_BYTES 120

/* Size of the user-defined data buffer for pipe offers. */
#define MAX_PIPE_USER_DEFINED_BYTES 116
211
212
213
214
215
/*
 * The Channel Offer: fundamental information the host sends to describe
 * a channel (device class GUID, instance GUID, flags, user-defined data).
 */
struct vmbus_channel_offer {
	guid_t if_type;
	guid_t if_instance;

	/* These two fields are not currently used. */
	u64 reserved1;
	u64 reserved2;

	u16 chn_flags;		/* VMBUS_CHANNEL_* flags below */
	u16 mmio_megabytes;

	union {
		/* Non-pipes: the user has MAX_USER_DEFINED_BYTES bytes. */
		struct {
			unsigned char user_def[MAX_USER_DEFINED_BYTES];
		} std;

		/*
		 * Pipes: an integrated pipe protocol implemented on top of
		 * the standard user-defined data; pipe_mode takes the
		 * VMBUS_PIPE_TYPE_* values and clients keep
		 * MAX_PIPE_USER_DEFINED_BYTES for their own use.
		 */
		struct {
			u32 pipe_mode;
			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
		} pipe;
	} u;

	/*
	 * Zero for a primary channel, nonzero for a sub-channel.
	 * NOTE(review): pre-Win8 hosts leave this reserved (always zero) —
	 * confirm with the negotiated protocol version before relying on it.
	 */
	u16 sub_channel_index;
	u16 reserved3;
} __packed;
253
254
/* Bit values for vmbus_channel_offer.chn_flags. */
#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE	1
#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES	2
#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS	4
#define VMBUS_CHANNEL_NAMED_PIPE_MODE	0x10
#define VMBUS_CHANNEL_LOOPBACK_OFFER	0x100
#define VMBUS_CHANNEL_PARENT_OFFER	0x200
#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION	0x400
/* Offer is a Hyper-V socket (hvsock) transport provider. */
#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER	0x2000
263
/*
 * Common header of every packet placed in a ring buffer.
 * offset8/len8 are expressed in 8-byte units.
 */
struct vmpacket_descriptor {
	u16 type;		/* enum vmbus_packet_type */
	u16 offset8;		/* payload offset from packet start, in 8-byte units */
	u16 len8;		/* total packet length, in 8-byte units */
	u16 flags;		/* e.g. VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED */
	u64 trans_id;		/* caller-chosen id echoed in the completion */
} __packed;

struct vmpacket_header {
	/* Offset of the previous packet's start (ring bookkeeping). */
	u32 prev_pkt_start_offset;
	struct vmpacket_descriptor descriptor;
} __packed;

/* One byte range within a transfer page set. */
struct vmtransfer_page_range {
	u32 byte_count;
	u32 byte_offset;
} __packed;

struct vmtransfer_page_packet_header {
	struct vmpacket_descriptor d;
	u16 xfer_pageset_id;
	u8 sender_owns_set;
	u8 reserved;
	u32 range_cnt;
	/* range_cnt entries follow; [1] is the legacy variable-size idiom. */
	struct vmtransfer_page_range ranges[1];
} __packed;

struct vmgpadl_packet_header {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;
} __packed;

struct vmadd_remove_transfer_page_set {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u16 xfer_pageset_id;
	u16 reserved;
} __packed;
303
304
305
306
307
/*
 * A range of guest physical pages. The number of pfn_array entries is
 * implied by byte_count and byte_offset. Kept as a [0] array (not a C99
 * flexible array member) because this struct is embedded as range[1]
 * inside other packed structures below.
 */
struct gpa_range {
	u32 byte_count;
	u32 byte_offset;
	u64 pfn_array[0];
};

/*
 * Establish a GPADL (Guest Physical Address Descriptor List) for the
 * given ranges.
 */
struct vmestablish_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/* Tear down a previously established GPADL. */
struct vmteardown_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;
} __packed;

/* Data packet that references guest memory directly via GPA ranges. */
struct vmdata_gpa_direct {
	struct vmpacket_descriptor d;
	u32 reserved;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/* Follow-on packet carrying additional payload bytes. */
struct vmadditional_data {
	struct vmpacket_descriptor d;
	u64 total_bytes;
	u32 offset;
	u32 byte_cnt;
	unsigned char data[1];
} __packed;

/* Union of all header forms, sized for the largest possible header. */
union vmpacket_largest_possible_header {
	struct vmpacket_descriptor simple_hdr;
	struct vmtransfer_page_packet_header xfer_page_hdr;
	struct vmgpadl_packet_header gpadl_hdr;
	struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
	struct vmestablish_gpadl establish_gpadl_hdr;
	struct vmteardown_gpadl teardown_gpadl_hdr;
	struct vmdata_gpa_direct data_gpa_direct_hdr;
};
367
/*
 * Accessors for a raw packet pointer. Fixed the casts: the original code
 * cast to "(struct vmpacket_descriptor)" (a value, not a pointer — does
 * not compile when used) and VMPACKET_TRANSFER_MODE cast to the
 * nonexistent "struct IMPACT". offset8/len8 are in 8-byte units.
 */
#define VMPACKET_DATA_START_ADDRESS(__packet)	\
	(void *)(((unsigned char *)(__packet)) +	\
	 ((struct vmpacket_descriptor *)(__packet))->offset8 * 8)

#define VMPACKET_DATA_LENGTH(__packet)		\
	((((struct vmpacket_descriptor *)(__packet))->len8 -	\
	  ((struct vmpacket_descriptor *)(__packet))->offset8) * 8)

#define VMPACKET_TRANSFER_MODE(__packet)	\
	(((struct vmpacket_descriptor *)(__packet))->type)
378
/* Values carried in vmpacket_descriptor.type. */
enum vmbus_packet_type {
	VM_PKT_INVALID				= 0x0,
	VM_PKT_SYNCH				= 0x1,
	VM_PKT_ADD_XFER_PAGESET			= 0x2,
	VM_PKT_RM_XFER_PAGESET			= 0x3,
	VM_PKT_ESTABLISH_GPADL			= 0x4,
	VM_PKT_TEARDOWN_GPADL			= 0x5,
	VM_PKT_DATA_INBAND			= 0x6,
	VM_PKT_DATA_USING_XFER_PAGES		= 0x7,
	VM_PKT_DATA_USING_GPADL			= 0x8,
	VM_PKT_DATA_USING_GPA_DIRECT		= 0x9,
	VM_PKT_CANCEL_REQUEST			= 0xa,
	VM_PKT_COMP				= 0xb,
	VM_PKT_DATA_USING_ADDITIONAL_PKT	= 0xc,
	VM_PKT_ADDITIONAL_DATA			= 0xd
};

/* vmpacket_descriptor.flags bit: sender wants a VM_PKT_COMP back. */
#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED	1
397
398
399
/*
 * Version 1.0 VMBus channel message types (control-plane messages
 * exchanged over the per-partition message connection, not over a ring).
 */
enum vmbus_channel_message_type {
	CHANNELMSG_INVALID			=  0,
	CHANNELMSG_OFFERCHANNEL		=  1,
	CHANNELMSG_RESCIND_CHANNELOFFER	=  2,
	CHANNELMSG_REQUESTOFFERS		=  3,
	CHANNELMSG_ALLOFFERS_DELIVERED	=  4,
	CHANNELMSG_OPENCHANNEL		=  5,
	CHANNELMSG_OPENCHANNEL_RESULT		=  6,
	CHANNELMSG_CLOSECHANNEL		=  7,
	CHANNELMSG_GPADL_HEADER		=  8,
	CHANNELMSG_GPADL_BODY			=  9,
	CHANNELMSG_GPADL_CREATED		= 10,
	CHANNELMSG_GPADL_TEARDOWN		= 11,
	CHANNELMSG_GPADL_TORNDOWN		= 12,
	CHANNELMSG_RELID_RELEASED		= 13,
	CHANNELMSG_INITIATE_CONTACT		= 14,
	CHANNELMSG_VERSION_RESPONSE		= 15,
	CHANNELMSG_UNLOAD			= 16,
	CHANNELMSG_UNLOAD_RESPONSE		= 17,
	CHANNELMSG_18				= 18,	/* reserved/unused here */
	CHANNELMSG_19				= 19,	/* reserved/unused here */
	CHANNELMSG_20				= 20,	/* reserved/unused here */
	CHANNELMSG_TL_CONNECT_REQUEST		= 21,	/* hvsock connect */
	CHANNELMSG_COUNT
};
425
/* Header shared by every channel message. */
struct vmbus_channel_message_header {
	enum vmbus_channel_message_type msgtype;
	u32 padding;
} __packed;

/* Query VMBus Version parameters. */
struct vmbus_channel_query_vmbus_version {
	struct vmbus_channel_message_header header;
	u32 version;
} __packed;

/* VMBus Version Supported parameters. */
struct vmbus_channel_version_supported {
	struct vmbus_channel_message_header header;
	u8 version_supported;
} __packed;

/* Offer Channel parameters (CHANNELMSG_OFFERCHANNEL). */
struct vmbus_channel_offer_channel {
	struct vmbus_channel_message_header header;
	struct vmbus_channel_offer offer;
	u32 child_relid;
	u8 monitorid;
	/*
	 * win7 and beyond split this byte into a bit field.
	 */
	u8 monitor_allocated:1;
	u8 reserved:7;
	/*
	 * The fields below were added in win7 and later; do not access
	 * them without checking the negotiated protocol version.
	 *
	 * If "is_dedicated_interrupt" is set, we must not set the
	 * associated bit in the channel bitmap while sending the
	 * interrupt to the host.
	 *
	 * connection_id is to be used in signaling the host.
	 */
	u16 is_dedicated_interrupt:1;
	u16 reserved1:15;
	u32 connection_id;
} __packed;

/* Rescind Offer parameters (CHANNELMSG_RESCIND_CHANNELOFFER). */
struct vmbus_channel_rescind_offer {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;
475
476static inline u32
477hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
478{
479 return rbi->ring_buffer->pending_send_sz;
480}
481
482
483
484
485
486
487
488
489
490
491
/* Open Channel parameters (CHANNELMSG_OPENCHANNEL). */
struct vmbus_channel_open_channel {
	struct vmbus_channel_message_header header;

	/* Identifies the specific VMBus channel that is being opened. */
	u32 child_relid;

	/* ID making a particular open request at a channel offer unique. */
	u32 openid;

	/* GPADL for the channel's ring buffer. */
	u32 ringbuffer_gpadlhandle;

	/*
	 * Starting with win8, this field is used to specify the target
	 * virtual processor on which to deliver the interrupt for the
	 * host-to-guest communication.
	 * NOTE(review): before win8 interrupts were reportedly delivered
	 * only on CPU 0; setting 0 preserves that behavior — confirm
	 * against the negotiated protocol version.
	 */
	u32 target_vp;

	/*
	 * The upstream ring buffer begins at offset zero in the memory
	 * described by ringbuffer_gpadlhandle; the downstream ring buffer
	 * follows at this offset (in pages).
	 */
	u32 downstream_ringbuffer_pageoffset;

	/* User-specific data to be passed along to the server endpoint. */
	unsigned char userdata[MAX_USER_DEFINED_BYTES];
} __packed;

/* Open Channel Result parameters (CHANNELMSG_OPENCHANNEL_RESULT). */
struct vmbus_channel_open_result {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 openid;
	u32 status;
} __packed;

/* Close channel parameters (CHANNELMSG_CLOSECHANNEL). */
struct vmbus_channel_close_channel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;
538
539
/* Channel Message GPADL types. */
#define GPADL_TYPE_RING_BUFFER		1
#define GPADL_TYPE_SERVER_SAVE_AREA	2
#define GPADL_TYPE_TRANSACTION	8

/*
 * The number of PFNs in a GPADL message is determined by the number of
 * pages that would be spanned by byte_count and byte_offset. If the
 * implied PFNs don't fit in this packet, there will be follow-up
 * CHANNELMSG_GPADL_BODY packets carrying the rest.
 */
struct vmbus_channel_gpadl_header {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u16 range_buflen;
	u16 rangecount;
	struct gpa_range range[0];
} __packed;

/* Follow-up packet that contains more PFNs. */
struct vmbus_channel_gpadl_body {
	struct vmbus_channel_message_header header;
	u32 msgnumber;
	u32 gpadl;
	u64 pfn[0];
} __packed;

/* Host's reply to a GPADL header/body sequence. */
struct vmbus_channel_gpadl_created {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u32 creation_status;
} __packed;

struct vmbus_channel_gpadl_teardown {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
} __packed;

struct vmbus_channel_gpadl_torndown {
	struct vmbus_channel_message_header header;
	u32 gpadl;
} __packed;

struct vmbus_channel_relid_released {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

/* First message sent to the host (CHANNELMSG_INITIATE_CONTACT). */
struct vmbus_channel_initiate_contact {
	struct vmbus_channel_message_header header;
	u32 vmbus_version_requested;
	u32 target_vcpu;		/* VCPU the host should respond to */
	union {
		u64 interrupt_page;
		struct {
			u8	msg_sint;
			u8	padding1[3];
			u32	padding2;
		};
	};
	u64 monitor_page1;
	u64 monitor_page2;
} __packed;
605
606
/* Hyper-V socket: guest's connect() to the host. */
struct vmbus_channel_tl_connect_request {
	struct vmbus_channel_message_header header;
	guid_t guest_endpoint_id;
	guid_t host_service_id;
} __packed;

/* Host's reply to CHANNELMSG_INITIATE_CONTACT. */
struct vmbus_channel_version_response {
	struct vmbus_channel_message_header header;
	u8 version_supported;

	u8 connection_state;
	u16 padding;

	/*
	 * On hosts that support VMBus protocol 5.0 (VERSION_WIN10_V5),
	 * subsequent messages must use this host-returned Message
	 * Connection ID instead of the fixed legacy connection ID.
	 * NOTE(review): exact connection-ID constants live in the vmbus
	 * connection code — confirm there.
	 */
	u32 msg_conn_id;
} __packed;

/* Lifecycle states of a channel. */
enum vmbus_channel_state {
	CHANNEL_OFFER_STATE,
	CHANNEL_OPENING_STATE,
	CHANNEL_OPEN_STATE,
	CHANNEL_OPENED_STATE,
};
637
638
639
640
641
/*
 * Represents each channel msg on the vmbus connection. This is a
 * variable-size data structure depending on the msg type itself.
 */
struct vmbus_channel_msginfo {
	/* Bookkeeping: linkage on the pending-message list. */
	struct list_head msglistentry;

	/* So far, this is only used to handle gpadl body message. */
	struct list_head submsglist;

	/* Synchronize the request/response if needed. */
	struct completion waitevent;
	struct vmbus_channel *waiting_channel;
	/* Response payload, keyed by the message type that was sent. */
	union {
		struct vmbus_channel_version_supported version_supported;
		struct vmbus_channel_open_result open_result;
		struct vmbus_channel_gpadl_torndown gpadl_torndown;
		struct vmbus_channel_gpadl_created gpadl_created;
		struct vmbus_channel_version_response version_response;
	} response;

	u32 msgsize;
	/*
	 * The channel message that goes out on the "wire". It contains at
	 * minimum a struct vmbus_channel_message_header.
	 */
	unsigned char msg[0];
};

struct vmbus_close_msg {
	struct vmbus_channel_msginfo info;
	struct vmbus_channel_close_channel msg;
};
672
673
/* Connection identifier type (24-bit id + reserved bits). */
union hv_connection_id {
	u32 asu32;
	struct {
		u32 id:24;
		u32 reserved:8;
	} u;
};

/* Policy for spreading channel interrupt targets across NUMA nodes. */
enum hv_numa_policy {
	HV_BALANCED = 0,
	HV_LOCALIZED,
};

/* Known synthetic device classes (see the HV_*_GUID macros below). */
enum vmbus_device_type {
	HV_IDE = 0,
	HV_SCSI,
	HV_FC,
	HV_NIC,
	HV_ND,
	HV_PCIE,
	HV_FB,
	HV_KBD,
	HV_MOUSE,
	HV_KVP,
	HV_TS,
	HV_HB,
	HV_SHUTDOWN,
	HV_FCOPY,
	HV_BACKUP,
	HV_DM,
	HV_UNKNOWN,
};

struct vmbus_device {
	u16 dev_type;		/* enum vmbus_device_type */
	guid_t guid;
	bool perf_device;	/* performance-critical (affects CPU placement) */
};
712
/* Guest-side representation of one VMBus channel. */
struct vmbus_channel {
	/* Linkage on the global channel list. */
	struct list_head listentry;

	struct hv_device *device_obj;

	enum vmbus_channel_state state;

	/* The offer message that created this channel. */
	struct vmbus_channel_offer_channel offermsg;
	/*
	 * These are based on the offermsg.monitorid.
	 * Saved here for easy access.
	 */
	u8 monitor_grp;
	u8 monitor_bit;

	bool rescind;	/* got rescind msg */
	struct completion rescind_event;

	u32 ringbuffer_gpadlhandle;

	/* Allocated memory for ring buffer. */
	struct page *ringbuffer_page;
	u32 ringbuffer_pagecount;
	u32 ringbuffer_send_offset;
	struct hv_ring_buffer_info outbound;	/* Outbound ring buffer */
	struct hv_ring_buffer_info inbound;	/* Inbound ring buffer */

	struct vmbus_close_msg close_msg;

	/* Statistics */
	u64 interrupts;	/* Host to Guest interrupts */
	u64 sig_events;	/* Guest to Host events */

	/*
	 * Guest to host interrupts caused by the outbound ring buffer
	 * changing from empty to not empty.
	 */
	u64 intr_out_empty;

	/*
	 * Indicates that a full outbound ring buffer was encountered. Set
	 * when a full outbound ring is seen and cleared when a write to the
	 * outbound ring completes (see set_channel_pending_send_size()).
	 */
	bool out_full_flag;

	/* Channel callback is invoked in this tasklet (softirq context). */
	struct tasklet_struct callback_event;
	void (*onchannel_callback)(void *context);
	void *channel_callback_context;

	/*
	 * A channel can be marked for one of three modes of reading:
	 *   BATCHED - callback should read the channel until empty;
	 *             host interrupts are masked while reading (default).
	 *   DIRECT  - callback called from the tasklet without masking.
	 *   ISR     - callback called in interrupt context; must do its own
	 *             deferred processing and re-enable host interrupts
	 *             when the ring is empty.
	 */
	enum hv_callback_mode {
		HV_CALL_BATCHED,
		HV_CALL_DIRECT,
		HV_CALL_ISR
	} callback_mode;

	bool is_dedicated_interrupt;
	u64 sig_event;

	/*
	 * Target virtual processor (host numbering) on which to deliver
	 * the host-to-guest interrupt for this channel (win8+).
	 */
	u32 target_vp;
	/* The corresponding CPU id in the guest. */
	u32 target_cpu;
	/*
	 * State to manage the CPU affiliation of channels.
	 */
	struct cpumask alloced_cpus_in_node;
	int numa_node;

	/*
	 * Sub-channel support: invoked when the host offers a new
	 * sub-channel for this (primary) channel.
	 */
	void (*sc_creation_callback)(struct vmbus_channel *new_sc);

	/*
	 * Channel rescind callback. Some channels (e.g. hvsock) register a
	 * callback invoked when the host rescinds the offer.
	 */
	void (*chn_rescind_callback)(struct vmbus_channel *channel);

	/*
	 * Spinlock protecting this structure's mutable attributes and all
	 * sc_list operations.
	 */
	spinlock_t lock;
	/*
	 * All sub-channels of a primary channel are linked here.
	 */
	struct list_head sc_list;
	/*
	 * The primary channel this sub-channel belongs to.
	 * NULL for a primary channel.
	 */
	struct vmbus_channel *primary_channel;
	/*
	 * Per-channel state for use by vmbus drivers
	 * (see {set,get}_per_channel_state()).
	 */
	void *per_channel_state;
	/*
	 * Linkage for the per-CPU relid-to-channel lookup lists, based on
	 * the channel's CPU affinity.
	 */
	struct list_head percpu_list;

	/*
	 * Defer freeing the channel until all CPUs have gone through an
	 * RCU grace period.
	 */
	struct rcu_head rcu;

	/*
	 * For sysfs per-channel properties.
	 */
	struct kobject kobj;

	/*
	 * Opt the channel out of batched reading (see HV_CALL_* above);
	 * set via set_low_latency_mode().
	 */
	bool low_latency;

	/*
	 * NUMA distribution policy: HV_BALANCED spreads channel interrupt
	 * targets across nodes; HV_LOCALIZED keeps sub-channels on the
	 * primary's node (see enum hv_numa_policy).
	 */
	enum hv_numa_policy affinity_policy;

	bool probe_done;

	/*
	 * Work item used to offload new-channel handling off the
	 * single-threaded vmbus connection work queue.
	 */
	struct work_struct add_channel_work;

	/*
	 * Guest to host interrupts caused by the inbound ring buffer
	 * changing from full to not full.
	 */
	u64 intr_in_full;

	/*
	 * Total number of writes that encountered a full outbound ring
	 * (counted in set_channel_pending_send_size()).
	 */
	u64 out_full_total;

	/*
	 * Number of writes that were the first to encounter a full
	 * outbound ring since it last drained.
	 */
	u64 out_full_first;
};
930
931static inline bool is_hvsock_channel(const struct vmbus_channel *c)
932{
933 return !!(c->offermsg.offer.chn_flags &
934 VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
935}
936
/* Select the NUMA distribution policy for this channel's CPU placement. */
static inline void set_channel_affinity_state(struct vmbus_channel *c,
					      enum hv_numa_policy policy)
{
	c->affinity_policy = policy;
}

/* Select how the channel callback is invoked (batched/direct/ISR). */
static inline void set_channel_read_mode(struct vmbus_channel *c,
					 enum hv_callback_mode mode)
{
	c->callback_mode = mode;
}

/* Stash driver-private per-channel state on the channel. */
static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
{
	c->per_channel_state = s;
}

/* Retrieve driver-private per-channel state. */
static inline void *get_per_channel_state(struct vmbus_channel *c)
{
	return c->per_channel_state;
}
958
/*
 * Publish a pending-send-size threshold in the outbound ring's shared
 * control page: ask the host to interrupt us only once at least @size
 * bytes are free. A nonzero @size means the caller just found the ring
 * full, so the full-ring statistics are updated under the ring lock;
 * @size == 0 clears the condition.
 */
static inline void set_channel_pending_send_size(struct vmbus_channel *c,
						 u32 size)
{
	unsigned long flags;

	if (size) {
		spin_lock_irqsave(&c->outbound.ring_lock, flags);
		++c->out_full_total;

		if (!c->out_full_flag) {
			/* First full-ring hit since the ring last drained. */
			++c->out_full_first;
			c->out_full_flag = true;
		}
		spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
	} else {
		c->out_full_flag = false;
	}

	c->outbound.ring_buffer->pending_send_sz = size;
}
979
/* Mark the channel for low-latency handling (see vmbus_channel.low_latency). */
static inline void set_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = true;
}

/* Revert the channel to default handling. */
static inline void clear_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = false;
}
989
/* Dispatch an incoming channel message (control plane). */
void vmbus_onmessage(void *context);

/* Ask the host to (re)send all channel offers. */
int vmbus_request_offers(void);

/*
 * APIs for managing sub-channels. A driver registers a callback that is
 * invoked whenever the host creates a new sub-channel for its primary
 * channel.
 */
void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
			void (*sc_cr_cb)(struct vmbus_channel *new_sc));

/* Register the per-channel rescind-offer callback. */
void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
		void (*chn_rescind_cb)(struct vmbus_channel *));

/*
 * Check if sub-channels have already been offered for this primary
 * channel.
 */
bool vmbus_are_subchannels_present(struct vmbus_channel *primary);
1016
1017
/*
 * On-wire form of a GPA-direct packet with single-page buffers; the
 * leading fields mirror struct vmpacket_descriptor + vmdata_gpa_direct.
 */
struct vmbus_channel_packet_page_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;
	struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
} __packed;

/* Same layout, but with one fixed-size multi-page buffer range. */
struct vmbus_channel_packet_multipage_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_multipage_buffer range;
} __packed;

/* Same layout, but with one variable-size multi-page buffer range. */
struct vmbus_packet_mpb_array {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_mpb_array range;
} __packed;
1052
/* Allocate (but do not connect) send/recv ring buffers for the channel. */
int vmbus_alloc_ring(struct vmbus_channel *channel,
		     u32 send_size, u32 recv_size);
void vmbus_free_ring(struct vmbus_channel *channel);

/* Connect previously allocated ring buffers and register the callback. */
int vmbus_connect_ring(struct vmbus_channel *channel,
		       void (*onchannel_callback)(void *context),
		       void *context);
int vmbus_disconnect_ring(struct vmbus_channel *channel);

/* Classic open path: allocate rings, open the channel, set the callback. */
extern int vmbus_open(struct vmbus_channel *channel,
			    u32 send_ringbuffersize,
			    u32 recv_ringbuffersize,
			    void *userdata,
			    u32 userdatalen,
			    void (*onchannel_callback)(void *context),
			    void *context);

extern void vmbus_close(struct vmbus_channel *channel);

/* Send an in-band packet of @bufferLen bytes with the given type/flags. */
extern int vmbus_sendpacket(struct vmbus_channel *channel,
				  void *buffer,
				  u32 bufferLen,
				  u64 requestid,
				  enum vmbus_packet_type type,
				  u32 flags);

/* Send a packet referencing guest pages via hv_page_buffer descriptors. */
extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
					    struct hv_page_buffer pagebuffers[],
					    u32 pagecount,
					    void *buffer,
					    u32 bufferlen,
					    u64 requestid);

/* Send a packet with a caller-built multi-page-buffer descriptor. */
extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
				     struct vmbus_packet_mpb_array *mpb,
				     u32 desc_size,
				     void *buffer,
				     u32 bufferlen,
				     u64 requestid);

/* Create/destroy a GPADL describing @kbuffer for host access. */
extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
				      void *kbuffer,
				      u32 size,
				      u32 *gpadl_handle);

extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
				     u32 gpadl_handle);

void vmbus_reset_channel_cb(struct vmbus_channel *channel);

/* Receive the next packet's payload into @buffer. */
extern int vmbus_recvpacket(struct vmbus_channel *channel,
				  void *buffer,
				  u32 bufferlen,
				  u32 *buffer_actual_len,
				  u64 *requestid);

/* Like vmbus_recvpacket() but returns the raw packet including header. */
extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
				     void *buffer,
				     u32 bufferlen,
				     u32 *buffer_actual_len,
				     u64 *requestid);


extern void vmbus_ontimer(unsigned long data);
1117
1118
/* Base driver object for all VMBus client drivers. */
struct hv_driver {
	const char *name;

	/*
	 * A hvsock offer (VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER set in the
	 * channel flags) doesn't describe a fixed synthetic device: the
	 * offer's if_type/if_instance can change for every new hvsock
	 * connection. This flag marks the one driver that handles hvsock
	 * offers so matching can treat it specially.
	 */
	bool hvsock;

	/* The device type supported by this driver. */
	guid_t dev_type;
	const struct hv_vmbus_device_id *id_table;

	struct device_driver driver;

	/* Dynamic device GUIDs (runtime-added IDs). */
	struct  {
		spinlock_t lock;
		struct list_head list;
	} dynids;

	int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
	int (*remove)(struct hv_device *);
	void (*shutdown)(struct hv_device *);

};
1153
1154
/* Base device object for all VMBus child devices. */
struct hv_device {
	/* The device type id of this device. */
	guid_t dev_type;

	/* The device instance id of this device. */
	guid_t dev_instance;
	u16 vendor_id;
	u16 device_id;

	struct device device;
	char *driver_override;	/* Driver name to force a match */

	struct vmbus_channel *channel;
	struct kset *channels_kset;	/* sysfs "channels" directory */
};
1170
1171
/* Upcast from the embedded struct device to its hv_device. */
static inline struct hv_device *device_to_hv_device(struct device *d)
{
	return container_of(d, struct hv_device, device);
}

/* Upcast from the embedded struct device_driver to its hv_driver. */
static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
{
	return container_of(d, struct hv_driver, driver);
}

/* Attach driver-private data to the hv_device. */
static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
	dev_set_drvdata(&dev->device, data);
}

/* Retrieve driver-private data from the hv_device. */
static inline void *hv_get_drvdata(struct hv_device *dev)
{
	return dev_get_drvdata(&dev->device);
}
1191
/* Snapshot of a ring buffer's state for debug/sysfs reporting. */
struct hv_ring_buffer_debug_info {
	u32 current_interrupt_mask;
	u32 current_read_index;
	u32 current_write_index;
	u32 bytes_avail_toread;
	u32 bytes_avail_towrite;
};

/* Fill @debug_info with a snapshot of @ring_info's state. */
int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info);

/* Register a vmbus driver, recording the owning module and its name. */
#define vmbus_driver_register(driver)	\
	__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
					 struct module *owner,
					 const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);

void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);

/*
 * Allocate a region of host-provided MMIO space within [min, max] of the
 * given size/alignment; fb_overlap_ok permits overlap with the
 * framebuffer region (NOTE(review): confirm semantics in vmbus_drv.c).
 */
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok);
void vmbus_free_mmio(resource_size_t start, resource_size_t size);
1219
1220
1221
1222
1223
1224
1225
1226
1227
/*
 * Class GUIDs of the well-known VMBus device types. Each textual GUID in
 * the comments below is derived directly from the GUID_INIT() arguments.
 */

/* Network: f8615163-df3e-46c5-913f-f2d2f965ed0e */
#define HV_NIC_GUID \
	.guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
			  0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)

/* IDE: 32412632-86cb-44a2-9b5c-50d1417354f5 */
#define HV_IDE_GUID \
	.guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
			  0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)

/* SCSI: ba6163d9-04a1-4d29-b605-72e2ffb1dc7f */
#define HV_SCSI_GUID \
	.guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
			  0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)

/* Shutdown: 0e0b6031-5213-4934-818b-38d90ced39db */
#define HV_SHUTDOWN_GUID \
	.guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
			  0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)

/* Time Synch: 9527e630-d0ae-497b-adce-e80ab0175caf */
#define HV_TS_GUID \
	.guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
			  0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)

/* Heartbeat: 57164f39-9115-4e78-ab55-382f3bd5422d */
#define HV_HEART_BEAT_GUID \
	.guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
			  0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)

/* KVP (Key Value Pair): a9a0f4e7-5a45-4d96-b827-8a841e8c03e6 */
#define HV_KVP_GUID \
	.guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
			  0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)

/* Dynamic memory: 525074dc-8985-46e2-8057-a307dc18a502 */
#define HV_DM_GUID \
	.guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
			  0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)

/* Mouse: cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a */
#define HV_MOUSE_GUID \
	.guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
			  0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)

/* Keyboard: f912ad6d-2b17-48ea-bd65-f927a61c7684 */
#define HV_KBD_GUID \
	.guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
			  0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)

/* VSS (Backup/Restore): 35fa2e29-ea23-4236-96ae-3a6ebacba440 */
#define HV_VSS_GUID \
	.guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
			  0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)

/* Synthetic Video: da0a7802-e377-4aac-8e77-0558eb1073f8 */
#define HV_SYNTHVID_GUID \
	.guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
			  0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)

/* Synthetic FC: 2f9bcc4a-0069-4af3-b76b-6fd0be528cda */
#define HV_SYNTHFC_GUID \
	.guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
			  0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)

/* Guest File Copy Service: 34d14be3-dee4-41c8-9ae7-6b174977c192 */
#define HV_FCOPY_GUID \
	.guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
			  0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)

/* NetworkDirect: 8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501 */
#define HV_ND_GUID \
	.guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
			  0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)

/* PCI Express pass-through: 44c4f61d-4444-4400-9d52-802e27ede19f */
#define HV_PCIE_GUID \
	.guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
			  0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)

/*
 * The channels identified by the two GUIDs below are never offered to
 * external clients; keep them here so they can be recognized and ignored.
 * AVMA1: f8e65716-3cb3-4a06-9a60-1889c5cccab5
 */
#define HV_AVMA1_GUID \
	.guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
			  0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)

/* AVMA2: 3375baf4-9e15-4b30-b765-67acb10d607b */
#define HV_AVMA2_GUID \
	.guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
			  0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)

/* Remote Desktop Virtualization: 276aacf4-ac15-426c-98dd-7521ad3f01fe */
#define HV_RDV_GUID \
	.guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
			  0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
1372
1373
1374
1375
1376
/* Integration Component (IC) message types (icmsg_hdr.icmsgtype). */
#define ICMSGTYPE_NEGOTIATE		0
#define ICMSGTYPE_HEARTBEAT		1
#define ICMSGTYPE_KVPEXCHANGE	2
#define ICMSGTYPE_SHUTDOWN		3
#define ICMSGTYPE_TIMESYNC		4
#define ICMSGTYPE_VSS			5

/* IC message header flag bits (icmsg_hdr.icflags). */
#define ICMSGHDRFLAG_TRANSACTION	1
#define ICMSGHDRFLAG_REQUEST		2
#define ICMSGHDRFLAG_RESPONSE	4
1387
1388
1389
1390
1391
1392
1393
1394
/*
 * Descriptor for one utility (Integration Component) service such as
 * KVP, shutdown, heartbeat or timesync.
 */
struct hv_util_service {
	u8 *recv_buffer;
	void *channel;
	void (*util_cb)(void *);			/* message callback */
	int (*util_init)(struct hv_util_service *);	/* service setup */
	void (*util_deinit)(void);			/* service teardown */
};
1402
/* Framing header for IC messages carried over the pipe protocol. */
struct vmbuspipe_hdr {
	u32 flags;
	u32 msgsize;
} __packed;

struct ic_version {
	u16 major;
	u16 minor;
} __packed;

/* Header carried by every IC message. */
struct icmsg_hdr {
	struct ic_version icverframe;
	u16 icmsgtype;		/* ICMSGTYPE_* */
	struct ic_version icvermsg;
	u16 icmsgsize;
	u32 status;
	u8 ictransaction_id;
	u8 icflags;		/* ICMSGHDRFLAG_* */
	u8 reserved[2];
} __packed;

/* Payload of an ICMSGTYPE_NEGOTIATE message. */
struct icmsg_negotiate {
	u16 icframe_vercnt;
	u16 icmsg_vercnt;
	u32 reserved;
	struct ic_version icversion_data[1]; /* any size array */
} __packed;

/* Payload of an ICMSGTYPE_SHUTDOWN message. */
struct shutdown_msg_data {
	u32 reason_code;
	u32 timeout_seconds;
	u32 flags;
	u8 display_message[2048];
} __packed;

/* Payload of an ICMSGTYPE_HEARTBEAT message. */
struct heartbeat_msg_data {
	u64 seq_num;
	u32 reserved[8];
} __packed;
1442
1443
/* Time Sync IC message flag values. */
#define ICTIMESYNCFLAG_PROBE	0
#define ICTIMESYNCFLAG_SYNC	1
#define ICTIMESYNCFLAG_SAMPLE	2

/*
 * Offset between the Windows epoch (1601-01-01) and the Unix epoch
 * (1970-01-01) in 100 ns units. The 'L' suffix suffices only on 64-bit
 * builds; 32-bit builds need 'LL' for a 64-bit constant.
 */
#ifdef __x86_64__
#define WLTIMEDELTA 116444736000000000L
#else
#define WLTIMEDELTA 116444736000000000LL
#endif
1453
/* Payload of an ICMSGTYPE_TIMESYNC message (legacy form). */
struct ictimesync_data {
	u64 parenttime;
	u64 childtime;
	u64 roundtriptime;
	u8 flags;		/* ICTIMESYNCFLAG_* */
} __packed;

/* TimeSync payload variant carrying a VM reference-time counter value. */
struct ictimesync_ref_data {
	u64 parenttime;
	u64 vmreferencetime;
	u8 flags;		/* ICTIMESYNCFLAG_* */
	char leapflags;
	char stratum;
	u8 reserved[3];
} __packed;

/* Registration record binding an IC message type to its handler. */
struct hyperv_service_callback {
	u8 msg_type;
	char *log_msg;
	guid_t data;
	struct vmbus_channel *channel;
	void (*callback)(void *context);
};
1477
#define MAX_SRV_VER	0x7ffffff
/*
 * Negotiate the framework and service versions for an IC channel;
 * the negotiated versions are returned via nego_fw_version and
 * nego_srv_version.
 */
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
				const int *fw_version, int fw_vercnt,
				const int *srv_version, int srv_vercnt,
				int *nego_fw_version, int *nego_srv_version);

void hv_process_channel_removal(struct vmbus_channel *channel);

/* Signal the host that the channel has pending data. */
void vmbus_setevent(struct vmbus_channel *channel);
1487
1488
1489
1490
1491extern __u32 vmbus_proto_version;
1492
1493int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
1494 const guid_t *shv_host_servie_id);
1495void vmbus_set_event(struct vmbus_channel *channel);
1496
1497
1498static inline void *
1499hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
1500{
1501 return ring_info->ring_buffer->buffer;
1502}
1503
1504
1505
1506
/*
 * Mask ring-change interrupts from the other endpoint before draining
 * the ring (batched-read mode).
 */
static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;

	/* Make sure the mask update is visible before reading indices. */
	virt_mb();
}
1514
1515
1516
1517
/*
 * Re-enable ring-change interrupts after a batched read and return the
 * number of bytes that are (still) available to read.
 */
static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{

	rbi->ring_buffer->interrupt_mask = 0;

	/* Make sure the unmask is visible before re-checking the ring. */
	virt_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced with the writer and there are new
	 * incoming messages to process.
	 */
	return hv_get_bytes_to_read(rbi);
}
1533
1534
1535
1536
1537
1538
1539static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
1540{
1541 return (void *)((unsigned long)desc + (desc->offset8 << 3));
1542}
1543
1544
1545static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
1546{
1547 return (desc->len8 << 3) - (desc->offset8 << 3);
1548}
1549
1550
/* First packet in the inbound ring, or NULL if the ring is empty. */
struct vmpacket_descriptor *
hv_pkt_iter_first(struct vmbus_channel *channel);

/* Raw advance of the iterator; does not close the iteration at the end. */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *pkt);

/* Finish iterating: commit the read index / re-enable interrupts. */
void hv_pkt_iter_close(struct vmbus_channel *channel);
1559
1560
1561
1562
1563
1564static inline struct vmpacket_descriptor *
1565hv_pkt_iter_next(struct vmbus_channel *channel,
1566 const struct vmpacket_descriptor *pkt)
1567{
1568 struct vmpacket_descriptor *nxt;
1569
1570 nxt = __hv_pkt_iter_next(channel, pkt);
1571 if (!nxt)
1572 hv_pkt_iter_close(channel);
1573
1574 return nxt;
1575}
1576
/* Iterate over all packets currently readable in the channel's inbound ring. */
#define foreach_vmbus_pkt(pkt, channel) \
	for (pkt = hv_pkt_iter_first(channel); pkt; \
	    pkt = hv_pkt_iter_next(channel, pkt))
1580
1581#endif
1582