1
2
3
4
5
6
7
8
9
10
11
12#ifndef _HYPERV_H
13#define _HYPERV_H
14
15#include <uapi/linux/hyperv.h>
16
17#include <linux/mm.h>
18#include <linux/types.h>
19#include <linux/scatterlist.h>
20#include <linux/list.h>
21#include <linux/timer.h>
22#include <linux/completion.h>
23#include <linux/device.h>
24#include <linux/mod_devicetable.h>
25#include <linux/interrupt.h>
26#include <linux/reciprocal_div.h>
27#include <asm/hyperv-tlfs.h>
28
29#define MAX_PAGE_BUFFER_COUNT 32
30#define MAX_MULTIPAGE_BUFFER_COUNT 32
31
32#pragma pack(push, 1)
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
/* Kind of memory a GPADL (Guest Physical Address Descriptor List) describes. */
enum hv_gpadl_type {
	HV_GPADL_BUFFER,	/* plain buffer */
	HV_GPADL_RING		/* ring buffer (header page + data) */
};
75
76
/* Single-page buffer descriptor: a range of @len bytes starting at
 * @offset within the page whose frame number is @pfn. */
struct hv_page_buffer {
	u32 len;
	u32 offset;
	u64 pfn;
};
82
83
/* Multiple-page buffer descriptor: @len bytes starting at @offset into
 * the first page, spanning up to MAX_MULTIPAGE_BUFFER_COUNT pages. */
struct hv_multipage_buffer {
	/* Length and offset in this buffer */
	u32 len;
	u32 offset;
	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};
90
91
92
93
94
95
/* Like hv_multipage_buffer, but with a flexible (caller-sized) PFN
 * array instead of a fixed maximum. */
struct hv_mpb_array {
	/* Length and offset in this buffer */
	u32 len;
	u32 offset;
	u64 pfn_array[];
};
102
103
104#define MAX_PAGE_BUFFER_PACKET (0x18 + \
105 (sizeof(struct hv_page_buffer) * \
106 MAX_PAGE_BUFFER_COUNT))
107#define MAX_MULTIPAGE_BUFFER_PACKET (0x18 + \
108 sizeof(struct hv_multipage_buffer))
109
110
111#pragma pack(pop)
112
/*
 * Shared ring buffer control page followed by the data region.
 * The layout is ABI shared with the hypervisor/host: the header is
 * padded to exactly one page (see reserved2) and the data area follows
 * in buffer[].  Do not reorder or resize fields.
 */
struct hv_ring_buffer {
	/* Offset in bytes from the start of ring data below */
	u32 write_index;

	/* Offset in bytes from the start of ring data below */
	u32 read_index;

	/*
	 * Set non-zero while the reader is draining the ring to suppress
	 * further signaling; see hv_begin_read()/hv_end_read().
	 */
	u32 interrupt_mask;

	/*
	 * Write-space threshold at which the reader wants to be
	 * signalled; published via set_channel_pending_send_size().
	 * 0 means no signalling requested.
	 */
	u32 pending_send_sz;
	u32 reserved1[12];
	union {
		struct {
			/* Other endpoint supports pending_send_sz. */
			u32 feat_pending_send_sz:1;
		};
		u32 value;
	} feature_bits;

	/*
	 * Pad the header out to a full PAGE_SIZE: the fields above total
	 * 68 bytes (4 * u32 + 48 + 4).
	 */
	u8 reserved2[PAGE_SIZE - 68];

	/*
	 * Ring data starts here + RingDataStartOffset
	 * !!! DO NOT place any fields below this !!!
	 */
	u8 buffer[];
} __packed;
166
167
168#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \
169 (payload_sz))
170
/* Guest-side bookkeeping for one direction of a VMBus ring buffer. */
struct hv_ring_buffer_info {
	struct hv_ring_buffer *ring_buffer;	/* mapped shared header + data */
	u32 ring_size;			/* total size in bytes, header included */
	/* Precomputed reciprocal of ring_size/10; lets
	 * hv_get_avail_to_write_percent() avoid a runtime division. */
	struct reciprocal_value ring_size_div10_reciprocal;
	spinlock_t ring_lock;		/* protects the ring and its statistics */

	u32 ring_datasize;		/* size of the data region only (< ring_size) */
	u32 priv_read_index;		/* presumably a private (not yet published)
					 * read cursor — TODO confirm vs. iterator code */

	/*
	 * NOTE(review): serialization of ring-buffer lifetime against
	 * users; exact protocol is not visible in this header.
	 */
	struct mutex ring_buffer_mutex;
};
185
186
187static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
188{
189 u32 read_loc, write_loc, dsize, read;
190
191 dsize = rbi->ring_datasize;
192 read_loc = rbi->ring_buffer->read_index;
193 write_loc = READ_ONCE(rbi->ring_buffer->write_index);
194
195 read = write_loc >= read_loc ? (write_loc - read_loc) :
196 (dsize - read_loc) + write_loc;
197
198 return read;
199}
200
201static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
202{
203 u32 read_loc, write_loc, dsize, write;
204
205 dsize = rbi->ring_datasize;
206 read_loc = READ_ONCE(rbi->ring_buffer->read_index);
207 write_loc = rbi->ring_buffer->write_index;
208
209 write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
210 read_loc - write_loc;
211 return write;
212}
213
214static inline u32 hv_get_avail_to_write_percent(
215 const struct hv_ring_buffer_info *rbi)
216{
217 u32 avail_write = hv_get_bytes_to_write(rbi);
218
219 return reciprocal_divide(
220 (avail_write << 3) + (avail_write << 1),
221 rbi->ring_size_div10_reciprocal);
222}
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239#define VERSION_WS2008 ((0 << 16) | (13))
240#define VERSION_WIN7 ((1 << 16) | (1))
241#define VERSION_WIN8 ((2 << 16) | (4))
242#define VERSION_WIN8_1 ((3 << 16) | (0))
243#define VERSION_WIN10 ((4 << 16) | (0))
244#define VERSION_WIN10_V4_1 ((4 << 16) | (1))
245#define VERSION_WIN10_V5 ((5 << 16) | (0))
246#define VERSION_WIN10_V5_1 ((5 << 16) | (1))
247#define VERSION_WIN10_V5_2 ((5 << 16) | (2))
248
249
250#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
251
252
253#define VMBUS_PIPE_TYPE_BYTE 0x00000000
254#define VMBUS_PIPE_TYPE_MESSAGE 0x00000004
255
256
257#define MAX_USER_DEFINED_BYTES 120
258
259
260#define MAX_PIPE_USER_DEFINED_BYTES 116
261
262
263
264
265
266struct vmbus_channel_offer {
267 guid_t if_type;
268 guid_t if_instance;
269
270
271
272
273 u64 reserved1;
274 u64 reserved2;
275
276 u16 chn_flags;
277 u16 mmio_megabytes;
278
279 union {
280
281 struct {
282 unsigned char user_def[MAX_USER_DEFINED_BYTES];
283 } std;
284
285
286
287
288
289
290
291
292 struct {
293 u32 pipe_mode;
294 unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
295 } pipe;
296 } u;
297
298
299
300
301
302
303 u16 sub_channel_index;
304 u16 reserved3;
305} __packed;
306
307
308#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE 1
309#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES 2
310#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS 4
311#define VMBUS_CHANNEL_NAMED_PIPE_MODE 0x10
312#define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100
313#define VMBUS_CHANNEL_PARENT_OFFER 0x200
314#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400
315#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER 0x2000
316
/*
 * Common header at the front of every VMBus ring packet.  offset8 and
 * len8 are in 8-byte units (see hv_pkt_data()/hv_pkt_datalen()):
 * offset8 is the distance to the payload, len8 the total packet length.
 */
struct vmpacket_descriptor {
	u16 type;	/* enum vmbus_packet_type */
	u16 offset8;	/* payload offset, in 8-byte units */
	u16 len8;	/* total length, in 8-byte units */
	u16 flags;	/* e.g. VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED */
	u64 trans_id;	/* opaque transaction id — presumably echoed back
			 * in VM_PKT_COMP completions; confirm with users */
} __packed;
324
325struct vmpacket_header {
326 u32 prev_pkt_start_offset;
327 struct vmpacket_descriptor descriptor;
328} __packed;
329
330struct vmtransfer_page_range {
331 u32 byte_count;
332 u32 byte_offset;
333} __packed;
334
335struct vmtransfer_page_packet_header {
336 struct vmpacket_descriptor d;
337 u16 xfer_pageset_id;
338 u8 sender_owns_set;
339 u8 reserved;
340 u32 range_cnt;
341 struct vmtransfer_page_range ranges[1];
342} __packed;
343
344struct vmgpadl_packet_header {
345 struct vmpacket_descriptor d;
346 u32 gpadl;
347 u32 reserved;
348} __packed;
349
350struct vmadd_remove_transfer_page_set {
351 struct vmpacket_descriptor d;
352 u32 gpadl;
353 u16 xfer_pageset_id;
354 u16 reserved;
355} __packed;
356
357
358
359
360
/* Guest physical address range: @byte_count bytes starting @byte_offset
 * into the first page of the flexible PFN array. */
struct gpa_range {
	u32 byte_count;
	u32 byte_offset;
	u64 pfn_array[];
};
366
367
368
369
370
371
372
373
374struct vmestablish_gpadl {
375 struct vmpacket_descriptor d;
376 u32 gpadl;
377 u32 range_cnt;
378 struct gpa_range range[1];
379} __packed;
380
381
382
383
384
385struct vmteardown_gpadl {
386 struct vmpacket_descriptor d;
387 u32 gpadl;
388 u32 reserved;
389} __packed;
390
391
392
393
394
395struct vmdata_gpa_direct {
396 struct vmpacket_descriptor d;
397 u32 reserved;
398 u32 range_cnt;
399 struct gpa_range range[1];
400} __packed;
401
402
403struct vmadditional_data {
404 struct vmpacket_descriptor d;
405 u64 total_bytes;
406 u32 offset;
407 u32 byte_cnt;
408 unsigned char data[1];
409} __packed;
410
411union vmpacket_largest_possible_header {
412 struct vmpacket_descriptor simple_hdr;
413 struct vmtransfer_page_packet_header xfer_page_hdr;
414 struct vmgpadl_packet_header gpadl_hdr;
415 struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
416 struct vmestablish_gpadl establish_gpadl_hdr;
417 struct vmteardown_gpadl teardown_gpadl_hdr;
418 struct vmdata_gpa_direct data_gpa_direct_hdr;
419};
420
/*
 * Helpers for decoding a raw VMBus packet.
 *
 * Fixed: the original casts converted the packet pointer to a plain
 * struct type ("(struct vmpacket_descriptor)__packet"), which is not
 * valid C, and VMPACKET_TRANSFER_MODE referenced a non-existent
 * "struct IMPACT".  All three must cast to a pointer to
 * struct vmpacket_descriptor.  offset8/len8 are in 8-byte units.
 */
#define VMPACKET_DATA_START_ADDRESS(__packet)	\
	(void *)(((unsigned char *)(__packet)) +	\
	 ((struct vmpacket_descriptor *)(__packet))->offset8 * 8)

#define VMPACKET_DATA_LENGTH(__packet)	\
	((((struct vmpacket_descriptor *)(__packet))->len8 -	\
	  ((struct vmpacket_descriptor *)(__packet))->offset8) * 8)

#define VMPACKET_TRANSFER_MODE(__packet)	\
	(((struct vmpacket_descriptor *)(__packet))->type)
431
432enum vmbus_packet_type {
433 VM_PKT_INVALID = 0x0,
434 VM_PKT_SYNCH = 0x1,
435 VM_PKT_ADD_XFER_PAGESET = 0x2,
436 VM_PKT_RM_XFER_PAGESET = 0x3,
437 VM_PKT_ESTABLISH_GPADL = 0x4,
438 VM_PKT_TEARDOWN_GPADL = 0x5,
439 VM_PKT_DATA_INBAND = 0x6,
440 VM_PKT_DATA_USING_XFER_PAGES = 0x7,
441 VM_PKT_DATA_USING_GPADL = 0x8,
442 VM_PKT_DATA_USING_GPA_DIRECT = 0x9,
443 VM_PKT_CANCEL_REQUEST = 0xa,
444 VM_PKT_COMP = 0xb,
445 VM_PKT_DATA_USING_ADDITIONAL_PKT = 0xc,
446 VM_PKT_ADDITIONAL_DATA = 0xd
447};
448
449#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED 1
450
451
452
453enum vmbus_channel_message_type {
454 CHANNELMSG_INVALID = 0,
455 CHANNELMSG_OFFERCHANNEL = 1,
456 CHANNELMSG_RESCIND_CHANNELOFFER = 2,
457 CHANNELMSG_REQUESTOFFERS = 3,
458 CHANNELMSG_ALLOFFERS_DELIVERED = 4,
459 CHANNELMSG_OPENCHANNEL = 5,
460 CHANNELMSG_OPENCHANNEL_RESULT = 6,
461 CHANNELMSG_CLOSECHANNEL = 7,
462 CHANNELMSG_GPADL_HEADER = 8,
463 CHANNELMSG_GPADL_BODY = 9,
464 CHANNELMSG_GPADL_CREATED = 10,
465 CHANNELMSG_GPADL_TEARDOWN = 11,
466 CHANNELMSG_GPADL_TORNDOWN = 12,
467 CHANNELMSG_RELID_RELEASED = 13,
468 CHANNELMSG_INITIATE_CONTACT = 14,
469 CHANNELMSG_VERSION_RESPONSE = 15,
470 CHANNELMSG_UNLOAD = 16,
471 CHANNELMSG_UNLOAD_RESPONSE = 17,
472 CHANNELMSG_18 = 18,
473 CHANNELMSG_19 = 19,
474 CHANNELMSG_20 = 20,
475 CHANNELMSG_TL_CONNECT_REQUEST = 21,
476 CHANNELMSG_MODIFYCHANNEL = 22,
477 CHANNELMSG_TL_CONNECT_RESULT = 23,
478 CHANNELMSG_COUNT
479};
480
481
482#define INVALID_RELID U32_MAX
483
484struct vmbus_channel_message_header {
485 enum vmbus_channel_message_type msgtype;
486 u32 padding;
487} __packed;
488
489
490struct vmbus_channel_query_vmbus_version {
491 struct vmbus_channel_message_header header;
492 u32 version;
493} __packed;
494
495
496struct vmbus_channel_version_supported {
497 struct vmbus_channel_message_header header;
498 u8 version_supported;
499} __packed;
500
501
502struct vmbus_channel_offer_channel {
503 struct vmbus_channel_message_header header;
504 struct vmbus_channel_offer offer;
505 u32 child_relid;
506 u8 monitorid;
507
508
509
510 u8 monitor_allocated:1;
511 u8 reserved:7;
512
513
514
515
516
517
518
519
520
521
522
523 u16 is_dedicated_interrupt:1;
524 u16 reserved1:15;
525 u32 connection_id;
526} __packed;
527
528
529struct vmbus_channel_rescind_offer {
530 struct vmbus_channel_message_header header;
531 u32 child_relid;
532} __packed;
533
/*
 * Current pending_send_sz value published in the shared ring header:
 * the write-space threshold at which the reader asked to be signalled
 * (0 when no signalling has been requested).
 */
static inline u32
hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
{
	return rbi->ring_buffer->pending_send_sz;
}
539
540
541
542
543
544
545
546
547
548
549
550struct vmbus_channel_open_channel {
551 struct vmbus_channel_message_header header;
552
553
554 u32 child_relid;
555
556
557 u32 openid;
558
559
560 u32 ringbuffer_gpadlhandle;
561
562
563
564
565
566
567
568
569
570 u32 target_vp;
571
572
573
574
575
576
577 u32 downstream_ringbuffer_pageoffset;
578
579
580 unsigned char userdata[MAX_USER_DEFINED_BYTES];
581} __packed;
582
583
584struct vmbus_channel_open_result {
585 struct vmbus_channel_message_header header;
586 u32 child_relid;
587 u32 openid;
588 u32 status;
589} __packed;
590
591
592struct vmbus_channel_close_channel {
593 struct vmbus_channel_message_header header;
594 u32 child_relid;
595} __packed;
596
597
598#define GPADL_TYPE_RING_BUFFER 1
599#define GPADL_TYPE_SERVER_SAVE_AREA 2
600#define GPADL_TYPE_TRANSACTION 8
601
602
603
604
605
606
607
608struct vmbus_channel_gpadl_header {
609 struct vmbus_channel_message_header header;
610 u32 child_relid;
611 u32 gpadl;
612 u16 range_buflen;
613 u16 rangecount;
614 struct gpa_range range[];
615} __packed;
616
617
618struct vmbus_channel_gpadl_body {
619 struct vmbus_channel_message_header header;
620 u32 msgnumber;
621 u32 gpadl;
622 u64 pfn[];
623} __packed;
624
625struct vmbus_channel_gpadl_created {
626 struct vmbus_channel_message_header header;
627 u32 child_relid;
628 u32 gpadl;
629 u32 creation_status;
630} __packed;
631
632struct vmbus_channel_gpadl_teardown {
633 struct vmbus_channel_message_header header;
634 u32 child_relid;
635 u32 gpadl;
636} __packed;
637
638struct vmbus_channel_gpadl_torndown {
639 struct vmbus_channel_message_header header;
640 u32 gpadl;
641} __packed;
642
643struct vmbus_channel_relid_released {
644 struct vmbus_channel_message_header header;
645 u32 child_relid;
646} __packed;
647
648struct vmbus_channel_initiate_contact {
649 struct vmbus_channel_message_header header;
650 u32 vmbus_version_requested;
651 u32 target_vcpu;
652 union {
653 u64 interrupt_page;
654 struct {
655 u8 msg_sint;
656 u8 padding1[3];
657 u32 padding2;
658 };
659 };
660 u64 monitor_page1;
661 u64 monitor_page2;
662} __packed;
663
664
665struct vmbus_channel_tl_connect_request {
666 struct vmbus_channel_message_header header;
667 guid_t guest_endpoint_id;
668 guid_t host_service_id;
669} __packed;
670
671
672struct vmbus_channel_modifychannel {
673 struct vmbus_channel_message_header header;
674 u32 child_relid;
675 u32 target_vp;
676} __packed;
677
678struct vmbus_channel_version_response {
679 struct vmbus_channel_message_header header;
680 u8 version_supported;
681
682 u8 connection_state;
683 u16 padding;
684
685
686
687
688
689
690
691
692
693 u32 msg_conn_id;
694} __packed;
695
696enum vmbus_channel_state {
697 CHANNEL_OFFER_STATE,
698 CHANNEL_OPENING_STATE,
699 CHANNEL_OPEN_STATE,
700 CHANNEL_OPENED_STATE,
701};
702
703
704
705
706
707struct vmbus_channel_msginfo {
708
709 struct list_head msglistentry;
710
711
712 struct list_head submsglist;
713
714
715 struct completion waitevent;
716 struct vmbus_channel *waiting_channel;
717 union {
718 struct vmbus_channel_version_supported version_supported;
719 struct vmbus_channel_open_result open_result;
720 struct vmbus_channel_gpadl_torndown gpadl_torndown;
721 struct vmbus_channel_gpadl_created gpadl_created;
722 struct vmbus_channel_version_response version_response;
723 } response;
724
725 u32 msgsize;
726
727
728
729
730 unsigned char msg[];
731};
732
733struct vmbus_close_msg {
734 struct vmbus_channel_msginfo info;
735 struct vmbus_channel_close_channel msg;
736};
737
738
739union hv_connection_id {
740 u32 asu32;
741 struct {
742 u32 id:24;
743 u32 reserved:8;
744 } u;
745};
746
747enum vmbus_device_type {
748 HV_IDE = 0,
749 HV_SCSI,
750 HV_FC,
751 HV_NIC,
752 HV_ND,
753 HV_PCIE,
754 HV_FB,
755 HV_KBD,
756 HV_MOUSE,
757 HV_KVP,
758 HV_TS,
759 HV_HB,
760 HV_SHUTDOWN,
761 HV_FCOPY,
762 HV_BACKUP,
763 HV_DM,
764 HV_UNKNOWN,
765};
766
767struct vmbus_device {
768 u16 dev_type;
769 guid_t guid;
770 bool perf_device;
771};
772
/*
 * Guest-side representation of a single VMBus channel (one offer from
 * the host).  Freed via RCU (see @rcu) after release.
 */
struct vmbus_channel {
	/* Link in the bus-wide channel list. */
	struct list_head listentry;

	struct hv_device *device_obj;	/* associated hv_device, if any */

	enum vmbus_channel_state state;

	/* The original CHANNELMSG_OFFERCHANNEL message from the host;
	 * carries the device type/instance GUIDs and child_relid. */
	struct vmbus_channel_offer_channel offermsg;

	/* Monitor mechanism coordinates — NOTE(review): presumably
	 * derived from offermsg.monitorid; derivation not visible here. */
	u8 monitor_grp;
	u8 monitor_bit;

	bool rescind;	/* host has rescinded this offer */
	struct completion rescind_event;

	u32 ringbuffer_gpadlhandle;	/* GPADL handle covering the ring pages */

	/* Guest memory backing both ring buffers. */
	struct page *ringbuffer_page;
	u32 ringbuffer_pagecount;
	u32 ringbuffer_send_offset;	/* page offset splitting send/recv rings
					 * — TODO confirm which side starts here */
	struct hv_ring_buffer_info outbound;	/* guest -> host ring */
	struct hv_ring_buffer_info inbound;	/* host -> guest ring */

	struct vmbus_close_msg close_msg;

	/* Statistics — counters, names suggest interrupt/signal counts. */
	u64 interrupts;
	u64 sig_events;

	/* NOTE(review): per the name, outbound-interrupt count observed
	 * with an empty ring; semantics not visible in this header. */
	u64 intr_out_empty;

	/* True while the outbound ring is inside a "full" episode;
	 * guards out_full_first (see set_channel_pending_send_size()). */
	bool out_full_flag;

	/* Packet-delivery machinery. */
	struct tasklet_struct callback_event;
	void (*onchannel_callback)(void *context);
	void *channel_callback_context;

	/* Invoked when the channel's target CPU is changed. */
	void (*change_target_cpu_callback)(struct vmbus_channel *channel,
			u32 old, u32 new);

	/* Serializes scheduling of this channel's callback work. */
	spinlock_t sched_lock;

	/*
	 * How incoming packets reach onchannel_callback: batched via the
	 * tasklet, invoked directly, or from interrupt context.  Set
	 * with set_channel_read_mode().
	 */
	enum hv_callback_mode {
		HV_CALL_BATCHED,
		HV_CALL_DIRECT,
		HV_CALL_ISR
	} callback_mode;

	bool is_dedicated_interrupt;	/* mirrored from the offer message */
	u64 sig_event;

	/* CPU this channel's interrupts/work are bound to — presumably
	 * kept in sync via change_target_cpu_callback. */
	u32 target_cpu;

	/* Invoked when the host offers a sub-channel of this channel;
	 * see vmbus_set_sc_create_callback(). */
	void (*sc_creation_callback)(struct vmbus_channel *new_sc);

	/* Invoked when the host rescinds the channel; see
	 * vmbus_set_chn_rescind_callback(). */
	void (*chn_rescind_callback)(struct vmbus_channel *channel);

	/* List of sub-channels (meaningful on a primary channel). */
	struct list_head sc_list;

	/* Owning primary channel, or NULL if this is the primary. */
	struct vmbus_channel *primary_channel;

	/* Opaque driver state; see {set,get}_per_channel_state(). */
	void *per_channel_state;

	/* Deferred (RCU) free of this structure. */
	struct rcu_head rcu;

	/* Sysfs object — presumably lives under the parent device's
	 * channels_kset (see struct hv_device). */
	struct kobject kobj;

	bool low_latency;	/* see {set,clear}_low_latency_mode() */

	bool probe_done;	/* driver probe has completed */

	u16 device_id;

	/* Work item used when adding the channel to the bus. */
	struct work_struct add_channel_work;

	/* More statistics; the out_full_* pair is maintained by
	 * set_channel_pending_send_size() under outbound.ring_lock. */
	u64 intr_in_full;

	u64 out_full_total;	/* every time the out ring was found full */

	u64 out_full_first;	/* first occurrence of each full episode */

	/* Fuzz-testing hooks: optional artificial delays. */
	bool fuzz_testing_state;

	u32 fuzz_testing_interrupt_delay;
	u32 fuzz_testing_message_delay;

};
992
993static inline bool is_hvsock_channel(const struct vmbus_channel *c)
994{
995 return !!(c->offermsg.offer.chn_flags &
996 VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
997}
998
999static inline bool is_sub_channel(const struct vmbus_channel *c)
1000{
1001 return c->offermsg.offer.sub_channel_index != 0;
1002}
1003
/* Select how incoming packets on @c are dispatched to its callback;
 * see enum hv_callback_mode in struct vmbus_channel. */
static inline void set_channel_read_mode(struct vmbus_channel *c,
					enum hv_callback_mode mode)
{
	c->callback_mode = mode;
}
1009
/* Attach an opaque, driver-owned state pointer to the channel. */
static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
{
	c->per_channel_state = s;
}
1014
/* Retrieve the pointer previously stored with set_per_channel_state(). */
static inline void *get_per_channel_state(struct vmbus_channel *c)
{
	return c->per_channel_state;
}
1019
/*
 * Publish @size in the outbound ring's shared pending_send_sz field —
 * the threshold of free write space at which the other endpoint should
 * signal us (0 cancels the request).
 *
 * A non-zero @size means the caller just found the ring too full to
 * write, so the "ring full" statistics are updated: out_full_total
 * counts every occurrence, out_full_first only the first occurrence of
 * each full episode (out_full_flag tracks whether we are already in one).
 */
static inline void set_channel_pending_send_size(struct vmbus_channel *c,
						u32 size)
{
	unsigned long flags;

	if (size) {
		/* Counters are shared; update them atomically with
		 * respect to other users of the outbound ring. */
		spin_lock_irqsave(&c->outbound.ring_lock, flags);
		++c->out_full_total;

		if (!c->out_full_flag) {
			++c->out_full_first;
			c->out_full_flag = true;
		}
		spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
	} else {
		/* Request cancelled: the current full episode (if any) ends. */
		c->out_full_flag = false;
	}

	c->outbound.ring_buffer->pending_send_sz = size;
}
1040
/* Mark the channel as latency-sensitive. */
static inline void set_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = true;
}
1045
/* Clear the latency-sensitive marking set by set_low_latency_mode(). */
static inline void clear_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = false;
}
1050
1051void vmbus_onmessage(struct vmbus_channel_message_header *hdr);
1052
1053int vmbus_request_offers(void);
1054
1055
1056
1057
1058
1059void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
1060 void (*sc_cr_cb)(struct vmbus_channel *new_sc));
1061
1062void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
1063 void (*chn_rescind_cb)(struct vmbus_channel *));
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076bool vmbus_are_subchannels_present(struct vmbus_channel *primary);
1077
1078
1079struct vmbus_channel_packet_page_buffer {
1080 u16 type;
1081 u16 dataoffset8;
1082 u16 length8;
1083 u16 flags;
1084 u64 transactionid;
1085 u32 reserved;
1086 u32 rangecount;
1087 struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
1088} __packed;
1089
1090
1091struct vmbus_channel_packet_multipage_buffer {
1092 u16 type;
1093 u16 dataoffset8;
1094 u16 length8;
1095 u16 flags;
1096 u64 transactionid;
1097 u32 reserved;
1098 u32 rangecount;
1099 struct hv_multipage_buffer range;
1100} __packed;
1101
1102
1103struct vmbus_packet_mpb_array {
1104 u16 type;
1105 u16 dataoffset8;
1106 u16 length8;
1107 u16 flags;
1108 u64 transactionid;
1109 u32 reserved;
1110 u32 rangecount;
1111 struct hv_mpb_array range;
1112} __packed;
1113
1114int vmbus_alloc_ring(struct vmbus_channel *channel,
1115 u32 send_size, u32 recv_size);
1116void vmbus_free_ring(struct vmbus_channel *channel);
1117
1118int vmbus_connect_ring(struct vmbus_channel *channel,
1119 void (*onchannel_callback)(void *context),
1120 void *context);
1121int vmbus_disconnect_ring(struct vmbus_channel *channel);
1122
1123extern int vmbus_open(struct vmbus_channel *channel,
1124 u32 send_ringbuffersize,
1125 u32 recv_ringbuffersize,
1126 void *userdata,
1127 u32 userdatalen,
1128 void (*onchannel_callback)(void *context),
1129 void *context);
1130
1131extern void vmbus_close(struct vmbus_channel *channel);
1132
1133extern int vmbus_sendpacket(struct vmbus_channel *channel,
1134 void *buffer,
1135 u32 bufferLen,
1136 u64 requestid,
1137 enum vmbus_packet_type type,
1138 u32 flags);
1139
1140extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
1141 struct hv_page_buffer pagebuffers[],
1142 u32 pagecount,
1143 void *buffer,
1144 u32 bufferlen,
1145 u64 requestid);
1146
1147extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
1148 struct vmbus_packet_mpb_array *mpb,
1149 u32 desc_size,
1150 void *buffer,
1151 u32 bufferlen,
1152 u64 requestid);
1153
1154extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
1155 void *kbuffer,
1156 u32 size,
1157 u32 *gpadl_handle);
1158
1159extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
1160 u32 gpadl_handle);
1161
1162void vmbus_reset_channel_cb(struct vmbus_channel *channel);
1163
1164extern int vmbus_recvpacket(struct vmbus_channel *channel,
1165 void *buffer,
1166 u32 bufferlen,
1167 u32 *buffer_actual_len,
1168 u64 *requestid);
1169
1170extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
1171 void *buffer,
1172 u32 bufferlen,
1173 u32 *buffer_actual_len,
1174 u64 *requestid);
1175
1176
1177extern void vmbus_ontimer(unsigned long data);
1178
1179
1180struct hv_driver {
1181 const char *name;
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195 bool hvsock;
1196
1197
1198 guid_t dev_type;
1199 const struct hv_vmbus_device_id *id_table;
1200
1201 struct device_driver driver;
1202
1203
1204 struct {
1205 spinlock_t lock;
1206 struct list_head list;
1207 } dynids;
1208
1209 int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
1210 int (*remove)(struct hv_device *);
1211 void (*shutdown)(struct hv_device *);
1212
1213 int (*suspend)(struct hv_device *);
1214 int (*resume)(struct hv_device *);
1215
1216};
1217
1218
1219struct hv_device {
1220
1221 guid_t dev_type;
1222
1223
1224 guid_t dev_instance;
1225 u16 vendor_id;
1226 u16 device_id;
1227
1228 struct device device;
1229 char *driver_override;
1230
1231 struct vmbus_channel *channel;
1232 struct kset *channels_kset;
1233
1234
1235 struct dentry *debug_dir;
1236
1237};
1238
1239
/* Map a generic struct device back to its enclosing hv_device. */
static inline struct hv_device *device_to_hv_device(struct device *d)
{
	return container_of(d, struct hv_device, device);
}
1244
/* Map a generic struct device_driver back to its enclosing hv_driver. */
static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
{
	return container_of(d, struct hv_driver, driver);
}
1249
/* Store driver-private data on the underlying struct device. */
static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
	dev_set_drvdata(&dev->device, data);
}
1254
/* Retrieve the pointer previously stored with hv_set_drvdata(). */
static inline void *hv_get_drvdata(struct hv_device *dev)
{
	return dev_get_drvdata(&dev->device);
}
1259
1260struct hv_ring_buffer_debug_info {
1261 u32 current_interrupt_mask;
1262 u32 current_read_index;
1263 u32 current_write_index;
1264 u32 bytes_avail_toread;
1265 u32 bytes_avail_towrite;
1266};
1267
1268
1269int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
1270 struct hv_ring_buffer_debug_info *debug_info);
1271
1272
1273#define vmbus_driver_register(driver) \
1274 __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
1275int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
1276 struct module *owner,
1277 const char *mod_name);
1278void vmbus_driver_unregister(struct hv_driver *hv_driver);
1279
1280void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
1281
1282int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
1283 resource_size_t min, resource_size_t max,
1284 resource_size_t size, resource_size_t align,
1285 bool fb_overlap_ok);
1286void vmbus_free_mmio(resource_size_t start, resource_size_t size);
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296#define HV_NIC_GUID \
1297 .guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
1298 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)
1299
1300
1301
1302
1303
1304#define HV_IDE_GUID \
1305 .guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
1306 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
1307
1308
1309
1310
1311
1312#define HV_SCSI_GUID \
1313 .guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
1314 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
1315
1316
1317
1318
1319
1320#define HV_SHUTDOWN_GUID \
1321 .guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
1322 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)
1323
1324
1325
1326
1327
1328#define HV_TS_GUID \
1329 .guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
1330 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
1331
1332
1333
1334
1335
1336#define HV_HEART_BEAT_GUID \
1337 .guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
1338 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
1339
1340
1341
1342
1343
1344#define HV_KVP_GUID \
1345 .guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
1346 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)
1347
1348
1349
1350
1351
1352#define HV_DM_GUID \
1353 .guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
1354 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
1355
1356
1357
1358
1359
1360#define HV_MOUSE_GUID \
1361 .guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
1362 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)
1363
1364
1365
1366
1367
1368#define HV_KBD_GUID \
1369 .guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
1370 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)
1371
1372
1373
1374
1375#define HV_VSS_GUID \
1376 .guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
1377 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
1378
1379
1380
1381
1382#define HV_SYNTHVID_GUID \
1383 .guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
1384 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)
1385
1386
1387
1388
1389
1390#define HV_SYNTHFC_GUID \
1391 .guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
1392 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)
1393
1394
1395
1396
1397
1398
1399#define HV_FCOPY_GUID \
1400 .guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
1401 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)
1402
1403
1404
1405
1406
1407#define HV_ND_GUID \
1408 .guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
1409 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)
1410
1411
1412
1413
1414
1415
1416#define HV_PCIE_GUID \
1417 .guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
1418 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429#define HV_AVMA1_GUID \
1430 .guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
1431 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)
1432
1433#define HV_AVMA2_GUID \
1434 .guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
1435 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)
1436
1437#define HV_RDV_GUID \
1438 .guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
1439 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
1440
1441
1442
1443
1444
1445#define ICMSGTYPE_NEGOTIATE 0
1446#define ICMSGTYPE_HEARTBEAT 1
1447#define ICMSGTYPE_KVPEXCHANGE 2
1448#define ICMSGTYPE_SHUTDOWN 3
1449#define ICMSGTYPE_TIMESYNC 4
1450#define ICMSGTYPE_VSS 5
1451
1452#define ICMSGHDRFLAG_TRANSACTION 1
1453#define ICMSGHDRFLAG_REQUEST 2
1454#define ICMSGHDRFLAG_RESPONSE 4
1455
1456
1457
1458
1459
1460
1461
1462
1463struct hv_util_service {
1464 u8 *recv_buffer;
1465 void *channel;
1466 void (*util_cb)(void *);
1467 int (*util_init)(struct hv_util_service *);
1468 void (*util_deinit)(void);
1469 int (*util_pre_suspend)(void);
1470 int (*util_pre_resume)(void);
1471};
1472
1473struct vmbuspipe_hdr {
1474 u32 flags;
1475 u32 msgsize;
1476} __packed;
1477
1478struct ic_version {
1479 u16 major;
1480 u16 minor;
1481} __packed;
1482
1483struct icmsg_hdr {
1484 struct ic_version icverframe;
1485 u16 icmsgtype;
1486 struct ic_version icvermsg;
1487 u16 icmsgsize;
1488 u32 status;
1489 u8 ictransaction_id;
1490 u8 icflags;
1491 u8 reserved[2];
1492} __packed;
1493
1494struct icmsg_negotiate {
1495 u16 icframe_vercnt;
1496 u16 icmsg_vercnt;
1497 u32 reserved;
1498 struct ic_version icversion_data[1];
1499} __packed;
1500
1501struct shutdown_msg_data {
1502 u32 reason_code;
1503 u32 timeout_seconds;
1504 u32 flags;
1505 u8 display_message[2048];
1506} __packed;
1507
1508struct heartbeat_msg_data {
1509 u64 seq_num;
1510 u32 reserved[8];
1511} __packed;
1512
1513
1514#define ICTIMESYNCFLAG_PROBE 0
1515#define ICTIMESYNCFLAG_SYNC 1
1516#define ICTIMESYNCFLAG_SAMPLE 2
1517
1518#ifdef __x86_64__
1519#define WLTIMEDELTA 116444736000000000L
1520#else
1521#define WLTIMEDELTA 116444736000000000LL
1522#endif
1523
1524struct ictimesync_data {
1525 u64 parenttime;
1526 u64 childtime;
1527 u64 roundtriptime;
1528 u8 flags;
1529} __packed;
1530
1531struct ictimesync_ref_data {
1532 u64 parenttime;
1533 u64 vmreferencetime;
1534 u8 flags;
1535 char leapflags;
1536 char stratum;
1537 u8 reserved[3];
1538} __packed;
1539
1540struct hyperv_service_callback {
1541 u8 msg_type;
1542 char *log_msg;
1543 guid_t data;
1544 struct vmbus_channel *channel;
1545 void (*callback)(void *context);
1546};
1547
1548#define MAX_SRV_VER 0x7ffffff
1549extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
1550 const int *fw_version, int fw_vercnt,
1551 const int *srv_version, int srv_vercnt,
1552 int *nego_fw_version, int *nego_srv_version);
1553
1554void hv_process_channel_removal(struct vmbus_channel *channel);
1555
1556void vmbus_setevent(struct vmbus_channel *channel);
1557
1558
1559
1560
1561extern __u32 vmbus_proto_version;
1562
1563int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
1564 const guid_t *shv_host_servie_id);
1565int vmbus_send_modifychannel(u32 child_relid, u32 target_vp);
1566void vmbus_set_event(struct vmbus_channel *channel);
1567
1568
/* Start of the ring's data region: the flexible buffer[] member that
 * follows the one-page shared header. */
static inline void *
hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_buffer->buffer;
}
1574
1575
1576
1577
/*
 * Mask further signaling on this ring before draining it.  Paired with
 * hv_end_read().
 */
static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;

	/* Make sure the mask write is visible to the other endpoint
	 * before we start reading ring contents. */
	virt_mb();
}
1585
1586
1587
1588
/*
 * Unmask signaling on this ring and return the number of bytes that are
 * (still) available to read.  Paired with hv_begin_read().
 */
static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{

	rbi->ring_buffer->interrupt_mask = 0;

	/* Order the unmask before re-checking for data. */
	virt_mb();

	/*
	 * Re-check after unmasking: data that arrived while the mask was
	 * set produced no signal, so the caller must consume anything
	 * this returns rather than wait for an interrupt.
	 */
	return hv_get_bytes_to_read(rbi);
}
1604
1605
1606
1607
1608
1609
1610static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
1611{
1612 return (void *)((unsigned long)desc + (desc->offset8 << 3));
1613}
1614
1615
1616static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
1617{
1618 return (desc->len8 << 3) - (desc->offset8 << 3);
1619}
1620
1621
1622struct vmpacket_descriptor *
1623hv_pkt_iter_first(struct vmbus_channel *channel);
1624
1625struct vmpacket_descriptor *
1626__hv_pkt_iter_next(struct vmbus_channel *channel,
1627 const struct vmpacket_descriptor *pkt);
1628
1629void hv_pkt_iter_close(struct vmbus_channel *channel);
1630
1631
1632
1633
1634
1635static inline struct vmpacket_descriptor *
1636hv_pkt_iter_next(struct vmbus_channel *channel,
1637 const struct vmpacket_descriptor *pkt)
1638{
1639 struct vmpacket_descriptor *nxt;
1640
1641 nxt = __hv_pkt_iter_next(channel, pkt);
1642 if (!nxt)
1643 hv_pkt_iter_close(channel);
1644
1645 return nxt;
1646}
1647
1648#define foreach_vmbus_pkt(pkt, channel) \
1649 for (pkt = hv_pkt_iter_first(channel); pkt; \
1650 pkt = hv_pkt_iter_next(channel, pkt))
1651
1652
1653
1654
1655
1656
1657
1658
1659#define HV_CONFIG_BLOCK_SIZE_MAX 128
1660
1661int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len,
1662 unsigned int block_id, unsigned int *bytes_returned);
1663int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len,
1664 unsigned int block_id);
1665int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context,
1666 void (*block_invalidate)(void *context,
1667 u64 block_mask));
1668
1669struct hyperv_pci_block_ops {
1670 int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len,
1671 unsigned int block_id, unsigned int *bytes_returned);
1672 int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len,
1673 unsigned int block_id);
1674 int (*reg_blk_invalidate)(struct pci_dev *dev, void *context,
1675 void (*block_invalidate)(void *context,
1676 u64 block_mask));
1677};
1678
1679extern struct hyperv_pci_block_ops hvpci_block_ops;
1680
1681static inline unsigned long virt_to_hvpfn(void *addr)
1682{
1683 phys_addr_t paddr;
1684
1685 if (is_vmalloc_addr(addr))
1686 paddr = page_to_phys(vmalloc_to_page(addr)) +
1687 offset_in_page(addr);
1688 else
1689 paddr = __pa(addr);
1690
1691 return paddr >> HV_HYP_PAGE_SHIFT;
1692}
1693
1694#define NR_HV_HYP_PAGES_IN_PAGE (PAGE_SIZE / HV_HYP_PAGE_SIZE)
1695#define offset_in_hvpage(ptr) ((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
1696#define HVPFN_UP(x) (((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT)
1697#define page_to_hvpfn(page) (page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE)
1698
1699#endif
1700