1
2
3
4
5
6
7
8
9
10
11
12#ifndef _HYPERV_H
13#define _HYPERV_H
14
15#include <uapi/linux/hyperv.h>
16
17#include <linux/mm.h>
18#include <linux/types.h>
19#include <linux/scatterlist.h>
20#include <linux/list.h>
21#include <linux/timer.h>
22#include <linux/completion.h>
23#include <linux/device.h>
24#include <linux/mod_devicetable.h>
25#include <linux/interrupt.h>
26#include <linux/reciprocal_div.h>
27#include <asm/hyperv-tlfs.h>
28
/* Maximum number of single-page ranges in one page-buffer packet */
#define MAX_PAGE_BUFFER_COUNT				32
/* Maximum number of PFNs carried by one multi-page buffer */
#define MAX_MULTIPAGE_BUFFER_COUNT			32
31
32#pragma pack(push, 1)
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
/* Kinds of GPADL (Guest Physical Address Descriptor List) */
enum hv_gpadl_type {
	HV_GPADL_BUFFER,	/* plain data buffer */
	HV_GPADL_RING		/* ring buffer (header page + data) */
};
75
76
/* Single-page buffer: one guest page frame plus offset/length within it */
struct hv_page_buffer {
	u32 len;
	u32 offset;
	u64 pfn;
};

/* Multiple-page buffer: fixed-size array of page frames */
struct hv_multipage_buffer {
	/* Length and offset of the data within this buffer */
	u32 len;
	u32 offset;
	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};

/*
 * Multiple-page buffer array; the number of pfn_array entries is
 * implied by offset and len rather than stored explicitly.
 */
struct hv_mpb_array {
	/* Length and offset of the data within this buffer */
	u32 len;
	u32 offset;
	u64 pfn_array[];
};

/*
 * NOTE(review): 0x18 (24 bytes) presumably accounts for the fixed packet
 * header preceding the ranges (descriptor + reserved + rangecount) -- see
 * struct vmbus_channel_packet_page_buffer; confirm before changing.
 */
#define MAX_PAGE_BUFFER_PACKET		(0x18 +			\
					(sizeof(struct hv_page_buffer) * \
					 MAX_PAGE_BUFFER_COUNT))
#define MAX_MULTIPAGE_BUFFER_PACKET	(0x18 +			\
					 sizeof(struct hv_multipage_buffer))
109
110
111#pragma pack(pop)
112
/*
 * Shared ring buffer layout. The control page below is shared with the
 * host, so the layout is ABI: do not reorder or resize fields.
 */
struct hv_ring_buffer {
	/* Offset in bytes from the start of the ring data below */
	u32 write_index;

	/* Offset in bytes from the start of the ring data below */
	u32 read_index;

	/* Non-zero masks interrupts from the other endpoint
	 * (see hv_begin_read()/hv_end_read()).
	 */
	u32 interrupt_mask;

	/*
	 * Writer-advertised threshold: signal the writer once at least this
	 * many bytes of free space become available
	 * (see set_channel_pending_send_size()).
	 */
	u32 pending_send_sz;
	u32 reserved1[12];
	union {
		struct {
			/* Set when the other side honors pending_send_sz */
			u32 feat_pending_send_sz:1;
		};
		u32 value;
	} feature_bits;

	/*
	 * Pad the control region to a full page so that the data area below
	 * starts on a page boundary (68 = bytes of fields above).
	 */
	u8 reserved2[PAGE_SIZE - 68];

	/*
	 * Ring data starts here.
	 * !! Do not place any fields after this flexible array !!
	 */
	u8 buffer[];
} __packed;

/* Total ring allocation: one control/header page + payload, page-aligned */
#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \
					       (payload_sz))
170
/* Per-direction bookkeeping for one ring buffer mapping */
struct hv_ring_buffer_info {
	struct hv_ring_buffer *ring_buffer;
	u32 ring_size;			/* Includes the shared header page */
	/* Precomputed reciprocal of ring_size/10, used by
	 * hv_get_avail_to_write_percent() to avoid a runtime division.
	 */
	struct reciprocal_value ring_size_div10_reciprocal;
	spinlock_t ring_lock;

	u32 ring_datasize;		/* Data area only; < ring_size */
	u32 priv_read_index;		/* Guest-private read cursor */
	/*
	 * Prevents the ring buffer from being freed while it is still
	 * being accessed.
	 */
	struct mutex ring_buffer_mutex;
};
185
186
187static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
188{
189 u32 read_loc, write_loc, dsize, read;
190
191 dsize = rbi->ring_datasize;
192 read_loc = rbi->ring_buffer->read_index;
193 write_loc = READ_ONCE(rbi->ring_buffer->write_index);
194
195 read = write_loc >= read_loc ? (write_loc - read_loc) :
196 (dsize - read_loc) + write_loc;
197
198 return read;
199}
200
201static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
202{
203 u32 read_loc, write_loc, dsize, write;
204
205 dsize = rbi->ring_datasize;
206 read_loc = READ_ONCE(rbi->ring_buffer->read_index);
207 write_loc = rbi->ring_buffer->write_index;
208
209 write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
210 read_loc - write_loc;
211 return write;
212}
213
214static inline u32 hv_get_avail_to_write_percent(
215 const struct hv_ring_buffer_info *rbi)
216{
217 u32 avail_write = hv_get_bytes_to_write(rbi);
218
219 return reciprocal_divide(
220 (avail_write << 3) + (avail_write << 1),
221 rbi->ring_size_div10_reciprocal);
222}
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
/*
 * VMBus protocol versions negotiated with the host:
 * major number in the upper 16 bits, minor number in the lower 16 bits.
 */
#define VERSION_WS2008  ((0 << 16) | (13))
#define VERSION_WIN7    ((1 << 16) | (1))
#define VERSION_WIN8    ((2 << 16) | (4))
#define VERSION_WIN8_1    ((3 << 16) | (0))
#define VERSION_WIN10	((4 << 16) | (0))
#define VERSION_WIN10_V4_1 ((4 << 16) | (1))
#define VERSION_WIN10_V5 ((5 << 16) | (0))
#define VERSION_WIN10_V5_1 ((5 << 16) | (1))
#define VERSION_WIN10_V5_2 ((5 << 16) | (2))

/* Maximum payload of a pipe-mode message */
#define MAX_PIPE_DATA_PAYLOAD		(sizeof(u8) * 16384)

/* Pipe modes for VMBUS_CHANNEL_NAMED_PIPE_MODE channels */
#define VMBUS_PIPE_TYPE_BYTE		0x00000000
#define VMBUS_PIPE_TYPE_MESSAGE		0x00000004

/* User-defined bytes in a standard channel offer */
#define MAX_USER_DEFINED_BYTES		120

/* User-defined bytes in a pipe-mode offer (4 bytes go to pipe_mode) */
#define MAX_PIPE_USER_DEFINED_BYTES	116
261
262
263
264
265
/*
 * Channel offer as sent by the host. Wire format (__packed): do not
 * reorder or resize fields.
 */
struct vmbus_channel_offer {
	guid_t if_type;		/* Device/interface type GUID */
	guid_t if_instance;	/* Unique instance GUID */

	/* Not interpreted by the guest */
	u64 reserved1;
	u64 reserved2;

	u16 chn_flags;		/* VMBUS_CHANNEL_* flags below */
	u16 mmio_megabytes;	/* MMIO size, in MB (per field name) */

	union {
		/* Non-pipe channels: opaque user-defined data */
		struct {
			unsigned char user_def[MAX_USER_DEFINED_BYTES];
		} std;

		/*
		 * Pipe-mode channels (VMBUS_CHANNEL_NAMED_PIPE_MODE):
		 * the first 4 bytes carry the pipe mode.
		 */
		struct {
			u32  pipe_mode;
			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
		} pipe;
	} u;

	/*
	 * Zero for the primary channel; non-zero identifies a sub-channel
	 * (see is_sub_channel()).
	 */
	u16 sub_channel_index;
	u16 reserved3;
} __packed;

/* Server flags (chn_flags in the offer above) */
#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE	1
#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES	2
#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS		4
#define VMBUS_CHANNEL_NAMED_PIPE_MODE			0x10
#define VMBUS_CHANNEL_LOOPBACK_OFFER			0x100
#define VMBUS_CHANNEL_PARENT_OFFER			0x200
#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION	0x400
#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER		0x2000
316
/*
 * Header of every packet placed in a ring buffer. offset8/len8 are in
 * units of 8 bytes (see hv_pkt_data()/hv_pkt_datalen()).
 */
struct vmpacket_descriptor {
	u16 type;	/* enum vmbus_packet_type */
	u16 offset8;	/* Offset of payload from start of packet, /8 */
	u16 len8;	/* Total packet length, /8 */
	u16 flags;	/* e.g. VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED */
	u64 trans_id;	/* Transaction id echoed back in completions */
} __packed;

struct vmpacket_header {
	u32 prev_pkt_start_offset;	/* Ring offset of previous packet */
	struct vmpacket_descriptor descriptor;
} __packed;

/* One range within a transfer pageset */
struct vmtransfer_page_range {
	u32 byte_count;
	u32 byte_offset;
} __packed;

struct vmtransfer_page_packet_header {
	struct vmpacket_descriptor d;
	u16 xfer_pageset_id;
	u8  sender_owns_set;
	u8 reserved;
	u32 range_cnt;		/* Number of entries in ranges[] */
	struct vmtransfer_page_range ranges[1];
} __packed;

struct vmgpadl_packet_header {
	struct vmpacket_descriptor d;
	u32 gpadl;		/* GPADL handle */
	u32 reserved;
} __packed;

struct vmadd_remove_transfer_page_set {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u16 xfer_pageset_id;
	u16 reserved;
} __packed;

/*
 * A range of guest physical pages: byte_count/byte_offset determine how
 * many pfn_array entries follow.
 */
struct gpa_range {
	u32 byte_count;
	u32 byte_offset;
	u64 pfn_array[];
};
366
367
368
369
370
371
372
373
/* Establish a GPADL for the specified buffer */
struct vmestablish_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/* Tear down the specified GPADL handle */
struct vmteardown_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;	/* for alignment to a 8-byte boundary */
} __packed;

/* Data payload described directly by GPA ranges */
struct vmdata_gpa_direct {
	struct vmpacket_descriptor d;
	u32 reserved;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/* Continuation data for a preceding packet */
struct vmadditional_data {
	struct vmpacket_descriptor d;
	u64 total_bytes;	/* Total bytes across all packets */
	u32 offset;		/* Offset of this chunk in the total */
	u32 byte_cnt;		/* Bytes in this packet */
	unsigned char data[1];
} __packed;

/* Union sized to hold the largest possible packet header */
union vmpacket_largest_possible_header {
	struct vmpacket_descriptor simple_hdr;
	struct vmtransfer_page_packet_header xfer_page_hdr;
	struct vmgpadl_packet_header gpadl_hdr;
	struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
	struct vmestablish_gpadl establish_gpadl_hdr;
	struct vmteardown_gpadl teardown_gpadl_hdr;
	struct vmdata_gpa_direct data_gpa_direct_hdr;
};
420
/*
 * Helpers for decoding a packet in place; __packet points at the packet's
 * leading struct vmpacket_descriptor. offset8/len8 are in 8-byte units.
 *
 * Fixed: the casts previously cast to the struct *value* type
 * "(struct vmpacket_descriptor)" (ill-formed if ever expanded) and
 * VMPACKET_TRANSFER_MODE cast to the nonexistent "struct IMPACT";
 * all three now cast to struct vmpacket_descriptor *, matching the
 * equivalent inline helpers hv_pkt_data()/hv_pkt_datalen().
 */
#define VMPACKET_DATA_START_ADDRESS(__packet)	\
	(void *)(((unsigned char *)__packet) +	\
	 ((struct vmpacket_descriptor *)__packet)->offset8 * 8)

#define VMPACKET_DATA_LENGTH(__packet)		\
	((((struct vmpacket_descriptor *)__packet)->len8 -	\
	  ((struct vmpacket_descriptor *)__packet)->offset8) * 8)

#define VMPACKET_TRANSFER_MODE(__packet)	\
	(((struct vmpacket_descriptor *)__packet)->type)
431
/* Packet types placed in the ring buffer (vmpacket_descriptor.type) */
enum vmbus_packet_type {
	VM_PKT_INVALID				= 0x0,
	VM_PKT_SYNCH				= 0x1,
	VM_PKT_ADD_XFER_PAGESET			= 0x2,
	VM_PKT_RM_XFER_PAGESET			= 0x3,
	VM_PKT_ESTABLISH_GPADL			= 0x4,
	VM_PKT_TEARDOWN_GPADL			= 0x5,
	VM_PKT_DATA_INBAND			= 0x6,
	VM_PKT_DATA_USING_XFER_PAGES		= 0x7,
	VM_PKT_DATA_USING_GPADL			= 0x8,
	VM_PKT_DATA_USING_GPA_DIRECT		= 0x9,
	VM_PKT_CANCEL_REQUEST			= 0xa,
	VM_PKT_COMP				= 0xb,
	VM_PKT_DATA_USING_ADDITIONAL_PKT	= 0xc,
	VM_PKT_ADDITIONAL_DATA			= 0xd
};

/* Set in vmpacket_descriptor.flags to request a VM_PKT_COMP response */
#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED	1

/* Channel management messages exchanged over the VMBus control path */
enum vmbus_channel_message_type {
	CHANNELMSG_INVALID			=  0,
	CHANNELMSG_OFFERCHANNEL		=  1,
	CHANNELMSG_RESCIND_CHANNELOFFER	=  2,
	CHANNELMSG_REQUESTOFFERS		=  3,
	CHANNELMSG_ALLOFFERS_DELIVERED	=  4,
	CHANNELMSG_OPENCHANNEL		=  5,
	CHANNELMSG_OPENCHANNEL_RESULT	=  6,
	CHANNELMSG_CLOSECHANNEL		=  7,
	CHANNELMSG_GPADL_HEADER		=  8,
	CHANNELMSG_GPADL_BODY		=  9,
	CHANNELMSG_GPADL_CREATED		= 10,
	CHANNELMSG_GPADL_TEARDOWN		= 11,
	CHANNELMSG_GPADL_TORNDOWN		= 12,
	CHANNELMSG_RELID_RELEASED		= 13,
	CHANNELMSG_INITIATE_CONTACT		= 14,
	CHANNELMSG_VERSION_RESPONSE		= 15,
	CHANNELMSG_UNLOAD			= 16,
	CHANNELMSG_UNLOAD_RESPONSE		= 17,
	CHANNELMSG_18				= 18,	/* reserved/unused */
	CHANNELMSG_19				= 19,	/* reserved/unused */
	CHANNELMSG_20				= 20,	/* reserved/unused */
	CHANNELMSG_TL_CONNECT_REQUEST		= 21,
	CHANNELMSG_MODIFYCHANNEL		= 22,
	CHANNELMSG_TL_CONNECT_RESULT		= 23,
	CHANNELMSG_COUNT
};

/* Sentinel for an invalid channel relid */
#define INVALID_RELID	U32_MAX

/* Common header of every channel management message */
struct vmbus_channel_message_header {
	enum vmbus_channel_message_type msgtype;
	u32 padding;
} __packed;
488
489
/* Query VMBus Version parameters */
struct vmbus_channel_query_vmbus_version {
	struct vmbus_channel_message_header header;
	u32 version;
} __packed;

/* VMBus Version Supported parameters */
struct vmbus_channel_version_supported {
	struct vmbus_channel_message_header header;
	u8 version_supported;
} __packed;

/* Offer Channel parameters (CHANNELMSG_OFFERCHANNEL) */
struct vmbus_channel_offer_channel {
	struct vmbus_channel_message_header header;
	struct vmbus_channel_offer offer;
	u32 child_relid;	/* Relative channel id for this guest */
	u8 monitorid;

	/* Non-zero bit means the host allocated a monitor for the channel */
	u8 monitor_allocated:1;
	u8 reserved:7;

	/*
	 * NOTE(review): the two fields below are presumably only meaningful
	 * on newer protocol versions -- confirm against the negotiated
	 * vmbus_proto_version before relying on them.
	 */
	u16 is_dedicated_interrupt:1;
	u16 reserved1:15;
	u32 connection_id;
} __packed;

/* Rescind Offer parameters */
struct vmbus_channel_rescind_offer {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;
533
534static inline u32
535hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
536{
537 return rbi->ring_buffer->pending_send_sz;
538}
539
540
541
542
543
544
545
546
547
548
549
/* Open Channel parameters (CHANNELMSG_OPENCHANNEL) */
struct vmbus_channel_open_channel {
	struct vmbus_channel_message_header header;

	/* Identifies the specific VMBus channel that is being opened */
	u32 child_relid;

	/* ID making a particular open request at a channel offer unique */
	u32 openid;

	/* GPADL handle for the channel's ring buffer */
	u32 ringbuffer_gpadlhandle;

	/* Target VCPU for channel interrupts */
	u32 target_vp;

	/*
	 * The upstream (send) ring buffer begins at offset zero in the
	 * memory described by ringbuffer_gpadlhandle; the downstream
	 * (receive) ring buffer starts at this offset, in pages.
	 */
	u32 downstream_ringbuffer_pageoffset;

	/* User-specific data to be passed along to the server endpoint */
	unsigned char userdata[MAX_USER_DEFINED_BYTES];
} __packed;

/* Open Channel Result parameters */
struct vmbus_channel_open_result {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 openid;
	u32 status;	/* Non-zero indicates failure */
} __packed;

/* Close channel parameters */
struct vmbus_channel_close_channel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

/* Channel Message GPADL types */
#define GPADL_TYPE_RING_BUFFER		1
#define GPADL_TYPE_SERVER_SAVE_AREA	2
#define GPADL_TYPE_TRANSACTION		8

/*
 * First message of a GPADL description. If the PFN list does not fit,
 * the remainder is sent in CHANNELMSG_GPADL_BODY messages.
 */
struct vmbus_channel_gpadl_header {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;		/* Guest-chosen GPADL handle */
	u16 range_buflen;	/* Total bytes of range[] data, all messages */
	u16 rangecount;
	struct gpa_range range[];
} __packed;

/* Continuation of a GPADL description (CHANNELMSG_GPADL_BODY) */
struct vmbus_channel_gpadl_body {
	struct vmbus_channel_message_header header;
	u32 msgnumber;		/* Sequence number of this body message */
	u32 gpadl;
	u64 pfn[];
} __packed;

/* Host acknowledgment of GPADL creation */
struct vmbus_channel_gpadl_created {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u32 creation_status;	/* Non-zero indicates failure */
} __packed;

/* Guest request to tear down a GPADL */
struct vmbus_channel_gpadl_teardown {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
} __packed;

/* Host acknowledgment of GPADL teardown */
struct vmbus_channel_gpadl_torndown {
	struct vmbus_channel_message_header header;
	u32 gpadl;
} __packed;

/* Host notification that a relid has been released */
struct vmbus_channel_relid_released {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;
647
/* First message the guest sends to negotiate the VMBus protocol */
struct vmbus_channel_initiate_contact {
	struct vmbus_channel_message_header header;
	u32 vmbus_version_requested;	/* One of the VERSION_* values */
	u32 target_vcpu;		/* VCPU to receive channel messages */
	union {
		/* Legacy layout: GPA of the interrupt page */
		u64 interrupt_page;
		/* Newer layout: SynIC SINT used for channel messages */
		struct {
			u8	msg_sint;
			u8	padding1[3];
			u32	padding2;
		};
	};
	u64 monitor_page1;
	u64 monitor_page2;
} __packed;

/* Hyper-V socket: guest's connect() request to a host service */
struct vmbus_channel_tl_connect_request {
	struct vmbus_channel_message_header header;
	guid_t guest_endpoint_id;
	guid_t host_service_id;
} __packed;

/* Modify Channel parameters: move interrupts to another VCPU */
struct vmbus_channel_modifychannel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 target_vp;
} __packed;

/* Host response to the initiate-contact negotiation */
struct vmbus_channel_version_response {
	struct vmbus_channel_message_header header;
	u8 version_supported;	/* Non-zero if the requested version is OK */

	u8 connection_state;
	u16 padding;

	/*
	 * Connection id the guest must use to send channel messages;
	 * NOTE(review): presumably only valid on newer protocol versions --
	 * confirm against the negotiated version before use.
	 */
	u32 msg_conn_id;
} __packed;

/* Lifecycle states of a channel (struct vmbus_channel.state) */
enum vmbus_channel_state {
	CHANNEL_OFFER_STATE,
	CHANNEL_OPENING_STATE,
	CHANNEL_OPEN_STATE,
	CHANNEL_OPENED_STATE,
};
702
703
704
705
706
/*
 * Bookkeeping wrapper around an outgoing channel message: list linkage,
 * a completion for the response, and the raw message itself.
 */
struct vmbus_channel_msginfo {
	/* Bookkeeping stuff */
	struct list_head msglistentry;

	/* So far, this is only used by the vmbus GPADL-handling code */
	struct list_head submsglist;

	/* Synchronize the request/response if needed */
	struct completion  waitevent;
	struct vmbus_channel *waiting_channel;
	union {
		struct vmbus_channel_version_supported version_supported;
		struct vmbus_channel_open_result open_result;
		struct vmbus_channel_gpadl_torndown gpadl_torndown;
		struct vmbus_channel_gpadl_created gpadl_created;
		struct vmbus_channel_version_response version_response;
	} response;

	u32 msgsize;
	/*
	 * The channel message that goes out on the "wire".
	 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
	 */
	unsigned char msg[];
};

struct vmbus_close_msg {
	struct vmbus_channel_msginfo info;
	struct vmbus_channel_close_channel msg;
};

/* Define connection identifier type. */
union hv_connection_id {
	u32 asu32;
	struct {
		u32 id:24;
		u32 reserved:8;
	} u;
};

/* Known Hyper-V device classes, used for per-class defaults */
enum vmbus_device_type {
	HV_IDE = 0,
	HV_SCSI,
	HV_FC,
	HV_NIC,
	HV_ND,
	HV_PCIE,
	HV_FB,
	HV_KBD,
	HV_MOUSE,
	HV_KVP,
	HV_TS,
	HV_HB,
	HV_SHUTDOWN,
	HV_FCOPY,
	HV_BACKUP,
	HV_DM,
	HV_UNKNOWN,
};

/*
 * Maps transaction ids to request addresses so that completions coming
 * back from the host can be matched to the originating request
 * (see vmbus_next_request_id()/vmbus_request_addr()).
 */
struct vmbus_requestor {
	u64 *req_arr;
	unsigned long *req_bitmap; /* is a given slot available or not */
	u32 size;
	u64 next_request_id;
	spinlock_t req_lock; /* provides atomicity */
};

#define VMBUS_NO_RQSTOR U64_MAX
#define VMBUS_RQST_ERROR (U64_MAX - 1)
#define VMBUS_RQST_ID_NO_RESPONSE (U64_MAX - 2)

/* Per-class device properties */
struct vmbus_device {
	u16  dev_type;
	guid_t guid;
	bool perf_device;	/* performance-critical: spread across CPUs */
	bool allowed_in_isolated;
};
790
/* Per-channel state maintained by the VMBus driver */
struct vmbus_channel {
	struct list_head listentry;

	struct hv_device *device_obj;

	enum vmbus_channel_state state;

	/* The offer this channel was created from */
	struct vmbus_channel_offer_channel offermsg;
	/*
	 * These are based on the offermsg.monitorid.
	 * Save it here for easy access.
	 */
	u8 monitor_grp;
	u8 monitor_bit;

	bool rescind; /* got rescind msg */
	bool rescind_ref; /* got rescind msg, got channel reference */
	struct completion rescind_event;

	u32 ringbuffer_gpadlhandle;

	/* Allocated memory for ring buffer */
	struct page *ringbuffer_page;
	u32 ringbuffer_pagecount;
	u32 ringbuffer_send_offset;
	struct hv_ring_buffer_info outbound;	/* send to parent */
	struct hv_ring_buffer_info inbound;	/* receive from parent */

	struct vmbus_close_msg close_msg;

	/* Statistics */
	u64	interrupts;	/* Host to Guest interrupts */
	u64	sig_events;	/* Guest to Host events */

	/*
	 * Guest to host interrupts caused by the outbound ring buffer
	 * changing from empty to not empty.
	 */
	u64 intr_out_empty;

	/*
	 * Indicates that a full outbound ring buffer was encountered.
	 * The flag is set to true when a full outbound ring buffer is
	 * encountered and set to false when a write to the outbound ring
	 * buffer is completed.
	 */
	bool out_full_flag;

	/* Channel callback's invoked in softirq context */
	struct tasklet_struct callback_event;
	void (*onchannel_callback)(void *context);
	void *channel_callback_context;

	void (*change_target_cpu_callback)(struct vmbus_channel *channel,
			u32 old, u32 new);

	/*
	 * Synchronize channel scheduling and channel removal; see the
	 * inline comments in vmbus_chan_sched() and vmbus_reset_channel_cb().
	 */
	spinlock_t sched_lock;

	/*
	 * Mode of invoking the channel callback:
	 *   BATCHED: callback from the tasklet, interrupts masked while
	 *            reading (default);
	 *   DIRECT:  callback from the tasklet without masking;
	 *   ISR:     callback directly from interrupt context.
	 */
	enum hv_callback_mode {
		HV_CALL_BATCHED,
		HV_CALL_DIRECT,
		HV_CALL_ISR
	} callback_mode;

	bool is_dedicated_interrupt;
	u64 sig_event;

	/*
	 * Irrespective of the channel, the "target" CPU is always the one
	 * specified in the last call to vmbus_send_modifychannel() (or the
	 * one from the channel offer, before any such call).
	 */
	u32 target_cpu;

	/*
	 * Support for sub-channels. For high performance devices, it
	 * will be useful to have multiple sub-channels to support
	 * a scalable communication infrastructure with the host.
	 * Invoked when a new sub-channel is offered by the host.
	 */
	void (*sc_creation_callback)(struct vmbus_channel *new_sc);

	/*
	 * Channel rescind callback. Some channels (the hvsock ones), need to
	 * register a callback which is invoked in vmbus_onoffer_rescind().
	 */
	void (*chn_rescind_callback)(struct vmbus_channel *channel);

	/*
	 * All Sub-channels of a primary channel are linked here.
	 */
	struct list_head sc_list;

	/*
	 * The primary channel this sub-channel belongs to.
	 * This will be NULL for the primary channel.
	 */
	struct vmbus_channel *primary_channel;

	/*
	 * Support per-channel state for use by vmbus drivers.
	 */
	void *per_channel_state;

	/*
	 * Defer freeing channel until after all cpu's have finished
	 * interrupt processing.
	 */
	struct rcu_head rcu;

	/*
	 * For sysfs per-channel properties.
	 */
	struct kobject			kobj;

	/* Low-latency channel: prefer HV_CALL_DIRECT processing */
	bool low_latency;

	/* Set once the device driver's probe has completed */
	bool probe_done;

	/* Cached device class (enum vmbus_device_type) for this channel */
	u16 device_id;

	/*
	 * Defer the handling of a sub-channel offer to a work queue;
	 * the primary channel may not exist yet when the offer arrives.
	 */
	struct work_struct add_channel_work;

	/*
	 * Guest to host interrupts caused by the inbound ring buffer
	 * changing from full to not full while a packet is waiting.
	 */
	u64 intr_in_full;

	/*
	 * The total number of write operations that encountered a full
	 * outbound ring buffer.
	 */
	u64 out_full_total;

	/*
	 * The number of write operations that were the first to encounter
	 * a full outbound ring buffer.
	 */
	u64 out_full_first;

	/* enabling/disabling fuzz testing on the channel (default is false)*/
	bool fuzz_testing_state;

	/*
	 * Artificial delays (microseconds) injected before interrupt
	 * delivery / message handling when fuzz testing is enabled.
	 */
	u32 fuzz_testing_interrupt_delay;
	u32 fuzz_testing_message_delay;

	/* request/transaction ids for VMBus */
	struct vmbus_requestor requestor;
	u32 rqstor_size;
};
1014
/* Allocate a transaction id for rqst_addr / look a transaction id back up */
u64 vmbus_next_request_id(struct vmbus_requestor *rqstor, u64 rqst_addr);
u64 vmbus_request_addr(struct vmbus_requestor *rqstor, u64 trans_id);
1017
1018static inline bool is_hvsock_channel(const struct vmbus_channel *c)
1019{
1020 return !!(c->offermsg.offer.chn_flags &
1021 VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
1022}
1023
1024static inline bool is_sub_channel(const struct vmbus_channel *c)
1025{
1026 return c->offermsg.offer.sub_channel_index != 0;
1027}
1028
/* Select how the channel callback is invoked (see enum hv_callback_mode) */
static inline void set_channel_read_mode(struct vmbus_channel *c,
					enum hv_callback_mode mode)
{
	c->callback_mode = mode;
}

/* Attach driver-private per-channel state */
static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
{
	c->per_channel_state = s;
}

/* Retrieve driver-private per-channel state */
static inline void *get_per_channel_state(struct vmbus_channel *c)
{
	return c->per_channel_state;
}

/*
 * Advertise to the host how much outbound free space must become
 * available before it signals us (0 disables the notification).
 * Also maintains the out_full_* statistics; the ring_lock protects
 * them against concurrent writers on this channel.
 */
static inline void set_channel_pending_send_size(struct vmbus_channel *c,
						 u32 size)
{
	unsigned long flags;

	if (size) {
		/* A non-zero size means the outbound ring was found full */
		spin_lock_irqsave(&c->outbound.ring_lock, flags);
		++c->out_full_total;

		if (!c->out_full_flag) {
			/* First full encounter since the last free write */
			++c->out_full_first;
			c->out_full_flag = true;
		}
		spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
	} else {
		c->out_full_flag = false;
	}

	/* Publish the threshold in the shared ring buffer header */
	c->outbound.ring_buffer->pending_send_sz = size;
}

/* Mark the channel as latency-sensitive */
static inline void set_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = true;
}

/* Clear the latency-sensitive marking */
static inline void clear_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = false;
}
1075
/* Dispatch an incoming channel management message */
void vmbus_onmessage(struct vmbus_channel_message_header *hdr);

/* Ask the host to (re)send all channel offers */
int vmbus_request_offers(void);

/*
 * APIs for managing sub-channels. The callback is invoked from the
 * channel-message handling path when a new sub-channel is offered.
 */
void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
			void (*sc_cr_cb)(struct vmbus_channel *new_sc));

/* Register a callback invoked when the channel is rescinded by the host */
void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
		void (*chn_rescind_cb)(struct vmbus_channel *));

/* Check if sub-channels have already been offered for this primary */
bool vmbus_are_subchannels_present(struct vmbus_channel *primary);
1102
1103
/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
	u16 type;
	u16 dataoffset8;	/* Offset of payload, in 8-byte units */
	u16 length8;		/* Total length, in 8-byte units */
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Number of valid entries in range[] */
	struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_multipage_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_multipage_buffer range;
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_packet_mpb_array {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_mpb_array range;
} __packed;
1138
/* Allocate/free the channel's ring-buffer memory */
int vmbus_alloc_ring(struct vmbus_channel *channel,
		     u32 send_size, u32 recv_size);
void vmbus_free_ring(struct vmbus_channel *channel);

/* Connect/disconnect previously allocated rings to the host */
int vmbus_connect_ring(struct vmbus_channel *channel,
		       void (*onchannel_callback)(void *context),
		       void *context);
int vmbus_disconnect_ring(struct vmbus_channel *channel);

/* Allocate rings and open the channel in one call */
extern int vmbus_open(struct vmbus_channel *channel,
			    u32 send_ringbuffersize,
			    u32 recv_ringbuffersize,
			    void *userdata,
			    u32 userdatalen,
			    void (*onchannel_callback)(void *context),
			    void *context);

/* Close the channel and free its rings */
extern void vmbus_close(struct vmbus_channel *channel);

/* Send an in-band packet of the given type */
extern int vmbus_sendpacket(struct vmbus_channel *channel,
				  void *buffer,
				  u32 bufferLen,
				  u64 requestid,
				  enum vmbus_packet_type type,
				  u32 flags);

/* Send a GPA-direct packet described by an array of page buffers */
extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
					    struct hv_page_buffer pagebuffers[],
					    u32 pagecount,
					    void *buffer,
					    u32 bufferlen,
					    u64 requestid);

/* Send a GPA-direct packet with a caller-built multi-page descriptor */
extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
				     struct vmbus_packet_mpb_array *mpb,
				     u32 desc_size,
				     void *buffer,
				     u32 bufferlen,
				     u64 requestid);

/* Create a GPADL for kbuffer and return its handle */
extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
				      void *kbuffer,
				      u32 size,
				      u32 *gpadl_handle);

/* Tear down a GPADL previously created with vmbus_establish_gpadl() */
extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
				     u32 gpadl_handle);

/* Quiesce and reset the channel callback machinery */
void vmbus_reset_channel_cb(struct vmbus_channel *channel);

/* Receive the payload of the next packet (header stripped) */
extern int vmbus_recvpacket(struct vmbus_channel *channel,
				  void *buffer,
				  u32 bufferlen,
				  u32 *buffer_actual_len,
				  u64 *requestid);

/* Receive the next packet including its descriptor */
extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
				     void *buffer,
				     u32 bufferlen,
				     u32 *buffer_actual_len,
				     u64 *requestid);


extern void vmbus_ontimer(unsigned long data);
1203
1204
/* Base driver for VMBus devices */
struct hv_driver {
	const char *name;

	/*
	 * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
	 * channel flag, is handled by the hvsock driver rather than matched
	 * through dev_type/id_table.
	 */
	bool hvsock;

	/* The device type supported by this driver */
	guid_t dev_type;
	const struct hv_vmbus_device_id *id_table;

	struct device_driver driver;

	/* Dynamic device GUID's added via sysfs */
	struct {
		spinlock_t lock;
		struct list_head list;
	} dynids;

	int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
	int (*remove)(struct hv_device *);
	void (*shutdown)(struct hv_device *);

	int (*suspend)(struct hv_device *);
	int (*resume)(struct hv_device *);

};

/* Base device object for a VMBus channel */
struct hv_device {
	/* the device type id of this device */
	guid_t dev_type;

	/* the device instance id of this device */
	guid_t dev_instance;
	u16 vendor_id;
	u16 device_id;

	struct device device;
	char *driver_override; /* Driver name to force a match */

	struct vmbus_channel *channel;
	struct kset	     *channels_kset;

	/* place holder to keep track of the dir for hv device in debugfs */
	struct dentry *debug_dir;

};
1263
1264
/* Convert an embedded struct device back to its hv_device */
static inline struct hv_device *device_to_hv_device(struct device *d)
{
	return container_of(d, struct hv_device, device);
}

/* Convert an embedded struct device_driver back to its hv_driver */
static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
{
	return container_of(d, struct hv_driver, driver);
}

/* Attach driver-private data to the hv_device */
static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
	dev_set_drvdata(&dev->device, data);
}

/* Retrieve driver-private data from the hv_device */
static inline void *hv_get_drvdata(struct hv_device *dev)
{
	return dev_get_drvdata(&dev->device);
}
1284
/* Snapshot of ring-buffer state for debugfs/sysfs reporting */
struct hv_ring_buffer_debug_info {
	u32 current_interrupt_mask;
	u32 current_read_index;
	u32 current_write_index;
	u32 bytes_avail_toread;
	u32 bytes_avail_towrite;
};

/* Fill debug_info from ring_info; returns 0 or a negative errno */
int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info);

/* Vmbus interface */
#define vmbus_driver_register(driver)	\
	__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
					 struct module *owner,
					 const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);

/* Unregister the device created for a hvsock channel */
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);

/* Claim/release MMIO space for a VMBus device */
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok);
void vmbus_free_mmio(resource_size_t start, resource_size_t size);
1312
1313
1314
1315
1316
1317
1318
1319
1320
/* Interface-type GUIDs of the well-known Hyper-V synthetic devices */

/* Network GUID */
#define HV_NIC_GUID \
	.guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
			  0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)

/* IDE GUID */
#define HV_IDE_GUID \
	.guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
			  0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)

/* SCSI GUID */
#define HV_SCSI_GUID \
	.guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
			  0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)

/* Shutdown GUID */
#define HV_SHUTDOWN_GUID \
	.guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
			  0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)

/* Time Synch GUID */
#define HV_TS_GUID \
	.guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
			  0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)

/* Heartbeat GUID */
#define HV_HEART_BEAT_GUID \
	.guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
			  0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)

/* KVP GUID */
#define HV_KVP_GUID \
	.guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
			  0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)

/* Dynamic memory GUID */
#define HV_DM_GUID \
	.guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
			  0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)

/* Mouse GUID */
#define HV_MOUSE_GUID \
	.guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
			  0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)

/* Keyboard GUID */
#define HV_KBD_GUID \
	.guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
			  0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)

/* VSS (Backup/Restore) GUID */
#define HV_VSS_GUID \
	.guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
			  0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)

/* Synthetic Video GUID */
#define HV_SYNTHVID_GUID \
	.guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
			  0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)

/* Synthetic FC GUID */
#define HV_SYNTHFC_GUID \
	.guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
			  0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)

/* Guest File Copy Service GUID */
#define HV_FCOPY_GUID \
	.guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
			  0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)

/* NetworkDirect GUID */
#define HV_ND_GUID \
	.guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
			  0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)

/* PCI Express Pass Through GUID */
#define HV_PCIE_GUID \
	.guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
			  0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)

/*
 * Linux doesn't support these 4 devices: the first two are for
 * Automatic Virtual Machine Activation, the third is for
 * Remote Desktop Virtualization.
 */
#define HV_AVMA1_GUID \
	.guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
			  0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)

#define HV_AVMA2_GUID \
	.guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
			  0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)

#define HV_RDV_GUID \
	.guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
			  0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
1465
1466
1467
1468
1469
/* Message types of the integration-component (IC) "util" protocol */
#define ICMSGTYPE_NEGOTIATE		0
#define ICMSGTYPE_HEARTBEAT		1
#define ICMSGTYPE_KVPEXCHANGE		2
#define ICMSGTYPE_SHUTDOWN		3
#define ICMSGTYPE_TIMESYNC		4
#define ICMSGTYPE_VSS			5
#define ICMSGTYPE_FCOPY			7

/* icmsg_hdr.icflags values */
#define ICMSGHDRFLAG_TRANSACTION	1
#define ICMSGHDRFLAG_REQUEST		2
#define ICMSGHDRFLAG_RESPONSE		4

/*
 * The framework version encapsulates the transport-level handling of
 * messages; the message-level versioning is done per service below.
 */
struct hv_util_service {
	u8 *recv_buffer;
	void *channel;
	void (*util_cb)(void *);
	int (*util_init)(struct hv_util_service *);
	void (*util_deinit)(void);
	int (*util_pre_suspend)(void);
	int (*util_pre_resume)(void);
};

struct vmbuspipe_hdr {
	u32 flags;
	u32 msgsize;	/* Size of the payload that follows */
} __packed;

struct ic_version {
	u16 major;
	u16 minor;
} __packed;

/* Header of every IC message, preceded by a vmbuspipe_hdr on the wire */
struct icmsg_hdr {
	struct ic_version icverframe;	/* Negotiated framework version */
	u16 icmsgtype;			/* One of ICMSGTYPE_* */
	struct ic_version icvermsg;	/* Negotiated message version */
	u16 icmsgsize;
	u32 status;			/* Zero on success */
	u8 ictransaction_id;
	u8 icflags;			/* ICMSGHDRFLAG_* */
	u8 reserved[2];
} __packed;

#define IC_VERSION_NEGOTIATION_MAX_VER_COUNT 100
#define ICMSG_HDR (sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr))
#define ICMSG_NEGOTIATE_PKT_SIZE(icframe_vercnt, icmsg_vercnt) \
	(ICMSG_HDR + sizeof(struct icmsg_negotiate) + \
	 (((icframe_vercnt) + (icmsg_vercnt)) * sizeof(struct ic_version)))

/*
 * Version negotiation payload: icversion_data[] carries
 * icframe_vercnt framework versions followed by icmsg_vercnt
 * message versions.
 */
struct icmsg_negotiate {
	u16 icframe_vercnt;
	u16 icmsg_vercnt;
	u32 reserved;
	struct ic_version icversion_data[]; /* any size array */
} __packed;

struct shutdown_msg_data {
	u32 reason_code;
	u32 timeout_seconds;
	u32 flags;
	u8  display_message[2048];
} __packed;

struct heartbeat_msg_data {
	u64 seq_num;
	u32 reserved[8];
} __packed;
1544
1545
/* Time Sync IC defs (ictimesync_data.flags) */
#define ICTIMESYNCFLAG_PROBE	0
#define ICTIMESYNCFLAG_SYNC	1
#define ICTIMESYNCFLAG_SAMPLE	2

/*
 * Number of 100 ns units between the Windows epoch (1601-01-01) and the
 * Unix epoch (1970-01-01); used to convert host timestamps.
 */
#ifdef __x86_64__
#define WLTIMEDELTA 116444736000000000L	/* in 100ns unit */
#else
#define WLTIMEDELTA 116444736000000000LL
#endif

struct ictimesync_data {
	u64 parenttime;		/* Host time, in 100 ns units */
	u64 childtime;
	u64 roundtriptime;
	u8 flags;		/* ICTIMESYNCFLAG_* */
} __packed;

/* Time sync message with a TSC reference (newer protocol) */
struct ictimesync_ref_data {
	u64 parenttime;
	u64 vmreferencetime;
	u8 flags;
	char leapflags;
	char stratum;
	u8 reserved[3];
} __packed;

/* Registration record for an IC service callback */
struct hyperv_service_callback {
	u8 msg_type;
	char *log_msg;
	guid_t data;
	struct vmbus_channel *channel;
	void (*callback)(void *context);
};

#define MAX_SRV_VER	0x7ffffff
/*
 * Negotiate framework/service versions for an IC channel and fill in the
 * response; returns true if a mutually supported version was found.
 */
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, u32 buflen,
				const int *fw_version, int fw_vercnt,
				const int *srv_version, int srv_vercnt,
				int *nego_fw_version, int *nego_srv_version);

void hv_process_channel_removal(struct vmbus_channel *channel);

void vmbus_setevent(struct vmbus_channel *channel);

/* Negotiated VMBus protocol version (one of the VERSION_* values) */
extern __u32 vmbus_proto_version;

int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
				  const guid_t *shv_host_servie_id);
int vmbus_send_modifychannel(u32 child_relid, u32 target_vp);
void vmbus_set_event(struct vmbus_channel *channel);
1599
1600
/* Get the start of the ring buffer's data area */
static inline void *
hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_buffer->buffer;
}

/*
 * Mask host interrupts while we process the incoming ring buffer.
 * The barrier orders the mask write before subsequent ring reads.
 */
static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;

	/* make sure mask update is not reordered */
	virt_mb();
}

/*
 * Re-enable host interrupts after processing, then re-check for data
 * that may have arrived in the window before the unmask became visible.
 * Returns the number of bytes still pending so the caller can resume
 * processing instead of losing a wakeup.
 */
static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{

	rbi->ring_buffer->interrupt_mask = 0;

	/* make sure mask update is not reordered */
	virt_mb();

	return hv_get_bytes_to_read(rbi);
}
1636
1637
1638
1639
1640
1641
1642static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
1643{
1644 return (void *)((unsigned long)desc + (desc->offset8 << 3));
1645}
1646
1647
1648static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
1649{
1650 return (desc->len8 << 3) - (desc->offset8 << 3);
1651}
1652
1653
/* Start iterating over the channel's inbound packets */
struct vmpacket_descriptor *
hv_pkt_iter_first(struct vmbus_channel *channel);

/* Advance to the packet after pkt, or NULL when the ring is drained */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *pkt);

/* Finish iteration: update the read index and signal the host if needed */
void hv_pkt_iter_close(struct vmbus_channel *channel);
1662
1663
1664
1665
1666
1667static inline struct vmpacket_descriptor *
1668hv_pkt_iter_next(struct vmbus_channel *channel,
1669 const struct vmpacket_descriptor *pkt)
1670{
1671 struct vmpacket_descriptor *nxt;
1672
1673 nxt = __hv_pkt_iter_next(channel, pkt);
1674 if (!nxt)
1675 hv_pkt_iter_close(channel);
1676
1677 return nxt;
1678}
1679
/* Iterate over all inbound packets; the iterator is closed automatically */
#define foreach_vmbus_pkt(pkt, channel) \
	for (pkt = hv_pkt_iter_first(channel); pkt; \
	    pkt = hv_pkt_iter_next(channel, pkt))
1683
1684
1685
1686
1687
1688
1689
1690
/*
 * Interface for Hyper-V PCI config-block transfers (used by the
 * paravirtual PCI and network drivers).
 */
#define HV_CONFIG_BLOCK_SIZE_MAX 128

int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len,
			unsigned int block_id, unsigned int *bytes_returned);
int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len,
			 unsigned int block_id);
/* Register a callback invoked when the host invalidates config blocks */
int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context,
				void (*block_invalidate)(void *context,
							 u64 block_mask));

/* Operations table implemented by the Hyper-V PCI driver */
struct hyperv_pci_block_ops {
	int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len,
			  unsigned int block_id, unsigned int *bytes_returned);
	int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len,
			   unsigned int block_id);
	int (*reg_blk_invalidate)(struct pci_dev *dev, void *context,
				  void (*block_invalidate)(void *context,
							   u64 block_mask));
};

extern struct hyperv_pci_block_ops hvpci_block_ops;
1712
1713static inline unsigned long virt_to_hvpfn(void *addr)
1714{
1715 phys_addr_t paddr;
1716
1717 if (is_vmalloc_addr(addr))
1718 paddr = page_to_phys(vmalloc_to_page(addr)) +
1719 offset_in_page(addr);
1720 else
1721 paddr = __pa(addr);
1722
1723 return paddr >> HV_HYP_PAGE_SHIFT;
1724}
1725
/*
 * Helpers for converting between guest PAGE_SIZE pages and Hyper-V's
 * fixed HV_HYP_PAGE_SIZE pages (the two sizes may differ, e.g. on arm64).
 */
#define NR_HV_HYP_PAGES_IN_PAGE	(PAGE_SIZE / HV_HYP_PAGE_SIZE)
#define offset_in_hvpage(ptr)	((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
#define HVPFN_UP(x)	(((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT)
#define page_to_hvpfn(page)	(page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE)
1730
1731#endif
1732