#ifndef _HYPERV_H
#define _HYPERV_H

#include <uapi/linux/hyperv.h>

#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>


#define MAX_PAGE_BUFFER_COUNT           32
#define MAX_MULTIPAGE_BUFFER_COUNT      32

#pragma pack(push, 1)

/* Single-page buffer */
struct hv_page_buffer {
        u32 len;
        u32 offset;
        u64 pfn;
};

/* Multiple-page buffer */
struct hv_multipage_buffer {
        /* Length and offset determine the number of entries in pfn_array */
        u32 len;
        u32 offset;
        u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};

/* 0x18 is the size of the proprietary VMBus packet header */
#define MAX_PAGE_BUFFER_PACKET          (0x18 +                 \
                                        (sizeof(struct hv_page_buffer) * \
                                         MAX_PAGE_BUFFER_COUNT))
#define MAX_MULTIPAGE_BUFFER_PACKET     (0x18 +                 \
                                         sizeof(struct hv_multipage_buffer))


#pragma pack(pop)

struct hv_ring_buffer {
        /* Offset in bytes from the start of ring data below */
        u32 write_index;

        /* Offset in bytes from the start of ring data below */
        u32 read_index;

        u32 interrupt_mask;

        /*
         * Win8 and later use some of the reserved bits to implement
         * interrupt-driven flow management. On the send side we can
         * request that the host interrupt the guest when the ring
         * transitions from being full to having room for a message of
         * size "pending_send_sz".
         */
        u32 pending_send_sz;

        u32 reserved1[12];

        union {
                struct {
                        u32 feat_pending_send_sz:1;
                };
                u32 value;
        } feature_bits;

        /* Pad it to PAGE_SIZE so that data starts on a page boundary */
        u8 reserved2[4028];

        /*
         * Ring data starts here + ring_data_startoffset.
         * Do not place any fields below this member.
         */
        u8 buffer[0];
} __packed;

struct hv_ring_buffer_info {
        struct hv_ring_buffer *ring_buffer;
        u32 ring_size;                  /* Includes the shared header */
        spinlock_t ring_lock;

        u32 ring_datasize;              /* < ring_size */
        u32 ring_data_startoffset;
};

/*
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read from and to write to
 * for the specified ring buffer.
 */
static inline void
hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
                             u32 *read, u32 *write)
{
        u32 read_loc, write_loc, dsize;

        smp_read_barrier_depends();

        /* Capture the read/write indices before they changed */
        read_loc = rbi->ring_buffer->read_index;
        write_loc = rbi->ring_buffer->write_index;
        dsize = rbi->ring_datasize;

        *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
                read_loc - write_loc;
        *read = dsize - *write;
}
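
/*
 * Illustrative sketch (not part of this header's API): a driver-defined
 * channel callback might use the helper above to decide whether inbound
 * data is pending before reading packets. The callback name and the scratch
 * buffer are assumptions made up for the example.
 *
 *	static void example_onchannelcallback(void *context)
 *	{
 *		struct vmbus_channel *channel = context;
 *		u32 toread, towrite;
 *
 *		hv_get_ringbuffer_availbytes(&channel->inbound,
 *					     &toread, &towrite);
 *		if (toread < sizeof(struct vmpacket_descriptor))
 *			return;	  // nothing complete to read yet
 *		// ... call vmbus_recvpacket() to drain the channel ...
 *	}
 */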

/*
 * The VMBus protocol version is a 32-bit value made up of two 16-bit
 * quantities: (major_number << 16) | minor_number.
 *
 *	0 . 13	(Windows Server 2008)
 *	1 . 1	(Windows 7)
 *	2 . 4	(Windows 8)
 *	3 . 0	(Windows 8.1)
 */
#define VERSION_WS2008  ((0 << 16) | (13))
#define VERSION_WIN7    ((1 << 16) | (1))
#define VERSION_WIN8    ((2 << 16) | (4))
#define VERSION_WIN8_1  ((3 << 16) | (0))

#define VERSION_INVAL -1

#define VERSION_CURRENT VERSION_WIN8_1

/* Make maximum size of pipe payload of 16K */
#define MAX_PIPE_DATA_PAYLOAD           (sizeof(u8) * 16384)

/* Define PipeMode values */
#define VMBUS_PIPE_TYPE_BYTE            0x00000000
#define VMBUS_PIPE_TYPE_MESSAGE         0x00000004

/* The size of the user defined data buffer for non-pipe offers */
#define MAX_USER_DEFINED_BYTES          120

/* The size of the user defined data buffer for pipe offers */
#define MAX_PIPE_USER_DEFINED_BYTES     116

/*
 * At the center of the Channel Management library is the Channel Offer.
 * This struct contains the fundamental information about an offer.
 */
struct vmbus_channel_offer {
        uuid_le if_type;
        uuid_le if_instance;

        /*
         * These two fields are not currently used.
         */
        u64 reserved1;
        u64 reserved2;

        u16 chn_flags;
        u16 mmio_megabytes;             /* in bytes * 1024 * 1024 */

        union {
                /* Non-pipes: the user has MAX_USER_DEFINED_BYTES bytes. */
                struct {
                        unsigned char user_def[MAX_USER_DEFINED_BYTES];
                } std;

                /*
                 * Pipes:
                 * The following structure is an integrated pipe protocol,
                 * which is implemented on top of standard user-defined data.
                 * Pipe clients have MAX_PIPE_USER_DEFINED_BYTES left for
                 * their own use.
                 */
                struct {
                        u32 pipe_mode;
                        unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
                } pipe;
        } u;

        /*
         * The sub_channel_index is defined in win8.
         */
        u16 sub_channel_index;
        u16 reserved3;
} __packed;

/* Server Flags */
#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE        1
#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES    2
#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS            4
#define VMBUS_CHANNEL_NAMED_PIPE_MODE                   0x10
#define VMBUS_CHANNEL_LOOPBACK_OFFER                    0x100
#define VMBUS_CHANNEL_PARENT_OFFER                      0x200
#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION    0x400

struct vmpacket_descriptor {
        u16 type;
        u16 offset8;            /* offset of the payload, in 8-byte units */
        u16 len8;               /* total packet length, in 8-byte units */
        u16 flags;
        u64 trans_id;
} __packed;

struct vmpacket_header {
        u32 prev_pkt_start_offset;
        struct vmpacket_descriptor descriptor;
} __packed;

struct vmtransfer_page_range {
        u32 byte_count;
        u32 byte_offset;
} __packed;

struct vmtransfer_page_packet_header {
        struct vmpacket_descriptor d;
        u16 xfer_pageset_id;
        u8 sender_owns_set;
        u8 reserved;
        u32 range_cnt;
        struct vmtransfer_page_range ranges[1];
} __packed;

struct vmgpadl_packet_header {
        struct vmpacket_descriptor d;
        u32 gpadl;
        u32 reserved;
} __packed;

struct vmadd_remove_transfer_page_set {
        struct vmpacket_descriptor d;
        u32 gpadl;
        u16 xfer_pageset_id;
        u16 reserved;
} __packed;

/*
 * This structure defines a range in guest physical space that can be made to
 * look virtually contiguous.
 */
struct gpa_range {
        u32 byte_count;
        u32 byte_offset;
        u64 pfn_array[0];
};

/*
 * This is the format for an Establish GPADL packet, which contains a handle
 * by which this GPADL will be known and a set of GPA ranges associated with
 * it. If there are multiple GPA ranges, the resulting mapping represents
 * multiple VA ranges.
 */
struct vmestablish_gpadl {
        struct vmpacket_descriptor d;
        u32 gpadl;
        u32 range_cnt;
        struct gpa_range range[1];
} __packed;

/*
 * This is the format for a Teardown GPADL packet, which indicates that the
 * GPADL handle in the Establish GPADL packet will never be referenced again.
 */
struct vmteardown_gpadl {
        struct vmpacket_descriptor d;
        u32 gpadl;
        u32 reserved;           /* for alignment to an 8-byte boundary */
} __packed;

/*
 * This is the format for a GPA-Direct packet, which contains a set of GPA
 * ranges, in addition to commands and/or data.
 */
struct vmdata_gpa_direct {
        struct vmpacket_descriptor d;
        u32 reserved;
        u32 range_cnt;
        struct gpa_range range[1];
} __packed;

/* This is the format for an Additional Data packet. */
struct vmadditional_data {
        struct vmpacket_descriptor d;
        u64 total_bytes;
        u32 offset;
        u32 byte_cnt;
        unsigned char data[1];
} __packed;

union vmpacket_largest_possible_header {
        struct vmpacket_descriptor simple_hdr;
        struct vmtransfer_page_packet_header xfer_page_hdr;
        struct vmgpadl_packet_header gpadl_hdr;
        struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
        struct vmestablish_gpadl establish_gpadl_hdr;
        struct vmteardown_gpadl teardown_gpadl_hdr;
        struct vmdata_gpa_direct data_gpa_direct_hdr;
};

#define VMPACKET_DATA_START_ADDRESS(__packet)   \
        (void *)(((unsigned char *)__packet) +  \
         ((struct vmpacket_descriptor *)__packet)->offset8 * 8)

#define VMPACKET_DATA_LENGTH(__packet)          \
        ((((struct vmpacket_descriptor *)__packet)->len8 -     \
          ((struct vmpacket_descriptor *)__packet)->offset8) * 8)

#define VMPACKET_TRANSFER_MODE(__packet)        \
        (((struct vmpacket_descriptor *)__packet)->type)

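/*
 * Illustrative sketch (assumption, not an API defined here): given the raw
 * packet returned by vmbus_recvpacket_raw(), the payload can be located with
 * the helper macros above. "buf" is a made-up name for the receive buffer.
 *
 *	struct vmpacket_descriptor *desc = (struct vmpacket_descriptor *)buf;
 *	void *payload = VMPACKET_DATA_START_ADDRESS(desc);
 *	u32 payload_len = VMPACKET_DATA_LENGTH(desc);
 *
 * offset8 and len8 are expressed in 8-byte units, which is why both macros
 * multiply by 8.
 */
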
enum vmbus_packet_type {
        VM_PKT_INVALID                          = 0x0,
        VM_PKT_SYNCH                            = 0x1,
        VM_PKT_ADD_XFER_PAGESET                 = 0x2,
        VM_PKT_RM_XFER_PAGESET                  = 0x3,
        VM_PKT_ESTABLISH_GPADL                  = 0x4,
        VM_PKT_TEARDOWN_GPADL                   = 0x5,
        VM_PKT_DATA_INBAND                      = 0x6,
        VM_PKT_DATA_USING_XFER_PAGES            = 0x7,
        VM_PKT_DATA_USING_GPADL                 = 0x8,
        VM_PKT_DATA_USING_GPA_DIRECT            = 0x9,
        VM_PKT_CANCEL_REQUEST                   = 0xa,
        VM_PKT_COMP                             = 0xb,
        VM_PKT_DATA_USING_ADDITIONAL_PKT        = 0xc,
        VM_PKT_ADDITIONAL_DATA                  = 0xd
};

#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED     1


/* Version 1 messages */
enum vmbus_channel_message_type {
        CHANNELMSG_INVALID                      =  0,
        CHANNELMSG_OFFERCHANNEL                 =  1,
        CHANNELMSG_RESCIND_CHANNELOFFER         =  2,
        CHANNELMSG_REQUESTOFFERS                =  3,
        CHANNELMSG_ALLOFFERS_DELIVERED          =  4,
        CHANNELMSG_OPENCHANNEL                  =  5,
        CHANNELMSG_OPENCHANNEL_RESULT           =  6,
        CHANNELMSG_CLOSECHANNEL                 =  7,
        CHANNELMSG_GPADL_HEADER                 =  8,
        CHANNELMSG_GPADL_BODY                   =  9,
        CHANNELMSG_GPADL_CREATED                = 10,
        CHANNELMSG_GPADL_TEARDOWN               = 11,
        CHANNELMSG_GPADL_TORNDOWN               = 12,
        CHANNELMSG_RELID_RELEASED               = 13,
        CHANNELMSG_INITIATE_CONTACT             = 14,
        CHANNELMSG_VERSION_RESPONSE             = 15,
        CHANNELMSG_UNLOAD                       = 16,
#ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
        CHANNELMSG_VIEWRANGE_ADD                = 17,
        CHANNELMSG_VIEWRANGE_REMOVE             = 18,
#endif
        CHANNELMSG_COUNT
};

struct vmbus_channel_message_header {
        enum vmbus_channel_message_type msgtype;
        u32 padding;
} __packed;

/* Query VMBus Version parameters */
struct vmbus_channel_query_vmbus_version {
        struct vmbus_channel_message_header header;
        u32 version;
} __packed;

/* VMBus Version Supported parameters */
struct vmbus_channel_version_supported {
        struct vmbus_channel_message_header header;
        u8 version_supported;
} __packed;

/* Offer Channel parameters */
struct vmbus_channel_offer_channel {
        struct vmbus_channel_message_header header;
        struct vmbus_channel_offer offer;
        u32 child_relid;
        u8 monitorid;
        /*
         * win7 and beyond splits this field into a bit field.
         */
        u8 monitor_allocated:1;
        u8 reserved:7;
        /*
         * These are new fields added in win7 and later.
         * Do not access these fields without checking the negotiated
         * protocol version.
         *
         * If "is_dedicated_interrupt" is set, we must not set the
         * associated bit in the channel bitmap while sending the
         * interrupt to the host.
         *
         * connection_id is to be used in signaling the host.
         */
        u16 is_dedicated_interrupt:1;
        u16 reserved1:15;
        u32 connection_id;
} __packed;

/* Rescind Offer parameters */
struct vmbus_channel_rescind_offer {
        struct vmbus_channel_message_header header;
        u32 child_relid;
} __packed;

/* Open Channel parameters */
struct vmbus_channel_open_channel {
        struct vmbus_channel_message_header header;

        /* Identifies the specific VMBus channel that is being opened. */
        u32 child_relid;

        /* ID making a particular open request at a channel offer unique. */
        u32 openid;

        /* GPADL for the channel's ring buffer. */
        u32 ringbuffer_gpadlhandle;

        /*
         * Starting with win8, this field specifies the target virtual
         * processor on which to deliver the interrupt for host-to-guest
         * communication. Prior to win8, incoming channel interrupts were
         * only delivered on cpu 0; setting this value to 0 preserves the
         * earlier behavior.
         */
        u32 target_vp;

        /*
         * The upstream ring buffer begins at offset zero in the memory
         * described by ringbuffer_gpadlhandle. The downstream ring buffer
         * follows it at this offset (in pages).
         */
        u32 downstream_ringbuffer_pageoffset;

        /* User-specific data to be passed along to the server endpoint. */
        unsigned char userdata[MAX_USER_DEFINED_BYTES];
} __packed;

/* Open Channel Result parameters */
struct vmbus_channel_open_result {
        struct vmbus_channel_message_header header;
        u32 child_relid;
        u32 openid;
        u32 status;
} __packed;

/* Close channel parameters */
struct vmbus_channel_close_channel {
        struct vmbus_channel_message_header header;
        u32 child_relid;
} __packed;

/* Channel Message GPADL types */
#define GPADL_TYPE_RING_BUFFER          1
#define GPADL_TYPE_SERVER_SAVE_AREA     2
#define GPADL_TYPE_TRANSACTION          8

/*
 * The number of PFNs in a GPADL message is defined by the number of
 * pages that would be spanned by byte_count and byte_offset. If the
 * implied number of PFNs won't fit in this packet, there will be a
 * follow-up packet that contains more.
 */
struct vmbus_channel_gpadl_header {
        struct vmbus_channel_message_header header;
        u32 child_relid;
        u32 gpadl;
        u16 range_buflen;
        u16 rangecount;
        struct gpa_range range[0];
} __packed;

/* This is the followup packet that contains more PFNs. */
struct vmbus_channel_gpadl_body {
        struct vmbus_channel_message_header header;
        u32 msgnumber;
        u32 gpadl;
        u64 pfn[0];
} __packed;

struct vmbus_channel_gpadl_created {
        struct vmbus_channel_message_header header;
        u32 child_relid;
        u32 gpadl;
        u32 creation_status;
} __packed;

struct vmbus_channel_gpadl_teardown {
        struct vmbus_channel_message_header header;
        u32 child_relid;
        u32 gpadl;
} __packed;

struct vmbus_channel_gpadl_torndown {
        struct vmbus_channel_message_header header;
        u32 gpadl;
} __packed;

#ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
struct vmbus_channel_view_range_add {
        struct vmbus_channel_message_header header;
        PHYSICAL_ADDRESS viewrange_base;
        u64 viewrange_length;
        u32 child_relid;
} __packed;

struct vmbus_channel_view_range_remove {
        struct vmbus_channel_message_header header;
        PHYSICAL_ADDRESS viewrange_base;
        u32 child_relid;
} __packed;
#endif

struct vmbus_channel_relid_released {
        struct vmbus_channel_message_header header;
        u32 child_relid;
} __packed;

struct vmbus_channel_initiate_contact {
        struct vmbus_channel_message_header header;
        u32 vmbus_version_requested;
        u32 target_vcpu;        /* The VCPU the host should respond to */
        u64 interrupt_page;
        u64 monitor_page1;
        u64 monitor_page2;
} __packed;

struct vmbus_channel_version_response {
        struct vmbus_channel_message_header header;
        u8 version_supported;
} __packed;

enum vmbus_channel_state {
        CHANNEL_OFFER_STATE,
        CHANNEL_OPENING_STATE,
        CHANNEL_OPEN_STATE,
        CHANNEL_OPENED_STATE,
};

/*
 * Represents each channel msg on the vmbus connection. This is a
 * variable-size data structure depending on the msg type itself.
 */
struct vmbus_channel_msginfo {
        /* Bookkeeping stuff */
        struct list_head msglistentry;

        /* So far, this is only used to handle gpadl body message */
        struct list_head submsglist;

        /* Synchronize the request/response if needed */
        struct completion waitevent;
        union {
                struct vmbus_channel_version_supported version_supported;
                struct vmbus_channel_open_result open_result;
                struct vmbus_channel_gpadl_torndown gpadl_torndown;
                struct vmbus_channel_gpadl_created gpadl_created;
                struct vmbus_channel_version_response version_response;
        } response;

        u32 msgsize;
        /*
         * The channel message that goes out on the "wire".
         * It will contain at minimum the channel message header.
         */
        unsigned char msg[0];
};

struct vmbus_close_msg {
        struct vmbus_channel_msginfo info;
        struct vmbus_channel_close_channel msg;
};

/* Connection identifier type. */
union hv_connection_id {
        u32 asu32;
        struct {
                u32 id:24;
                u32 reserved:8;
        } u;
};

/* Definition of the hv_signal_event hypercall input structure. */
struct hv_input_signal_event {
        union hv_connection_id connectionid;
        u16 flag_number;
        u16 rsvdz;
};

struct hv_input_signal_event_buffer {
        u64 align8;
        struct hv_input_signal_event event;
};

struct vmbus_channel {
        struct list_head listentry;

        struct hv_device *device_obj;

        struct work_struct work;

        enum vmbus_channel_state state;

        struct vmbus_channel_offer_channel offermsg;
        /*
         * These are based on the offer's monitor id.
         * Saved here for easy access.
         */
        u8 monitor_grp;
        u8 monitor_bit;

        u32 ringbuffer_gpadlhandle;

        /* Allocated memory for ring buffer */
        void *ringbuffer_pages;
        u32 ringbuffer_pagecount;
        struct hv_ring_buffer_info outbound;    /* send to parent */
        struct hv_ring_buffer_info inbound;     /* receive from parent */
        spinlock_t inbound_lock;
        struct workqueue_struct *controlwq;

        struct vmbus_close_msg close_msg;

        /*
         * Channel callback invoked when data or a completion arrives on
         * the channel.
         */
        void (*onchannel_callback)(void *context);
        void *channel_callback_context;

        /*
         * A channel can be marked for efficient (batched) reading:
         * if batched_reading is true, the channel is read until it is
         * empty and host interrupts are held off for the duration of the
         * read; if false, no batching is done. Batched reading is enabled
         * by default; drivers that do not want this behavior can turn it
         * off with set_channel_read_state().
         */
        bool batched_reading;

        bool is_dedicated_interrupt;
        struct hv_input_signal_event_buffer sig_buf;
        struct hv_input_signal_event *sig_event;

        /*
         * Starting with win8, this field specifies the target virtual
         * processor on which to deliver the interrupt for host-to-guest
         * communication. Prior to win8, incoming channel interrupts were
         * only delivered on CPU 0; setting this value to 0 preserves that
         * behavior.
         */
        u32 target_vp;

        /*
         * Sub-channel support: the initial offer is the primary channel,
         * and the host may offer additional sub-channels with the same
         * type and instance GUIDs to build a scalable communication
         * infrastructure. A request sent on a given channel is answered
         * on that same channel.
         */

        /*
         * Sub-channel creation callback. Called in process context when a
         * sub-channel offer is received from the host; the guest may open
         * the sub-channel in the context of this callback.
         */
        void (*sc_creation_callback)(struct vmbus_channel *new_sc);

        spinlock_t sc_lock;
        /*
         * All sub-channels of a primary channel are linked here.
         */
        struct list_head sc_list;
        /*
         * The primary channel this sub-channel belongs to.
         * This is NULL for the primary channel itself.
         */
        struct vmbus_channel *primary_channel;
        /*
         * Per-channel state for use by vmbus drivers.
         */
        void *per_channel_state;
};

static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
{
        c->batched_reading = state;
}

static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
{
        c->per_channel_state = s;
}

static inline void *get_per_channel_state(struct vmbus_channel *c)
{
        return c->per_channel_state;
}
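
/*
 * Illustrative sketch (assumption, not part of this header): a driver that
 * opens sub-channels can hang its own bookkeeping off each channel with the
 * helpers above. "struct example_chan_state" and the function names are
 * made-up driver-side names.
 *
 *	struct example_chan_state {
 *		u32 packets_seen;
 *	};
 *
 *	static void example_sc_open(struct vmbus_channel *chan)
 *	{
 *		struct example_chan_state *st;
 *
 *		st = kzalloc(sizeof(*st), GFP_KERNEL);
 *		if (st)
 *			set_per_channel_state(chan, st);
 *	}
 *
 *	static void example_on_pkt(struct vmbus_channel *chan)
 *	{
 *		struct example_chan_state *st = get_per_channel_state(chan);
 *
 *		if (st)
 *			st->packets_seen++;
 *	}
 */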

void vmbus_onmessage(void *context);

int vmbus_request_offers(void);

/*
 * APIs for managing sub-channels.
 */

/*
 * Register a callback that is invoked (in process context) whenever the
 * host offers a new sub-channel for the given primary channel.
 */
void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
                        void (*sc_cr_cb)(struct vmbus_channel *new_sc));

/*
 * Retrieve the (sub-)channel on which to send an outgoing request.
 * When a primary channel has multiple sub-channels, the channel whose VCPU
 * binding is closest to the calling VCPU is chosen.
 */
struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary);

/*
 * Check if sub-channels have already been offered. This is useful when a
 * driver is re-loaded after having established sub-channels: it must check
 * whether the sub-channels already exist before requesting their creation
 * again. Returns true if sub-channels have already been created. Invoke
 * this only after setting the sub-channel creation callback.
 */
bool vmbus_are_subchannels_present(struct vmbus_channel *primary);
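
/*
 * Illustrative sketch (assumption): a performance-critical driver might open
 * each sub-channel from the creation callback and then spread transmissions
 * with vmbus_get_outgoing_channel(). "example_sc_cb" and
 * "example_onchannelcallback" are made-up names and the ring sizes are
 * arbitrary.
 *
 *	static void example_sc_cb(struct vmbus_channel *new_sc)
 *	{
 *		vmbus_open(new_sc, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
 *			   example_onchannelcallback, new_sc);
 *	}
 *
 *	...
 *	vmbus_set_sc_create_callback(primary, example_sc_cb);
 *	...
 *	if (vmbus_are_subchannels_present(primary))
 *		channel = vmbus_get_outgoing_channel(primary);
 */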

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
        u16 type;
        u16 dataoffset8;
        u16 length8;
        u16 flags;
        u64 transactionid;
        u32 reserved;
        u32 rangecount;
        struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_multipage_buffer {
        u16 type;
        u16 dataoffset8;
        u16 length8;
        u16 flags;
        u64 transactionid;
        u32 reserved;
        u32 rangecount;                 /* Always 1 in this case */
        struct hv_multipage_buffer range;
} __packed;

extern int vmbus_open(struct vmbus_channel *channel,
                      u32 send_ringbuffersize,
                      u32 recv_ringbuffersize,
                      void *userdata,
                      u32 userdatalen,
                      void (*onchannel_callback)(void *context),
                      void *context);
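
/*
 * Illustrative sketch (assumption): typical use of vmbus_open() from a
 * driver probe routine. The ring-buffer sizes and the callback name are
 * made up; real drivers pick sizes suited to their traffic.
 *
 *	static int example_probe(struct hv_device *dev,
 *				 const struct hv_vmbus_device_id *dev_id)
 *	{
 *		int ret;
 *
 *		ret = vmbus_open(dev->channel, 10 * PAGE_SIZE, 10 * PAGE_SIZE,
 *				 NULL, 0, example_onchannelcallback,
 *				 dev->channel);
 *		if (ret)
 *			return ret;
 *		// ... device-specific setup ...
 *		return 0;
 *	}
 */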

extern void vmbus_close(struct vmbus_channel *channel);

extern int vmbus_sendpacket(struct vmbus_channel *channel,
                            void *buffer,
                            u32 bufferLen,
                            u64 requestid,
                            enum vmbus_packet_type type,
                            u32 flags);
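
/*
 * Illustrative sketch (assumption): sending an in-band message and asking
 * the host for a completion. "req" and "req_id" are made-up driver-side
 * names; the request id is echoed back in the completion packet.
 *
 *	ret = vmbus_sendpacket(channel, &req, sizeof(req), req_id,
 *			       VM_PKT_DATA_INBAND,
 *			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 *	if (ret)
 *		return ret;
 */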

extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
                                       struct hv_page_buffer pagebuffers[],
                                       u32 pagecount,
                                       void *buffer,
                                       u32 bufferlen,
                                       u64 requestid);

extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
                                            struct hv_multipage_buffer *mpb,
                                            void *buffer,
                                            u32 bufferlen,
                                            u64 requestid);

extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
                                 void *kbuffer,
                                 u32 size,
                                 u32 *gpadl_handle);

extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
                                u32 gpadl_handle);
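
/*
 * Illustrative sketch (assumption): publishing a guest buffer to the host as
 * a GPADL and tearing it down again. "recv_buf" and its size are made up;
 * the handle returned by vmbus_establish_gpadl() is what gets passed to the
 * host, e.g. in a device-specific setup message.
 *
 *	u32 gpadl_handle;
 *	int ret;
 *
 *	ret = vmbus_establish_gpadl(channel, recv_buf, recv_buf_size,
 *				    &gpadl_handle);
 *	if (ret)
 *		return ret;
 *	// ... use the buffer with the host ...
 *	vmbus_teardown_gpadl(channel, gpadl_handle);
 */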

extern int vmbus_recvpacket(struct vmbus_channel *channel,
                            void *buffer,
                            u32 bufferlen,
                            u32 *buffer_actual_len,
                            u64 *requestid);

extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
                                void *buffer,
                                u32 bufferlen,
                                u32 *buffer_actual_len,
                                u64 *requestid);
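
/*
 * Illustrative sketch (assumption): draining the inbound ring from a channel
 * callback. The buffer size and names are made up; on success
 * "bytes_recvd" holds the payload length and "req_id" the sender's
 * transaction id.
 *
 *	static void example_onchannelcallback(void *context)
 *	{
 *		struct vmbus_channel *channel = context;
 *		u8 buf[256];
 *		u32 bytes_recvd;
 *		u64 req_id;
 *
 *		while (!vmbus_recvpacket(channel, buf, sizeof(buf),
 *					 &bytes_recvd, &req_id) &&
 *		       bytes_recvd > 0) {
 *			// process bytes_recvd bytes of payload in buf
 *		}
 *	}
 */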

extern void vmbus_ontimer(unsigned long data);

/* Base driver object */
struct hv_driver {
        const char *name;

        /* the device type supported by this driver */
        uuid_le dev_type;
        const struct hv_vmbus_device_id *id_table;

        struct device_driver driver;

        int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
        int (*remove)(struct hv_device *);
        void (*shutdown)(struct hv_device *);
};

/* Base device object */
struct hv_device {
        /* the device type id of this device */
        uuid_le dev_type;

        /* the device instance id of this device */
        uuid_le dev_instance;

        struct device device;

        struct vmbus_channel *channel;
};


static inline struct hv_device *device_to_hv_device(struct device *d)
{
        return container_of(d, struct hv_device, device);
}

static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
{
        return container_of(d, struct hv_driver, driver);
}

static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
        dev_set_drvdata(&dev->device, data);
}

static inline void *hv_get_drvdata(struct hv_device *dev)
{
        return dev_get_drvdata(&dev->device);
}
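
/*
 * Illustrative sketch (assumption): stashing a driver-private object on the
 * hv_device in probe and retrieving it in remove. "struct example_dev" is a
 * made-up driver type.
 *
 *	static int example_probe(struct hv_device *dev,
 *				 const struct hv_vmbus_device_id *dev_id)
 *	{
 *		struct example_dev *edev = kzalloc(sizeof(*edev), GFP_KERNEL);
 *
 *		if (!edev)
 *			return -ENOMEM;
 *		hv_set_drvdata(dev, edev);
 *		return 0;
 *	}
 *
 *	static int example_remove(struct hv_device *dev)
 *	{
 *		struct example_dev *edev = hv_get_drvdata(dev);
 *
 *		kfree(edev);
 *		return 0;
 *	}
 */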

#define vmbus_driver_register(driver)   \
        __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
                                         struct module *owner,
                                         const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
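
/*
 * Illustrative sketch (assumption): a minimal module that registers an
 * hv_driver. The id_table, callbacks, and names are made up; see the GUID
 * macros below for the device classes a driver can match on.
 *
 *	static struct hv_driver example_drv = {
 *		.name = "example",
 *		.id_table = example_id_table,
 *		.probe = example_probe,
 *		.remove = example_remove,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return vmbus_driver_register(&example_drv);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		vmbus_driver_unregister(&example_drv);
 *	}
 *
 *	module_init(example_init);
 *	module_exit(example_exit);
 */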

/*
 * VMBUS_DEVICE - macro used to describe a specific hyperv vmbus device
 *
 * This macro is used to create a struct hv_vmbus_device_id that matches a
 * specific device.
 */
#define VMBUS_DEVICE(g0, g1, g2, g3, g4, g5, g6, g7,    \
                     g8, g9, ga, gb, gc, gd, ge, gf)    \
        .guid = { g0, g1, g2, g3, g4, g5, g6, g7,       \
                  g8, g9, ga, gb, gc, gd, ge, gf },
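
/*
 * Illustrative sketch (assumption): building a device id table with the
 * VMBUS_DEVICE() initializer. The GUID bytes shown are the mouse device
 * class id from HV_MOUSE_GUID below; a real driver lists the ids it serves
 * and terminates the table with an empty entry.
 *
 *	static const struct hv_vmbus_device_id example_id_table[] = {
 *		{ VMBUS_DEVICE(0x9e, 0xb6, 0xa8, 0xcf, 0x4a, 0x5b, 0xc0, 0x4c,
 *			       0xb9, 0x8b, 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(vmbus, example_id_table);
 */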

/*
 * GUID definitions of various offer types - services offered to the guest.
 */

/*
 * Network GUID
 */
#define HV_NIC_GUID \
        .guid = { \
                        0x63, 0x51, 0x61, 0xf8, 0x3e, 0xdf, 0xc5, 0x46, \
                        0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e \
                }

/*
 * IDE GUID
 */
#define HV_IDE_GUID \
        .guid = { \
                        0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, \
                        0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5 \
                }

/*
 * SCSI GUID
 */
#define HV_SCSI_GUID \
        .guid = { \
                        0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, \
                        0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f \
                }

/*
 * Shutdown GUID
 */
#define HV_SHUTDOWN_GUID \
        .guid = { \
                        0x31, 0x60, 0x0b, 0x0e, 0x13, 0x52, 0x34, 0x49, \
                        0x81, 0x8b, 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb \
                }

/*
 * Time Synch GUID
 */
#define HV_TS_GUID \
        .guid = { \
                        0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49, \
                        0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf \
                }

/*
 * Heartbeat GUID
 */
#define HV_HEART_BEAT_GUID \
        .guid = { \
                        0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e, \
                        0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d \
                }

/*
 * KVP (Key-Value Pair exchange) GUID
 */
#define HV_KVP_GUID \
        .guid = { \
                        0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d, \
                        0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x3, 0xe6 \
                }

/*
 * Dynamic memory GUID
 */
#define HV_DM_GUID \
        .guid = { \
                        0xdc, 0x74, 0x50, 0x52, 0x85, 0x89, 0xe2, 0x46, \
                        0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02 \
                }

/*
 * Mouse GUID
 */
#define HV_MOUSE_GUID \
        .guid = { \
                        0x9e, 0xb6, 0xa8, 0xcf, 0x4a, 0x5b, 0xc0, 0x4c, \
                        0xb9, 0x8b, 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a \
                }

/*
 * VSS (Backup/Restore) GUID
 */
#define HV_VSS_GUID \
        .guid = { \
                        0x29, 0x2e, 0xfa, 0x35, 0x23, 0xea, 0x36, 0x42, \
                        0x96, 0xae, 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40 \
                }

/*
 * Synthetic Video GUID
 */
#define HV_SYNTHVID_GUID \
        .guid = { \
                        0x02, 0x78, 0x0a, 0xda, 0x77, 0xe3, 0xac, 0x4a, \
                        0x8e, 0x77, 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8 \
                }

/*
 * Synthetic FC GUID
 */
#define HV_SYNTHFC_GUID \
        .guid = { \
                        0x4A, 0xCC, 0x9B, 0x2F, 0x69, 0x00, 0xF3, 0x4A, \
                        0xB7, 0x6B, 0x6F, 0xD0, 0xBE, 0x52, 0x8C, 0xDA \
                }

/*
 * Guest File Copy Service GUID
 */
#define HV_FCOPY_GUID \
        .guid = { \
                        0xE3, 0x4B, 0xD1, 0x34, 0xE4, 0xDE, 0xC8, 0x41, \
                        0x9A, 0xE7, 0x6B, 0x17, 0x49, 0x77, 0xC1, 0x92 \
                }

/*
 * Common header for Hyper-V ICs (integration components).
 */

#define ICMSGTYPE_NEGOTIATE             0
#define ICMSGTYPE_HEARTBEAT             1
#define ICMSGTYPE_KVPEXCHANGE           2
#define ICMSGTYPE_SHUTDOWN              3
#define ICMSGTYPE_TIMESYNC              4
#define ICMSGTYPE_VSS                   5

#define ICMSGHDRFLAG_TRANSACTION        1
#define ICMSGHDRFLAG_REQUEST            2
#define ICMSGHDRFLAG_RESPONSE           4


/*
 * While we want to handle util services as regular devices,
 * there is only one instance of each of these services; so
 * we statically allocate the service specific state.
 */

struct hv_util_service {
        u8 *recv_buffer;
        void (*util_cb)(void *);
        int (*util_init)(struct hv_util_service *);
        void (*util_deinit)(void);
};
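
/*
 * Illustrative sketch (assumption): how a utility service might be described
 * with hv_util_service. The callback and init names are made up; the hv_utils
 * driver is what allocates recv_buffer and wires the service's channel
 * callback to util_cb.
 *
 *	static struct hv_util_service util_example = {
 *		.util_cb = example_util_onchannelcallback,
 *		.util_init = example_util_init,
 *		.util_deinit = example_util_deinit,
 *	};
 */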

struct vmbuspipe_hdr {
        u32 flags;
        u32 msgsize;
} __packed;

struct ic_version {
        u16 major;
        u16 minor;
} __packed;

struct icmsg_hdr {
        struct ic_version icverframe;
        u16 icmsgtype;
        struct ic_version icvermsg;
        u16 icmsgsize;
        u32 status;
        u8 ictransaction_id;
        u8 icflags;
        u8 reserved[2];
} __packed;

struct icmsg_negotiate {
        u16 icframe_vercnt;
        u16 icmsg_vercnt;
        u32 reserved;
        struct ic_version icversion_data[1];    /* any size array */
} __packed;

struct shutdown_msg_data {
        u32 reason_code;
        u32 timeout_seconds;
        u32 flags;
        u8 display_message[2048];
} __packed;

struct heartbeat_msg_data {
        u64 seq_num;
        u32 reserved[8];
} __packed;

/* Time Sync IC defs */
#define ICTIMESYNCFLAG_PROBE    0
#define ICTIMESYNCFLAG_SYNC     1
#define ICTIMESYNCFLAG_SAMPLE   2

/*
 * Offset between the Windows epoch (1601-01-01) and the Unix epoch
 * (1970-01-01), in 100 ns units.
 */
#ifdef __x86_64__
#define WLTIMEDELTA     116444736000000000L     /* in 100ns unit */
#else
#define WLTIMEDELTA     116444736000000000LL
#endif

struct ictimesync_data {
        u64 parenttime;
        u64 childtime;
        u64 roundtriptime;
        u8 flags;
} __packed;

struct hyperv_service_callback {
        u8 msg_type;
        char *log_msg;
        uuid_le data;
        struct vmbus_channel *channel;
        void (*callback) (void *context);
};

#define MAX_SRV_VER     0x7ffffff
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
                                      struct icmsg_negotiate *, u8 *, int,
                                      int);
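
/*
 * Illustrative sketch (assumption): the common negotiation pattern used by
 * utility services. A received ICMSGTYPE_NEGOTIATE frame is rewritten in
 * place by vmbus_prep_negotiate_resp() and echoed back to the host.
 * UTIL_FW_VERSION and EXAMPLE_SRV_VERSION are stand-ins for whatever
 * framework/service versions the caller supports; they are not defined in
 * this header.
 *
 *	struct icmsg_hdr *icmsghdr;
 *	struct icmsg_negotiate *negop = NULL;
 *
 *	icmsghdr = (struct icmsg_hdr *)
 *			&recv_buffer[sizeof(struct vmbuspipe_hdr)];
 *	if (icmsghdr->icmsgtype == ICMSGTYPE_NEGOTIATE)
 *		vmbus_prep_negotiate_resp(icmsghdr, negop, recv_buffer,
 *					  UTIL_FW_VERSION,
 *					  EXAMPLE_SRV_VERSION);
 *
 *	icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
 *	vmbus_sendpacket(channel, recv_buffer, recvlen, requestid,
 *			 VM_PKT_DATA_INBAND, 0);
 */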

int hv_kvp_init(struct hv_util_service *);
void hv_kvp_deinit(void);
void hv_kvp_onchannelcallback(void *);

int hv_vss_init(struct hv_util_service *);
void hv_vss_deinit(void);
void hv_vss_onchannelcallback(void *);

extern struct resource hyperv_mmio;

/*
 * Negotiated version with the Host.
 */
extern __u32 vmbus_proto_version;

#endif /* _HYPERV_H */