// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/hyperv.h>
#include <asm/mshyperv.h>

#include "hyperv_vmbus.h"

static void init_vp_index(struct vmbus_channel *channel);

const struct vmbus_device vmbus_devs[] = {
	/* IDE */
	{ .dev_type = HV_IDE,
	  HV_IDE_GUID,
	  .perf_device = true,
	},

	/* SCSI */
	{ .dev_type = HV_SCSI,
	  HV_SCSI_GUID,
	  .perf_device = true,
	},

	/* Fibre Channel */
	{ .dev_type = HV_FC,
	  HV_SYNTHFC_GUID,
	  .perf_device = true,
	},

	/* Synthetic NIC */
	{ .dev_type = HV_NIC,
	  HV_NIC_GUID,
	  .perf_device = true,
	},

	/* Network Direct */
	{ .dev_type = HV_ND,
	  HV_ND_GUID,
	  .perf_device = true,
	},

	/* PCIe */
	{ .dev_type = HV_PCIE,
	  HV_PCIE_GUID,
	  .perf_device = false,
	},

	/* Synthetic frame buffer */
	{ .dev_type = HV_FB,
	  HV_SYNTHVID_GUID,
	  .perf_device = false,
	},

	/* Synthetic keyboard */
	{ .dev_type = HV_KBD,
	  HV_KBD_GUID,
	  .perf_device = false,
	},

	/* Synthetic mouse */
	{ .dev_type = HV_MOUSE,
	  HV_MOUSE_GUID,
	  .perf_device = false,
	},

	/* KVP */
	{ .dev_type = HV_KVP,
	  HV_KVP_GUID,
	  .perf_device = false,
	},

	/* Time Synch */
	{ .dev_type = HV_TS,
	  HV_TS_GUID,
	  .perf_device = false,
	},

	/* Heartbeat */
	{ .dev_type = HV_HB,
	  HV_HEART_BEAT_GUID,
	  .perf_device = false,
	},

	/* Shutdown */
	{ .dev_type = HV_SHUTDOWN,
	  HV_SHUTDOWN_GUID,
	  .perf_device = false,
	},

	/* File copy */
	{ .dev_type = HV_FCOPY,
	  HV_FCOPY_GUID,
	  .perf_device = false,
	},

	/* Backup */
	{ .dev_type = HV_BACKUP,
	  HV_VSS_GUID,
	  .perf_device = false,
	},

	/* Dynamic Memory */
	{ .dev_type = HV_DM,
	  HV_DM_GUID,
	  .perf_device = false,
	},

	/* Unknown GUID */
	{ .dev_type = HV_UNKNOWN,
	  .perf_device = false,
	},
};

static const struct {
	guid_t guid;
} vmbus_unsupported_devs[] = {
	{ HV_AVMA1_GUID },
	{ HV_AVMA2_GUID },
	{ HV_RDV_GUID },
};

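/*
 * The rescinded channel may be blocked waiting for a response from the host;
 * take care of that.
 */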
static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
{
	struct vmbus_channel_msginfo *msginfo;
	unsigned long flags;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	channel->rescind = true;
	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {

		if (msginfo->waiting_channel == channel) {
			complete(&msginfo->waitevent);
			break;
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

static bool is_unsupported_vmbus_devs(const guid_t *guid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vmbus_unsupported_devs); i++)
		if (guid_equal(guid, &vmbus_unsupported_devs[i].guid))
			return true;
	return false;
}

static u16 hv_get_dev_type(const struct vmbus_channel *channel)
{
	const guid_t *guid = &channel->offermsg.offer.if_type;
	u16 i;

	if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
		return HV_UNKNOWN;

	for (i = HV_IDE; i < HV_UNKNOWN; i++) {
		if (guid_equal(guid, &vmbus_devs[i].guid))
			return i;
	}
	pr_info("Unknown GUID: %pUl\n", guid);
	return i;
}

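/**
 * vmbus_prep_negotiate_resp() - Create default response for Negotiate message
 * @icmsghdrp: Pointer to msg header structure
 * @buf: Raw buffer channel data
 * @fw_version: The framework versions we can support.
 * @fw_vercnt: The size of @fw_version.
 * @srv_version: The service versions we can support.
 * @srv_vercnt: The size of @srv_version.
 * @nego_fw_version: The selected framework version.
 * @nego_srv_version: The selected service version.
 *
 * Note: Versions are given in decreasing order.
 *
 * Set up and fill in default negotiate response message.
 * Mainly used by Hyper-V drivers.
 */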
bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
				u8 *buf, const int *fw_version, int fw_vercnt,
				const int *srv_version, int srv_vercnt,
				int *nego_fw_version, int *nego_srv_version)
{
	int icframe_major, icframe_minor;
	int icmsg_major, icmsg_minor;
	int fw_major, fw_minor;
	int srv_major, srv_minor;
	int i, j;
	bool found_match = false;
	struct icmsg_negotiate *negop;

	icmsghdrp->icmsgsize = 0x10;
	negop = (struct icmsg_negotiate *)&buf[
		sizeof(struct vmbuspipe_hdr) +
		sizeof(struct icmsg_hdr)];

	icframe_major = negop->icframe_vercnt;
	icframe_minor = 0;

	icmsg_major = negop->icmsg_vercnt;
	icmsg_minor = 0;

	/*
	 * Select the framework version number we will
	 * support.
	 */
	for (i = 0; i < fw_vercnt; i++) {
		fw_major = (fw_version[i] >> 16);
		fw_minor = (fw_version[i] & 0xFFFF);

		for (j = 0; j < negop->icframe_vercnt; j++) {
			if ((negop->icversion_data[j].major == fw_major) &&
			    (negop->icversion_data[j].minor == fw_minor)) {
				icframe_major = negop->icversion_data[j].major;
				icframe_minor = negop->icversion_data[j].minor;
				found_match = true;
				break;
			}
		}

		if (found_match)
			break;
	}

	if (!found_match)
		goto fw_error;

	found_match = false;

	for (i = 0; i < srv_vercnt; i++) {
		srv_major = (srv_version[i] >> 16);
		srv_minor = (srv_version[i] & 0xFFFF);

		for (j = negop->icframe_vercnt;
		     (j < negop->icframe_vercnt + negop->icmsg_vercnt);
		     j++) {

			if ((negop->icversion_data[j].major == srv_major) &&
			    (negop->icversion_data[j].minor == srv_minor)) {

				icmsg_major = negop->icversion_data[j].major;
				icmsg_minor = negop->icversion_data[j].minor;
				found_match = true;
				break;
			}
		}

		if (found_match)
			break;
	}

	/*
	 * Respond with the framework and service
	 * version numbers we can support.
	 */

fw_error:
	if (!found_match) {
		negop->icframe_vercnt = 0;
		negop->icmsg_vercnt = 0;
	} else {
		negop->icframe_vercnt = 1;
		negop->icmsg_vercnt = 1;
	}

	if (nego_fw_version)
		*nego_fw_version = (icframe_major << 16) | icframe_minor;

	if (nego_srv_version)
		*nego_srv_version = (icmsg_major << 16) | icmsg_minor;

	negop->icversion_data[0].major = icframe_major;
	negop->icversion_data[0].minor = icframe_minor;
	negop->icversion_data[1].major = icmsg_major;
	negop->icversion_data[1].minor = icmsg_minor;
	return found_match;
}
EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);

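/*
 * alloc_channel - Allocate and initialize a vmbus channel object
 */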
static struct vmbus_channel *alloc_channel(void)
{
	struct vmbus_channel *channel;

	channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
	if (!channel)
		return NULL;

	spin_lock_init(&channel->sched_lock);
	init_completion(&channel->rescind_event);

	INIT_LIST_HEAD(&channel->sc_list);

	tasklet_init(&channel->callback_event,
		     vmbus_on_event, (unsigned long)channel);

	hv_ringbuffer_pre_init(channel);

	return channel;
}

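/*
 * free_channel - Release the resources used by the vmbus channel object
 */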
static void free_channel(struct vmbus_channel *channel)
{
	tasklet_kill(&channel->callback_event);
	vmbus_remove_channel_attr_group(channel);

	kobject_put(&channel->kobj);
}

void vmbus_channel_map_relid(struct vmbus_channel *channel)
{
	if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
		return;
	/*
	 * Publish the channel in the relid-to-channel array. The full
	 * memory barrier in smp_store_mb() ensures that all the writes
	 * that initialized the channel are visible to other CPUs before
	 * the channel can be looked up (e.g. by vmbus_chan_sched()) via
	 * the channels[] array.
	 */
	smp_store_mb(
		vmbus_connection.channels[channel->offermsg.child_relid],
		channel);
}

void vmbus_channel_unmap_relid(struct vmbus_channel *channel)
{
	if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
		return;
	WRITE_ONCE(
		vmbus_connection.channels[channel->offermsg.child_relid],
		NULL);
}

static void vmbus_release_relid(u32 relid)
{
	struct vmbus_channel_relid_released msg;
	int ret;

	memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
	msg.child_relid = relid;
	msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
	ret = vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
			     true);

	trace_vmbus_release_relid(&msg, ret);
}

void hv_process_channel_removal(struct vmbus_channel *channel)
{
	lockdep_assert_held(&vmbus_connection.channel_mutex);
	BUG_ON(!channel->rescind);

	/*
	 * Only hv_sock channels may legitimately reach this point with an
	 * invalid relid: their relid is invalidated upon suspend.
	 */
	WARN_ON(channel->offermsg.child_relid == INVALID_RELID &&
		!is_hvsock_channel(channel));

	/*
	 * If the relid has already been invalidated (e.g. for an hv_sock
	 * channel across hibernation), the channel is no longer present in
	 * the relid-to-channel array and must not be unmapped again.
	 */
	if (channel->offermsg.child_relid != INVALID_RELID)
		vmbus_channel_unmap_relid(channel);

	if (channel->primary_channel == NULL)
		list_del(&channel->listentry);
	else
		list_del(&channel->sc_list);

	/*
	 * If this is a "perf" channel, update the hv_numa_map[] masks so
	 * that init_vp_index() can (re-)use the CPU.
	 */
	if (hv_is_perf_channel(channel))
		hv_clear_alloced_cpu(channel->target_cpu);

	/*
	 * Likewise, only release the relid if it is still valid: after the
	 * relid has been invalidated, the same relid may already refer to
	 * a completely different channel.
	 */
	if (channel->offermsg.child_relid != INVALID_RELID)
		vmbus_release_relid(channel->offermsg.child_relid);

	free_channel(channel);
}

void vmbus_free_channels(void)
{
	struct vmbus_channel *channel, *tmp;

	list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
				 listentry) {
		/* hv_process_channel_removal() needs this */
		channel->rescind = true;

		vmbus_device_unregister(channel->device_obj);
	}
}

static void vmbus_add_channel_work(struct work_struct *work)
{
	struct vmbus_channel *newchannel =
		container_of(work, struct vmbus_channel, add_channel_work);
	struct vmbus_channel *primary_channel = newchannel->primary_channel;
	int ret;

	/*
	 * This state is used to indicate a successful open
	 * so that when we do close the channel normally,
	 * we can cleanup properly.
	 */
	newchannel->state = CHANNEL_OPEN_STATE;

	if (primary_channel != NULL) {
		/* newchannel is a sub-channel. */
		struct hv_device *dev = primary_channel->device_obj;

		if (vmbus_add_channel_kobj(dev, newchannel))
			goto err_deq_chan;

		if (primary_channel->sc_creation_callback != NULL)
			primary_channel->sc_creation_callback(newchannel);

		newchannel->probe_done = true;
		return;
	}

	/*
	 * Start the process of binding the primary channel to the driver.
	 */
	newchannel->device_obj = vmbus_device_create(
		&newchannel->offermsg.offer.if_type,
		&newchannel->offermsg.offer.if_instance,
		newchannel);
	if (!newchannel->device_obj)
		goto err_deq_chan;

	newchannel->device_obj->device_id = newchannel->device_id;
	/*
	 * Add the new device to the bus. This will kick off device-driver
	 * binding which eventually invokes the device driver's AddDevice()
	 * method.
	 */
	ret = vmbus_device_register(newchannel->device_obj);

	if (ret != 0) {
		pr_err("unable to add child device object (relid %d)\n",
		       newchannel->offermsg.child_relid);
		kfree(newchannel->device_obj);
		goto err_deq_chan;
	}

	newchannel->probe_done = true;
	return;

err_deq_chan:
	mutex_lock(&vmbus_connection.channel_mutex);

	/*
	 * We need to set the flag, otherwise
	 * vmbus_onoffer_rescind() can be blocked.
	 */
	newchannel->probe_done = true;

	if (primary_channel == NULL)
		list_del(&newchannel->listentry);
	else
		list_del(&newchannel->sc_list);

	/* vmbus_process_offer() has mapped the channel. */
	vmbus_channel_unmap_relid(newchannel);

	mutex_unlock(&vmbus_connection.channel_mutex);

	vmbus_release_relid(newchannel->offermsg.child_relid);

	free_channel(newchannel);
}

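/*
 * vmbus_process_offer - Process the offer by creating a channel/device
 * associated with this offer
 */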
static void vmbus_process_offer(struct vmbus_channel *newchannel)
{
	struct vmbus_channel *channel;
	struct workqueue_struct *wq;
	bool fnew = true;

	/*
	 * Synchronize vmbus_process_offer() and CPU hotplugging: holding
	 * the cpus read lock keeps the set of online CPUs stable while
	 * init_vp_index() picks a target CPU for the channel and the
	 * channel is inserted into chn_list, so a concurrent CPU hot
	 * removal cannot race with the target_cpu assignment.
	 */
	cpus_read_lock();

	/*
	 * Serializes the modifications of the chn_list list as well as
	 * the accesses to next_numa_node_id in init_vp_index().
	 */
	mutex_lock(&vmbus_connection.channel_mutex);

	init_vp_index(newchannel);

	/* Remember the channels that should be cleaned up upon suspend. */
	if (is_hvsock_channel(newchannel) || is_sub_channel(newchannel))
		atomic_inc(&vmbus_connection.nr_chan_close_on_suspend);

	/*
	 * Now that we have acquired the channel_mutex,
	 * we can release the potentially racing rescind thread.
	 */
	atomic_dec(&vmbus_connection.offer_in_progress);

	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (guid_equal(&channel->offermsg.offer.if_type,
			       &newchannel->offermsg.offer.if_type) &&
		    guid_equal(&channel->offermsg.offer.if_instance,
			       &newchannel->offermsg.offer.if_instance)) {
			fnew = false;
			break;
		}
	}

	if (fnew) {
		list_add_tail(&newchannel->listentry,
			      &vmbus_connection.chn_list);
	} else {
		/*
		 * Check to see if this is a valid sub-channel.
		 */
		if (newchannel->offermsg.offer.sub_channel_index == 0) {
			mutex_unlock(&vmbus_connection.channel_mutex);
			/*
			 * Don't call free_channel(), because newchannel->kobj
			 * is not initialized yet.
			 */
			kfree(newchannel);
			WARN_ON_ONCE(1);
			return;
		}
		/*
		 * Process the sub-channel.
		 */
		newchannel->primary_channel = channel;
		list_add_tail(&newchannel->sc_list, &channel->sc_list);
	}

	vmbus_channel_map_relid(newchannel);

	mutex_unlock(&vmbus_connection.channel_mutex);
	cpus_read_unlock();

	/*
	 * vmbus_process_offer() mustn't call channel->sc_creation_callback()
	 * directly for sub-channels, because sc_creation_callback() ->
	 * vmbus_open() may never get the host's response to the
	 * OPEN_CHANNEL message (the host may rescind a channel at any time,
	 * e.g. in the case of hot removing a NIC), and vmbus_onoffer_rescind()
	 * may not wake up the vmbus_open() as it's blocked due to a non-zero
	 * vmbus_connection.offer_in_progress, and finally we have a deadlock.
	 *
	 * The above is also true for primary channels, if the related device
	 * drivers use sync probing mode by default.
	 *
	 * And, usually the handling of primary channels and sub-channels can
	 * depend on each other, so we should offload them to different
	 * workqueues to avoid possible deadlock, e.g. in sync-probing mode,
	 * NIC1's netvsc_subchan_work() can race with NIC2's netvsc_probe() ->
	 * rtnl_lock(), and causes deadlock: the former gets the rtnl_lock
	 * and waits for all the sub-channels to appear, but the latter
	 * can't get the rtnl_lock and this blocks the handling of
	 * sub-channels.
	 */
	INIT_WORK(&newchannel->add_channel_work, vmbus_add_channel_work);
	wq = fnew ? vmbus_connection.handle_primary_chan_wq :
		    vmbus_connection.handle_sub_chan_wq;
	queue_work(wq, &newchannel->add_channel_work);
}

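/*
 * We use this state to statically distribute the channel interrupt load.
 */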
static int next_numa_node_id;

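/*
 * Starting with Win8, the incoming channel interrupt load can be
 * statically distributed by binding a channel to a VCPU.
 *
 * Non-performance-critical channels (and all channels on pre-Win8
 * hosts) are bound to VMBUS_CONNECT_CPU.
 *
 * Performance-critical channels are distributed evenly among all the
 * available NUMA nodes; once a node is assigned, the CPU within that
 * node is picked in a simple round-robin fashion.
 */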
static void init_vp_index(struct vmbus_channel *channel)
{
	bool perf_chn = hv_is_perf_channel(channel);
	cpumask_var_t available_mask;
	struct cpumask *alloced_mask;
	u32 target_cpu;
	int numa_node;

	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn) ||
	    !alloc_cpumask_var(&available_mask, GFP_KERNEL)) {
		/*
		 * If the channel is not a performance critical channel,
		 * or if the host is running an old (pre-Win8) version of
		 * VMBus that doesn't support interrupt distribution, or
		 * if the cpumask allocation fails, bind the channel to
		 * VMBUS_CONNECT_CPU.
		 */
		channel->target_cpu = VMBUS_CONNECT_CPU;
		if (perf_chn)
			hv_set_alloced_cpu(VMBUS_CONNECT_CPU);
		return;
	}

	while (true) {
		numa_node = next_numa_node_id++;
		if (numa_node == nr_node_ids) {
			next_numa_node_id = 0;
			continue;
		}
		if (cpumask_empty(cpumask_of_node(numa_node)))
			continue;
		break;
	}
	alloced_mask = &hv_context.hv_numa_map[numa_node];

	if (cpumask_weight(alloced_mask) ==
	    cpumask_weight(cpumask_of_node(numa_node))) {
		/*
		 * We have cycled through all the CPUs in the node;
		 * reset the alloced map.
		 */
		cpumask_clear(alloced_mask);
	}

	cpumask_xor(available_mask, alloced_mask, cpumask_of_node(numa_node));

	target_cpu = cpumask_first(available_mask);
	cpumask_set_cpu(target_cpu, alloced_mask);

	channel->target_cpu = target_cpu;

	free_cpumask_var(available_mask);
}

static void vmbus_wait_for_unload(void)
{
	int cpu;
	void *page_addr;
	struct hv_message *msg;
	struct vmbus_channel_message_header *hdr;
	u32 message_type, i;

	/*
	 * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
	 * used for initial contact or to CPU0 depending on host version. When
	 * we're crashing on a different CPU let's hope that IRQ handler on
	 * the cpu which receives the message is still functional and the
	 * kernel processes the message normally.
	 *
	 * If not, the last thing we can do is read message pages for all CPUs
	 * directly.
	 *
	 * Wait no more than 10 seconds so that the panic path can't get
	 * hung forever in case the response message isn't seen.
	 */
	for (i = 0; i < 1000; i++) {
		if (completion_done(&vmbus_connection.unload_event))
			break;

		for_each_online_cpu(cpu) {
			struct hv_per_cpu_context *hv_cpu
				= per_cpu_ptr(hv_context.cpu_context, cpu);

			page_addr = hv_cpu->synic_message_page;
			msg = (struct hv_message *)page_addr
				+ VMBUS_MESSAGE_SINT;

			message_type = READ_ONCE(msg->header.message_type);
			if (message_type == HVMSG_NONE)
				continue;

			hdr = (struct vmbus_channel_message_header *)
				msg->u.payload;

			if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
				complete(&vmbus_connection.unload_event);

			vmbus_signal_eom(msg, message_type);
		}

		mdelay(10);
	}

	/*
	 * We're crashing and already got the UNLOAD_RESPONSE, cleanup all
	 * maybe-pending messages on all CPUs to be able to receive new
	 * messages after we reconnect.
	 */
	for_each_online_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		page_addr = hv_cpu->synic_message_page;
		msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
		msg->header.message_type = HVMSG_NONE;
	}
}

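/*
 * vmbus_unload_response - Handler for the unload response.
 */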
static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
{
	/*
	 * This is a global event; just wakeup the waiting thread.
	 * Once we successfully unload, we can cleanup the monitor state.
	 */
	complete(&vmbus_connection.unload_event);
}

void vmbus_initiate_unload(bool crash)
{
	struct vmbus_channel_message_header hdr;

	if (xchg(&vmbus_connection.conn_state, DISCONNECTED) == DISCONNECTED)
		return;

	/* Pre-Win2012R2 hosts don't support reconnect */
	if (vmbus_proto_version < VERSION_WIN8_1)
		return;

	init_completion(&vmbus_connection.unload_event);
	memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
	hdr.msgtype = CHANNELMSG_UNLOAD;
	vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
		       !crash);

	/*
	 * vmbus_initiate_unload() is also called on crash and the crash can be
	 * happening in an interrupt context, where scheduling is impossible.
	 */
	if (!crash)
		wait_for_completion(&vmbus_connection.unload_event);
	else
		vmbus_wait_for_unload();
}

static void check_ready_for_resume_event(void)
{
	/*
	 * If all the old primary channels have been fixed up, then it's safe
	 * to resume.
	 */
	if (atomic_dec_and_test(&vmbus_connection.nr_chan_fixup_on_resume))
		complete(&vmbus_connection.ready_for_resume_event);
}

static void vmbus_setup_channel_state(struct vmbus_channel *channel,
				      struct vmbus_channel_offer_channel *offer)
{
	/*
	 * Setup state for signalling the host.
	 */
	channel->sig_event = VMBUS_EVENT_CONNECTION_ID;

	if (vmbus_proto_version != VERSION_WS2008) {
		channel->is_dedicated_interrupt =
				(offer->is_dedicated_interrupt != 0);
		channel->sig_event = offer->connection_id;
	}

	memcpy(&channel->offermsg, offer,
	       sizeof(struct vmbus_channel_offer_channel));
	channel->monitor_grp = (u8)offer->monitorid / 32;
	channel->monitor_bit = (u8)offer->monitorid % 32;
	channel->device_id = hv_get_dev_type(channel);
}

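/*
 * find_primary_channel_by_offer - Get the channel object given the new offer.
 * This is only used in the resume path of hibernation.
 */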
static struct vmbus_channel *
find_primary_channel_by_offer(const struct vmbus_channel_offer_channel *offer)
{
	struct vmbus_channel *channel = NULL, *iter;
	const guid_t *inst1, *inst2;

	/* Ignore sub-channel offers. */
	if (offer->offer.sub_channel_index != 0)
		return NULL;

	mutex_lock(&vmbus_connection.channel_mutex);

	list_for_each_entry(iter, &vmbus_connection.chn_list, listentry) {
		inst1 = &iter->offermsg.offer.if_instance;
		inst2 = &offer->offer.if_instance;

		if (guid_equal(inst1, inst2)) {
			channel = iter;
			break;
		}
	}

	mutex_unlock(&vmbus_connection.channel_mutex);

	return channel;
}

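/*
 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
 */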
static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_offer_channel *offer;
	struct vmbus_channel *oldchannel, *newchannel;
	size_t offer_sz;

	offer = (struct vmbus_channel_offer_channel *)hdr;

	trace_vmbus_onoffer(offer);

	oldchannel = find_primary_channel_by_offer(offer);

	if (oldchannel != NULL) {
		/*
		 * We're resuming from hibernation: all the sub-channel and
		 * hv_sock channels we had before the hibernation should have
		 * been cleaned up, and now we must be seeing a re-offered
		 * primary channel that we had before the hibernation.
		 *
		 * The channel_mutex must be held while the relid is updated
		 * and the channel is re-mapped, so that the update cannot
		 * race with a concurrent unmap of the channel (e.g. in
		 * vmbus_device_release()). Note: the relid here can be
		 * INVALID_RELID only for an hv_sock channel, and none of the
		 * hv_sock channels which were present before the suspend are
		 * re-offered upon the resume; see the WARN_ON() in
		 * hv_process_channel_removal().
		 */
		mutex_lock(&vmbus_connection.channel_mutex);

		atomic_dec(&vmbus_connection.offer_in_progress);

		WARN_ON(oldchannel->offermsg.child_relid != INVALID_RELID);
		/* Fix up the relid. */
		oldchannel->offermsg.child_relid = offer->child_relid;

		offer_sz = sizeof(*offer);
		if (memcmp(offer, &oldchannel->offermsg, offer_sz) != 0) {
			/*
			 * This is not an error, since the host can also
			 * change other fields of the offer: e.g. on WS RS5
			 * (Build 17763), the offer->connection_id of the
			 * Mellanox VF vmbus device can change when the host
			 * reoffers the device upon resume.
			 */
			pr_debug("vmbus offer changed: relid=%d\n",
				 offer->child_relid);

			print_hex_dump_debug("Old vmbus offer: ",
					     DUMP_PREFIX_OFFSET, 16, 4,
					     &oldchannel->offermsg, offer_sz,
					     false);
			print_hex_dump_debug("New vmbus offer: ",
					     DUMP_PREFIX_OFFSET, 16, 4,
					     offer, offer_sz, false);

			/* Fix up the old channel. */
			vmbus_setup_channel_state(oldchannel, offer);
		}

		/* Add the channel back to the array of channels. */
		vmbus_channel_map_relid(oldchannel);
		check_ready_for_resume_event();

		mutex_unlock(&vmbus_connection.channel_mutex);
		return;
	}

	/* Allocate the channel object and save this offer. */
	newchannel = alloc_channel();
	if (!newchannel) {
		vmbus_release_relid(offer->child_relid);
		atomic_dec(&vmbus_connection.offer_in_progress);
		pr_err("Unable to allocate channel object\n");
		return;
	}

	vmbus_setup_channel_state(newchannel, offer);

	vmbus_process_offer(newchannel);
}

static void check_ready_for_suspend_event(void)
{
	/*
	 * If all the sub-channels and hv_sock channels have been cleaned up,
	 * then it's safe to suspend.
	 */
	if (atomic_dec_and_test(&vmbus_connection.nr_chan_close_on_suspend))
		complete(&vmbus_connection.ready_for_suspend_event);
}

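/*
 * vmbus_onoffer_rescind - Rescind offer handler.
 *
 * We queue a work item to process this offer synchronously.
 */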
static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_rescind_offer *rescind;
	struct vmbus_channel *channel;
	struct device *dev;
	bool clean_up_chan_for_suspend;

	rescind = (struct vmbus_channel_rescind_offer *)hdr;

	trace_vmbus_onoffer_rescind(rescind);

	/*
	 * The offer msg and the corresponding rescind msg
	 * from the host are guaranteed to be ordered -
	 * offer comes in first and then the rescind.
	 * Since we process these events in work elements,
	 * and with preemption, we may end up processing
	 * the events out of order. Given that we handle these
	 * work elements on the same CPU, this is possible only
	 * in the case of preemption. In any case wait here
	 * until the offer processing has moved beyond the
	 * point where the channel is discoverable.
	 */
	while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
		/*
		 * We wait here while any channel offer is currently
		 * being processed.
		 */
		msleep(1);
	}

	mutex_lock(&vmbus_connection.channel_mutex);
	channel = relid2channel(rescind->child_relid);
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (channel == NULL) {
		/*
		 * We failed in processing the offer message;
		 * we would have cleaned up the relid in that
		 * failure path.
		 */
		return;
	}

	clean_up_chan_for_suspend = is_hvsock_channel(channel) ||
				    is_sub_channel(channel);
	/*
	 * Before setting channel->rescind in vmbus_rescind_cleanup(), we
	 * should make sure the channel callback is not running any more.
	 */
	vmbus_reset_channel_cb(channel);

	/*
	 * Now wait for offer handling to complete.
	 */
	vmbus_rescind_cleanup(channel);
	while (READ_ONCE(channel->probe_done) == false) {
		/*
		 * We wait here while the device probing is still
		 * in progress.
		 */
		msleep(1);
	}

	/*
	 * At this point, the rescind handling can proceed safely.
	 */
	if (channel->device_obj) {
		if (channel->chn_rescind_callback) {
			channel->chn_rescind_callback(channel);

			if (clean_up_chan_for_suspend)
				check_ready_for_suspend_event();

			return;
		}
		/*
		 * We will have to unregister this device from the
		 * driver core.
		 */
		dev = get_device(&channel->device_obj->device);
		if (dev) {
			vmbus_device_unregister(channel->device_obj);
			put_device(dev);
		}
	}
	if (channel->primary_channel != NULL) {
		/*
		 * Sub-channel is being rescinded. Following is the channel
		 * close sequence when initiated from the driver (refer to
		 * vmbus_close() for details):
		 * 1. Close all sub-channels first
		 * 2. Then close the primary channel.
		 */
		mutex_lock(&vmbus_connection.channel_mutex);
		if (channel->state == CHANNEL_OPEN_STATE) {
			/*
			 * The channel is currently not open;
			 * it is safe for us to cleanup the channel.
			 */
			hv_process_channel_removal(channel);
		} else {
			complete(&channel->rescind_event);
		}
		mutex_unlock(&vmbus_connection.channel_mutex);
	}

	/* The "channel" may have been freed. Do not access it any longer. */

	if (clean_up_chan_for_suspend)
		check_ready_for_suspend_event();
}

void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
{
	BUG_ON(!is_hvsock_channel(channel));

	/* We always get a rescind msg when a connection is closed. */
	while (!READ_ONCE(channel->probe_done) || !READ_ONCE(channel->rescind))
		msleep(1);

	vmbus_device_unregister(channel->device_obj);
}
EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);

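/*
 * vmbus_onoffers_delivered -
 * This is invoked when all offers have been delivered.
 *
 * Nothing to do here.
 */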
static void vmbus_onoffers_delivered(
			struct vmbus_channel_message_header *hdr)
{
}

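/*
 * vmbus_onopen_result - Open result handler.
 *
 * This is invoked when we received a response to our channel open request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */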
static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_open_result *result;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_open_channel *openmsg;
	unsigned long flags;

	result = (struct vmbus_channel_open_result *)hdr;

	trace_vmbus_onopen_result(result);

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
			openmsg =
			(struct vmbus_channel_open_channel *)msginfo->msg;
			if (openmsg->child_relid == result->child_relid &&
			    openmsg->openid == result->openid) {
				memcpy(&msginfo->response.open_result,
				       result,
				       sizeof(
					struct vmbus_channel_open_result));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

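/*
 * vmbus_ongpadl_created - GPADL created handler.
 *
 * This is invoked when we received a response to our gpadl create request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */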
static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_created *gpadlcreated;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_header *gpadlheader;
	unsigned long flags;

	gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;

	trace_vmbus_ongpadl_created(gpadlcreated);

	/*
	 * Find the establish msg, copy the result and signal/unblock the wait
	 * event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
			gpadlheader =
			(struct vmbus_channel_gpadl_header *)requestheader;

			if ((gpadlcreated->child_relid ==
			     gpadlheader->child_relid) &&
			    (gpadlcreated->gpadl == gpadlheader->gpadl)) {
				memcpy(&msginfo->response.gpadl_created,
				       gpadlcreated,
				       sizeof(
					struct vmbus_channel_gpadl_created));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

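/*
 * vmbus_ongpadl_torndown - GPADL torndown handler.
 *
 * This is invoked when we received a response to our gpadl teardown request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */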
static void vmbus_ongpadl_torndown(
			struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_torndown *gpadl_torndown;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_teardown *gpadl_teardown;
	unsigned long flags;

	gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;

	trace_vmbus_ongpadl_torndown(gpadl_torndown);

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
			gpadl_teardown =
			(struct vmbus_channel_gpadl_teardown *)requestheader;

			if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
				memcpy(&msginfo->response.gpadl_torndown,
				       gpadl_torndown,
				       sizeof(
					struct vmbus_channel_gpadl_torndown));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

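/*
 * vmbus_onversion_response - Version response handler.
 *
 * This is invoked when we received a response to our initiate contact request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */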
static void vmbus_onversion_response(
		struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_version_response *version_response;
	unsigned long flags;

	version_response = (struct vmbus_channel_version_response *)hdr;

	trace_vmbus_onversion_response(version_response);

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype ==
		    CHANNELMSG_INITIATE_CONTACT) {
			memcpy(&msginfo->response.version_response,
			       version_response,
			       sizeof(struct vmbus_channel_version_response));
			complete(&msginfo->waitevent);
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

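/* Channel message dispatch table */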
const struct vmbus_channel_message_table_entry
channel_message_table[CHANNELMSG_COUNT] = {
	{ CHANNELMSG_INVALID,			0, NULL, 0},
	{ CHANNELMSG_OFFERCHANNEL,		0, vmbus_onoffer,
		sizeof(struct vmbus_channel_offer_channel)},
	{ CHANNELMSG_RESCIND_CHANNELOFFER,	0, vmbus_onoffer_rescind,
		sizeof(struct vmbus_channel_rescind_offer) },
	{ CHANNELMSG_REQUESTOFFERS,		0, NULL, 0},
	{ CHANNELMSG_ALLOFFERS_DELIVERED,	1, vmbus_onoffers_delivered, 0},
	{ CHANNELMSG_OPENCHANNEL,		0, NULL, 0},
	{ CHANNELMSG_OPENCHANNEL_RESULT,	1, vmbus_onopen_result,
		sizeof(struct vmbus_channel_open_result)},
	{ CHANNELMSG_CLOSECHANNEL,		0, NULL, 0},
	{ CHANNELMSG_GPADL_HEADER,		0, NULL, 0},
	{ CHANNELMSG_GPADL_BODY,		0, NULL, 0},
	{ CHANNELMSG_GPADL_CREATED,		1, vmbus_ongpadl_created,
		sizeof(struct vmbus_channel_gpadl_created)},
	{ CHANNELMSG_GPADL_TEARDOWN,		0, NULL, 0},
	{ CHANNELMSG_GPADL_TORNDOWN,		1, vmbus_ongpadl_torndown,
		sizeof(struct vmbus_channel_gpadl_torndown) },
	{ CHANNELMSG_RELID_RELEASED,		0, NULL, 0},
	{ CHANNELMSG_INITIATE_CONTACT,		0, NULL, 0},
	{ CHANNELMSG_VERSION_RESPONSE,		1, vmbus_onversion_response,
		sizeof(struct vmbus_channel_version_response)},
	{ CHANNELMSG_UNLOAD,			0, NULL, 0},
	{ CHANNELMSG_UNLOAD_RESPONSE,		1, vmbus_unload_response, 0},
	{ CHANNELMSG_18,			0, NULL, 0},
	{ CHANNELMSG_19,			0, NULL, 0},
	{ CHANNELMSG_20,			0, NULL, 0},
	{ CHANNELMSG_TL_CONNECT_REQUEST,	0, NULL, 0},
	{ CHANNELMSG_MODIFYCHANNEL,		0, NULL, 0},
	{ CHANNELMSG_TL_CONNECT_RESULT,		0, NULL, 0},
};

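/*
 * vmbus_onmessage - Handler for channel protocol messages.
 *
 * This is invoked in the vmbus worker thread context.
 */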
void vmbus_onmessage(struct vmbus_channel_message_header *hdr)
{
	trace_vmbus_on_message(hdr);

	/*
	 * vmbus_on_msg_dpc() makes sure the hdr->msgtype here can not go
	 * out of bound and the message_handler pointer can not be NULL.
	 */
	channel_message_table[hdr->msgtype].message_handler(hdr);
}

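/*
 * vmbus_request_offers - Send a request to get all our pending offers.
 */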
int vmbus_request_offers(void)
{
	struct vmbus_channel_message_header *msg;
	struct vmbus_channel_msginfo *msginfo;
	int ret;

	msginfo = kmalloc(sizeof(*msginfo) +
			  sizeof(struct vmbus_channel_message_header),
			  GFP_KERNEL);
	if (!msginfo)
		return -ENOMEM;

	msg = (struct vmbus_channel_message_header *)msginfo->msg;

	msg->msgtype = CHANNELMSG_REQUESTOFFERS;

	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
			     true);

	trace_vmbus_request_offers(ret);

	if (ret != 0) {
		pr_err("Unable to request offers - %d\n", ret);

		goto cleanup;
	}

cleanup:
	kfree(msginfo);

	return ret;
}

static void invoke_sc_cb(struct vmbus_channel *primary_channel)
{
	struct list_head *cur, *tmp;
	struct vmbus_channel *cur_channel;

	if (primary_channel->sc_creation_callback == NULL)
		return;

	list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);

		primary_channel->sc_creation_callback(cur_channel);
	}
}

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
				  void (*sc_cr_cb)(struct vmbus_channel *new_sc))
{
	primary_channel->sc_creation_callback = sc_cr_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);

bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
{
	bool ret;

	ret = !list_empty(&primary->sc_list);

	if (ret) {
		/*
		 * Invoke the callback on sub-channel creation.
		 * This will present a uniform interface to the
		 * clients.
		 */
		invoke_sc_cb(primary);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);

void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
				    void (*chn_rescind_cb)(struct vmbus_channel *))
{
	channel->chn_rescind_callback = chn_rescind_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback);