1
2
3
4
5
6
7
8
9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11#include <linux/kernel.h>
12#include <linux/interrupt.h>
13#include <linux/sched.h>
14#include <linux/wait.h>
15#include <linux/mm.h>
16#include <linux/slab.h>
17#include <linux/list.h>
18#include <linux/module.h>
19#include <linux/completion.h>
20#include <linux/delay.h>
21#include <linux/cpu.h>
22#include <linux/hyperv.h>
23#include <asm/mshyperv.h>
24
25#include "hyperv_vmbus.h"
26
27static void init_vp_index(struct vmbus_channel *channel);
28
/*
 * Table of known VMBus device classes.  hv_get_dev_type() walks this
 * array from HV_IDE up to HV_UNKNOWN, so entries must stay in dev_type
 * order with the GUID-less HV_UNKNOWN entry last as the catch-all.
 *
 * .perf_device: performance-critical channel; init_vp_index() spreads
 *               such channels across CPUs instead of binding them to
 *               VMBUS_CONNECT_CPU.
 * .allowed_in_isolated: offer is accepted even when the guest runs with
 *               isolation support (see vmbus_is_valid_device()).
 */
const struct vmbus_device vmbus_devs[] = {
	/* IDE */
	{ .dev_type = HV_IDE,
	  HV_IDE_GUID,
	  .perf_device = true,
	  .allowed_in_isolated = false,
	},

	/* SCSI */
	{ .dev_type = HV_SCSI,
	  HV_SCSI_GUID,
	  .perf_device = true,
	  .allowed_in_isolated = true,
	},

	/* Fibre Channel */
	{ .dev_type = HV_FC,
	  HV_SYNTHFC_GUID,
	  .perf_device = true,
	  .allowed_in_isolated = false,
	},

	/* Synthetic NIC */
	{ .dev_type = HV_NIC,
	  HV_NIC_GUID,
	  .perf_device = true,
	  .allowed_in_isolated = true,
	},

	/* Network Direct */
	{ .dev_type = HV_ND,
	  HV_ND_GUID,
	  .perf_device = true,
	  .allowed_in_isolated = false,
	},

	/* PCIE */
	{ .dev_type = HV_PCIE,
	  HV_PCIE_GUID,
	  .perf_device = false,
	  .allowed_in_isolated = false,
	},

	/* Synthetic Frame Buffer */
	{ .dev_type = HV_FB,
	  HV_SYNTHVID_GUID,
	  .perf_device = false,
	  .allowed_in_isolated = false,
	},

	/* Synthetic Keyboard */
	{ .dev_type = HV_KBD,
	  HV_KBD_GUID,
	  .perf_device = false,
	  .allowed_in_isolated = false,
	},

	/* Synthetic MOUSE */
	{ .dev_type = HV_MOUSE,
	  HV_MOUSE_GUID,
	  .perf_device = false,
	  .allowed_in_isolated = false,
	},

	/* KVP */
	{ .dev_type = HV_KVP,
	  HV_KVP_GUID,
	  .perf_device = false,
	  .allowed_in_isolated = false,
	},

	/* Time Synch */
	{ .dev_type = HV_TS,
	  HV_TS_GUID,
	  .perf_device = false,
	  .allowed_in_isolated = true,
	},

	/* Heartbeat */
	{ .dev_type = HV_HB,
	  HV_HEART_BEAT_GUID,
	  .perf_device = false,
	  .allowed_in_isolated = true,
	},

	/* Shutdown */
	{ .dev_type = HV_SHUTDOWN,
	  HV_SHUTDOWN_GUID,
	  .perf_device = false,
	  .allowed_in_isolated = true,
	},

	/* File copy */
	{ .dev_type = HV_FCOPY,
	  HV_FCOPY_GUID,
	  .perf_device = false,
	  .allowed_in_isolated = false,
	},

	/* Backup */
	{ .dev_type = HV_BACKUP,
	  HV_VSS_GUID,
	  .perf_device = false,
	  .allowed_in_isolated = false,
	},

	/* Dynamic Memory */
	{ .dev_type = HV_DM,
	  HV_DM_GUID,
	  .perf_device = false,
	  .allowed_in_isolated = false,
	},

	/* Unknown GUID — terminator/catch-all; intentionally has no GUID. */
	{ .dev_type = HV_UNKNOWN,
	  .perf_device = false,
	  .allowed_in_isolated = false,
	},
};
148
/*
 * Device-class GUIDs the host may offer but that this driver refuses to
 * bind (see is_unsupported_vmbus_devs(); matching offers are mapped to
 * HV_UNKNOWN by hv_get_dev_type()).
 */
static const struct {
	guid_t guid;
} vmbus_unsupported_devs[] = {
	{ HV_AVMA1_GUID },
	{ HV_AVMA2_GUID },
	{ HV_RDV_GUID	},
};
156
157
158
159
160
/*
 * vmbus_rescind_cleanup - mark @channel rescinded and wake any thread
 * blocked waiting for a host response on this channel.
 *
 * channel->rescind is set under channelmsg_lock together with the scan
 * of chn_msg_list, so a sender either observes rescind == true or has
 * its waitevent completed here.  Only the first matching waiter is
 * completed (note the break).
 */
static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
{
	struct vmbus_channel_msginfo *msginfo;
	unsigned long flags;


	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	channel->rescind = true;
	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		/* Wake the request that was waiting on this channel. */
		if (msginfo->waiting_channel == channel) {
			complete(&msginfo->waitevent);
			break;
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
179
180static bool is_unsupported_vmbus_devs(const guid_t *guid)
181{
182 int i;
183
184 for (i = 0; i < ARRAY_SIZE(vmbus_unsupported_devs); i++)
185 if (guid_equal(guid, &vmbus_unsupported_devs[i].guid))
186 return true;
187 return false;
188}
189
190static u16 hv_get_dev_type(const struct vmbus_channel *channel)
191{
192 const guid_t *guid = &channel->offermsg.offer.if_type;
193 u16 i;
194
195 if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
196 return HV_UNKNOWN;
197
198 for (i = HV_IDE; i < HV_UNKNOWN; i++) {
199 if (guid_equal(guid, &vmbus_devs[i].guid))
200 return i;
201 }
202 pr_info("Unknown GUID: %pUl\n", guid);
203 return i;
204}
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
/**
 * vmbus_prep_negotiate_resp() - Create default response for negotiate message
 * @icmsghdrp: Pointer to msg header structure
 * @buf: Raw buffer channel data
 * @buflen: Length of the raw buffer channel data.
 * @fw_version: The framework versions we can support.
 * @fw_vercnt: The size of @fw_version.
 * @srv_version: The service versions we can support.
 * @srv_vercnt: The size of @srv_version.
 * @nego_fw_version: The selected framework version (optional, may be NULL).
 * @nego_srv_version: The selected service version (optional, may be NULL).
 *
 * Set up and fill in the default negotiate response message, mainly used
 * by the Hyper-V utility drivers.  Returns true if a mutually acceptable
 * framework AND service version was found, false otherwise (in which
 * case the response advertises zero supported versions).
 */
bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
				u32 buflen, const int *fw_version, int fw_vercnt,
				const int *srv_version, int srv_vercnt,
				int *nego_fw_version, int *nego_srv_version)
{
	int icframe_major, icframe_minor;
	int icmsg_major, icmsg_minor;
	int fw_major, fw_minor;
	int srv_major, srv_minor;
	int i, j;
	bool found_match = false;
	struct icmsg_negotiate *negop;

	/* Check that there's enough space for icframe_vercnt, icmsg_vercnt */
	if (buflen < ICMSG_HDR + offsetof(struct icmsg_negotiate, reserved)) {
		pr_err_ratelimited("Invalid icmsg negotiate\n");
		return false;
	}

	icmsghdrp->icmsgsize = 0x10;
	negop = (struct icmsg_negotiate *)&buf[ICMSG_HDR];

	icframe_major = negop->icframe_vercnt;
	icframe_minor = 0;

	icmsg_major = negop->icmsg_vercnt;
	icmsg_minor = 0;

	/*
	 * Validate negop packet: the version counts come from the host, so
	 * bound them and make sure the advertised icversion_data[] entries
	 * actually fit in the buffer before they are dereferenced below.
	 */
	if (icframe_major > IC_VERSION_NEGOTIATION_MAX_VER_COUNT ||
	    icmsg_major > IC_VERSION_NEGOTIATION_MAX_VER_COUNT ||
	    ICMSG_NEGOTIATE_PKT_SIZE(icframe_major, icmsg_major) > buflen) {
		pr_err_ratelimited("Invalid icmsg negotiate - icframe_major: %u, icmsg_major: %u\n",
				   icframe_major, icmsg_major);
		goto fw_error;
	}

	/*
	 * Select the framework version number we will support.  The
	 * framework versions offered by the host occupy the first
	 * icframe_vercnt slots of icversion_data[].
	 */
	for (i = 0; i < fw_vercnt; i++) {
		fw_major = (fw_version[i] >> 16);
		fw_minor = (fw_version[i] & 0xFFFF);

		for (j = 0; j < negop->icframe_vercnt; j++) {
			if ((negop->icversion_data[j].major == fw_major) &&
			    (negop->icversion_data[j].minor == fw_minor)) {
				icframe_major = negop->icversion_data[j].major;
				icframe_minor = negop->icversion_data[j].minor;
				found_match = true;
				break;
			}
		}

		if (found_match)
			break;
	}

	if (!found_match)
		goto fw_error;

	found_match = false;

	/*
	 * Now pick the service version.  The service versions follow the
	 * framework versions in icversion_data[], i.e. slots
	 * [icframe_vercnt, icframe_vercnt + icmsg_vercnt).
	 */
	for (i = 0; i < srv_vercnt; i++) {
		srv_major = (srv_version[i] >> 16);
		srv_minor = (srv_version[i] & 0xFFFF);

		for (j = negop->icframe_vercnt;
			(j < negop->icframe_vercnt + negop->icmsg_vercnt);
			j++) {

			if ((negop->icversion_data[j].major == srv_major) &&
			    (negop->icversion_data[j].minor == srv_minor)) {

				icmsg_major = negop->icversion_data[j].major;
				icmsg_minor = negop->icversion_data[j].minor;
				found_match = true;
				break;
			}
		}

		if (found_match)
			break;
	}

	/*
	 * Respond with the framework and service version numbers we can
	 * support.  Note: this label is reached on success as well — the
	 * found_match flag distinguishes the two cases.
	 */
fw_error:
	if (!found_match) {
		negop->icframe_vercnt = 0;
		negop->icmsg_vercnt = 0;
	} else {
		negop->icframe_vercnt = 1;
		negop->icmsg_vercnt = 1;
	}

	if (nego_fw_version)
		*nego_fw_version = (icframe_major << 16) | icframe_minor;

	if (nego_srv_version)
		*nego_srv_version = (icmsg_major << 16) | icmsg_minor;

	negop->icversion_data[0].major = icframe_major;
	negop->icversion_data[0].minor = icframe_minor;
	negop->icversion_data[1].major = icmsg_major;
	negop->icversion_data[1].minor = icmsg_minor;
	return found_match;
}
EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
337
338
339
340
341static struct vmbus_channel *alloc_channel(void)
342{
343 struct vmbus_channel *channel;
344
345 channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
346 if (!channel)
347 return NULL;
348
349 spin_lock_init(&channel->sched_lock);
350 init_completion(&channel->rescind_event);
351
352 INIT_LIST_HEAD(&channel->sc_list);
353
354 tasklet_init(&channel->callback_event,
355 vmbus_on_event, (unsigned long)channel);
356
357 hv_ringbuffer_pre_init(channel);
358
359 return channel;
360}
361
362
363
364
/*
 * free_channel - release a channel object.
 *
 * Stops the event tasklet and removes the sysfs attribute group, then
 * drops the kobject reference.  NOTE(review): the channel memory itself
 * is presumably freed by the kobject release callback once the last
 * reference is gone, not directly here — confirm against the kobj
 * release function.
 */
static void free_channel(struct vmbus_channel *channel)
{
	tasklet_kill(&channel->callback_event);
	vmbus_remove_channel_attr_group(channel);

	kobject_put(&channel->kobj);
}
372
/*
 * vmbus_channel_map_relid - publish @channel in the relid -> channel map.
 *
 * Guards against out-of-range relids, then stores the pointer with
 * smp_store_mb(): the store plus full memory barrier ensures the new
 * mapping is visible to all CPUs before any subsequent action by the
 * caller (e.g. queuing work or messaging the host) can trigger lookups
 * of this relid on other CPUs.
 */
void vmbus_channel_map_relid(struct vmbus_channel *channel)
{
	if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
		return;

	smp_store_mb(
		vmbus_connection.channels[channel->offermsg.child_relid],
		channel);
}
406
/*
 * vmbus_channel_unmap_relid - remove @channel from the relid -> channel map.
 *
 * WRITE_ONCE() pairs with the lockless readers of the channels[] array;
 * no barrier is needed here (unlike in vmbus_channel_map_relid()).
 */
void vmbus_channel_unmap_relid(struct vmbus_channel *channel)
{
	if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
		return;
	WRITE_ONCE(
		vmbus_connection.channels[channel->offermsg.child_relid],
		NULL);
}
415
/*
 * vmbus_release_relid - tell the host that @relid is no longer in use.
 *
 * The message is zeroed with memset() (rather than an initializer) so
 * that any structure padding sent to the host is cleared as well.  The
 * post result is only traced; there is no recovery path for a failed
 * release.
 */
static void vmbus_release_relid(u32 relid)
{
	struct vmbus_channel_relid_released msg;
	int ret;

	memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
	msg.child_relid = relid;
	msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
	ret = vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
			     true);

	trace_vmbus_release_relid(&msg, ret);
}
429
/*
 * hv_process_channel_removal - tear down a rescinded channel.
 *
 * Must be called with channel_mutex held and only for channels already
 * marked rescinded.  Ordering matters: the relid mapping is removed
 * before the channel is unlinked and before the relid is released back
 * to the host, so no interrupt-path lookup can find a channel that is
 * being freed.
 */
void hv_process_channel_removal(struct vmbus_channel *channel)
{
	lockdep_assert_held(&vmbus_connection.channel_mutex);
	BUG_ON(!channel->rescind);

	/*
	 * hv_process_channel_removal() could find INVALID_RELID only for
	 * hv_sock channels.  See the inline comments in vmbus_onoffer().
	 */
	WARN_ON(channel->offermsg.child_relid == INVALID_RELID &&
		!is_hvsock_channel(channel));

	/*
	 * Upon suspend, an in-use hv_sock channel is removed from the array
	 * of channels and the relid is invalidated.  After hibernation, when
	 * the user-space application destroys the channel, it's unnecessary
	 * and unsafe to remove the channel from the array of channels.
	 */
	if (channel->offermsg.child_relid != INVALID_RELID)
		vmbus_channel_unmap_relid(channel);

	if (channel->primary_channel == NULL)
		list_del(&channel->listentry);
	else
		list_del(&channel->sc_list);

	/*
	 * If this is a perf channel, give back its CPU so init_vp_index()
	 * can hand it to a future channel.
	 */
	if (hv_is_perf_channel(channel))
		hv_clear_alloced_cpu(channel->target_cpu);

	/* Finally, let the host reuse the relid (skip invalidated relids). */
	if (channel->offermsg.child_relid != INVALID_RELID)
		vmbus_release_relid(channel->offermsg.child_relid);

	free_channel(channel);
}
476
/*
 * vmbus_free_channels - unregister the devices of all primary channels.
 *
 * Each channel is first marked rescinded so teardown paths treat it as
 * gone.  The _safe iterator is used because vmbus_device_unregister()
 * may lead to the channel being removed from chn_list.
 */
void vmbus_free_channels(void)
{
	struct vmbus_channel *channel, *tmp;

	list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
		listentry) {
		/* hv_process_channel_removal() needs this */
		channel->rescind = true;

		vmbus_device_unregister(channel->device_obj);
	}
}
489
490
/*
 * vmbus_add_channel_work - deferred second half of offer processing.
 *
 * Runs on one of the two per-kind workqueues chosen in
 * vmbus_process_offer().  For a sub-channel it adds the sysfs kobject
 * and invokes the primary's sc_creation_callback; for a primary channel
 * it creates and registers the hv_device.  On any failure the channel
 * is unlinked, its relid unmapped and released, and the channel freed.
 */
static void vmbus_add_channel_work(struct work_struct *work)
{
	struct vmbus_channel *newchannel =
		container_of(work, struct vmbus_channel, add_channel_work);
	struct vmbus_channel *primary_channel = newchannel->primary_channel;
	int ret;

	/*
	 * This state is used to indicate a successful open so that when we
	 * are closing the channel we can cleanup properly.
	 */
	newchannel->state = CHANNEL_OPEN_STATE;

	if (primary_channel != NULL) {
		/* newchannel is a sub-channel. */
		struct hv_device *dev = primary_channel->device_obj;

		if (vmbus_add_channel_kobj(dev, newchannel))
			goto err_deq_chan;

		if (primary_channel->sc_creation_callback != NULL)
			primary_channel->sc_creation_callback(newchannel);

		newchannel->probe_done = true;
		return;
	}

	/*
	 * Start the process of binding the primary channel to the driver:
	 * create the device object and register it with the driver core.
	 */
	newchannel->device_obj = vmbus_device_create(
		&newchannel->offermsg.offer.if_type,
		&newchannel->offermsg.offer.if_instance,
		newchannel);
	if (!newchannel->device_obj)
		goto err_deq_chan;

	newchannel->device_obj->device_id = newchannel->device_id;
	/*
	 * Add the new device to the bus. This will kick off device-driver
	 * binding which eventually invokes the device driver's AddDevice()
	 * method.
	 */
	ret = vmbus_device_register(newchannel->device_obj);

	if (ret != 0) {
		pr_err("unable to add child device object (relid %d)\n",
			newchannel->offermsg.child_relid);
		kfree(newchannel->device_obj);
		goto err_deq_chan;
	}

	newchannel->probe_done = true;
	return;

err_deq_chan:
	mutex_lock(&vmbus_connection.channel_mutex);

	/*
	 * We need to set the flag, otherwise
	 * vmbus_onoffer_rescind() can be blocked.
	 */
	newchannel->probe_done = true;

	if (primary_channel == NULL)
		list_del(&newchannel->listentry);
	else
		list_del(&newchannel->sc_list);

	/* vmbus_process_offer() has mapped the channel. */
	vmbus_channel_unmap_relid(newchannel);

	mutex_unlock(&vmbus_connection.channel_mutex);

	vmbus_release_relid(newchannel->offermsg.child_relid);

	free_channel(newchannel);
}
570
571
572
573
574
/*
 * vmbus_process_offer - Process the offer by creating a channel/device
 * associated with this offer.
 *
 * Links the new channel into the per-connection data structures, picks
 * its target CPU, and queues the remainder of the work (device creation
 * / sub-channel callback) to a workqueue.
 */
static void vmbus_process_offer(struct vmbus_channel *newchannel)
{
	struct vmbus_channel *channel;
	struct workqueue_struct *wq;
	bool fnew = true;

	/*
	 * Synchronize vmbus_process_offer() and CPU hotplugging:
	 * the CPU chosen by init_vp_index() below must not go offline
	 * between the assignment and the point where the channel is
	 * fully set up.  NOTE(review): the exact interaction with the
	 * hotplug path lives outside this file — confirm there.
	 */
	cpus_read_lock();

	/*
	 * cpus_read_lock() must be taken before channel_mutex: other
	 * paths take them in this order, so acquiring them in the
	 * opposite order here could deadlock.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);

	/* A channel with the same type/instance GUIDs already present
	 * means newchannel is a sub-channel of that primary channel.
	 */
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (guid_equal(&channel->offermsg.offer.if_type,
			       &newchannel->offermsg.offer.if_type) &&
		    guid_equal(&channel->offermsg.offer.if_instance,
			       &newchannel->offermsg.offer.if_instance)) {
			fnew = false;
			newchannel->primary_channel = channel;
			break;
		}
	}

	init_vp_index(newchannel);

	/* Remember the channels that should be cleaned up upon suspend. */
	if (is_hvsock_channel(newchannel) || is_sub_channel(newchannel))
		atomic_inc(&vmbus_connection.nr_chan_close_on_suspend);

	/*
	 * Now that we have acquired the channel_mutex,
	 * we can release the potentially racing rescind thread.
	 */
	atomic_dec(&vmbus_connection.offer_in_progress);

	if (fnew) {
		list_add_tail(&newchannel->listentry,
			      &vmbus_connection.chn_list);
	} else {
		/*
		 * Check to see if this is a valid sub-channel.  A primary
		 * offer (sub_channel_index == 0) duplicating an existing
		 * primary is a host bug.
		 */
		if (newchannel->offermsg.offer.sub_channel_index == 0) {
			mutex_unlock(&vmbus_connection.channel_mutex);
			/*
			 * Don't call free_channel(), because newchannel->kobj
			 * is not initialized yet.
			 */
			kfree(newchannel);
			WARN_ON_ONCE(1);
			return;
		}
		/*
		 * Process the sub-channel.
		 */
		list_add_tail(&newchannel->sc_list, &channel->sc_list);
	}

	vmbus_channel_map_relid(newchannel);

	mutex_unlock(&vmbus_connection.channel_mutex);
	cpus_read_unlock();

	/*
	 * vmbus_process_offer() mustn't call channel->sc_creation_callback()
	 * directly for sub-channels, because sc_creation_callback() ->
	 * vmbus_open() may never get the host's response to the
	 * OPEN_CHANNEL message (the host may rescind a channel at any time,
	 * e.g. in the case of hot removing a NIC), and vmbus_onoffer_rescind()
	 * may not wake up the vmbus_open() as it's blocked due to a non-zero
	 * vmbus_connection.offer_in_progress, and finally we have a deadlock.
	 *
	 * The above is also true for primary channels, if the related device
	 * drivers use sync probing mode by default.
	 *
	 * Separate workqueues for primary and sub-channels are used so a
	 * stuck primary probe can't block sub-channel processing (and vice
	 * versa).
	 */
	INIT_WORK(&newchannel->add_channel_work, vmbus_add_channel_work);
	wq = fnew ? vmbus_connection.handle_primary_chan_wq :
		    vmbus_connection.handle_sub_chan_wq;
	queue_work(wq, &newchannel->add_channel_work);
}
685
686
687
688
689
690static bool hv_cpuself_used(u32 cpu, struct vmbus_channel *chn)
691{
692 struct vmbus_channel *primary = chn->primary_channel;
693 struct vmbus_channel *sc;
694
695 lockdep_assert_held(&vmbus_connection.channel_mutex);
696
697 if (!primary)
698 return false;
699
700 if (primary->target_cpu == cpu)
701 return true;
702
703 list_for_each_entry(sc, &primary->sc_list, sc_list)
704 if (sc != chn && sc->target_cpu == cpu)
705 return true;
706
707 return false;
708}
709
/*
 * We use this state to statically distribute the channel interrupt load.
 * Round-robin cursor over NUMA nodes, advanced by init_vp_index().
 */
static int next_numa_node_id;
714
715
716
717
718
719
720
721
722
723
724
725
/*
 * init_vp_index - pick the target CPU for @channel's interrupts.
 *
 * Non-performance-critical channels — and all channels on pre-Win8
 * hosts, which lack per-channel interrupt targeting — are bound to
 * VMBUS_CONNECT_CPU.  Performance-critical channels are distributed
 * round-robin across NUMA nodes, and within a node across the CPUs not
 * yet allocated to a perf channel (hv_context.hv_numa_map bookkeeping).
 * Up to ncpu + 1 attempts are made to avoid a CPU already used by a
 * sibling channel of the same device (hv_cpuself_used()).
 */
static void init_vp_index(struct vmbus_channel *channel)
{
	bool perf_chn = hv_is_perf_channel(channel);
	u32 i, ncpu = num_online_cpus();
	cpumask_var_t available_mask;
	struct cpumask *alloced_mask;
	u32 target_cpu;
	int numa_node;

	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn) ||
	    !alloc_cpumask_var(&available_mask, GFP_KERNEL)) {
		/*
		 * Prior to win8, all channel interrupts are
		 * delivered on VMBUS_CONNECT_CPU.
		 * Also if the channel is not a performance critical
		 * channel, bind it to VMBUS_CONNECT_CPU.
		 * In case alloc_cpumask_var() fails, bind it to
		 * VMBUS_CONNECT_CPU.
		 */
		channel->target_cpu = VMBUS_CONNECT_CPU;
		if (perf_chn)
			hv_set_alloced_cpu(VMBUS_CONNECT_CPU);
		return;
	}

	for (i = 1; i <= ncpu + 1; i++) {
		/* Advance to the next NUMA node that has online CPUs. */
		while (true) {
			numa_node = next_numa_node_id++;
			if (numa_node == nr_node_ids) {
				next_numa_node_id = 0;
				continue;
			}
			if (cpumask_empty(cpumask_of_node(numa_node)))
				continue;
			break;
		}
		alloced_mask = &hv_context.hv_numa_map[numa_node];

		if (cpumask_weight(alloced_mask) ==
		    cpumask_weight(cpumask_of_node(numa_node))) {
			/*
			 * We have cycled through all the CPUs in the node;
			 * reset the alloced map.
			 */
			cpumask_clear(alloced_mask);
		}

		/* CPUs of the node that are not yet alloced. */
		cpumask_xor(available_mask, alloced_mask,
			    cpumask_of_node(numa_node));

		target_cpu = cpumask_first(available_mask);
		cpumask_set_cpu(target_cpu, alloced_mask);

		/* Accept the CPU unless a sibling channel already uses it. */
		if (channel->offermsg.offer.sub_channel_index >= ncpu ||
		    i > ncpu || !hv_cpuself_used(target_cpu, channel))
			break;
	}

	channel->target_cpu = target_cpu;

	free_cpumask_var(available_mask);
}
789
/*
 * vmbus_wait_for_unload() polling parameters: poll every 10 ms, give up
 * after 100 s total, and print a progress notice every 5 s.
 */
#define UNLOAD_DELAY_UNIT_MS	10		/* 10 milliseconds */
#define UNLOAD_WAIT_MS		(100*1000)	/* 100 seconds */
#define UNLOAD_WAIT_LOOPS	(UNLOAD_WAIT_MS/UNLOAD_DELAY_UNIT_MS)
#define UNLOAD_MSG_MS		(5*1000)	/* Every 5 seconds */
#define UNLOAD_MSG_LOOPS	(UNLOAD_MSG_MS/UNLOAD_DELAY_UNIT_MS)
795
/*
 * vmbus_wait_for_unload - poll for the host's CHANNELMSG_UNLOAD_RESPONSE.
 *
 * Used on the crash path (see vmbus_initiate_unload()), where we cannot
 * sleep on unload_event: interrupts may be disabled and the response is
 * delivered to the SynIC message page of a CPU that may no longer be
 * running.  So we scan the message pages of all online CPUs and consume
 * messages by hand, with mdelay() (not msleep()) between rounds.  The
 * wait is bounded: after UNLOAD_WAIT_LOOPS rounds we give up and
 * continue anyway, logging progress every UNLOAD_MSG_LOOPS rounds.
 */
static void vmbus_wait_for_unload(void)
{
	int cpu;
	void *page_addr;
	struct hv_message *msg;
	struct vmbus_channel_message_header *hdr;
	u32 message_type, i;

	for (i = 1; i <= UNLOAD_WAIT_LOOPS; i++) {
		if (completion_done(&vmbus_connection.unload_event))
			goto completed;

		for_each_online_cpu(cpu) {
			struct hv_per_cpu_context *hv_cpu
				= per_cpu_ptr(hv_context.cpu_context, cpu);

			page_addr = hv_cpu->synic_message_page;
			msg = (struct hv_message *)page_addr
				+ VMBUS_MESSAGE_SINT;

			message_type = READ_ONCE(msg->header.message_type);
			if (message_type == HVMSG_NONE)
				continue;

			hdr = (struct vmbus_channel_message_header *)
				msg->u.payload;

			if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
				complete(&vmbus_connection.unload_event);

			/* Consume the message so the host can post more. */
			vmbus_signal_eom(msg, message_type);
		}

		/*
		 * Give a notice periodically so someone watching the
		 * serial output won't think it is completely stuck.
		 */
		if (!(i % UNLOAD_MSG_LOOPS))
			pr_notice("Waiting for VMBus UNLOAD to complete\n");

		mdelay(UNLOAD_DELAY_UNIT_MS);
	}
	pr_err("Continuing even though VMBus UNLOAD did not complete\n");

completed:
	/*
	 * We're crashing and already got the UNLOAD_RESPONSE, cleanup all
	 * maybe-pending messages on all CPUs to be able to receive new
	 * messages after we reconnect.
	 */
	for_each_online_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		page_addr = hv_cpu->synic_message_page;
		msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
		msg->header.message_type = HVMSG_NONE;
	}
}
872
873
874
875
/*
 * vmbus_unload_response - handler for the host's CHANNELMSG_UNLOAD_RESPONSE.
 *
 * This is a global event; just wake up the thread blocked in
 * vmbus_initiate_unload() (or polled by vmbus_wait_for_unload()).
 */
static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
{
	complete(&vmbus_connection.unload_event);
}
889
/*
 * vmbus_initiate_unload - send CHANNELMSG_UNLOAD to the host and wait
 * for the response.
 *
 * @crash: true when called from the panic/kexec path; in that case we
 * can't sleep on the completion (scheduler/interrupts may be unusable),
 * so the response is polled via vmbus_wait_for_unload() instead, and
 * vmbus_post_msg() is told not to retry with sleeping waits.
 */
void vmbus_initiate_unload(bool crash)
{
	struct vmbus_channel_message_header hdr;

	/* The xchg() makes sure UNLOAD is only initiated once. */
	if (xchg(&vmbus_connection.conn_state, DISCONNECTED) == DISCONNECTED)
		return;

	/* Pre-Win8.1 hosts don't support the unload request. */
	if (vmbus_proto_version < VERSION_WIN8_1)
		return;

	reinit_completion(&vmbus_connection.unload_event);
	memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
	hdr.msgtype = CHANNELMSG_UNLOAD;
	vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
		       !crash);

	if (!crash)
		wait_for_completion(&vmbus_connection.unload_event);
	else
		vmbus_wait_for_unload();
}
916
/*
 * check_ready_for_resume_event - count down a fixed-up channel on resume.
 *
 * When the count of channels still awaiting fix-up after hibernation
 * reaches zero, signal ready_for_resume_event so the resume path can
 * proceed.
 */
static void check_ready_for_resume_event(void)
{
	if (atomic_dec_and_test(&vmbus_connection.nr_chan_fixup_on_resume))
		complete(&vmbus_connection.ready_for_resume_event);
}
926
/*
 * vmbus_setup_channel_state - initialize per-channel state from @offer.
 *
 * The signaling connection id defaults to VMBUS_EVENT_CONNECTION_ID; on
 * protocols newer than WS2008 the offer carries a per-channel connection
 * id and a dedicated-interrupt flag instead.  The offer message itself
 * is copied into the channel, the monitor bit position is derived from
 * monitorid (32 bits per monitor group), and the channel is classified
 * via hv_get_dev_type().
 */
static void vmbus_setup_channel_state(struct vmbus_channel *channel,
				      struct vmbus_channel_offer_channel *offer)
{
	channel->sig_event = VMBUS_EVENT_CONNECTION_ID;

	if (vmbus_proto_version != VERSION_WS2008) {
		channel->is_dedicated_interrupt =
				(offer->is_dedicated_interrupt != 0);
		channel->sig_event = offer->connection_id;
	}

	memcpy(&channel->offermsg, offer,
	       sizeof(struct vmbus_channel_offer_channel));
	channel->monitor_grp = (u8)offer->monitorid / 32;
	channel->monitor_bit = (u8)offer->monitorid % 32;
	channel->device_id = hv_get_dev_type(channel);
}
947
948
949
950
951
952static struct vmbus_channel *
953find_primary_channel_by_offer(const struct vmbus_channel_offer_channel *offer)
954{
955 struct vmbus_channel *channel = NULL, *iter;
956 const guid_t *inst1, *inst2;
957
958
959 if (offer->offer.sub_channel_index != 0)
960 return NULL;
961
962 mutex_lock(&vmbus_connection.channel_mutex);
963
964 list_for_each_entry(iter, &vmbus_connection.chn_list, listentry) {
965 inst1 = &iter->offermsg.offer.if_instance;
966 inst2 = &offer->offer.if_instance;
967
968 if (guid_equal(inst1, inst2)) {
969 channel = iter;
970 break;
971 }
972 }
973
974 mutex_unlock(&vmbus_connection.channel_mutex);
975
976 return channel;
977}
978
979static bool vmbus_is_valid_device(const guid_t *guid)
980{
981 u16 i;
982
983 if (!hv_is_isolation_supported())
984 return true;
985
986 for (i = 0; i < ARRAY_SIZE(vmbus_devs); i++) {
987 if (guid_equal(guid, &vmbus_devs[i].guid))
988 return vmbus_devs[i].allowed_in_isolated;
989 }
990 return false;
991}
992
993
994
995
996
/*
 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
 *
 * Two cases:
 *  1. Resume from hibernation: the host re-offers a channel we already
 *     have — reuse the existing channel object, adopt the (possibly new)
 *     relid and re-map it.
 *  2. Fresh offer: allocate a new channel and hand it to
 *     vmbus_process_offer().
 */
static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_offer_channel *offer;
	struct vmbus_channel *oldchannel, *newchannel;
	size_t offer_sz;

	offer = (struct vmbus_channel_offer_channel *)hdr;

	trace_vmbus_onoffer(offer);

	/* In an isolated guest, reject device classes not on the allow-list. */
	if (!vmbus_is_valid_device(&offer->offer.if_type)) {
		pr_err_ratelimited("Invalid offer %d from the host supporting isolation\n",
				   offer->child_relid);
		atomic_dec(&vmbus_connection.offer_in_progress);
		return;
	}

	oldchannel = find_primary_channel_by_offer(offer);

	if (oldchannel != NULL) {
		/*
		 * We're resuming from hibernation: all the sub-channel and
		 * hv_sock channels we had before the hibernation should have
		 * been cleaned up, and now we must be seeing a re-offered
		 * primary channel that we had before the hibernation.
		 */
		mutex_lock(&vmbus_connection.channel_mutex);

		atomic_dec(&vmbus_connection.offer_in_progress);

		/* The relid was invalidated when the channel was suspended. */
		WARN_ON(oldchannel->offermsg.child_relid != INVALID_RELID);
		/* Fix up the relid: the host may assign a new one. */
		oldchannel->offermsg.child_relid = offer->child_relid;

		offer_sz = sizeof(*offer);
		if (memcmp(offer, &oldchannel->offermsg, offer_sz) != 0) {
			/*
			 * This is not an error, since the host can also change
			 * the other field(s) of the offer, e.g. on WS RS5
			 * (Build 17763), the offer->connection_id of the
			 * Mellanox VF vmbus device can change when the host
			 * reoffers the device upon resume.
			 */
			pr_debug("vmbus offer changed: relid=%d\n",
				 offer->child_relid);

			print_hex_dump_debug("Old vmbus offer: ",
					     DUMP_PREFIX_OFFSET, 16, 4,
					     &oldchannel->offermsg, offer_sz,
					     false);
			print_hex_dump_debug("New vmbus offer: ",
					     DUMP_PREFIX_OFFSET, 16, 4,
					     offer, offer_sz, false);

			/* Fix up the old channel. */
			vmbus_setup_channel_state(oldchannel, offer);
		}

		/* Add the channel back to the array of channels. */
		vmbus_channel_map_relid(oldchannel);
		check_ready_for_resume_event();

		mutex_unlock(&vmbus_connection.channel_mutex);
		return;
	}

	/* Allocate the channel object and save this offer. */
	newchannel = alloc_channel();
	if (!newchannel) {
		vmbus_release_relid(offer->child_relid);
		atomic_dec(&vmbus_connection.offer_in_progress);
		pr_err("Unable to allocate channel object\n");
		return;
	}

	vmbus_setup_channel_state(newchannel, offer);

	vmbus_process_offer(newchannel);
}
1099
/*
 * check_ready_for_suspend_event - count down a cleaned-up channel on
 * suspend.  When the count of channels still to be cleaned up reaches
 * zero, signal ready_for_suspend_event so the suspend path can proceed.
 */
static void check_ready_for_suspend_event(void)
{
	if (atomic_dec_and_test(&vmbus_connection.nr_chan_close_on_suspend))
		complete(&vmbus_connection.ready_for_suspend_event);
}
1109
1110
1111
1112
1113
1114
/*
 * vmbus_onoffer_rescind - Rescind offer handler.
 *
 * We queue a work item to process this offer synchronously.
 *
 * The flow goes:
 *  - wait until all pending offer processing has finished, so a rescind
 *    can't overtake the offer it belongs to;
 *  - locate the channel and mark it rescinded exactly once (rescind_ref
 *    guards against a duplicate rescind message);
 *  - quiesce the channel (reset callback, wake blocked senders, wait for
 *    probe completion) and then tear it down along the path appropriate
 *    to its kind (device-backed, hvsock, or sub-channel).
 */
static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_rescind_offer *rescind;
	struct vmbus_channel *channel;
	struct device *dev;
	bool clean_up_chan_for_suspend;

	rescind = (struct vmbus_channel_rescind_offer *)hdr;

	trace_vmbus_onoffer_rescind(rescind);

	/*
	 * The offer msg and the corresponding rescind msg
	 * from the host are guaranteed to be ordered -
	 * offer comes in first and then the rescind.
	 * Since we process these events in work elements,
	 * and with preemption, we may end up processing
	 * the events out of order.  We rely on the synchronization
	 * provided by offer_in_progress and by channel_mutex for
	 * ordering these events:
	 *
	 * { Initially: offer_in_progress = 1 }
	 *
	 * CPU1				CPU2
	 *
	 * [vmbus_onoffer()]		[vmbus_onoffer_rescind()]
	 *
	 * LOCK channel_mutex		WAIT_ON offer_in_progress == 0
	 * DECREMENT offer_in_progress	LOCK channel_mutex
	 * STORE channels[]		LOAD channels[]
	 * UNLOCK channel_mutex		UNLOCK channel_mutex
	 *
	 * Forbids: CPU2's LOAD from *not* seeing CPU1's STORE
	 */
	while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
		/*
		 * We wait here until any channel offer is currently
		 * being processed.
		 */
		msleep(1);
	}

	mutex_lock(&vmbus_connection.channel_mutex);
	channel = relid2channel(rescind->child_relid);
	if (channel != NULL) {
		/*
		 * Guarantee that no other instance of vmbus_onoffer_rescind()
		 * has got a reference to the channel object.  Synchronize on
		 * &vmbus_connection.channel_mutex.
		 */
		if (channel->rescind_ref) {
			mutex_unlock(&vmbus_connection.channel_mutex);
			return;
		}
		channel->rescind_ref = true;
	}
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (channel == NULL) {
		/*
		 * We failed in processing the offer message;
		 * we would have cleaned up the relid in that
		 * failure path.
		 */
		return;
	}

	clean_up_chan_for_suspend = is_hvsock_channel(channel) ||
				    is_sub_channel(channel);
	/*
	 * Before setting channel->rescind in vmbus_rescind_cleanup(), we
	 * should make sure the channel callback is not running any more.
	 */
	vmbus_reset_channel_cb(channel);

	/*
	 * Now wait for offer handling to complete.
	 */
	vmbus_rescind_cleanup(channel);
	while (READ_ONCE(channel->probe_done) == false) {
		/*
		 * We wait here until any channel offer is currently
		 * being processed.
		 */
		msleep(1);
	}

	/*
	 * At this point, the rescind handling can proceed safely.
	 */
	if (channel->device_obj) {
		if (channel->chn_rescind_callback) {
			/* hvsock-style channels handle rescind themselves. */
			channel->chn_rescind_callback(channel);

			if (clean_up_chan_for_suspend)
				check_ready_for_suspend_event();

			return;
		}
		/*
		 * We will have to unregister this device from the
		 * driver core.
		 */
		dev = get_device(&channel->device_obj->device);
		if (dev) {
			vmbus_device_unregister(channel->device_obj);
			put_device(dev);
		}
	} else if (channel->primary_channel != NULL) {
		/*
		 * Sub-channel is being rescinded. Following is the channel
		 * close sequence when initiated from the driveri (refer to
		 * vmbus_close() for details):
		 * 1. Close all sub-channels first
		 * 2. Then close the primary channel.
		 */
		mutex_lock(&vmbus_connection.channel_mutex);
		if (channel->state == CHANNEL_OPEN_STATE) {
			/*
			 * The channel is currently not open;
			 * it is safe for us to cleanup the channel.
			 */
			hv_process_channel_removal(channel);
		} else {
			complete(&channel->rescind_event);
		}
		mutex_unlock(&vmbus_connection.channel_mutex);
	}

	/* The "channel" may have been freed. Do not access it any longer. */

	if (clean_up_chan_for_suspend)
		check_ready_for_suspend_event();
}
1251
/*
 * vmbus_hvsock_device_unregister - unregister an hvsock channel's device.
 *
 * Polls until the channel has both completed probing and been rescinded
 * by the host before unregistering, so teardown never races with either.
 */
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
{
	BUG_ON(!is_hvsock_channel(channel));

	/* We always get a rescind msg when a connection is closed. */
	while (!READ_ONCE(channel->probe_done) || !READ_ONCE(channel->rescind))
		msleep(1);

	vmbus_device_unregister(channel->device_obj);
}
EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
1263
1264
1265
1266
1267
1268
1269
1270
/*
 * vmbus_onoffers_delivered -
 * The CHANNELMSG_ALLOFFERS_DELIVERED message just indicates that the
 * host has sent all boot-time offers; nothing needs to be done here.
 */
static void vmbus_onoffers_delivered(
			struct vmbus_channel_message_header *hdr)
{
}
1275
1276
1277
1278
1279
1280
1281
1282
1283static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
1284{
1285 struct vmbus_channel_open_result *result;
1286 struct vmbus_channel_msginfo *msginfo;
1287 struct vmbus_channel_message_header *requestheader;
1288 struct vmbus_channel_open_channel *openmsg;
1289 unsigned long flags;
1290
1291 result = (struct vmbus_channel_open_result *)hdr;
1292
1293 trace_vmbus_onopen_result(result);
1294
1295
1296
1297
1298 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
1299
1300 list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
1301 msglistentry) {
1302 requestheader =
1303 (struct vmbus_channel_message_header *)msginfo->msg;
1304
1305 if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
1306 openmsg =
1307 (struct vmbus_channel_open_channel *)msginfo->msg;
1308 if (openmsg->child_relid == result->child_relid &&
1309 openmsg->openid == result->openid) {
1310 memcpy(&msginfo->response.open_result,
1311 result,
1312 sizeof(
1313 struct vmbus_channel_open_result));
1314 complete(&msginfo->waitevent);
1315 break;
1316 }
1317 }
1318 }
1319 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
1320}
1321
1322
1323
1324
1325
1326
1327
1328
/*
 * vmbus_ongpadl_created - GPADL created handler.
 *
 * Find the pending CHANNELMSG_GPADL_HEADER request matching this
 * response (by child_relid and gpadl handle), copy the response into
 * its msginfo and wake the waiting sender.
 */
static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_created *gpadlcreated;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_header *gpadlheader;
	unsigned long flags;

	gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;

	trace_vmbus_ongpadl_created(gpadlcreated);

	/*
	 * Find the establish msg, copy the result and signal/unblock the
	 * wait event.
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
			gpadlheader =
			(struct vmbus_channel_gpadl_header *)requestheader;

			if ((gpadlcreated->child_relid ==
			     gpadlheader->child_relid) &&
			    (gpadlcreated->gpadl == gpadlheader->gpadl)) {
				memcpy(&msginfo->response.gpadl_created,
				       gpadlcreated,
				       sizeof(
					struct vmbus_channel_gpadl_created));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
1370
1371
1372
1373
1374
1375
1376
/*
 * vmbus_onmodifychannel_response - Modify Channel response handler.
 *
 * Find the pending CHANNELMSG_MODIFYCHANNEL request matching this
 * response (by child_relid), copy the response into its msginfo and
 * wake the waiting sender.
 */
static void vmbus_onmodifychannel_response(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_modifychannel_response *response;
	struct vmbus_channel_msginfo *msginfo;
	unsigned long flags;

	response = (struct vmbus_channel_modifychannel_response *)hdr;

	trace_vmbus_onmodifychannel_response(response);

	/*
	 * Find the modify msg, copy the response and signal/unblock the wait
	 * event.
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list, msglistentry) {
		struct vmbus_channel_message_header *responseheader =
				(struct vmbus_channel_message_header *)msginfo->msg;

		if (responseheader->msgtype == CHANNELMSG_MODIFYCHANNEL) {
			struct vmbus_channel_modifychannel *modifymsg;

			modifymsg = (struct vmbus_channel_modifychannel *)msginfo->msg;
			if (modifymsg->child_relid == response->child_relid) {
				memcpy(&msginfo->response.modify_response, response,
				       sizeof(*response));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
1410
1411
1412
1413
1414
1415
1416
1417
/*
 * vmbus_ongpadl_torndown - GPADL torndown handler.
 *
 * Find the pending CHANNELMSG_GPADL_TEARDOWN request matching this
 * response (by gpadl handle), copy the response into its msginfo and
 * wake the waiting sender.
 */
static void vmbus_ongpadl_torndown(
			struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_torndown *gpadl_torndown;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_teardown *gpadl_teardown;
	unsigned long flags;

	gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;

	trace_vmbus_ongpadl_torndown(gpadl_torndown);

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event.
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
			gpadl_teardown =
			(struct vmbus_channel_gpadl_teardown *)requestheader;

			if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
				memcpy(&msginfo->response.gpadl_torndown,
				       gpadl_torndown,
				       sizeof(
					struct vmbus_channel_gpadl_torndown));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
1457
1458
1459
1460
1461
1462
1463
1464
/*
 * vmbus_onversion_response - Version response handler.
 *
 * Copy the response into every pending CHANNELMSG_INITIATE_CONTACT
 * request and wake its sender.  Note: unlike the other response
 * handlers, there is deliberately no break — all matching waiters are
 * completed, as the response carries no identifier to pair it with a
 * specific request.
 */
static void vmbus_onversion_response(
		struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_version_response *version_response;
	unsigned long flags;

	version_response = (struct vmbus_channel_version_response *)hdr;

	trace_vmbus_onversion_response(version_response);

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype ==
		    CHANNELMSG_INITIATE_CONTACT) {
			memcpy(&msginfo->response.version_response,
			      version_response,
			      sizeof(struct vmbus_channel_version_response));
			complete(&msginfo->waitevent);
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
1494
1495
/*
 * Channel message dispatch table, indexed by msgtype (see
 * vmbus_onmessage()).  Entries with a NULL handler are messages we only
 * send, never receive.  The last field is the minimum payload length
 * for the message.  NOTE(review): the second field (0/1) appears to
 * distinguish handlers by invocation context — confirm against the
 * vmbus_channel_message_table_entry definition in hyperv_vmbus.h.
 */
const struct vmbus_channel_message_table_entry
channel_message_table[CHANNELMSG_COUNT] = {
	{ CHANNELMSG_INVALID,			0, NULL, 0},
	{ CHANNELMSG_OFFERCHANNEL,		0, vmbus_onoffer,
		sizeof(struct vmbus_channel_offer_channel)},
	{ CHANNELMSG_RESCIND_CHANNELOFFER,	0, vmbus_onoffer_rescind,
		sizeof(struct vmbus_channel_rescind_offer) },
	{ CHANNELMSG_REQUESTOFFERS,		0, NULL, 0},
	{ CHANNELMSG_ALLOFFERS_DELIVERED,	1, vmbus_onoffers_delivered, 0},
	{ CHANNELMSG_OPENCHANNEL,		0, NULL, 0},
	{ CHANNELMSG_OPENCHANNEL_RESULT,	1, vmbus_onopen_result,
		sizeof(struct vmbus_channel_open_result)},
	{ CHANNELMSG_CLOSECHANNEL,		0, NULL, 0},
	{ CHANNELMSG_GPADL_HEADER,		0, NULL, 0},
	{ CHANNELMSG_GPADL_BODY,		0, NULL, 0},
	{ CHANNELMSG_GPADL_CREATED,		1, vmbus_ongpadl_created,
		sizeof(struct vmbus_channel_gpadl_created)},
	{ CHANNELMSG_GPADL_TEARDOWN,		0, NULL, 0},
	{ CHANNELMSG_GPADL_TORNDOWN,		1, vmbus_ongpadl_torndown,
		sizeof(struct vmbus_channel_gpadl_torndown) },
	{ CHANNELMSG_RELID_RELEASED,		0, NULL, 0},
	{ CHANNELMSG_INITIATE_CONTACT,		0, NULL, 0},
	{ CHANNELMSG_VERSION_RESPONSE,		1, vmbus_onversion_response,
		sizeof(struct vmbus_channel_version_response)},
	{ CHANNELMSG_UNLOAD,			0, NULL, 0},
	{ CHANNELMSG_UNLOAD_RESPONSE,		1, vmbus_unload_response, 0},
	{ CHANNELMSG_18,			0, NULL, 0},
	{ CHANNELMSG_19,			0, NULL, 0},
	{ CHANNELMSG_20,			0, NULL, 0},
	{ CHANNELMSG_TL_CONNECT_REQUEST,	0, NULL, 0},
	{ CHANNELMSG_MODIFYCHANNEL,		0, NULL, 0},
	{ CHANNELMSG_TL_CONNECT_RESULT,		0, NULL, 0},
	{ CHANNELMSG_MODIFYCHANNEL_RESPONSE,	1, vmbus_onmodifychannel_response,
		sizeof(struct vmbus_channel_modifychannel_response)},
};
1531
1532
1533
1534
1535
1536
/*
 * vmbus_onmessage - Handler for channel protocol messages.
 *
 * This is invoked in the vmbus worker thread context.  No bounds or
 * NULL check is performed here: the caller is expected to have
 * validated hdr->msgtype against CHANNELMSG_COUNT and to only dispatch
 * messages whose table entry has a non-NULL handler — NOTE(review):
 * confirm against the message-DPC path that calls this.
 */
void vmbus_onmessage(struct vmbus_channel_message_header *hdr)
{
	trace_vmbus_on_message(hdr);

	channel_message_table[hdr->msgtype].message_handler(hdr);
}
1547
1548
1549
1550
1551int vmbus_request_offers(void)
1552{
1553 struct vmbus_channel_message_header *msg;
1554 struct vmbus_channel_msginfo *msginfo;
1555 int ret;
1556
1557 msginfo = kmalloc(sizeof(*msginfo) +
1558 sizeof(struct vmbus_channel_message_header),
1559 GFP_KERNEL);
1560 if (!msginfo)
1561 return -ENOMEM;
1562
1563 msg = (struct vmbus_channel_message_header *)msginfo->msg;
1564
1565 msg->msgtype = CHANNELMSG_REQUESTOFFERS;
1566
1567 ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
1568 true);
1569
1570 trace_vmbus_request_offers(ret);
1571
1572 if (ret != 0) {
1573 pr_err("Unable to request offers - %d\n", ret);
1574
1575 goto cleanup;
1576 }
1577
1578cleanup:
1579 kfree(msginfo);
1580
1581 return ret;
1582}
1583
1584static void invoke_sc_cb(struct vmbus_channel *primary_channel)
1585{
1586 struct list_head *cur, *tmp;
1587 struct vmbus_channel *cur_channel;
1588
1589 if (primary_channel->sc_creation_callback == NULL)
1590 return;
1591
1592 list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
1593 cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
1594
1595 primary_channel->sc_creation_callback(cur_channel);
1596 }
1597}
1598
/*
 * vmbus_set_sc_create_callback - register the callback invoked for each
 * newly created sub-channel of @primary_channel (see
 * vmbus_add_channel_work() and invoke_sc_cb()).
 */
void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
				void (*sc_cr_cb)(struct vmbus_channel *new_sc))
{
	primary_channel->sc_creation_callback = sc_cr_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
1605
/*
 * vmbus_are_subchannels_present - report whether @primary has any
 * sub-channels.  If it does, the sub-channel creation callback is
 * (re-)invoked for each of them via invoke_sc_cb() before returning.
 */
bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
{
	bool ret;

	ret = !list_empty(&primary->sc_list);

	if (ret) {
		/*
		 * Invoke the callback on each existing sub-channel so the
		 * caller's sc_creation_callback sees all of them, even ones
		 * created before the callback was registered.
		 */
		invoke_sc_cb(primary);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);
1624
/*
 * vmbus_set_chn_rescind_callback - register the callback invoked when
 * the host rescinds @channel (see vmbus_onoffer_rescind()).
 */
void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
		void (*chn_rescind_cb)(struct vmbus_channel *))
{
	channel->chn_rescind_callback = chn_rescind_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback);