#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/percpu_counter.h>

#include <linux/hyperv.h>
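
/*
 * Dynamic Memory protocol versions.  The guest offers the newest version
 * it supports (the WIN10 protocol) and, if the host rejects it, retries
 * with progressively older versions (WIN8, then WIN7).
 */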
#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)

enum {
	DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
	DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
	DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),

	DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
	DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
	DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,

	DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
};

enum dm_message_type {
	DM_ERROR = 0,
	DM_VERSION_REQUEST = 1,
	DM_VERSION_RESPONSE = 2,
	DM_CAPABILITIES_REPORT = 3,
	DM_CAPABILITIES_RESPONSE = 4,
	DM_STATUS_REPORT = 5,
	DM_BALLOON_REQUEST = 6,
	DM_BALLOON_RESPONSE = 7,
	DM_UNBALLOON_REQUEST = 8,
	DM_UNBALLOON_RESPONSE = 9,
	DM_MEM_HOT_ADD_REQUEST = 10,
	DM_MEM_HOT_ADD_RESPONSE = 11,
	DM_VERSION_03_MAX = 11,

	DM_INFO_MESSAGE = 12,
	DM_VERSION_1_MAX = 12
};
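
/*
 * Wire-format types used by the dynamic memory protocol messages below.
 * Every message begins with a struct dm_header and all structures are
 * declared __packed to match the layout used by the host.
 */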
union dm_version {
	struct {
		__u16 minor_version;
		__u16 major_version;
	};
	__u32 version;
} __packed;

union dm_caps {
	struct {
		__u64 balloon:1;
		__u64 hot_add:1;
		/*
		 * Alignment requirement (power-of-2 megabytes) the guest
		 * needs for hot-added memory.
		 */
		__u64 hot_add_alignment:4;
		__u64 reservedz:58;
	} cap_bits;
	__u64 caps;
} __packed;

union dm_mem_page_range {
	struct {
		/* Starting PFN of the range (40 bits is the x86-64 PFN limit). */
		__u64 start_page:40;
		/* Number of pages in the range. */
		__u64 page_cnt:24;
	} finfo;
	__u64 page_range;
} __packed;

struct dm_header {
	__u16 type;
	__u16 size;
	__u32 trans_id;
} __packed;

struct dm_message {
	struct dm_header hdr;
	__u8 data[];
} __packed;

struct dm_version_request {
	struct dm_header hdr;
	union dm_version version;
	__u32 is_last_attempt:1;
	__u32 reservedz:31;
} __packed;

struct dm_version_response {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;

struct dm_capabilities {
	struct dm_header hdr;
	union dm_caps caps;
	__u64 min_page_cnt;
	__u64 max_page_number;
} __packed;

struct dm_capabilities_resp_msg {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;

struct dm_status {
	struct dm_header hdr;
	__u64 num_avail;
	__u64 num_committed;
	__u64 page_file_size;
	__u64 zero_free;
	__u32 page_file_writes;
	__u32 io_diff;
} __packed;

struct dm_balloon {
	struct dm_header hdr;
	__u32 num_pages;
	__u32 reservedz;
} __packed;

struct dm_balloon_response {
	struct dm_header hdr;
	__u32 reservedz;
	__u32 more_pages:1;
	__u32 range_count:31;
	union dm_mem_page_range range_array[];
} __packed;

struct dm_unballoon_request {
	struct dm_header hdr;
	__u32 more_pages:1;
	__u32 reservedz:31;
	__u32 range_count;
	union dm_mem_page_range range_array[];
} __packed;

struct dm_unballoon_response {
	struct dm_header hdr;
} __packed;

struct dm_hot_add {
	struct dm_header hdr;
	union dm_mem_page_range range;
} __packed;

struct dm_hot_add_response {
	struct dm_header hdr;
	__u32 page_count;
	__u32 result;
} __packed;

enum dm_info_type {
	INFO_TYPE_MAX_PAGE_CNT = 0,
	MAX_INFO_TYPE
};

struct dm_info_header {
	enum dm_info_type type;
	__u32 data_size;
} __packed;

struct dm_info_msg {
	struct dm_header hdr;
	__u32 reserved;
	__u32 info_size;
	__u8 info[];
};
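
/*
 * Driver-internal state tracked for every hot-add region the guest knows
 * about: which page ranges have been hot-added and covered so far, and
 * any gaps (sub-ranges that must not be brought online).
 */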
struct hv_hotadd_state {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long covered_start_pfn;
	unsigned long covered_end_pfn;
	unsigned long ha_end_pfn;
	unsigned long end_pfn;
	/*
	 * Gaps in this region (sub-ranges that are not backed and must
	 * not be onlined).
	 */
	struct list_head gap_list;
};

struct hv_hotadd_gap {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

struct balloon_state {
	__u32 num_pages;
	struct work_struct wrk;
};

struct hot_add_wrk {
	union dm_mem_page_range ha_page_range;
	union dm_mem_page_range ha_region_range;
	struct work_struct wrk;
};

static bool hot_add = true;
static bool do_hot_add;
/*
 * Delay reporting memory pressure by the specified number of seconds.
 */
static uint pressure_report_delay = 45;

/*
 * The last time we posted a pressure report to host.
 */
static unsigned long last_post_time;

module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");

module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);

static int dm_ring_size = (5 * PAGE_SIZE);

/*
 * Driver specific state.
 */
enum hv_dm_state {
	DM_INITIALIZING = 0,
	DM_INITIALIZED,
	DM_BALLOON_UP,
	DM_BALLOON_DOWN,
	DM_HOT_ADD,
	DM_INIT_ERROR
};

static __u8 recv_buffer[PAGE_SIZE];
static __u8 *send_buffer;
#define PAGES_IN_2M 512
#define HA_CHUNK (32 * 1024)
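
/*
 * Per-device state for the dynamic memory (balloon) device.
 */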
struct hv_dynmem_device {
	struct hv_device *dev;
	enum hv_dm_state state;
	struct completion host_event;
	struct completion config_event;

	/*
	 * Number of pages we have currently ballooned out.
	 */
	unsigned int num_pages_ballooned;
	unsigned int num_pages_onlined;
	unsigned int num_pages_added;

	/*
	 * State to manage the ballooning (up) operation.
	 */
	struct balloon_state balloon_wrk;

	/*
	 * State to execute the "hot-add" operation.
	 */
	struct hot_add_wrk ha_wrk;

	/*
	 * This state tracks if the host has specified a hot-add region.
	 */
	bool host_specified_ha_region;

	/*
	 * State to synchronize hot-add.
	 */
	struct completion ol_waitevent;
	bool ha_waiting;

	/*
	 * Thread that periodically reports memory pressure (status)
	 * to the host.
	 */
	struct task_struct *thread;

	/*
	 * Protects ha_region_list, the num_pages_onlined counter and
	 * the individual regions on ha_region_list.
	 */
	spinlock_t ha_lock;

	/*
	 * A list of hot-add regions.
	 */
	struct list_head ha_region_list;

	/*
	 * We start with the highest version we can support
	 * and downgrade based on the host; we save here the
	 * next version to try.
	 */
	__u32 next_version;
};

static struct hv_dynmem_device dm_device;

static void post_status(struct hv_dynmem_device *dm);
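
/*
 * Memory hot-add support; compiled in only when the kernel has
 * CONFIG_MEMORY_HOTPLUG enabled.
 */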
#ifdef CONFIG_MEMORY_HOTPLUG
static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
			      void *v)
{
	struct memory_notify *mem = (struct memory_notify *)v;
	unsigned long flags;

	switch (val) {
	case MEM_ONLINE:
		spin_lock_irqsave(&dm_device.ha_lock, flags);
		dm_device.num_pages_onlined += mem->nr_pages;
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
		/* Fall through */
	case MEM_CANCEL_ONLINE:
		if (dm_device.ha_waiting) {
			dm_device.ha_waiting = false;
			complete(&dm_device.ol_waitevent);
		}
		break;

	case MEM_OFFLINE:
		spin_lock_irqsave(&dm_device.ha_lock, flags);
		dm_device.num_pages_onlined -= mem->nr_pages;
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
		break;
	case MEM_GOING_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block hv_memory_nb = {
	.notifier_call = hv_memory_notifier,
	.priority = 0
};
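
/*
 * Bring a single page online, unless it falls outside the covered range
 * of its hot-add region or inside one of the region's gaps.
 */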
static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
{
	unsigned long cur_start_pgp;
	unsigned long cur_end_pgp;
	struct hv_hotadd_gap *gap;

	cur_start_pgp = (unsigned long)pfn_to_page(has->covered_start_pfn);
	cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);

	/* The page is not backed. */
	if (((unsigned long)pg < cur_start_pgp) ||
	    ((unsigned long)pg >= cur_end_pgp))
		return;

	/* Skip pages that fall inside a gap. */
	list_for_each_entry(gap, &has->gap_list, list) {
		cur_start_pgp = (unsigned long)
			pfn_to_page(gap->start_pfn);
		cur_end_pgp = (unsigned long)
			pfn_to_page(gap->end_pfn);
		if (((unsigned long)pg >= cur_start_pgp) &&
		    ((unsigned long)pg < cur_end_pgp)) {
			return;
		}
	}

	__online_page_set_limits(pg);
	__online_page_increment_counters(pg);
	__online_page_free(pg);
}

static void hv_bring_pgs_online(struct hv_hotadd_state *has,
				unsigned long start_pfn, unsigned long size)
{
	int i;

	for (i = 0; i < size; i++)
		hv_page_online_one(has, pfn_to_page(start_pfn + i));
}
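
/*
 * Hot-add the requested pages in HA_CHUNK sized blocks and, after each
 * add_memory() call, wait (when auto-onlining is disabled) up to 5
 * seconds for the block to be onlined before reporting status.
 */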
static void hv_mem_hot_add(unsigned long start, unsigned long size,
			   unsigned long pfn_count,
			   struct hv_hotadd_state *has)
{
	int ret = 0;
	int i, nid;
	unsigned long start_pfn;
	unsigned long processed_pfn;
	unsigned long total_pfn = pfn_count;
	unsigned long flags;

	for (i = 0; i < (size/HA_CHUNK); i++) {
		start_pfn = start + (i * HA_CHUNK);

		spin_lock_irqsave(&dm_device.ha_lock, flags);
		has->ha_end_pfn += HA_CHUNK;

		if (total_pfn > HA_CHUNK) {
			processed_pfn = HA_CHUNK;
			total_pfn -= HA_CHUNK;
		} else {
			processed_pfn = total_pfn;
			total_pfn = 0;
		}

		has->covered_end_pfn += processed_pfn;
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);

		init_completion(&dm_device.ol_waitevent);
		dm_device.ha_waiting = !memhp_auto_online;

		nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
		ret = add_memory(nid, PFN_PHYS((start_pfn)),
				(HA_CHUNK << PAGE_SHIFT));

		if (ret) {
			pr_info("hot_add memory failed error is %d\n", ret);
			if (ret == -EEXIST) {
				/*
				 * -EEXIST is not a transient failure: the
				 * region overlaps memory that already
				 * exists, so stop all further hot-add
				 * attempts.
				 */
				do_hot_add = false;
			}
			spin_lock_irqsave(&dm_device.ha_lock, flags);
			has->ha_end_pfn -= HA_CHUNK;
			has->covered_end_pfn -= processed_pfn;
			spin_unlock_irqrestore(&dm_device.ha_lock, flags);
			break;
		}

		/*
		 * If userspace (not the kernel) is responsible for onlining,
		 * wait for the block to be onlined.  The hot add has already
		 * succeeded, so it is fine to continue if the timeout
		 * expires.
		 */
		if (dm_device.ha_waiting)
			wait_for_completion_timeout(&dm_device.ol_waitevent,
						    5*HZ);
		post_status(&dm_device);
	}

	return;
}
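
/*
 * Callback invoked by the memory hotplug core for every page being
 * onlined; defer to hv_page_online_one() for the region that owns it.
 */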
static void hv_online_page(struct page *pg)
{
	struct hv_hotadd_state *has;
	unsigned long cur_start_pgp;
	unsigned long cur_end_pgp;
	unsigned long flags;

	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		cur_start_pgp = (unsigned long)
			pfn_to_page(has->start_pfn);
		cur_end_pgp = (unsigned long)pfn_to_page(has->end_pfn);

		if (((unsigned long)pg < cur_start_pgp) ||
		    ((unsigned long)pg >= cur_end_pgp))
			continue;

		hv_page_online_one(has, pg);
		break;
	}
	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
}
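
/*
 * Check whether the requested page range falls inside an already known
 * hot-add region; record any gap before it and grow the region if the
 * request extends past its current end.
 */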
static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
	struct hv_hotadd_state *has;
	struct hv_hotadd_gap *gap;
	unsigned long residual, new_inc;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		/*
		 * If the pfn range we are dealing with is not in the
		 * current "hot add block", move on.
		 */
		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
			continue;

		/*
		 * If the current start pfn is not where the covered_end
		 * is, create a gap and update covered_end_pfn.
		 */
		if (has->covered_end_pfn != start_pfn) {
			gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
			if (!gap) {
				ret = -ENOMEM;
				break;
			}

			INIT_LIST_HEAD(&gap->list);
			gap->start_pfn = has->covered_end_pfn;
			gap->end_pfn = start_pfn;
			list_add_tail(&gap->list, &has->gap_list);

			has->covered_end_pfn = start_pfn;
		}

		/*
		 * If the current hot-add request extends beyond
		 * our current limit, extend it.
		 */
		if ((start_pfn + pfn_cnt) > has->end_pfn) {
			residual = (start_pfn + pfn_cnt - has->end_pfn);
			/*
			 * Extend the region by multiples of HA_CHUNK.
			 */
			new_inc = (residual / HA_CHUNK) * HA_CHUNK;
			if (residual % HA_CHUNK)
				new_inc += HA_CHUNK;

			has->end_pfn += new_inc;
		}

		ret = 1;
		break;
	}
	spin_unlock_irqrestore(&dm_device.ha_lock, flags);

	return ret;
}
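
/*
 * Online whatever part of the range is already backed by hot-added
 * memory and hot-add the rest, returning the number of pages covered.
 */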
static unsigned long handle_pg_range(unsigned long pg_start,
				     unsigned long pg_count)
{
	unsigned long start_pfn = pg_start;
	unsigned long pfn_cnt = pg_count;
	unsigned long size;
	struct hv_hotadd_state *has;
	unsigned long pgs_ol = 0;
	unsigned long old_covered_state;
	unsigned long res = 0, flags;

	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		/*
		 * If the pfn range we are dealing with is not in the
		 * current "hot add block", move on.
		 */
		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
			continue;

		old_covered_state = has->covered_end_pfn;

		if (start_pfn < has->ha_end_pfn) {
			/*
			 * This range is already covered by memory we have
			 * hot-added; just bring the pages online.
			 */
			pgs_ol = has->ha_end_pfn - start_pfn;
			if (pgs_ol > pfn_cnt)
				pgs_ol = pfn_cnt;

			has->covered_end_pfn += pgs_ol;
			pfn_cnt -= pgs_ol;
			/*
			 * Bring the pages online only if the block they live
			 * in is already online (the page just before this
			 * range is no longer reserved).
			 */
			if (start_pfn > has->start_pfn &&
			    !PageReserved(pfn_to_page(start_pfn - 1)))
				hv_bring_pgs_online(has, start_pfn, pgs_ol);

		}

		if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
			/*
			 * Hot-add more memory: round the request up to
			 * HA_CHUNK granularity, but never past the end of
			 * the region.
			 */
			size = (has->end_pfn - has->ha_end_pfn);
			if (pfn_cnt <= size) {
				size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
				if (pfn_cnt % HA_CHUNK)
					size += HA_CHUNK;
			} else {
				pfn_cnt = size;
			}
			spin_unlock_irqrestore(&dm_device.ha_lock, flags);
			hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
			spin_lock_irqsave(&dm_device.ha_lock, flags);
		}
		/*
		 * Report the number of pages covered by this request.
		 */
		res = has->covered_end_pfn - old_covered_state;
		break;
	}
	spin_unlock_irqrestore(&dm_device.ha_lock, flags);

	return res;
}
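
/*
 * Handle a hot-add request: reuse an existing region when possible,
 * otherwise create a new hv_hotadd_state for the host-supplied region.
 */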
static unsigned long process_hot_add(unsigned long pg_start,
				     unsigned long pfn_cnt,
				     unsigned long rg_start,
				     unsigned long rg_size)
{
	struct hv_hotadd_state *ha_region = NULL;
	int covered;
	unsigned long flags;

	if (pfn_cnt == 0)
		return 0;

	if (!dm_device.host_specified_ha_region) {
		covered = pfn_covered(pg_start, pfn_cnt);
		if (covered < 0)
			return 0;

		if (covered)
			goto do_pg_range;
	}

	/*
	 * If the host has specified a hot-add range, deal with it first.
	 */
	if (rg_size != 0) {
		ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
		if (!ha_region)
			return 0;

		INIT_LIST_HEAD(&ha_region->list);
		INIT_LIST_HEAD(&ha_region->gap_list);

		ha_region->start_pfn = rg_start;
		ha_region->ha_end_pfn = rg_start;
		ha_region->covered_start_pfn = pg_start;
		ha_region->covered_end_pfn = pg_start;
		ha_region->end_pfn = rg_start + rg_size;

		spin_lock_irqsave(&dm_device.ha_lock, flags);
		list_add_tail(&ha_region->list, &dm_device.ha_region_list);
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
	}

do_pg_range:
	/*
	 * Process the page range specified in the parameters.
	 */
	return handle_pg_range(pg_start, pfn_cnt);
}

#endif
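
/*
 * Work function scheduled from the channel callback to process a
 * DM_MEM_HOT_ADD_REQUEST and send back the DM_MEM_HOT_ADD_RESPONSE.
 */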
static void hot_add_req(struct work_struct *dummy)
{
	struct dm_hot_add_response resp;
#ifdef CONFIG_MEMORY_HOTPLUG
	unsigned long pg_start, pfn_cnt;
	unsigned long rg_start, rg_sz;
#endif
	struct hv_dynmem_device *dm = &dm_device;

	memset(&resp, 0, sizeof(struct dm_hot_add_response));
	resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
	resp.hdr.size = sizeof(struct dm_hot_add_response);

#ifdef CONFIG_MEMORY_HOTPLUG
	pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
	pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;

	rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
	rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;

	if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
		unsigned long region_size;
		unsigned long region_start;

		/*
		 * The host has not specified a hot-add region.  Derive one
		 * from the requested page range, rounded to HA_CHUNK so that
		 * it meets the alignment and size requirements of Linux
		 * memory hot-add.
		 */
		region_start = pg_start;
		region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
		if (pfn_cnt % HA_CHUNK)
			region_size += HA_CHUNK;

		region_start = (pg_start / HA_CHUNK) * HA_CHUNK;

		rg_start = region_start;
		rg_sz = region_size;
	}

	if (do_hot_add)
		resp.page_count = process_hot_add(pg_start, pfn_cnt,
						  rg_start, rg_sz);

	dm->num_pages_added += resp.page_count;
#endif
	/*
	 * Report success (1) if any pages were added or if hot-add is
	 * administratively disabled; otherwise report failure.
	 */
	if (resp.page_count > 0)
		resp.result = 1;
	else if (!do_hot_add)
		resp.result = 1;
	else
		resp.result = 0;

	if (!do_hot_add || (resp.page_count == 0))
		pr_info("Memory hot add failed\n");

	dm->state = DM_INITIALIZED;
	resp.hdr.trans_id = atomic_inc_return(&trans_id);
	vmbus_sendpacket(dm->dev->channel, &resp,
			 sizeof(struct dm_hot_add_response),
			 (unsigned long)NULL,
			 VM_PKT_DATA_INBAND, 0);
}
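
/*
 * Handle a DM_INFO_MESSAGE from the host; currently the contents are
 * only logged.
 */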
static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
{
	struct dm_info_header *info_hdr;

	info_hdr = (struct dm_info_header *)msg->info;

	switch (info_hdr->type) {
	case INFO_TYPE_MAX_PAGE_CNT:
		pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n");
		pr_info("Data Size is %d\n", info_hdr->data_size);
		break;
	default:
		pr_info("Received Unknown type: %d\n", info_hdr->type);
	}
}

static unsigned long compute_balloon_floor(void)
{
	unsigned long min_pages;
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
	/*
	 * Heuristic for the minimum number of pages to keep: scale the
	 * reserve with total memory, using progressively smaller
	 * fractions as the machine gets larger.
	 */
	if (totalram_pages < MB2PAGES(128))
		min_pages = MB2PAGES(8) + (totalram_pages >> 1);
	else if (totalram_pages < MB2PAGES(512))
		min_pages = MB2PAGES(40) + (totalram_pages >> 2);
	else if (totalram_pages < MB2PAGES(2048))
		min_pages = MB2PAGES(104) + (totalram_pages >> 3);
	else if (totalram_pages < MB2PAGES(8192))
		min_pages = MB2PAGES(232) + (totalram_pages >> 4);
	else
		min_pages = MB2PAGES(488) + (totalram_pages >> 5);
#undef MB2PAGES
	return min_pages;
}
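
/*
 * Post our memory status (pressure report) to the host, at most once
 * per second and only after the initial pressure_report_delay expires.
 */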
static void post_status(struct hv_dynmem_device *dm)
{
	struct dm_status status;
	unsigned long now = jiffies;
	unsigned long last_post = last_post_time;

	if (pressure_report_delay > 0) {
		--pressure_report_delay;
		return;
	}

	if (!time_after(now, (last_post_time + HZ)))
		return;

	memset(&status, 0, sizeof(struct dm_status));
	status.hdr.type = DM_STATUS_REPORT;
	status.hdr.size = sizeof(struct dm_status);
	status.hdr.trans_id = atomic_inc_return(&trans_id);

	/*
	 * The host expects the guest to report free and committed memory.
	 * The committed number includes the ballooned-out pages as well as
	 * pages that have been hot-added but not yet onlined, plus the
	 * balloon floor we want to keep.
	 */
	status.num_avail = si_mem_available();
	status.num_committed = vm_memory_committed() +
		dm->num_pages_ballooned +
		(dm->num_pages_added > dm->num_pages_onlined ?
		 dm->num_pages_added - dm->num_pages_onlined : 0) +
		compute_balloon_floor();

	/*
	 * If our transaction ID is no longer current, just don't send the
	 * status; this can happen if we were interrupted after we picked
	 * our transaction ID.
	 */
	if (status.hdr.trans_id != atomic_read(&trans_id))
		return;

	/*
	 * If the last post time that we sampled has changed, we have
	 * raced with someone else; don't post the status.
	 */
	if (last_post != last_post_time)
		return;

	last_post_time = jiffies;
	vmbus_sendpacket(dm->dev->channel, &status,
			 sizeof(struct dm_status),
			 (unsigned long)NULL,
			 VM_PKT_DATA_INBAND, 0);
}
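
/*
 * Ballooning helpers: release previously ballooned pages back to the
 * guest, and allocate pages to hand to the host.
 */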
static void free_balloon_pages(struct hv_dynmem_device *dm,
			       union dm_mem_page_range *range_array)
{
	int num_pages = range_array->finfo.page_cnt;
	__u64 start_frame = range_array->finfo.start_page;
	struct page *pg;
	int i;

	for (i = 0; i < num_pages; i++) {
		pg = pfn_to_page(i + start_frame);
		__free_page(pg);
		dm->num_pages_ballooned--;
	}
}

static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
					unsigned int num_pages,
					struct dm_balloon_response *bl_resp,
					int alloc_unit)
{
	unsigned int i = 0;
	struct page *pg;

	if (num_pages < alloc_unit)
		return 0;

	for (i = 0; (i * alloc_unit) < num_pages; i++) {
		if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
			PAGE_SIZE)
			return i * alloc_unit;

		/*
		 * We execute this code in a thread context; don't make the
		 * allocator try too hard.
		 */
		pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
				 __GFP_NOMEMALLOC | __GFP_NOWARN,
				 get_order(alloc_unit << PAGE_SHIFT));

		if (!pg)
			return i * alloc_unit;

		dm->num_pages_ballooned += alloc_unit;

		/*
		 * If we allocated a 2M chunk, split it so the pages can be
		 * freed individually and in any order.
		 */
		if (alloc_unit != 1)
			split_page(pg, get_order(alloc_unit << PAGE_SHIFT));

		bl_resp->range_count++;
		bl_resp->range_array[i].finfo.start_page =
			page_to_pfn(pg);
		bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
		bl_resp->hdr.size += sizeof(union dm_mem_page_range);
	}

	return num_pages;
}
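
/*
 * Balloon-up work function: allocate the requested number of pages
 * (capped so available memory does not drop below the floor) and report
 * them to the host in one or more DM_BALLOON_RESPONSE messages.
 */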
static void balloon_up(struct work_struct *dummy)
{
	unsigned int num_pages = dm_device.balloon_wrk.num_pages;
	unsigned int num_ballooned = 0;
	struct dm_balloon_response *bl_resp;
	int alloc_unit;
	int ret;
	bool done = false;
	int i;
	long avail_pages;
	unsigned long floor;

	/* The host balloons pages in 2M granularity. */
	WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);

	/*
	 * We will attempt 2M allocations; if that fails we fall back to
	 * 4k allocations.
	 */
	alloc_unit = 512;

	avail_pages = si_mem_available();
	floor = compute_balloon_floor();

	/* Refuse to balloon below the floor, keep the 2M granularity. */
	if (avail_pages < num_pages || avail_pages - num_pages < floor) {
		num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
		num_pages -= num_pages % PAGES_IN_2M;
	}

	while (!done) {
		bl_resp = (struct dm_balloon_response *)send_buffer;
		memset(send_buffer, 0, PAGE_SIZE);
		bl_resp->hdr.type = DM_BALLOON_RESPONSE;
		bl_resp->hdr.size = sizeof(struct dm_balloon_response);
		bl_resp->more_pages = 1;

		num_pages -= num_ballooned;
		num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
						    bl_resp, alloc_unit);

		if (alloc_unit != 1 && num_ballooned == 0) {
			alloc_unit = 1;
			continue;
		}

		if (num_ballooned == 0 || num_ballooned == num_pages) {
			bl_resp->more_pages = 0;
			done = true;
			dm_device.state = DM_INITIALIZED;
		}

		/*
		 * We are pushing a lot of data through the channel; deal
		 * with transient failures caused by lack of space in the
		 * ring buffer.
		 */
		do {
			bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
			ret = vmbus_sendpacket(dm_device.dev->channel,
					       bl_resp,
					       bl_resp->hdr.size,
					       (unsigned long)NULL,
					       VM_PKT_DATA_INBAND, 0);

			if (ret == -EAGAIN)
				msleep(20);
			post_status(&dm_device);
		} while (ret == -EAGAIN);

		if (ret) {
			/*
			 * Free up the memory we allocated.
			 */
			pr_info("Balloon response failed\n");

			for (i = 0; i < bl_resp->range_count; i++)
				free_balloon_pages(&dm_device,
						   &bl_resp->range_array[i]);

			done = true;
		}
	}
}
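
/*
 * Handle a DM_UNBALLOON_REQUEST: free the listed page ranges back to
 * the guest and acknowledge once the last message has been processed.
 */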
static void balloon_down(struct hv_dynmem_device *dm,
			 struct dm_unballoon_request *req)
{
	union dm_mem_page_range *range_array = req->range_array;
	int range_count = req->range_count;
	struct dm_unballoon_response resp;
	int i;

	for (i = 0; i < range_count; i++) {
		free_balloon_pages(dm, &range_array[i]);
		complete(&dm_device.config_event);
	}

	if (req->more_pages == 1)
		return;

	memset(&resp, 0, sizeof(struct dm_unballoon_response));
	resp.hdr.type = DM_UNBALLOON_RESPONSE;
	resp.hdr.trans_id = atomic_inc_return(&trans_id);
	resp.hdr.size = sizeof(struct dm_unballoon_response);

	vmbus_sendpacket(dm_device.dev->channel, &resp,
			 sizeof(struct dm_unballoon_response),
			 (unsigned long)NULL,
			 VM_PKT_DATA_INBAND, 0);

	dm->state = DM_INITIALIZED;
}

static void balloon_onchannelcallback(void *context);

static int dm_thread_func(void *dm_dev)
{
	struct hv_dynmem_device *dm = dm_dev;

	while (!kthread_should_stop()) {
		wait_for_completion_interruptible_timeout(
						&dm_device.config_event, 1*HZ);
		/*
		 * The host expects us to post information on memory
		 * pressure every second.
		 */
		reinit_completion(&dm_device.config_event);
		post_status(dm);
	}

	return 0;
}
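
/*
 * Handle the host's response to our version request, retrying with an
 * older protocol version if the offer was rejected.
 */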
static void version_resp(struct hv_dynmem_device *dm,
			 struct dm_version_response *vresp)
{
	struct dm_version_request version_req;
	int ret;

	if (vresp->is_accepted) {
		/*
		 * We are done; wake up the context waiting for version
		 * negotiation.
		 */
		complete(&dm->host_event);
		return;
	}
	/*
	 * If there are no more versions to try, signal the error and wake
	 * up the waiting context.
	 */
	if (dm->next_version == 0)
		goto version_error;

	memset(&version_req, 0, sizeof(struct dm_version_request));
	version_req.hdr.type = DM_VERSION_REQUEST;
	version_req.hdr.size = sizeof(struct dm_version_request);
	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
	version_req.version.version = dm->next_version;

	/*
	 * Set the next version to try in case this attempt fails as well.
	 */
	switch (version_req.version.version) {
	case DYNMEM_PROTOCOL_VERSION_WIN8:
		dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
		version_req.is_last_attempt = 0;
		break;
	default:
		dm->next_version = 0;
		version_req.is_last_attempt = 1;
	}

	ret = vmbus_sendpacket(dm->dev->channel, &version_req,
			       sizeof(struct dm_version_request),
			       (unsigned long)NULL,
			       VM_PKT_DATA_INBAND, 0);

	if (ret)
		goto version_error;

	return;

version_error:
	dm->state = DM_INIT_ERROR;
	complete(&dm->host_event);
}

static void cap_resp(struct hv_dynmem_device *dm,
		     struct dm_capabilities_resp_msg *cap_resp)
{
	if (!cap_resp->is_accepted) {
		pr_info("Capabilities not accepted by host\n");
		dm->state = DM_INIT_ERROR;
	}
	complete(&dm->host_event);
}
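
/*
 * VMBus channel callback: dispatch incoming host messages to the
 * appropriate handler or work queue.
 */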
static void balloon_onchannelcallback(void *context)
{
	struct hv_device *dev = context;
	u32 recvlen;
	u64 requestid;
	struct dm_message *dm_msg;
	struct dm_header *dm_hdr;
	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
	struct dm_balloon *bal_msg;
	struct dm_hot_add *ha_msg;
	union dm_mem_page_range *ha_pg_range;
	union dm_mem_page_range *ha_region;

	memset(recv_buffer, 0, sizeof(recv_buffer));
	vmbus_recvpacket(dev->channel, recv_buffer,
			 PAGE_SIZE, &recvlen, &requestid);

	if (recvlen > 0) {
		dm_msg = (struct dm_message *)recv_buffer;
		dm_hdr = &dm_msg->hdr;

		switch (dm_hdr->type) {
		case DM_VERSION_RESPONSE:
			version_resp(dm,
				 (struct dm_version_response *)dm_msg);
			break;

		case DM_CAPABILITIES_RESPONSE:
			cap_resp(dm,
				 (struct dm_capabilities_resp_msg *)dm_msg);
			break;

		case DM_BALLOON_REQUEST:
			if (dm->state == DM_BALLOON_UP)
				pr_warn("Currently ballooning\n");
			bal_msg = (struct dm_balloon *)recv_buffer;
			dm->state = DM_BALLOON_UP;
			dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
			schedule_work(&dm_device.balloon_wrk.wrk);
			break;

		case DM_UNBALLOON_REQUEST:
			dm->state = DM_BALLOON_DOWN;
			balloon_down(dm,
				 (struct dm_unballoon_request *)recv_buffer);
			break;

		case DM_MEM_HOT_ADD_REQUEST:
			if (dm->state == DM_HOT_ADD)
				pr_warn("Currently hot-adding\n");
			dm->state = DM_HOT_ADD;
			ha_msg = (struct dm_hot_add *)recv_buffer;
			if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
				/*
				 * This is a normal hot-add request
				 * specifying only the page range to add.
				 */
				dm->host_specified_ha_region = false;
				ha_pg_range = &ha_msg->range;
				dm->ha_wrk.ha_page_range = *ha_pg_range;
				dm->ha_wrk.ha_region_range.page_range = 0;
			} else {
				/*
				 * The host is specifying that we first
				 * hot-add a region and then partially
				 * populate this region.
				 */
				dm->host_specified_ha_region = true;
				ha_pg_range = &ha_msg->range;
				ha_region = &ha_pg_range[1];
				dm->ha_wrk.ha_page_range = *ha_pg_range;
				dm->ha_wrk.ha_region_range = *ha_region;
			}
			schedule_work(&dm_device.ha_wrk.wrk);
			break;

		case DM_INFO_MESSAGE:
			process_info(dm, (struct dm_info_msg *)dm_msg);
			break;

		default:
			pr_err("Unhandled message: type: %d\n", dm_hdr->type);

		}
	}
}
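
/*
 * Probe: open the VMBus channel, start the status-posting thread and
 * negotiate the protocol version and capabilities with the host.
 */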
static int balloon_probe(struct hv_device *dev,
			 const struct hv_vmbus_device_id *dev_id)
{
	int ret;
	unsigned long t;
	struct dm_version_request version_req;
	struct dm_capabilities cap_msg;

	do_hot_add = hot_add;

	/*
	 * First allocate a send buffer.
	 */
	send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!send_buffer)
		return -ENOMEM;

	ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
			 balloon_onchannelcallback, dev);

	if (ret)
		goto probe_error0;

	dm_device.dev = dev;
	dm_device.state = DM_INITIALIZING;
	dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
	init_completion(&dm_device.host_event);
	init_completion(&dm_device.config_event);
	INIT_LIST_HEAD(&dm_device.ha_region_list);
	spin_lock_init(&dm_device.ha_lock);
	INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
	INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
	dm_device.host_specified_ha_region = false;

	dm_device.thread =
		 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
	if (IS_ERR(dm_device.thread)) {
		ret = PTR_ERR(dm_device.thread);
		goto probe_error1;
	}

#ifdef CONFIG_MEMORY_HOTPLUG
	set_online_page_callback(&hv_online_page);
	register_memory_notifier(&hv_memory_nb);
#endif

	hv_set_drvdata(dev, &dm_device);
	/*
	 * Initiate the handshake with the host and negotiate a version
	 * that the host can support.  We start with the highest version
	 * number and go down if the host cannot support it.
	 */
	memset(&version_req, 0, sizeof(struct dm_version_request));
	version_req.hdr.type = DM_VERSION_REQUEST;
	version_req.hdr.size = sizeof(struct dm_version_request);
	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
	version_req.is_last_attempt = 0;

	ret = vmbus_sendpacket(dev->channel, &version_req,
			       sizeof(struct dm_version_request),
			       (unsigned long)NULL,
			       VM_PKT_DATA_INBAND, 0);
	if (ret)
		goto probe_error2;

	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	/*
	 * If we could not negotiate a compatible version with the host,
	 * fail the probe function.
	 */
	if (dm_device.state == DM_INIT_ERROR) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}
	/*
	 * Now submit our capabilities to the host.
	 */
	memset(&cap_msg, 0, sizeof(struct dm_capabilities));
	cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
	cap_msg.hdr.size = sizeof(struct dm_capabilities);
	cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);

	cap_msg.caps.cap_bits.balloon = 1;
	cap_msg.caps.cap_bits.hot_add = 1;

	/*
	 * Specify our alignment requirement for hot-add; the value is a
	 * power of 2 in megabytes, so 7 means 128 MB.
	 */
	cap_msg.caps.cap_bits.hot_add_alignment = 7;

	/*
	 * The host currently does not use these values.
	 */
	cap_msg.min_page_cnt = 0;
	cap_msg.max_page_number = -1;

	ret = vmbus_sendpacket(dev->channel, &cap_msg,
			       sizeof(struct dm_capabilities),
			       (unsigned long)NULL,
			       VM_PKT_DATA_INBAND, 0);
	if (ret)
		goto probe_error2;

	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	/*
	 * If the host does not accept our capabilities, fail the probe
	 * function.
	 */
	if (dm_device.state == DM_INIT_ERROR) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	dm_device.state = DM_INITIALIZED;

	return 0;

probe_error2:
#ifdef CONFIG_MEMORY_HOTPLUG
	restore_online_page_callback(&hv_online_page);
#endif
	kthread_stop(dm_device.thread);

probe_error1:
	vmbus_close(dev->channel);
probe_error0:
	kfree(send_buffer);
	return ret;
}
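
/*
 * Remove: tear down the channel, worker thread and any hot-add region
 * state that was accumulated.
 */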
static int balloon_remove(struct hv_device *dev)
{
	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
	struct hv_hotadd_state *has, *tmp;
	struct hv_hotadd_gap *gap, *tmp_gap;
	unsigned long flags;

	if (dm->num_pages_ballooned != 0)
		pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);

	cancel_work_sync(&dm->balloon_wrk.wrk);
	cancel_work_sync(&dm->ha_wrk.wrk);

	vmbus_close(dev->channel);
	kthread_stop(dm->thread);
	kfree(send_buffer);
#ifdef CONFIG_MEMORY_HOTPLUG
	restore_online_page_callback(&hv_online_page);
	unregister_memory_notifier(&hv_memory_nb);
#endif
	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
		list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
			list_del(&gap->list);
			kfree(gap);
		}
		list_del(&has->list);
		kfree(has);
	}
	spin_unlock_irqrestore(&dm_device.ha_lock, flags);

	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Dynamic Memory Class ID */
	{ HV_DM_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

static struct hv_driver balloon_drv = {
	.name = "hv_balloon",
	.id_table = id_table,
	.probe = balloon_probe,
	.remove = balloon_remove,
};

static int __init init_balloon_drv(void)
{
	return vmbus_driver_register(&balloon_drv);
}

module_init(init_balloon_drv);

MODULE_DESCRIPTION("Hyper-V Balloon");
MODULE_LICENSE("GPL");