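/*
 * Hyper-V Dynamic Memory (balloon) driver.
 *
 * Implements the guest side of the Hyper-V Dynamic Memory protocol:
 * the host can ask the guest to give up memory (balloon up), hand
 * ballooned memory back (balloon down), or hot-add new memory to the
 * guest.
 */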
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/percpu_counter.h>

#include <linux/hyperv.h>
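
/*
 * The Dynamic Memory protocol version is a 32-bit value: the major
 * version is carried in the high 16 bits and the minor version in the
 * low 16 bits. During initialization the guest proposes the highest
 * version it supports and downgrades if the host rejects it.
 */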
#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xffff)

enum {
	DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
	DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
	DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),

	DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
	DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
	DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,

	DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
};
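
/*
 * Message types exchanged between the guest and the host.
 */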
enum dm_message_type {
	/*
	 * Version 0.3
	 */
	DM_ERROR = 0,
	DM_VERSION_REQUEST = 1,
	DM_VERSION_RESPONSE = 2,
	DM_CAPABILITIES_REPORT = 3,
	DM_CAPABILITIES_RESPONSE = 4,
	DM_STATUS_REPORT = 5,
	DM_BALLOON_REQUEST = 6,
	DM_BALLOON_RESPONSE = 7,
	DM_UNBALLOON_REQUEST = 8,
	DM_UNBALLOON_RESPONSE = 9,
	DM_MEM_HOT_ADD_REQUEST = 10,
	DM_MEM_HOT_ADD_RESPONSE = 11,
	DM_VERSION_03_MAX = 11,
	/*
	 * Version 1.0
	 */
	DM_INFO_MESSAGE = 12,
	DM_VERSION_1_MAX = 12
};
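
/*
 * Structures defined by the protocol.
 */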
union dm_version {
	struct {
		__u16 minor_version;
		__u16 major_version;
	};
	__u32 version;
} __packed;

union dm_caps {
	struct {
		__u64 balloon:1;
		__u64 hot_add:1;
		/*
		 * To support guests that may have alignment
		 * limitations on hot-add, the guest can specify
		 * its alignment requirements; a value of n
		 * represents an alignment of 2^n in megabytes.
		 */
		__u64 hot_add_alignment:4;
		__u64 reservedz:58;
	} cap_bits;
	__u64 caps;
} __packed;

union dm_mem_page_range {
	struct {
		/*
		 * The PFN number of the first page in the range.
		 * 40 bits is the architectural limit of a PFN
		 * number for AMD64.
		 */
		__u64 start_page:40;
		/*
		 * The number of pages in the range.
		 */
		__u64 page_cnt:24;
	} finfo;
	__u64 page_range;
} __packed;

/*
 * The header for all dynamic memory messages:
 *
 * type: Type of the message.
 * size: Size of the message in bytes, including the header.
 * trans_id: The guest is responsible for manufacturing this ID.
 */
struct dm_header {
	__u16 type;
	__u16 size;
	__u32 trans_id;
} __packed;

/*
 * A generic message format for dynamic memory;
 * specific message formats are defined later in the file.
 */
struct dm_message {
	struct dm_header hdr;
	__u8 data[]; /* enclosed message */
} __packed;

/*
 * Specific message types supporting the dynamic memory protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * version: The protocol version requested.
 * is_last_attempt: If TRUE, this is the last version the guest will request.
 * reservedz: Reserved field, set to zero.
 */
struct dm_version_request {
	struct dm_header hdr;
	union dm_version version;
	__u32 is_last_attempt:1;
	__u32 reservedz:31;
} __packed;

/*
 * Version response message; sent from the host to the guest and
 * indicates if the host has accepted the version sent by the guest.
 *
 * is_accepted: If TRUE, the host has accepted the version and the
 * guest should proceed to the next stage of the protocol. FALSE
 * indicates that the guest should retry with a different version.
 *
 * reservedz: Reserved field, set to zero.
 */
struct dm_version_response {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;

/*
 * Message reporting capabilities. This is sent from the guest to the
 * host.
 */
struct dm_capabilities {
	struct dm_header hdr;
	union dm_caps caps;
	__u64 min_page_cnt;
	__u64 max_page_number;
} __packed;

/*
 * Response to the capabilities message. This is sent from the host to
 * the guest and tells the guest if the host has accepted the guest's
 * capabilities. If the host has not accepted, the guest must shut down
 * the service.
 *
 * is_accepted: Indicates if the host has accepted the guest's capabilities.
 * reservedz: Must be 0.
 */
struct dm_capabilities_resp_msg {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;

/*
 * The memory status message is sent once a second from the guest to
 * the host to report the guest's memory pressure.
 *
 * num_avail: Available memory in pages.
 * num_committed: Committed memory in pages.
 * page_file_size: The size of the page file, in pages.
 * zero_free: The number of zero and free pages.
 * page_file_writes: The number of writes to the page file, in pages.
 * io_diff: An indicator of page file activity.
 */
struct dm_status {
	struct dm_header hdr;
	__u64 num_avail;
	__u64 num_committed;
	__u64 page_file_size;
	__u64 zero_free;
	__u32 page_file_writes;
	__u32 io_diff;
} __packed;

/*
 * Message to ask the guest to allocate memory - balloon up message.
 * This message is sent from the host to the guest. The guest may not
 * be able to allocate as much memory as requested.
 *
 * num_pages: Number of pages to allocate.
 */
struct dm_balloon {
	struct dm_header hdr;
	__u32 num_pages;
	__u32 reservedz;
} __packed;

/*
 * Balloon response message; this message is sent from the guest to
 * the host in response to the balloon message.
 *
 * reservedz: Reserved; must be set to zero.
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * range_count: The number of ranges in the range array.
 * range_array: An array of page ranges returned to the host.
 */
struct dm_balloon_response {
	struct dm_header hdr;
	__u32 reservedz;
	__u32 more_pages:1;
	__u32 range_count:31;
	union dm_mem_page_range range_array[];
} __packed;

/*
 * Un-balloon message; this message is sent from the host to the guest
 * to give the guest back memory that was ballooned out earlier.
 *
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the host.
 *
 * reservedz: Reserved; must be set to zero.
 *
 * range_count: The number of ranges in the range array.
 * range_array: An array of page ranges returned to the guest.
 */
struct dm_unballoon_request {
	struct dm_header hdr;
	__u32 more_pages:1;
	__u32 reservedz:31;
	__u32 range_count;
	union dm_mem_page_range range_array[];
} __packed;

/*
 * Un-balloon response message; this message is sent from the guest to
 * the host in response to an unballoon request.
 */
struct dm_unballoon_response {
	struct dm_header hdr;
} __packed;

/*
 * Hot add request message. Message sent from the host to the guest.
 *
 * range: Memory range to hot add.
 */
struct dm_hot_add {
	struct dm_header hdr;
	union dm_mem_page_range range;
} __packed;

/*
 * Hot add response message. This message is sent by the guest to
 * report the status of a hot add request.
 *
 * page_count: Number of pages that were successfully hot added.
 * result: Result of the operation: 1 on success, 0 on failure.
 */
struct dm_hot_add_response {
	struct dm_header hdr;
	__u32 page_count;
	__u32 result;
} __packed;

/*
 * Types of information sent from the host to the guest.
 */
enum dm_info_type {
	INFO_TYPE_MAX_PAGE_CNT = 0,
	MAX_INFO_TYPE
};

/*
 * Header for the information message.
 */
struct dm_info_header {
	enum dm_info_type type;
	__u32 data_size;
} __packed;

/*
 * This message is sent from the host to the guest to pass
 * some relevant information (win8 addition).
 *
 * reserved: Not used.
 * info_size: Size of the information blob.
 * info: Information blob.
 */
struct dm_info_msg {
	struct dm_header hdr;
	__u32 reserved;
	__u32 info_size;
	__u8 info[];
};
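
/*
 * End protocol definitions.
 */

/*
 * State to manage hot adding memory into the guest.
 * The range start_pfn : end_pfn specifies the range
 * that the host has asked us to hot add. The range
 * start_pfn : ha_end_pfn specifies the range that we have
 * currently hot added. We hot add in HA_CHUNK sized chunks
 * (128MB with 4K pages); it is possible that we may not be
 * able to bring online all the pages in the region. The range
 * covered_start_pfn : covered_end_pfn defines the pages that can
 * be brought online.
 */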
struct hv_hotadd_state {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long covered_start_pfn;
	unsigned long covered_end_pfn;
	unsigned long ha_end_pfn;
	unsigned long end_pfn;
	/*
	 * A list of gaps.
	 */
	struct list_head gap_list;
};

struct hv_hotadd_gap {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

struct balloon_state {
	__u32 num_pages;
	struct work_struct wrk;
};

struct hot_add_wrk {
	union dm_mem_page_range ha_page_range;
	union dm_mem_page_range ha_region_range;
	struct work_struct wrk;
};

static bool hot_add = true;
static bool do_hot_add;
/*
 * Delay reporting memory pressure by
 * the specified number of seconds.
 */
static uint pressure_report_delay = 45;

/*
 * The last time we posted a pressure report to host.
 */
static unsigned long last_post_time;

module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");

module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);

static int dm_ring_size = (5 * PAGE_SIZE);
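
/*
 * Driver specific state.
 */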
enum hv_dm_state {
	DM_INITIALIZING = 0,
	DM_INITIALIZED,
	DM_BALLOON_UP,
	DM_BALLOON_DOWN,
	DM_HOT_ADD,
	DM_INIT_ERROR
};

static __u8 recv_buffer[PAGE_SIZE];
static __u8 *send_buffer;
#define PAGES_IN_2M 512
#define HA_CHUNK (32 * 1024) /* 32K pages == 128MB with 4K pages */

struct hv_dynmem_device {
	struct hv_device *dev;
	enum hv_dm_state state;
	struct completion host_event;
	struct completion config_event;

	/*
	 * Number of pages we have currently ballooned out.
	 */
	unsigned int num_pages_ballooned;
	unsigned int num_pages_onlined;
	unsigned int num_pages_added;

	/*
	 * State to manage the ballooning (up) operation.
	 */
	struct balloon_state balloon_wrk;

	/*
	 * State to execute the memory hot-add operation.
	 */
	struct hot_add_wrk ha_wrk;

	/*
	 * This state tracks if the host has specified a hot-add
	 * region.
	 */
	bool host_specified_ha_region;

	/*
	 * State to synchronize hot-add.
	 */
	struct completion ol_waitevent;
	bool ha_waiting;

	/*
	 * This thread is responsible for posting periodic memory
	 * status to the host.
	 */
	struct task_struct *thread;

	/*
	 * Protects ha_region_list, the num_pages_onlined counter and
	 * the individual regions from ha_region_list.
	 */
	spinlock_t ha_lock;

	/*
	 * A list of hot-add regions.
	 */
	struct list_head ha_region_list;

	/*
	 * We start with the highest version we can support
	 * and downgrade based on the host; we save here the
	 * next version to try.
	 */
	__u32 next_version;

	/*
	 * The negotiated version agreed by host.
	 */
	__u32 version;
};

static struct hv_dynmem_device dm_device;

static void post_status(struct hv_dynmem_device *dm);

#ifdef CONFIG_MEMORY_HOTPLUG
static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
			      void *v)
{
	struct memory_notify *mem = (struct memory_notify *)v;
	unsigned long flags;

	switch (val) {
	case MEM_ONLINE:
		spin_lock_irqsave(&dm_device.ha_lock, flags);
		dm_device.num_pages_onlined += mem->nr_pages;
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
		/* Fall through */
	case MEM_CANCEL_ONLINE:
		if (dm_device.ha_waiting) {
			dm_device.ha_waiting = false;
			complete(&dm_device.ol_waitevent);
		}
		break;

	case MEM_OFFLINE:
		spin_lock_irqsave(&dm_device.ha_lock, flags);
		dm_device.num_pages_onlined -= mem->nr_pages;
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
		break;
	case MEM_GOING_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block hv_memory_nb = {
	.notifier_call = hv_memory_notifier,
	.priority = 0
};
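
/* Check if the particular page is backed and can be onlined and online it. */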
static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
{
	unsigned long cur_start_pgp;
	unsigned long cur_end_pgp;
	struct hv_hotadd_gap *gap;

	cur_start_pgp = (unsigned long)pfn_to_page(has->covered_start_pfn);
	cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);

	/* The page is not backed. */
	if (((unsigned long)pg < cur_start_pgp) ||
	    ((unsigned long)pg >= cur_end_pgp))
		return;

	/* Check for gaps. */
	list_for_each_entry(gap, &has->gap_list, list) {
		cur_start_pgp = (unsigned long)
			pfn_to_page(gap->start_pfn);
		cur_end_pgp = (unsigned long)
			pfn_to_page(gap->end_pfn);
		if (((unsigned long)pg >= cur_start_pgp) &&
		    ((unsigned long)pg < cur_end_pgp)) {
			return;
		}
	}

	/* This frame is currently backed; online the page. */
	__online_page_set_limits(pg);
	__online_page_increment_counters(pg);
	__online_page_free(pg);
}

static void hv_bring_pgs_online(struct hv_hotadd_state *has,
				unsigned long start_pfn, unsigned long size)
{
	int i;

	pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
	for (i = 0; i < size; i++)
		hv_page_online_one(has, pfn_to_page(start_pfn + i));
}

static void hv_mem_hot_add(unsigned long start, unsigned long size,
			   unsigned long pfn_count,
			   struct hv_hotadd_state *has)
{
	int ret = 0;
	int i, nid;
	unsigned long start_pfn;
	unsigned long processed_pfn;
	unsigned long total_pfn = pfn_count;
	unsigned long flags;

	for (i = 0; i < (size/HA_CHUNK); i++) {
		start_pfn = start + (i * HA_CHUNK);

		spin_lock_irqsave(&dm_device.ha_lock, flags);
		has->ha_end_pfn += HA_CHUNK;

		if (total_pfn > HA_CHUNK) {
			processed_pfn = HA_CHUNK;
			total_pfn -= HA_CHUNK;
		} else {
			processed_pfn = total_pfn;
			total_pfn = 0;
		}

		has->covered_end_pfn += processed_pfn;
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);

		init_completion(&dm_device.ol_waitevent);
		dm_device.ha_waiting = true;

		nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
		ret = add_memory(nid, PFN_PHYS((start_pfn)),
				 (HA_CHUNK << PAGE_SHIFT));

		if (ret) {
			pr_warn("hot_add memory failed error is %d\n", ret);
			if (ret == -EEXIST) {
				/*
				 * This error indicates that the error
				 * is not a transient failure. This is the
				 * case where the guest's physical address map
				 * precludes hot adding memory. Stop all further
				 * memory hot-add.
				 */
				do_hot_add = false;
			}
			spin_lock_irqsave(&dm_device.ha_lock, flags);
			has->ha_end_pfn -= HA_CHUNK;
			has->covered_end_pfn -= processed_pfn;
			spin_unlock_irqrestore(&dm_device.ha_lock, flags);
			break;
		}

		/*
		 * Wait for the memory block to be onlined. Since the hot add
		 * has succeeded, it is ok to proceed even if the pages in
		 * the hot added region have not been "onlined" within the
		 * allowed time.
		 */
		if (dm_device.ha_waiting)
			wait_for_completion_timeout(&dm_device.ol_waitevent,
						    5*HZ);
		post_status(&dm_device);
	}
}

static void hv_online_page(struct page *pg)
{
	struct hv_hotadd_state *has;
	unsigned long cur_start_pgp;
	unsigned long cur_end_pgp;
	unsigned long flags;

	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		cur_start_pgp = (unsigned long)
			pfn_to_page(has->start_pfn);
		cur_end_pgp = (unsigned long)pfn_to_page(has->end_pfn);

		/* The page belongs to a different HAS. */
		if (((unsigned long)pg < cur_start_pgp) ||
		    ((unsigned long)pg >= cur_end_pgp))
			continue;

		hv_page_online_one(has, pg);
		break;
	}
	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
}
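
/*
 * Check if the requested page range falls within an existing hot-add
 * region; if so, record any gap between what has been covered so far
 * and the new start, and extend the region if the request runs past
 * its current end. Returns 1 if the range is covered, 0 if no region
 * matches and -ENOMEM on allocation failure.
 */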
static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
	struct hv_hotadd_state *has;
	struct hv_hotadd_gap *gap;
	unsigned long residual, new_inc;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		/*
		 * If the pfn range we are dealing with is not in the current
		 * "hot add block", move on.
		 */
		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
			continue;

		/*
		 * If the current start pfn is not where the covered_end
		 * is, create a gap and update covered_end_pfn.
		 */
		if (has->covered_end_pfn != start_pfn) {
			gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
			if (!gap) {
				ret = -ENOMEM;
				break;
			}

			INIT_LIST_HEAD(&gap->list);
			gap->start_pfn = has->covered_end_pfn;
			gap->end_pfn = start_pfn;
			list_add_tail(&gap->list, &has->gap_list);

			has->covered_end_pfn = start_pfn;
		}

		/*
		 * If the current hot add request extends beyond
		 * our current limit, extend it.
		 */
		if ((start_pfn + pfn_cnt) > has->end_pfn) {
			residual = (start_pfn + pfn_cnt - has->end_pfn);
			/*
			 * Extend the region by multiples of HA_CHUNK.
			 */
			new_inc = (residual / HA_CHUNK) * HA_CHUNK;
			if (residual % HA_CHUNK)
				new_inc += HA_CHUNK;

			has->end_pfn += new_inc;
		}

		ret = 1;
		break;
	}
	spin_unlock_irqrestore(&dm_device.ha_lock, flags);

	return ret;
}
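
/*
 * Handle a hot-add request that falls within an existing region:
 * online any pages that are already backed, then hot add the
 * remainder in HA_CHUNK multiples. Returns the number of pages
 * covered by this request.
 */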
static unsigned long handle_pg_range(unsigned long pg_start,
				     unsigned long pg_count)
{
	unsigned long start_pfn = pg_start;
	unsigned long pfn_cnt = pg_count;
	unsigned long size;
	struct hv_hotadd_state *has;
	unsigned long pgs_ol = 0;
	unsigned long old_covered_state;
	unsigned long res = 0, flags;

	pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
		 pg_start);

	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		/*
		 * If the pfn range we are dealing with is not in the current
		 * "hot add block", move on.
		 */
		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
			continue;

		old_covered_state = has->covered_end_pfn;

		if (start_pfn < has->ha_end_pfn) {
			/*
			 * This is the case where we are backing pages
			 * in an already hot added region. Bring
			 * these pages online first.
			 */
			pgs_ol = has->ha_end_pfn - start_pfn;
			if (pgs_ol > pfn_cnt)
				pgs_ol = pfn_cnt;

			has->covered_end_pfn += pgs_ol;
			pfn_cnt -= pgs_ol;

			/*
			 * Only online the pages if the preceding page (and
			 * hence the containing memory block) has already
			 * been onlined.
			 */
			if (start_pfn > has->start_pfn &&
			    !PageReserved(pfn_to_page(start_pfn - 1)))
				hv_bring_pgs_online(has, start_pfn, pgs_ol);

		}

		if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
			/*
			 * We have some residual hot add range
			 * that needs to be hot added; hot add
			 * it now. Hot add a multiple of
			 * HA_CHUNK that fully covers the pages
			 * we have.
			 */
			size = (has->end_pfn - has->ha_end_pfn);
			if (pfn_cnt <= size) {
				size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
				if (pfn_cnt % HA_CHUNK)
					size += HA_CHUNK;
			} else {
				pfn_cnt = size;
			}
			spin_unlock_irqrestore(&dm_device.ha_lock, flags);
			hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
			spin_lock_irqsave(&dm_device.ha_lock, flags);
		}

		/*
		 * If we managed to online any pages that were given to us,
		 * we declare success.
		 */
		res = has->covered_end_pfn - old_covered_state;
		break;
	}
	spin_unlock_irqrestore(&dm_device.ha_lock, flags);

	return res;
}

static unsigned long process_hot_add(unsigned long pg_start,
				     unsigned long pfn_cnt,
				     unsigned long rg_start,
				     unsigned long rg_size)
{
	struct hv_hotadd_state *ha_region = NULL;
	int covered;
	unsigned long flags;

	if (pfn_cnt == 0)
		return 0;

	if (!dm_device.host_specified_ha_region) {
		covered = pfn_covered(pg_start, pfn_cnt);
		if (covered < 0)
			return 0;

		if (covered)
			goto do_pg_range;
	}

	/*
	 * If the host has specified a hot-add range, deal with it first.
	 */
	if (rg_size != 0) {
		ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
		if (!ha_region)
			return 0;

		INIT_LIST_HEAD(&ha_region->list);
		INIT_LIST_HEAD(&ha_region->gap_list);

		ha_region->start_pfn = rg_start;
		ha_region->ha_end_pfn = rg_start;
		ha_region->covered_start_pfn = pg_start;
		ha_region->covered_end_pfn = pg_start;
		ha_region->end_pfn = rg_start + rg_size;

		spin_lock_irqsave(&dm_device.ha_lock, flags);
		list_add_tail(&ha_region->list, &dm_device.ha_region_list);
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
	}

do_pg_range:
	/*
	 * Process the page range specified by the host.
	 */
	return handle_pg_range(pg_start, pfn_cnt);
}

#endif

static void hot_add_req(struct work_struct *dummy)
{
	struct dm_hot_add_response resp;
#ifdef CONFIG_MEMORY_HOTPLUG
	unsigned long pg_start, pfn_cnt;
	unsigned long rg_start, rg_sz;
#endif
	struct hv_dynmem_device *dm = &dm_device;

	memset(&resp, 0, sizeof(struct dm_hot_add_response));
	resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
	resp.hdr.size = sizeof(struct dm_hot_add_response);

#ifdef CONFIG_MEMORY_HOTPLUG
	pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
	pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;

	rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
	rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;

	if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
		unsigned long region_size;
		unsigned long region_start;

		/*
		 * The host has not specified the hot-add region.
		 * Based on the hot-add page range being specified,
		 * compute a hot-add region that can cover the pages
		 * that need to be hot-added while ensuring the alignment
		 * and size requirements of Linux as it relates to hot-add.
		 */
		region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
		if (pfn_cnt % HA_CHUNK)
			region_size += HA_CHUNK;

		region_start = (pg_start / HA_CHUNK) * HA_CHUNK;

		rg_start = region_start;
		rg_sz = region_size;
	}

	if (do_hot_add)
		resp.page_count = process_hot_add(pg_start, pfn_cnt,
						  rg_start, rg_sz);

	dm->num_pages_added += resp.page_count;
#endif
	/*
	 * The result field of the response structure has the
	 * following semantics:
	 *
	 * 1. If all or some pages hot-added: Guest should return success.
	 *
	 * 2. If no pages could be hot-added:
	 *
	 * If the guest returns success, then the host
	 * will not attempt any further hot-add operations. This
	 * signifies a permanent failure.
	 *
	 * If the guest returns failure, then this failure will be
	 * treated as a transient failure and the host may retry the
	 * hot-add operation after some delay.
	 */
	if (resp.page_count > 0)
		resp.result = 1;
	else if (!do_hot_add)
		resp.result = 1;
	else
		resp.result = 0;

	if (!do_hot_add || (resp.page_count == 0))
		pr_info("Memory hot add failed\n");

	dm->state = DM_INITIALIZED;
	resp.hdr.trans_id = atomic_inc_return(&trans_id);
	vmbus_sendpacket(dm->dev->channel, &resp,
			 sizeof(struct dm_hot_add_response),
			 (unsigned long)NULL,
			 VM_PKT_DATA_INBAND, 0);
}

static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
{
	struct dm_info_header *info_hdr;

	info_hdr = (struct dm_info_header *)msg->info;

	switch (info_hdr->type) {
	case INFO_TYPE_MAX_PAGE_CNT:
		if (info_hdr->data_size == sizeof(__u64)) {
			__u64 *max_page_count = (__u64 *)&info_hdr[1];

			pr_info("INFO_TYPE_MAX_PAGE_CNT = %llu\n",
				*max_page_count);
		}

		break;
	default:
		pr_info("Received unknown type: %d\n", info_hdr->type);
	}
}

static unsigned long compute_balloon_floor(void)
{
	unsigned long min_pages;
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
	/* Simple continuous piecewise linear function:
	 *  max MiB -> min MiB  gradient
	 *       0         0
	 *      16        16
	 *      32        24
	 *     128        72    (1/2)
	 *     512       168    (1/4)
	 *    2048       360    (1/8)
	 *    8192       744    (1/16)
	 *   32768      1512    (1/32)
	 */
	if (totalram_pages < MB2PAGES(128))
		min_pages = MB2PAGES(8) + (totalram_pages >> 1);
	else if (totalram_pages < MB2PAGES(512))
		min_pages = MB2PAGES(40) + (totalram_pages >> 2);
	else if (totalram_pages < MB2PAGES(2048))
		min_pages = MB2PAGES(104) + (totalram_pages >> 3);
	else if (totalram_pages < MB2PAGES(8192))
		min_pages = MB2PAGES(232) + (totalram_pages >> 4);
	else
		min_pages = MB2PAGES(488) + (totalram_pages >> 5);
#undef MB2PAGES
	return min_pages;
}
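
/*
 * Post our status as it relates to memory pressure to the
 * host. The host expects the guests to post this status
 * periodically at 1 second intervals.
 *
 * The metrics specified in this protocol are very Windows
 * specific and so we cook up numbers here to convey our memory
 * pressure.
 */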
static void post_status(struct hv_dynmem_device *dm)
{
	struct dm_status status;
	unsigned long now = jiffies;
	unsigned long last_post = last_post_time;

	if (pressure_report_delay > 0) {
		--pressure_report_delay;
		return;
	}

	if (!time_after(now, (last_post_time + HZ)))
		return;

	memset(&status, 0, sizeof(struct dm_status));
	status.hdr.type = DM_STATUS_REPORT;
	status.hdr.size = sizeof(struct dm_status);
	status.hdr.trans_id = atomic_inc_return(&trans_id);

	/*
	 * The host expects the guest to report free and committed memory.
	 * Furthermore, the host expects the pressure information to include
	 * the ballooned out pages. For a given amount of memory that we are
	 * managing we need to compute a floor below which we should not
	 * balloon. Compute this and add it to the pressure report.
	 * We also need to report all offline pages (num_pages_added -
	 * num_pages_onlined) as committed to the host, otherwise it can try
	 * asking us to balloon them out.
	 */
	status.num_avail = si_mem_available();
	status.num_committed = vm_memory_committed() +
		dm->num_pages_ballooned +
		(dm->num_pages_added > dm->num_pages_onlined ?
		 dm->num_pages_added - dm->num_pages_onlined : 0) +
		compute_balloon_floor();

	/*
	 * If our transaction ID is no longer current, just don't
	 * send the status. This can happen if we were interrupted
	 * after we picked our transaction ID.
	 */
	if (status.hdr.trans_id != atomic_read(&trans_id))
		return;

	/*
	 * If the last post time that we sampled has changed,
	 * we have raced, don't post the status.
	 */
	if (last_post != last_post_time)
		return;

	last_post_time = jiffies;
	vmbus_sendpacket(dm->dev->channel, &status,
			 sizeof(struct dm_status),
			 (unsigned long)NULL,
			 VM_PKT_DATA_INBAND, 0);
}

static void free_balloon_pages(struct hv_dynmem_device *dm,
			       union dm_mem_page_range *range_array)
{
	int num_pages = range_array->finfo.page_cnt;
	__u64 start_frame = range_array->finfo.start_page;
	struct page *pg;
	int i;

	for (i = 0; i < num_pages; i++) {
		pg = pfn_to_page(i + start_frame);
		__free_page(pg);
		dm->num_pages_ballooned--;
	}
}
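
/*
 * Allocate num_pages worth of memory in alloc_unit sized chunks and
 * record the allocated ranges in the balloon response. Returns the
 * number of pages actually allocated, which may be less than
 * requested.
 */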
static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
					unsigned int num_pages,
					struct dm_balloon_response *bl_resp,
					int alloc_unit)
{
	unsigned int i = 0;
	struct page *pg;

	if (num_pages < alloc_unit)
		return 0;

	for (i = 0; (i * alloc_unit) < num_pages; i++) {
		if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
		    PAGE_SIZE)
			return i * alloc_unit;

		/*
		 * We execute this code in a thread context. Furthermore,
		 * we don't want the kernel to try too hard.
		 */
		pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
				 __GFP_NOMEMALLOC | __GFP_NOWARN,
				 get_order(alloc_unit << PAGE_SHIFT));

		if (!pg)
			return i * alloc_unit;

		dm->num_pages_ballooned += alloc_unit;

		/*
		 * If we allocated 2M pages, split them so we
		 * can free them in any order we get them back.
		 */
		if (alloc_unit != 1)
			split_page(pg, get_order(alloc_unit << PAGE_SHIFT));

		bl_resp->range_count++;
		bl_resp->range_array[i].finfo.start_page =
			page_to_pfn(pg);
		bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
		bl_resp->hdr.size += sizeof(union dm_mem_page_range);
	}

	return num_pages;
}

static void balloon_up(struct work_struct *dummy)
{
	unsigned int num_pages = dm_device.balloon_wrk.num_pages;
	unsigned int num_ballooned = 0;
	struct dm_balloon_response *bl_resp;
	int alloc_unit;
	int ret;
	bool done = false;
	int i;
	long avail_pages;
	unsigned long floor;

	/* The host balloons pages in 2M granularity. */
	WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);

	/*
	 * We will attempt 2M allocations. However, if we fail to
	 * allocate 2M chunks, we will go back to 4k allocations.
	 */
	alloc_unit = 512;

	avail_pages = si_mem_available();
	floor = compute_balloon_floor();

	/* Refuse to balloon below the floor, keep the 2M granularity. */
	if (avail_pages < num_pages || avail_pages - num_pages < floor) {
		pr_warn("Balloon request will be partially fulfilled. %s\n",
			avail_pages < num_pages ? "Not enough memory." :
			"Balloon floor reached.");

		num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
		num_pages -= num_pages % PAGES_IN_2M;
	}

	while (!done) {
		bl_resp = (struct dm_balloon_response *)send_buffer;
		memset(send_buffer, 0, PAGE_SIZE);
		bl_resp->hdr.type = DM_BALLOON_RESPONSE;
		bl_resp->hdr.size = sizeof(struct dm_balloon_response);
		bl_resp->more_pages = 1;

		num_pages -= num_ballooned;
		num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
						    bl_resp, alloc_unit);

		if (alloc_unit != 1 && num_ballooned == 0) {
			alloc_unit = 1;
			continue;
		}

		if (num_ballooned == 0 || num_ballooned == num_pages) {
			pr_debug("Ballooned %u out of %u requested pages.\n",
				 num_pages, dm_device.balloon_wrk.num_pages);

			bl_resp->more_pages = 0;
			done = true;
			dm_device.state = DM_INITIALIZED;
		}

		/*
		 * We are pushing a lot of data through the channel;
		 * deal with transient failures caused because of the
		 * lack of space in the ring buffer.
		 */
		do {
			bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
			ret = vmbus_sendpacket(dm_device.dev->channel,
					       bl_resp,
					       bl_resp->hdr.size,
					       (unsigned long)NULL,
					       VM_PKT_DATA_INBAND, 0);

			if (ret == -EAGAIN)
				msleep(20);
			post_status(&dm_device);
		} while (ret == -EAGAIN);

		if (ret) {
			/*
			 * Free up the memory we allocated.
			 */
			pr_info("Balloon response failed\n");

			for (i = 0; i < bl_resp->range_count; i++)
				free_balloon_pages(&dm_device,
						   &bl_resp->range_array[i]);

			done = true;
		}
	}
}
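
/*
 * Handle an unballoon request from the host: free the ballooned-out
 * pages named in the request and acknowledge once the last message of
 * the transaction has been processed.
 */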
static void balloon_down(struct hv_dynmem_device *dm,
			 struct dm_unballoon_request *req)
{
	union dm_mem_page_range *range_array = req->range_array;
	int range_count = req->range_count;
	struct dm_unballoon_response resp;
	int i;
	unsigned int prev_pages_ballooned = dm->num_pages_ballooned;

	for (i = 0; i < range_count; i++) {
		free_balloon_pages(dm, &range_array[i]);
		complete(&dm_device.config_event);
	}

	pr_debug("Freed %u ballooned pages.\n",
		 prev_pages_ballooned - dm->num_pages_ballooned);

	if (req->more_pages == 1)
		return;

	memset(&resp, 0, sizeof(struct dm_unballoon_response));
	resp.hdr.type = DM_UNBALLOON_RESPONSE;
	resp.hdr.trans_id = atomic_inc_return(&trans_id);
	resp.hdr.size = sizeof(struct dm_unballoon_response);

	vmbus_sendpacket(dm_device.dev->channel, &resp,
			 sizeof(struct dm_unballoon_response),
			 (unsigned long)NULL,
			 VM_PKT_DATA_INBAND, 0);

	dm->state = DM_INITIALIZED;
}

static void balloon_onchannelcallback(void *context);

static int dm_thread_func(void *dm_dev)
{
	struct hv_dynmem_device *dm = dm_dev;

	while (!kthread_should_stop()) {
		wait_for_completion_interruptible_timeout(
						&dm_device.config_event, 1*HZ);
		/*
		 * The host expects us to post information on the memory
		 * pressure every second.
		 */
		reinit_completion(&dm_device.config_event);
		post_status(dm);
	}

	return 0;
}
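
/*
 * Handle the host's response to our version negotiation request.
 */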
static void version_resp(struct hv_dynmem_device *dm,
			 struct dm_version_response *vresp)
{
	struct dm_version_request version_req;
	int ret;

	if (vresp->is_accepted) {
		/*
		 * We are done; wake up the
		 * context waiting for version
		 * negotiation.
		 */
		complete(&dm->host_event);
		return;
	}

	/*
	 * If there are more versions to try, continue
	 * with negotiations; if not,
	 * shut down the service since we are not able
	 * to negotiate a suitable version number
	 * with the host.
	 */
	if (dm->next_version == 0)
		goto version_error;

	memset(&version_req, 0, sizeof(struct dm_version_request));
	version_req.hdr.type = DM_VERSION_REQUEST;
	version_req.hdr.size = sizeof(struct dm_version_request);
	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
	version_req.version.version = dm->next_version;
	dm->version = version_req.version.version;

	/*
	 * Set the next version to try in case the current version fails.
	 * The Win7 protocol ought to be the last one to try.
	 */
	switch (version_req.version.version) {
	case DYNMEM_PROTOCOL_VERSION_WIN8:
		dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
		version_req.is_last_attempt = 0;
		break;
	default:
		dm->next_version = 0;
		version_req.is_last_attempt = 1;
	}

	ret = vmbus_sendpacket(dm->dev->channel, &version_req,
			       sizeof(struct dm_version_request),
			       (unsigned long)NULL,
			       VM_PKT_DATA_INBAND, 0);

	if (ret)
		goto version_error;

	return;

version_error:
	dm->state = DM_INIT_ERROR;
	complete(&dm->host_event);
}

static void cap_resp(struct hv_dynmem_device *dm,
		     struct dm_capabilities_resp_msg *cap_resp)
{
	if (!cap_resp->is_accepted) {
		pr_info("Capabilities not accepted by host\n");
		dm->state = DM_INIT_ERROR;
	}
	complete(&dm->host_event);
}

static void balloon_onchannelcallback(void *context)
{
	struct hv_device *dev = context;
	u32 recvlen;
	u64 requestid;
	struct dm_message *dm_msg;
	struct dm_header *dm_hdr;
	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
	struct dm_balloon *bal_msg;
	struct dm_hot_add *ha_msg;
	union dm_mem_page_range *ha_pg_range;
	union dm_mem_page_range *ha_region;

	memset(recv_buffer, 0, sizeof(recv_buffer));
	vmbus_recvpacket(dev->channel, recv_buffer,
			 PAGE_SIZE, &recvlen, &requestid);

	if (recvlen > 0) {
		dm_msg = (struct dm_message *)recv_buffer;
		dm_hdr = &dm_msg->hdr;

		switch (dm_hdr->type) {
		case DM_VERSION_RESPONSE:
			version_resp(dm,
				     (struct dm_version_response *)dm_msg);
			break;

		case DM_CAPABILITIES_RESPONSE:
			cap_resp(dm,
				 (struct dm_capabilities_resp_msg *)dm_msg);
			break;

		case DM_BALLOON_REQUEST:
			if (dm->state == DM_BALLOON_UP)
				pr_warn("Currently ballooning\n");
			bal_msg = (struct dm_balloon *)recv_buffer;
			dm->state = DM_BALLOON_UP;
			dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
			schedule_work(&dm_device.balloon_wrk.wrk);
			break;

		case DM_UNBALLOON_REQUEST:
			dm->state = DM_BALLOON_DOWN;
			balloon_down(dm,
				     (struct dm_unballoon_request *)recv_buffer);
			break;

		case DM_MEM_HOT_ADD_REQUEST:
			if (dm->state == DM_HOT_ADD)
				pr_warn("Currently hot-adding\n");
			dm->state = DM_HOT_ADD;
			ha_msg = (struct dm_hot_add *)recv_buffer;
			if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
				/*
				 * This is a normal hot-add request specifying
				 * hot-add memory.
				 */
				dm->host_specified_ha_region = false;
				ha_pg_range = &ha_msg->range;
				dm->ha_wrk.ha_page_range = *ha_pg_range;
				dm->ha_wrk.ha_region_range.page_range = 0;
			} else {
				/*
				 * Host is specifying that we first hot-add
				 * a region and then partially populate this
				 * region.
				 */
				dm->host_specified_ha_region = true;
				ha_pg_range = &ha_msg->range;
				ha_region = &ha_pg_range[1];
				dm->ha_wrk.ha_page_range = *ha_pg_range;
				dm->ha_wrk.ha_region_range = *ha_region;
			}
			schedule_work(&dm_device.ha_wrk.wrk);
			break;

		case DM_INFO_MESSAGE:
			process_info(dm, (struct dm_info_msg *)dm_msg);
			break;

		default:
			pr_err("Unhandled message: type: %d\n", dm_hdr->type);
		}
	}
}

static int balloon_probe(struct hv_device *dev,
			 const struct hv_vmbus_device_id *dev_id)
{
	int ret;
	unsigned long t;
	struct dm_version_request version_req;
	struct dm_capabilities cap_msg;

#ifdef CONFIG_MEMORY_HOTPLUG
	do_hot_add = hot_add;
#else
	do_hot_add = false;
#endif

	/*
	 * First allocate a send buffer.
	 */
	send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!send_buffer)
		return -ENOMEM;

	ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
			 balloon_onchannelcallback, dev);

	if (ret)
		goto probe_error0;

	dm_device.dev = dev;
	dm_device.state = DM_INITIALIZING;
	dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
	init_completion(&dm_device.host_event);
	init_completion(&dm_device.config_event);
	INIT_LIST_HEAD(&dm_device.ha_region_list);
	spin_lock_init(&dm_device.ha_lock);
	INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
	INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
	dm_device.host_specified_ha_region = false;

	dm_device.thread =
		 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
	if (IS_ERR(dm_device.thread)) {
		ret = PTR_ERR(dm_device.thread);
		goto probe_error1;
	}

#ifdef CONFIG_MEMORY_HOTPLUG
	set_online_page_callback(&hv_online_page);
	register_memory_notifier(&hv_memory_nb);
#endif

	hv_set_drvdata(dev, &dm_device);

	/*
	 * Initiate the handshake with the host and negotiate
	 * a version that the host can support. We start with the
	 * highest version number and go down if the host cannot
	 * support it.
	 */
	memset(&version_req, 0, sizeof(struct dm_version_request));
	version_req.hdr.type = DM_VERSION_REQUEST;
	version_req.hdr.size = sizeof(struct dm_version_request);
	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
	version_req.is_last_attempt = 0;
	dm_device.version = version_req.version.version;

	ret = vmbus_sendpacket(dev->channel, &version_req,
			       sizeof(struct dm_version_request),
			       (unsigned long)NULL,
			       VM_PKT_DATA_INBAND, 0);
	if (ret)
		goto probe_error2;

	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	/*
	 * If we could not negotiate a compatible version with the host,
	 * fail the probe function.
	 */
	if (dm_device.state == DM_INIT_ERROR) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	pr_info("Using Dynamic Memory protocol version %u.%u\n",
		DYNMEM_MAJOR_VERSION(dm_device.version),
		DYNMEM_MINOR_VERSION(dm_device.version));

	/*
	 * Now submit our capabilities to the host.
	 */
	memset(&cap_msg, 0, sizeof(struct dm_capabilities));
	cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
	cap_msg.hdr.size = sizeof(struct dm_capabilities);
	cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);

	cap_msg.caps.cap_bits.balloon = 1;
	cap_msg.caps.cap_bits.hot_add = 1;

	/*
	 * Specify our alignment requirements for memory hot-add:
	 * 128MB alignment (2^7 MB).
	 */
	cap_msg.caps.cap_bits.hot_add_alignment = 7;

	/*
	 * Currently the host does not use these
	 * values and we set them to what is done in the
	 * Windows driver.
	 */
	cap_msg.min_page_cnt = 0;
	cap_msg.max_page_number = -1;

	ret = vmbus_sendpacket(dev->channel, &cap_msg,
			       sizeof(struct dm_capabilities),
			       (unsigned long)NULL,
			       VM_PKT_DATA_INBAND, 0);
	if (ret)
		goto probe_error2;

	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	/*
	 * If the host does not accept the capabilities,
	 * fail the probe function.
	 */
	if (dm_device.state == DM_INIT_ERROR) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	dm_device.state = DM_INITIALIZED;

	return 0;

probe_error2:
#ifdef CONFIG_MEMORY_HOTPLUG
	restore_online_page_callback(&hv_online_page);
#endif
	kthread_stop(dm_device.thread);

probe_error1:
	vmbus_close(dev->channel);
probe_error0:
	kfree(send_buffer);
	return ret;
}

static int balloon_remove(struct hv_device *dev)
{
	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
	struct hv_hotadd_state *has, *tmp;
	struct hv_hotadd_gap *gap, *tmp_gap;
	unsigned long flags;

	if (dm->num_pages_ballooned != 0)
		pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);

	cancel_work_sync(&dm->balloon_wrk.wrk);
	cancel_work_sync(&dm->ha_wrk.wrk);

	vmbus_close(dev->channel);
	kthread_stop(dm->thread);
	kfree(send_buffer);
#ifdef CONFIG_MEMORY_HOTPLUG
	restore_online_page_callback(&hv_online_page);
	unregister_memory_notifier(&hv_memory_nb);
#endif
	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
		list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
			list_del(&gap->list);
			kfree(gap);
		}
		list_del(&has->list);
		kfree(has);
	}
	spin_unlock_irqrestore(&dm_device.ha_lock, flags);

	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Dynamic Memory Class ID */
	{ HV_DM_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

static struct hv_driver balloon_drv = {
	.name = "hv_balloon",
	.id_table = id_table,
	.probe = balloon_probe,
	.remove = balloon_remove,
};

static int __init init_balloon_drv(void)
{
	return vmbus_driver_register(&balloon_drv);
}

module_init(init_balloon_drv);

MODULE_DESCRIPTION("Hyper-V Balloon");
MODULE_LICENSE("GPL");