/*
 * Hyper-V Dynamic Memory driver: implements ballooning and memory
 * hot-add for guests running on Hyper-V.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/percpu_counter.h>

#include <linux/hyperv.h>

#define CREATE_TRACE_POINTS
#include "hv_trace_balloon.h"
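
/*
 * Definitions for the Dynamic Memory protocol spoken with the host
 * follow. The guest first negotiates a protocol version, then reports
 * its capabilities, after which the host can drive balloon up/down and
 * memory hot-add operations and receives periodic pressure reports.
 */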
#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)

enum {
	DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
	DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
	DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),

	DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
	DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
	DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,

	DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
};
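
/*
 * Message types exchanged with the host. Types 0-11 form the version
 * 0.3 message set; DM_INFO_MESSAGE was added in version 1.0, as the
 * *_MAX markers below indicate.
 */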
enum dm_message_type {
	DM_ERROR = 0,
	DM_VERSION_REQUEST = 1,
	DM_VERSION_RESPONSE = 2,
	DM_CAPABILITIES_REPORT = 3,
	DM_CAPABILITIES_RESPONSE = 4,
	DM_STATUS_REPORT = 5,
	DM_BALLOON_REQUEST = 6,
	DM_BALLOON_RESPONSE = 7,
	DM_UNBALLOON_REQUEST = 8,
	DM_UNBALLOON_RESPONSE = 9,
	DM_MEM_HOT_ADD_REQUEST = 10,
	DM_MEM_HOT_ADD_RESPONSE = 11,
	DM_VERSION_03_MAX = 11,

	DM_INFO_MESSAGE = 12,
	DM_VERSION_1_MAX = 12
};
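
/*
 * The protocol version is carried as a 32-bit value: the high 16 bits
 * hold the major version and the low 16 bits the minor version, which
 * is what the union below encodes.
 */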
union dm_version {
	struct {
		__u16 minor_version;
		__u16 major_version;
	};
	__u32 version;
} __packed;

union dm_caps {
	struct {
		__u64 balloon:1;
		__u64 hot_add:1;
		/*
		 * To support guests that may have alignment
		 * limitations on hot-add, the alignment requirement
		 * can be specified here (see balloon_probe()).
		 */
		__u64 hot_add_alignment:4;
		__u64 reservedz:58;
	} cap_bits;
	__u64 caps;
} __packed;
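
/*
 * A single page range: a 40-bit start PFN plus a 24-bit page count,
 * packed into one 64-bit value. Balloon and hot-add messages describe
 * memory as arrays of these ranges.
 */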
union dm_mem_page_range {
	struct {
		__u64 start_page:40;
		__u64 page_cnt:24;
	} finfo;
	__u64 page_range;
} __packed;
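
/*
 * Every message starts with this header: the message type, the total
 * message size in bytes, and a transaction ID used to match responses
 * to requests.
 */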
struct dm_header {
	__u16 type;
	__u16 size;
	__u32 trans_id;
} __packed;

/*
 * A generic message as understood by the protocol: the common header
 * followed by a type-specific payload.
 */
struct dm_message {
	struct dm_header hdr;
	__u8 data[];
} __packed;
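
/*
 * Version negotiation: the guest proposes a version and sets
 * is_last_attempt when it has no older version left to fall back to;
 * the host answers with dm_version_response.is_accepted (see
 * version_resp() below for the downgrade sequence).
 */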
struct dm_version_request {
	struct dm_header hdr;
	union dm_version version;
	__u32 is_last_attempt:1;
	__u32 reservedz:31;
} __packed;

struct dm_version_response {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;
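
/*
 * After version negotiation the guest reports its capabilities
 * (ballooning, hot-add, alignment) along with the minimum page count
 * it wants to keep and the maximum page number it can address; the
 * host accepts or rejects them via dm_capabilities_resp_msg.
 */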
struct dm_capabilities {
	struct dm_header hdr;
	union dm_caps caps;
	__u64 min_page_cnt;
	__u64 max_page_number;
} __packed;

struct dm_capabilities_resp_msg {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;
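
/*
 * Periodic memory-pressure report posted to the host by post_status():
 * available and committed pages, page-file size and write rate, and a
 * zero/free page count. Only num_avail and num_committed are filled in
 * by this driver; the remaining fields are left at zero.
 */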
struct dm_status {
	struct dm_header hdr;
	__u64 num_avail;
	__u64 num_committed;
	__u64 page_file_size;
	__u64 zero_free;
	__u32 page_file_writes;
	__u32 io_diff;
} __packed;

/*
 * Balloon-up request from the host: the guest is expected to give
 * back num_pages pages.
 */
struct dm_balloon {
	struct dm_header hdr;
	__u32 num_pages;
	__u32 reservedz;
} __packed;

/*
 * Balloon-up response to the host: the ranges actually ballooned out.
 * more_pages is set while further response packets will follow for the
 * same request (see balloon_up()).
 */
struct dm_balloon_response {
	struct dm_header hdr;
	__u32 reservedz;
	__u32 more_pages:1;
	__u32 range_count:31;
	union dm_mem_page_range range_array[];
} __packed;
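
/*
 * Balloon-down: the host returns page ranges previously ballooned out,
 * possibly split over several requests (more_pages). The guest frees
 * the pages and, on the final request, sends dm_unballoon_response
 * (see balloon_down()).
 */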
struct dm_unballoon_request {
	struct dm_header hdr;
	__u32 more_pages:1;
	__u32 reservedz:31;
	__u32 range_count;
	union dm_mem_page_range range_array[];
} __packed;

struct dm_unballoon_response {
	struct dm_header hdr;
} __packed;

/*
 * Hot-add request from the host: the page range to add. Depending on
 * the message size, the host may append a second range naming the
 * region to add the pages in (see balloon_onchannelcallback()).
 */
struct dm_hot_add {
	struct dm_header hdr;
	union dm_mem_page_range range;
} __packed;
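
/*
 * Hot-add response: the number of pages actually added plus a result
 * code. hot_add_req() reports success (1) when pages were added or when
 * hot-add is disabled in the guest, and failure (0) otherwise.
 */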
struct dm_hot_add_response {
	struct dm_header hdr;
	__u32 page_count;
	__u32 result;
} __packed;

/*
 * Types of information sent from host to the guest.
 */
enum dm_info_type {
	INFO_TYPE_MAX_PAGE_CNT = 0,
	MAX_INFO_TYPE
};

/*
 * Header for the information message.
 */
struct dm_info_header {
	enum dm_info_type type;
	__u32 data_size;
} __packed;

/*
 * This message is sent from the host to the guest to pass
 * some relevant information.
 *
 * reserved: reserved.
 * info_size: size of the information blob.
 * info: information blob.
 */
struct dm_info_msg {
	struct dm_header hdr;
	__u32 reserved;
	__u32 info_size;
	__u8 info[];
};
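
/*
 * Per-region hot-add bookkeeping. start_pfn..end_pfn is the region the
 * host gave us, ha_end_pfn tracks how far memory has actually been
 * hot-added, covered_start_pfn..covered_end_pfn tracks which part of
 * the region is backed by real pages, and gap_list records unbacked
 * holes inside the covered range (see has_pfn_is_backed()).
 */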
struct hv_hotadd_state {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long covered_start_pfn;
	unsigned long covered_end_pfn;
	unsigned long ha_end_pfn;
	unsigned long end_pfn;
	/* Holes in the covered range that are not backed by pages. */
	struct list_head gap_list;
};

struct hv_hotadd_gap {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

struct balloon_state {
	__u32 num_pages;
	struct work_struct wrk;
};

struct hot_add_wrk {
	union dm_mem_page_range ha_page_range;
	union dm_mem_page_range ha_region_range;
	struct work_struct wrk;
};

static bool hot_add = true;
static bool do_hot_add;
/*
 * Delay reporting memory pressure by this many calls to post_status(),
 * which dm_thread_func() drives roughly once a second.
 */
static uint pressure_report_delay = 45;

/*
 * The last time we posted a pressure report to host.
 */
static unsigned long last_post_time;

module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");

module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);

static int dm_ring_size = (5 * PAGE_SIZE);

/*
 * Driver-wide state machine for the dynamic memory protocol.
 */
enum hv_dm_state {
	DM_INITIALIZING = 0,
	DM_INITIALIZED,
	DM_BALLOON_UP,
	DM_BALLOON_DOWN,
	DM_HOT_ADD,
	DM_INIT_ERROR
};

static __u8 recv_buffer[PAGE_SIZE];
static __u8 *send_buffer;
#define PAGES_IN_2M 512
#define HA_CHUNK (32 * 1024)

struct hv_dynmem_device {
	struct hv_device *dev;
	enum hv_dm_state state;
	struct completion host_event;
	struct completion config_event;

	/* Number of pages we have currently ballooned out. */
	unsigned int num_pages_ballooned;
	unsigned int num_pages_onlined;
	unsigned int num_pages_added;

	/* State to manage the ballooning (up) operation. */
	struct balloon_state balloon_wrk;

	/* State to execute the "hot-add" operation. */
	struct hot_add_wrk ha_wrk;

	/* Did the host specify the hot-add region itself? */
	bool host_specified_ha_region;

	/* State to synchronize hot-add with memory onlining. */
	struct completion ol_waitevent;
	bool ha_waiting;

	/*
	 * This thread posts periodic status reports to the host
	 * (see dm_thread_func()).
	 */
	struct task_struct *thread;

	/* Protects ha_region_list and the num_pages_onlined counter. */
	spinlock_t ha_lock;

	/* A list of hot-add regions. */
	struct list_head ha_region_list;

	/*
	 * The next protocol version to try if the current proposal is
	 * rejected; 0 means there is nothing left to fall back to.
	 */
	__u32 next_version;

	/* The version we negotiated with the host. */
	__u32 version;
};

static struct hv_dynmem_device dm_device;

static void post_status(struct hv_dynmem_device *dm);

#ifdef CONFIG_MEMORY_HOTPLUG
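/*
 * Check whether a PFN inside a hot-add region is backed by an actual
 * page: it must fall in the covered range and not inside any recorded
 * gap.
 */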
static inline bool has_pfn_is_backed(struct hv_hotadd_state *has,
				     unsigned long pfn)
{
	struct hv_hotadd_gap *gap;

	if ((pfn < has->covered_start_pfn) || (pfn >= has->covered_end_pfn))
		return false;

	list_for_each_entry(gap, &has->gap_list, list) {
		if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn))
			return false;
	}

	return true;
}
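
/*
 * Count how many of the nr_pages PFNs starting at start_pfn belong to
 * a hot-add region and are actually backed. The caller is expected to
 * hold ha_lock.
 */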
static unsigned long hv_page_offline_check(unsigned long start_pfn,
					   unsigned long nr_pages)
{
	unsigned long pfn = start_pfn, count = 0;
	struct hv_hotadd_state *has;
	bool found;

	while (pfn < start_pfn + nr_pages) {
		/* Search for the PFN in some hot-add region. */
		found = false;
		list_for_each_entry(has, &dm_device.ha_region_list, list) {
			while ((pfn >= has->start_pfn) &&
			       (pfn < has->end_pfn) &&
			       (pfn < start_pfn + nr_pages)) {
				found = true;
				if (has_pfn_is_backed(has, pfn))
					count++;
				pfn++;
			}
		}

		/*
		 * This PFN is not in any known region; skip it and keep
		 * scanning the rest of the range.
		 */
		if (!found)
			pfn++;
	}

	return count;
}

static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
			      void *v)
{
	struct memory_notify *mem = (struct memory_notify *)v;
	unsigned long flags, pfn_count;

	switch (val) {
	case MEM_ONLINE:
	case MEM_CANCEL_ONLINE:
		if (dm_device.ha_waiting) {
			dm_device.ha_waiting = false;
			complete(&dm_device.ol_waitevent);
		}
		break;

	case MEM_OFFLINE:
		spin_lock_irqsave(&dm_device.ha_lock, flags);
		pfn_count = hv_page_offline_check(mem->start_pfn,
						  mem->nr_pages);
		if (pfn_count <= dm_device.num_pages_onlined) {
			dm_device.num_pages_onlined -= pfn_count;
		} else {
			/*
			 * We are offlining more pages than we managed to
			 * online. This should not happen, so warn once and
			 * reset the counter rather than underflowing it.
			 */
			WARN_ON_ONCE(1);
			dm_device.num_pages_onlined = 0;
		}
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
		break;
	case MEM_GOING_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block hv_memory_nb = {
	.notifier_call = hv_memory_notifier,
	.priority = 0
};

/* Check if the particular page is backed and can be onlined and online it. */
static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
{
	if (!has_pfn_is_backed(has, page_to_pfn(pg))) {
		if (!PageOffline(pg))
			__SetPageOffline(pg);
		return;
	}
	if (PageOffline(pg))
		__ClearPageOffline(pg);

	/* This frame is currently backed; online the page. */
	__online_page_set_limits(pg);
	__online_page_increment_counters(pg);
	__online_page_free(pg);

	lockdep_assert_held(&dm_device.ha_lock);
	dm_device.num_pages_onlined++;
}

static void hv_bring_pgs_online(struct hv_hotadd_state *has,
				unsigned long start_pfn, unsigned long size)
{
	int i;

	pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
	for (i = 0; i < size; i++)
		hv_page_online_one(has, pfn_to_page(start_pfn + i));
}
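
/*
 * Hot-add memory in HA_CHUNK-sized pieces: extend the region's covered
 * range, call add_memory(), and wait (with a 5 second timeout) for
 * userspace to online the block unless the kernel onlines it
 * automatically (memhp_auto_online).
 */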
static void hv_mem_hot_add(unsigned long start, unsigned long size,
			   unsigned long pfn_count,
			   struct hv_hotadd_state *has)
{
	int ret = 0;
	int i, nid;
	unsigned long start_pfn;
	unsigned long processed_pfn;
	unsigned long total_pfn = pfn_count;
	unsigned long flags;

	for (i = 0; i < (size/HA_CHUNK); i++) {
		start_pfn = start + (i * HA_CHUNK);

		spin_lock_irqsave(&dm_device.ha_lock, flags);
		has->ha_end_pfn += HA_CHUNK;

		if (total_pfn > HA_CHUNK) {
			processed_pfn = HA_CHUNK;
			total_pfn -= HA_CHUNK;
		} else {
			processed_pfn = total_pfn;
			total_pfn = 0;
		}

		has->covered_end_pfn += processed_pfn;
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);

		init_completion(&dm_device.ol_waitevent);
		dm_device.ha_waiting = !memhp_auto_online;

		nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
		ret = add_memory(nid, PFN_PHYS((start_pfn)),
				(HA_CHUNK << PAGE_SHIFT));

		if (ret) {
			pr_err("hot_add memory failed error is %d\n", ret);
			if (ret == -EEXIST) {
				/*
				 * This error indicates that the error
				 * is not a transient failure. This is the
				 * case where the guest's physical address map
				 * precludes hot adding memory. Stop all further
				 * memory hot-add.
				 */
				do_hot_add = false;
			}
			spin_lock_irqsave(&dm_device.ha_lock, flags);
			has->ha_end_pfn -= HA_CHUNK;
			has->covered_end_pfn -= processed_pfn;
			spin_unlock_irqrestore(&dm_device.ha_lock, flags);
			break;
		}

		/*
		 * Wait for the memory block to be onlined when memory
		 * onlining is done outside of the kernel. The wait is
		 * bounded so a stuck onlining path cannot hang the
		 * driver forever.
		 */
		if (dm_device.ha_waiting)
			wait_for_completion_timeout(&dm_device.ol_waitevent,
						    5*HZ);
		post_status(&dm_device);
	}
}

static void hv_online_page(struct page *pg, unsigned int order)
{
	struct hv_hotadd_state *has;
	unsigned long flags;
	unsigned long pfn = page_to_pfn(pg);

	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		/* The page belongs to a different HAS. */
		if ((pfn < has->start_pfn) ||
		    (pfn + (1UL << order) > has->end_pfn))
			continue;

		hv_bring_pgs_online(has, pfn, 1UL << order);
		break;
	}
	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
}

static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
	struct hv_hotadd_state *has;
	struct hv_hotadd_gap *gap;
	unsigned long residual, new_inc;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		/*
		 * If the pfn range we are dealing with is not in the current
		 * "hot add block", move on.
		 */
		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
			continue;

		/*
		 * If the current start pfn is not where the covered_end
		 * is, create a gap and update covered_end_pfn.
		 */
		if (has->covered_end_pfn != start_pfn) {
			gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
			if (!gap) {
				ret = -ENOMEM;
				break;
			}

			INIT_LIST_HEAD(&gap->list);
			gap->start_pfn = has->covered_end_pfn;
			gap->end_pfn = start_pfn;
			list_add_tail(&gap->list, &has->gap_list);

			has->covered_end_pfn = start_pfn;
		}

		/*
		 * If the current hot add-request extends beyond
		 * our current limit; extend it.
		 */
		if ((start_pfn + pfn_cnt) > has->end_pfn) {
			residual = (start_pfn + pfn_cnt - has->end_pfn);
			/*
			 * Extend the region by multiples of HA_CHUNK.
			 */
			new_inc = (residual / HA_CHUNK) * HA_CHUNK;
			if (residual % HA_CHUNK)
				new_inc += HA_CHUNK;

			has->end_pfn += new_inc;
		}

		ret = 1;
		break;
	}
	spin_unlock_irqrestore(&dm_device.ha_lock, flags);

	return ret;
}
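
/*
 * Handle a hot-add request that falls inside an existing region: online
 * whatever part is already hot-added, hot-add the rest in HA_CHUNK
 * units, and return the number of PFNs newly covered.
 */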
static unsigned long handle_pg_range(unsigned long pg_start,
				     unsigned long pg_count)
{
	unsigned long start_pfn = pg_start;
	unsigned long pfn_cnt = pg_count;
	unsigned long size;
	struct hv_hotadd_state *has;
	unsigned long pgs_ol = 0;
	unsigned long old_covered_state;
	unsigned long res = 0, flags;

	pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
		pg_start);

	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		/*
		 * If the pfn range we are dealing with is not in the current
		 * "hot add block", move on.
		 */
		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
			continue;

		old_covered_state = has->covered_end_pfn;

		if (start_pfn < has->ha_end_pfn) {
			/*
			 * This is the case where we are backing pages
			 * in an already hot added region. Bring
			 * these pages online first.
			 */
			pgs_ol = has->ha_end_pfn - start_pfn;
			if (pgs_ol > pfn_cnt)
				pgs_ol = pfn_cnt;

			has->covered_end_pfn += pgs_ol;
			pfn_cnt -= pgs_ol;
			/*
			 * Check if the corresponding memory block is already
			 * online. It is possible to observe struct pages still
			 * being uninitialized here so check section instead.
			 * In case the section is online we need to bring the
			 * rest of pfns (which were not backed previously)
			 * online too.
			 */
			if (start_pfn > has->start_pfn &&
			    online_section_nr(pfn_to_section_nr(start_pfn)))
				hv_bring_pgs_online(has, start_pfn, pgs_ol);
		}

		if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
			/*
			 * We have some residual hot add range
			 * that needs to be hot added; hot add
			 * it now. Hot add a multiple of
			 * HA_CHUNK that fully covers the pages
			 * we have.
			 */
			size = (has->end_pfn - has->ha_end_pfn);
			if (pfn_cnt <= size) {
				size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
				if (pfn_cnt % HA_CHUNK)
					size += HA_CHUNK;
			} else {
				pfn_cnt = size;
			}
			spin_unlock_irqrestore(&dm_device.ha_lock, flags);
			hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
			spin_lock_irqsave(&dm_device.ha_lock, flags);
		}
		/*
		 * If we managed to online any pages that were given to us,
		 * we declare success.
		 */
		res = has->covered_end_pfn - old_covered_state;
		break;
	}
	spin_unlock_irqrestore(&dm_device.ha_lock, flags);

	return res;
}

static unsigned long process_hot_add(unsigned long pg_start,
					unsigned long pfn_cnt,
					unsigned long rg_start,
					unsigned long rg_size)
{
	struct hv_hotadd_state *ha_region = NULL;
	int covered;
	unsigned long flags;

	if (pfn_cnt == 0)
		return 0;

	if (!dm_device.host_specified_ha_region) {
		covered = pfn_covered(pg_start, pfn_cnt);
		if (covered < 0)
			return 0;

		if (covered)
			goto do_pg_range;
	}

	/*
	 * If the host has specified a hot-add range; deal with it first.
	 */
	if (rg_size != 0) {
		ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
		if (!ha_region)
			return 0;

		INIT_LIST_HEAD(&ha_region->list);
		INIT_LIST_HEAD(&ha_region->gap_list);

		ha_region->start_pfn = rg_start;
		ha_region->ha_end_pfn = rg_start;
		ha_region->covered_start_pfn = pg_start;
		ha_region->covered_end_pfn = pg_start;
		ha_region->end_pfn = rg_start + rg_size;

		spin_lock_irqsave(&dm_device.ha_lock, flags);
		list_add_tail(&ha_region->list, &dm_device.ha_region_list);
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
	}

do_pg_range:
	/*
	 * Process the page range specified; bringing them
	 * online if possible.
	 */
	return handle_pg_range(pg_start, pfn_cnt);
}

#endif

static void hot_add_req(struct work_struct *dummy)
{
	struct dm_hot_add_response resp;
#ifdef CONFIG_MEMORY_HOTPLUG
	unsigned long pg_start, pfn_cnt;
	unsigned long rg_start, rg_sz;
#endif
	struct hv_dynmem_device *dm = &dm_device;

	memset(&resp, 0, sizeof(struct dm_hot_add_response));
	resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
	resp.hdr.size = sizeof(struct dm_hot_add_response);

#ifdef CONFIG_MEMORY_HOTPLUG
	pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
	pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;

	rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
	rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;

	if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
		unsigned long region_size;
		unsigned long region_start;

		/*
		 * The host has not specified the hot-add region.
		 * Based on the hot-add page range being specified,
		 * compute a hot-add region that can cover the pages
		 * that need to be hot-added while ensuring the alignment
		 * and size requirements of Linux as it relates to hot-add.
		 */
		region_start = pg_start;
		region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
		if (pfn_cnt % HA_CHUNK)
			region_size += HA_CHUNK;

		region_start = (pg_start / HA_CHUNK) * HA_CHUNK;

		rg_start = region_start;
		rg_sz = region_size;
	}

	if (do_hot_add)
		resp.page_count = process_hot_add(pg_start, pfn_cnt,
						rg_start, rg_sz);

	dm->num_pages_added += resp.page_count;
#endif
	/*
	 * The result field of the response structure has the
	 * following semantics:
	 *
	 * 1. If all or some pages hot-added: Guest should return success.
	 *
	 * 2. If no pages could be hot-added:
	 *
	 * If the guest returns success, then the host
	 * will not attempt any further hot-add operations. This
	 * signifies a permanent failure.
	 *
	 * If the guest returns failure, then this failure will be
	 * treated as a transient failure and the host may retry the
	 * hot-add operation after some delay.
	 */
	if (resp.page_count > 0)
		resp.result = 1;
	else if (!do_hot_add)
		resp.result = 1;
	else
		resp.result = 0;

	if (!do_hot_add || (resp.page_count == 0))
		pr_err("Memory hot add failed\n");

	dm->state = DM_INITIALIZED;
	resp.hdr.trans_id = atomic_inc_return(&trans_id);
	vmbus_sendpacket(dm->dev->channel, &resp,
			sizeof(struct dm_hot_add_response),
			(unsigned long)NULL,
			VM_PKT_DATA_INBAND, 0);
}

static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
{
	struct dm_info_header *info_hdr;

	info_hdr = (struct dm_info_header *)msg->info;

	switch (info_hdr->type) {
	case INFO_TYPE_MAX_PAGE_CNT:
		if (info_hdr->data_size == sizeof(__u64)) {
			__u64 *max_page_count = (__u64 *)&info_hdr[1];

			pr_info("Max. dynamic memory size: %llu MB\n",
				(*max_page_count) >> (20 - PAGE_SHIFT));
		}

		break;
	default:
		pr_warn("Received Unknown type: %d\n", info_hdr->type);
	}
}

static unsigned long compute_balloon_floor(void)
{
	unsigned long min_pages;
	unsigned long nr_pages = totalram_pages();
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
	/*
	 * Piecewise-linear floor, keeping a larger relative reserve on
	 * small systems: 8 MiB + 50% below 128 MiB, 40 MiB + 25% below
	 * 512 MiB, 104 MiB + 12.5% below 2 GiB, 232 MiB + 6.25% below
	 * 8 GiB, and 488 MiB + ~3% above that.
	 */
	if (nr_pages < MB2PAGES(128))
		min_pages = MB2PAGES(8) + (nr_pages >> 1);
	else if (nr_pages < MB2PAGES(512))
		min_pages = MB2PAGES(40) + (nr_pages >> 2);
	else if (nr_pages < MB2PAGES(2048))
		min_pages = MB2PAGES(104) + (nr_pages >> 3);
	else if (nr_pages < MB2PAGES(8192))
		min_pages = MB2PAGES(232) + (nr_pages >> 4);
	else
		min_pages = MB2PAGES(488) + (nr_pages >> 5);
#undef MB2PAGES
	return min_pages;
}
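
/*
 * Post our memory-pressure status to the host. Skipped while
 * pressure_report_delay is counting down, rate-limited to one post per
 * second, and abandoned if another message has claimed a newer
 * transaction id or another thread posted in the meantime.
 */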
static void post_status(struct hv_dynmem_device *dm)
{
	struct dm_status status;
	unsigned long now = jiffies;
	unsigned long last_post = last_post_time;

	if (pressure_report_delay > 0) {
		--pressure_report_delay;
		return;
	}

	if (!time_after(now, (last_post_time + HZ)))
		return;

	memset(&status, 0, sizeof(struct dm_status));
	status.hdr.type = DM_STATUS_REPORT;
	status.hdr.size = sizeof(struct dm_status);
	status.hdr.trans_id = atomic_inc_return(&trans_id);

	/*
	 * The host expects the guest to report free and committed memory.
	 * Furthermore, the host expects the pressure information to include
	 * the ballooned out pages. For a given amount of memory that we are
	 * managing we need to compute a floor below which we should not
	 * balloon. Compute this and add it to the pressure report.
	 * We also need to report all offline pages (num_pages_added -
	 * num_pages_onlined) as committed to the host, otherwise it can try
	 * asking us to balloon them out.
	 */
	status.num_avail = si_mem_available();
	status.num_committed = vm_memory_committed() +
		dm->num_pages_ballooned +
		(dm->num_pages_added > dm->num_pages_onlined ?
		 dm->num_pages_added - dm->num_pages_onlined : 0) +
		compute_balloon_floor();

	trace_balloon_status(status.num_avail, status.num_committed,
			     vm_memory_committed(), dm->num_pages_ballooned,
			     dm->num_pages_added, dm->num_pages_onlined);
	/*
	 * If our transaction ID is no longer current, just don't
	 * send the status. This can happen if we were interrupted
	 * after we picked our transaction ID.
	 */
	if (status.hdr.trans_id != atomic_read(&trans_id))
		return;

	/*
	 * If the last post time that we sampled has changed,
	 * we have raced, don't post the status.
	 */
	if (last_post != last_post_time)
		return;

	last_post_time = jiffies;
	vmbus_sendpacket(dm->dev->channel, &status,
				sizeof(struct dm_status),
				(unsigned long)NULL,
				VM_PKT_DATA_INBAND, 0);
}

static void free_balloon_pages(struct hv_dynmem_device *dm,
			       union dm_mem_page_range *range_array)
{
	int num_pages = range_array->finfo.page_cnt;
	__u64 start_frame = range_array->finfo.start_page;
	struct page *pg;
	int i;

	for (i = 0; i < num_pages; i++) {
		pg = pfn_to_page(i + start_frame);
		__ClearPageOffline(pg);
		__free_page(pg);
		dm->num_pages_ballooned--;
	}
}
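
/*
 * Allocate up to num_pages balloon pages in alloc_unit-sized blocks,
 * record them in bl_resp's range array, and return the number of pages
 * actually taken. Stops early when the response buffer is full or an
 * allocation fails.
 */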
static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
					unsigned int num_pages,
					struct dm_balloon_response *bl_resp,
					int alloc_unit)
{
	unsigned int i, j;
	struct page *pg;

	if (num_pages < alloc_unit)
		return 0;

	for (i = 0; (i * alloc_unit) < num_pages; i++) {
		if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
			PAGE_SIZE)
			return i * alloc_unit;

		/*
		 * We execute this code in a thread context. Furthermore,
		 * we don't want the kernel to try too hard.
		 */
		pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
				__GFP_NOMEMALLOC | __GFP_NOWARN,
				get_order(alloc_unit << PAGE_SHIFT));

		if (!pg)
			return i * alloc_unit;

		dm->num_pages_ballooned += alloc_unit;

		/*
		 * If we allocated 2M pages, split them so we
		 * can free them in any order we get them back.
		 */
		if (alloc_unit != 1)
			split_page(pg, get_order(alloc_unit << PAGE_SHIFT));

		/* mark all pages offline */
		for (j = 0; j < (1 << get_order(alloc_unit << PAGE_SHIFT)); j++)
			__SetPageOffline(pg + j);

		bl_resp->range_count++;
		bl_resp->range_array[i].finfo.start_page =
			page_to_pfn(pg);
		bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
		bl_resp->hdr.size += sizeof(union dm_mem_page_range);
	}

	return num_pages;
}

static void balloon_up(struct work_struct *dummy)
{
	unsigned int num_pages = dm_device.balloon_wrk.num_pages;
	unsigned int num_ballooned = 0;
	struct dm_balloon_response *bl_resp;
	int alloc_unit;
	int ret;
	bool done = false;
	int i;
	long avail_pages;
	unsigned long floor;

	/* The host balloons pages in 2M granularity. */
	WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);

	/*
	 * We will attempt 2M allocations. However, if we fail to
	 * allocate 2M chunks, we will go back to 4k allocations.
	 */
	alloc_unit = 512;

	avail_pages = si_mem_available();
	floor = compute_balloon_floor();

	/* Refuse to balloon below the floor. */
	if (avail_pages < num_pages || avail_pages - num_pages < floor) {
		pr_warn("Balloon request will be partially fulfilled. %s\n",
			avail_pages < num_pages ? "Not enough memory." :
			"Balloon floor reached.");

		num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
		num_pages -= num_pages % PAGES_IN_2M;
	}

	while (!done) {
		bl_resp = (struct dm_balloon_response *)send_buffer;
		memset(send_buffer, 0, PAGE_SIZE);
		bl_resp->hdr.type = DM_BALLOON_RESPONSE;
		bl_resp->hdr.size = sizeof(struct dm_balloon_response);
		bl_resp->more_pages = 1;

		num_pages -= num_ballooned;
		num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
						    bl_resp, alloc_unit);

		if (alloc_unit != 1 && num_ballooned == 0) {
			alloc_unit = 1;
			continue;
		}

		if (num_ballooned == 0 || num_ballooned == num_pages) {
			pr_debug("Ballooned %u out of %u requested pages.\n",
				num_pages, dm_device.balloon_wrk.num_pages);

			bl_resp->more_pages = 0;
			done = true;
			dm_device.state = DM_INITIALIZED;
		}

		/*
		 * We are pushing a lot of data through the channel;
		 * deal with transient failures caused because of the
		 * lack of space in the ring buffer.
		 */
		do {
			bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
			ret = vmbus_sendpacket(dm_device.dev->channel,
						bl_resp,
						bl_resp->hdr.size,
						(unsigned long)NULL,
						VM_PKT_DATA_INBAND, 0);

			if (ret == -EAGAIN)
				msleep(20);
			post_status(&dm_device);
		} while (ret == -EAGAIN);

		if (ret) {
			/*
			 * Free up the memory we allocated.
			 */
			pr_err("Balloon response failed\n");

			for (i = 0; i < bl_resp->range_count; i++)
				free_balloon_pages(&dm_device,
						 &bl_resp->range_array[i]);

			done = true;
		}
	}
}

static void balloon_down(struct hv_dynmem_device *dm,
			 struct dm_unballoon_request *req)
{
	union dm_mem_page_range *range_array = req->range_array;
	int range_count = req->range_count;
	struct dm_unballoon_response resp;
	int i;
	unsigned int prev_pages_ballooned = dm->num_pages_ballooned;

	for (i = 0; i < range_count; i++) {
		free_balloon_pages(dm, &range_array[i]);
		complete(&dm_device.config_event);
	}

	pr_debug("Freed %u ballooned pages.\n",
		prev_pages_ballooned - dm->num_pages_ballooned);

	if (req->more_pages == 1)
		return;

	memset(&resp, 0, sizeof(struct dm_unballoon_response));
	resp.hdr.type = DM_UNBALLOON_RESPONSE;
	resp.hdr.trans_id = atomic_inc_return(&trans_id);
	resp.hdr.size = sizeof(struct dm_unballoon_response);

	vmbus_sendpacket(dm_device.dev->channel, &resp,
				sizeof(struct dm_unballoon_response),
				(unsigned long)NULL,
				VM_PKT_DATA_INBAND, 0);

	dm->state = DM_INITIALIZED;
}

static void balloon_onchannelcallback(void *context);

static int dm_thread_func(void *dm_dev)
{
	struct hv_dynmem_device *dm = dm_dev;

	while (!kthread_should_stop()) {
		wait_for_completion_interruptible_timeout(
						&dm_device.config_event, 1*HZ);
		/*
		 * The host expects us to post information on the memory
		 * pressure every second.
		 */
		reinit_completion(&dm_device.config_event);
		post_status(dm);
	}

	return 0;
}

static void version_resp(struct hv_dynmem_device *dm,
			 struct dm_version_response *vresp)
{
	struct dm_version_request version_req;
	int ret;

	if (vresp->is_accepted) {
		/*
		 * We are done; wakeup the
		 * context waiting for version
		 * negotiation.
		 */
		complete(&dm->host_event);
		return;
	}
	/*
	 * If there are no more versions to try, signal versioning failure.
	 */
	if (dm->next_version == 0)
		goto version_error;

	memset(&version_req, 0, sizeof(struct dm_version_request));
	version_req.hdr.type = DM_VERSION_REQUEST;
	version_req.hdr.size = sizeof(struct dm_version_request);
	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
	version_req.version.version = dm->next_version;
	dm->version = version_req.version.version;

	/*
	 * Set the next version to try in case current version fails.
	 * This will determine if we are the last one to try.
	 */
	switch (version_req.version.version) {
	case DYNMEM_PROTOCOL_VERSION_WIN8:
		dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
		version_req.is_last_attempt = 0;
		break;
	default:
		dm->next_version = 0;
		version_req.is_last_attempt = 1;
	}

	ret = vmbus_sendpacket(dm->dev->channel, &version_req,
				sizeof(struct dm_version_request),
				(unsigned long)NULL,
				VM_PKT_DATA_INBAND, 0);

	if (ret)
		goto version_error;

	return;

version_error:
	dm->state = DM_INIT_ERROR;
	complete(&dm->host_event);
}

static void cap_resp(struct hv_dynmem_device *dm,
		     struct dm_capabilities_resp_msg *cap_resp)
{
	if (!cap_resp->is_accepted) {
		pr_err("Capabilities not accepted by host\n");
		dm->state = DM_INIT_ERROR;
	}
	complete(&dm->host_event);
}
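
/*
 * VMBus channel callback: read one packet from the host and dispatch
 * it by message type. Long-running operations (balloon up, hot-add)
 * are deferred to work items so the channel callback stays short.
 */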
static void balloon_onchannelcallback(void *context)
{
	struct hv_device *dev = context;
	u32 recvlen;
	u64 requestid;
	struct dm_message *dm_msg;
	struct dm_header *dm_hdr;
	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
	struct dm_balloon *bal_msg;
	struct dm_hot_add *ha_msg;
	union dm_mem_page_range *ha_pg_range;
	union dm_mem_page_range *ha_region;

	memset(recv_buffer, 0, sizeof(recv_buffer));
	vmbus_recvpacket(dev->channel, recv_buffer,
			 PAGE_SIZE, &recvlen, &requestid);

	if (recvlen > 0) {
		dm_msg = (struct dm_message *)recv_buffer;
		dm_hdr = &dm_msg->hdr;

		switch (dm_hdr->type) {
		case DM_VERSION_RESPONSE:
			version_resp(dm,
				 (struct dm_version_response *)dm_msg);
			break;

		case DM_CAPABILITIES_RESPONSE:
			cap_resp(dm,
				 (struct dm_capabilities_resp_msg *)dm_msg);
			break;

		case DM_BALLOON_REQUEST:
			if (dm->state == DM_BALLOON_UP)
				pr_warn("Currently ballooning\n");
			bal_msg = (struct dm_balloon *)recv_buffer;
			dm->state = DM_BALLOON_UP;
			dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
			schedule_work(&dm_device.balloon_wrk.wrk);
			break;

		case DM_UNBALLOON_REQUEST:
			dm->state = DM_BALLOON_DOWN;
			balloon_down(dm,
				 (struct dm_unballoon_request *)recv_buffer);
			break;

		case DM_MEM_HOT_ADD_REQUEST:
			if (dm->state == DM_HOT_ADD)
				pr_warn("Currently hot-adding\n");
			dm->state = DM_HOT_ADD;
			ha_msg = (struct dm_hot_add *)recv_buffer;
			if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
				/*
				 * This is a normal hot-add request specifying
				 * hot-add memory.
				 */
				dm->host_specified_ha_region = false;
				ha_pg_range = &ha_msg->range;
				dm->ha_wrk.ha_page_range = *ha_pg_range;
				dm->ha_wrk.ha_region_range.page_range = 0;
			} else {
				/*
				 * Host is specifying that we first hot-add
				 * a region and then partially populate this
				 * region.
				 */
				dm->host_specified_ha_region = true;
				ha_pg_range = &ha_msg->range;
				ha_region = &ha_pg_range[1];
				dm->ha_wrk.ha_page_range = *ha_pg_range;
				dm->ha_wrk.ha_region_range = *ha_region;
			}
			schedule_work(&dm_device.ha_wrk.wrk);
			break;

		case DM_INFO_MESSAGE:
			process_info(dm, (struct dm_info_msg *)dm_msg);
			break;

		default:
			pr_warn("Unhandled message: type: %d\n", dm_hdr->type);
		}
	}
}

static int balloon_probe(struct hv_device *dev,
			 const struct hv_vmbus_device_id *dev_id)
{
	int ret;
	unsigned long t;
	struct dm_version_request version_req;
	struct dm_capabilities cap_msg;

#ifdef CONFIG_MEMORY_HOTPLUG
	do_hot_add = hot_add;
#else
	do_hot_add = false;
#endif

	/*
	 * First allocate a send buffer.
	 */
	send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!send_buffer)
		return -ENOMEM;

	ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
			balloon_onchannelcallback, dev);

	if (ret)
		goto probe_error0;

	dm_device.dev = dev;
	dm_device.state = DM_INITIALIZING;
	dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
	init_completion(&dm_device.host_event);
	init_completion(&dm_device.config_event);
	INIT_LIST_HEAD(&dm_device.ha_region_list);
	spin_lock_init(&dm_device.ha_lock);
	INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
	INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
	dm_device.host_specified_ha_region = false;

	dm_device.thread =
		 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
	if (IS_ERR(dm_device.thread)) {
		ret = PTR_ERR(dm_device.thread);
		goto probe_error1;
	}

#ifdef CONFIG_MEMORY_HOTPLUG
	set_online_page_callback(&hv_online_page);
	register_memory_notifier(&hv_memory_nb);
#endif

	hv_set_drvdata(dev, &dm_device);
	/*
	 * Initiate the hand shake with the host.
	 * Begin the version negotiation with the highest version we
	 * support and fall back from there (see version_resp()).
	 */
	memset(&version_req, 0, sizeof(struct dm_version_request));
	version_req.hdr.type = DM_VERSION_REQUEST;
	version_req.hdr.size = sizeof(struct dm_version_request);
	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
	version_req.is_last_attempt = 0;
	dm_device.version = version_req.version.version;

	ret = vmbus_sendpacket(dev->channel, &version_req,
				sizeof(struct dm_version_request),
				(unsigned long)NULL,
				VM_PKT_DATA_INBAND, 0);
	if (ret)
		goto probe_error2;

	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	/*
	 * If we could not negotiate a compatible version with the host;
	 * fail the probe function.
	 */
	if (dm_device.state == DM_INIT_ERROR) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	pr_info("Using Dynamic Memory protocol version %u.%u\n",
		DYNMEM_MAJOR_VERSION(dm_device.version),
		DYNMEM_MINOR_VERSION(dm_device.version));

	/*
	 * Now submit our capabilities to the host.
	 */
	memset(&cap_msg, 0, sizeof(struct dm_capabilities));
	cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
	cap_msg.hdr.size = sizeof(struct dm_capabilities);
	cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);

	cap_msg.caps.cap_bits.balloon = 1;
	cap_msg.caps.cap_bits.hot_add = 1;

	/*
	 * Specify our alignment requirements as it relates
	 * memory hot-add. Specify 128MB alignment.
	 */
	cap_msg.caps.cap_bits.hot_add_alignment = 7;

	/*
	 * Currently the host does not use these
	 * values and we set them to what is done in the
	 * Windows driver.
	 */
	cap_msg.min_page_cnt = 0;
	cap_msg.max_page_number = -1;

	ret = vmbus_sendpacket(dev->channel, &cap_msg,
				sizeof(struct dm_capabilities),
				(unsigned long)NULL,
				VM_PKT_DATA_INBAND, 0);
	if (ret)
		goto probe_error2;

	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	/*
	 * If the host does not accept our capabilities,
	 * fail the probe function.
	 */
	if (dm_device.state == DM_INIT_ERROR) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	dm_device.state = DM_INITIALIZED;
	last_post_time = jiffies;

	return 0;

probe_error2:
#ifdef CONFIG_MEMORY_HOTPLUG
	restore_online_page_callback(&hv_online_page);
#endif
	kthread_stop(dm_device.thread);

probe_error1:
	vmbus_close(dev->channel);
probe_error0:
	kfree(send_buffer);
	return ret;
}

static int balloon_remove(struct hv_device *dev)
{
	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
	struct hv_hotadd_state *has, *tmp;
	struct hv_hotadd_gap *gap, *tmp_gap;
	unsigned long flags;

	if (dm->num_pages_ballooned != 0)
		pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);

	cancel_work_sync(&dm->balloon_wrk.wrk);
	cancel_work_sync(&dm->ha_wrk.wrk);

	vmbus_close(dev->channel);
	kthread_stop(dm->thread);
	kfree(send_buffer);
#ifdef CONFIG_MEMORY_HOTPLUG
	restore_online_page_callback(&hv_online_page);
	unregister_memory_notifier(&hv_memory_nb);
#endif
	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
		list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
			list_del(&gap->list);
			kfree(gap);
		}
		list_del(&has->list);
		kfree(has);
	}
	spin_unlock_irqrestore(&dm_device.ha_lock, flags);

	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Dynamic Memory Class ID */
	/* 525074DC-8985-46e2-8057-A307DC18A502 */
	{ HV_DM_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

static struct hv_driver balloon_drv = {
	.name = "hv_balloon",
	.id_table = id_table,
	.probe = balloon_probe,
	.remove = balloon_remove,
	.driver = {
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

static int __init init_balloon_drv(void)
{
	return vmbus_driver_register(&balloon_drv);
}

module_init(init_balloon_drv);

MODULE_DESCRIPTION("Hyper-V Balloon");
MODULE_LICENSE("GPL");