/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/cpu.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

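/*
 * Free all pages on a private freelist and return the highest PFN that was
 * released, so callers can update the cached free-scanner position.
 */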
static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);
		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}

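/*
 * split_free_page() hands back pages that are still unmapped and unpoisoned;
 * re-establish the kernel mappings and KASAN state before they are used.
 */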
static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
		kasan_alloc_pages(page, 0);
	}
}

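/* Returns true for MIGRATE_MOVABLE and MIGRATE_CMA pageblocks */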
static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

#ifdef CONFIG_COMPACTION

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift, compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
 */
void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;

	trace_mm_compaction_defer_compaction(zone, order);
}

/* Returns true if compaction should be skipped this time */
bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	if (zone->compact_considered >= defer_limit)
		return false;

	trace_mm_compaction_deferred(zone, order);

	return true;
}

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;

	trace_mm_compaction_defer_reset(zone, order);
}

/* Returns true if restarting compaction after many failures */
bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}

/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

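/* Reset the cached scanner positions to the zone edges for a fresh scan */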
static void reset_cached_positions(struct zone *zone)
{
	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
	zone->compact_cached_free_pfn =
			round_down(zone_end_pfn(zone) - 1, pageblock_nr_pages);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long pfn;

	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (zone != page_zone(page))
			continue;

		clear_pageblock_skip(page);
	}

	reset_cached_positions(zone);
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;
	unsigned long pfn;

	if (cc->ignore_skip_hint)
		return;

	if (!page)
		return;

	if (nr_isolated)
		return;

	set_pageblock_skip(page);

	pfn = page_to_pfn(page);

	/* Update where async and sync compaction should restart */
	if (migrate_scanner) {
		if (pfn > zone->compact_cached_migrate_pfn[0])
			zone->compact_cached_migrate_pfn[0] = pfn;
		if (cc->mode != MIGRATE_ASYNC &&
		    pfn > zone->compact_cached_migrate_pfn[1])
			zone->compact_cached_migrate_pfn[1] = pfn;
	} else {
		if (pfn < zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = pfn;
	}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, back out if the lock cannot
 * be taken immediately. For sync compaction, spin on the lock if needed.
 *
 * Returns true if the lock is held
 * Returns false if the lock is not held and compaction should abort
 */
static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
{
	if (cc->mode == MIGRATE_ASYNC) {
		if (!spin_trylock_irqsave(lock, *flags)) {
			cc->contended = COMPACT_CONTENDED_LOCK;
			return false;
		}
	} else {
		spin_lock_irqsave(lock, *flags);
	}

	return true;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, async compaction
 * aborts. Sync compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to fatal signal pending, or
 *		async compaction due to need_resched()
 * Returns false when compaction can continue (sync compaction might have
 *		scheduled)
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = COMPACT_CONTENDED_SCHED;
		return true;
	}

	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = COMPACT_CONTENDED_SCHED;
			return true;
		}
		cond_resched();
	}

	return false;
}

/*
 * Aside from avoiding lock contention, compaction also periodically checks
 * need_resched() and either schedules in sync compaction or aborts async
 * compaction. This is similar to what compact_unlock_should_abort() does, but
 * is used where no lock is concerned.
 *
 * Returns false when no scheduling was needed, or sync compaction scheduled.
 * Returns true when async compaction should abort.
 */
static inline bool compact_should_abort(struct compact_control *cc)
{
	/* async compaction aborts if contended */
	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = COMPACT_CONTENDED_SCHED;
			return true;
		}

		cond_resched();
	}

	return false;
}

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort if fatal signal
		 * pending or async compaction detects need_resched()
		 */
		if (!(blockpfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;

		/*
		 * For compound pages such as THP and hugetlbfs, we can save
		 * potentially a lot of iterations if we skip them at once.
		 * The check is racy, but we can consider only valid values
		 * and the only danger is skipping too much.
		 */
		if (PageCompound(page)) {
			unsigned int comp_order = compound_order(page);

			if (likely(comp_order < MAX_ORDER)) {
				blockpfn += (1UL << comp_order) - 1;
				cursor += (1UL << comp_order) - 1;
			}

			goto isolate_fail;
		}

		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * If we already hold the lock, we can skip some rechecking.
		 * Note that if we hold the lock now, checked_pageblock was
		 * already set in some previous iteration (or strict is true),
		 * so it is correct to skip the suitable migration target
		 * recheck as well.
		 */
		if (!locked) {
			/*
			 * The zone lock must be held to isolate freepages.
			 * Unfortunately this is a very coarse lock and can be
			 * heavily contended if there are parallel allocations
			 * or parallel compactions. For async compaction do not
			 * spin on the lock and we acquire the lock as late as
			 * possible.
			 */
			locked = compact_trylock_irqsave(&cc->zone->lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			cc->nr_freepages += isolated;
			if (!strict &&
				cc->nr_migratepages <= cc->nr_freepages) {
				blockpfn += isolated;
				break;
			}

			blockpfn += isolated - 1;
			cursor += isolated - 1;
			continue;
		}

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	/*
	 * The PageCompound() check above can advance blockpfn past end_pfn,
	 * so clamp it to keep the reported scan position within the block.
	 */
	if (unlikely(blockpfn > end_pfn))
		blockpfn = end_pfn;

	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
					nr_scanned, total_isolated);

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause function to
 * undo its actions and return zero.
 *
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in a middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_start_pfn = pfn & ~(pageblock_nr_pages - 1);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);

	for (; pfn < end_pfn; pfn += isolated,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		block_end_pfn = min(block_end_pfn, end_pfn);

		/*
		 * pfn could pass the block_end_pfn if isolated freepage
		 * is more than pageblock order. In this case, we adjust
		 * scanning range to right one.
		 */
		if (pfn >= block_end_pfn) {
			block_start_pfn = pfn & ~(pageblock_nr_pages - 1);
			block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
			block_end_pfn = min(block_end_pfn, end_pfn);
		}

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
						block_end_pfn, &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n.  (Max order
		 * page may span two pageblocks).
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	if (list_empty(&cc->migratepages))
		return;

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
	mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}

/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN to isolate
 * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
 * @isolate_mode: Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
 * Returns zero if there is a fatal signal pending, otherwise PFN of the
 * first page that was not scanned (which may be both less, equal to or more
 * than end_pfn).
 *
 * The pages are isolated on cc->migratepages list (not required to be empty),
 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
 * is neither read nor updated.
 */
static unsigned long
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t isolate_mode)
{
	struct zone *zone = cc->zone;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;
	unsigned long start_pfn = low_pfn;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	if (compact_should_abort(cc))
		return 0;

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {
		bool is_lru;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort async compaction
		 * if contended.
		 */
		if (!(low_pfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&zone->lru_lock, flags,
								&locked, cc))
			break;

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		page = pfn_to_page(low_pfn);

		if (!valid_page)
			valid_page = page;

		/*
		 * Skip if free. We read page order here without zone lock
		 * which is generally unsafe, but the race window is small and
		 * the worst thing that can happen is that we skip some
		 * potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order < MAX_ORDER)
				low_pfn += (1UL << freepage_order) - 1;
			continue;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU pages and balloon pages
		 * for example.
		 */
		is_lru = PageLRU(page);
		if (!is_lru) {
			if (unlikely(balloon_page_movable(page))) {
				if (balloon_page_isolate(page)) {
					/* Successfully isolated */
					goto isolate_success;
				}
			}
		}

		/*
		 * Regardless of being on LRU, compound pages such as THP and
		 * hugetlbfs are not to be compacted. We can potentially save
		 * a lot of iterations if we skip them at once. The check is
		 * racy, but we can consider only valid values and the only
		 * danger is skipping too much.
		 */
		if (PageCompound(page)) {
			unsigned int comp_order = compound_order(page);

			if (likely(comp_order < MAX_ORDER))
				low_pfn += (1UL << comp_order) - 1;

			continue;
		}

		if (!is_lru)
			continue;

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
		    page_count(page) > page_mapcount(page))
			continue;

		/* If we already hold the lock, we can skip some rechecking */
		if (!locked) {
			locked = compact_trylock_irqsave(&zone->lru_lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck PageLRU and PageCompound under lock */
			if (!PageLRU(page))
				continue;

			/*
			 * Page became compound since the non-locked check,
			 * and it's on LRU. It can only be a THP so the order
			 * is safe to read and it's 0 for tail pages.
			 */
			if (unlikely(PageCompound(page))) {
				low_pfn += (1UL << compound_order(page)) - 1;
				continue;
			}
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try isolate the page */
		if (__isolate_lru_page(page, isolate_mode) != 0)
			continue;

		VM_BUG_ON_PAGE(PageCompound(page), page);

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));

isolate_success:
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}
	}

	/*
	 * The PageBuddy() check could have potentially brought us outside
	 * the range to be scanned.
	 */
	if (unlikely(low_pfn > end_pfn))
		low_pfn = end_pfn;

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	/*
	 * Update the pageblock-skip information and cached scanner pfn,
	 * if the whole pageblock was scanned without isolating any page.
	 */
	if (low_pfn == end_pfn)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
						nr_scanned, nr_isolated);

	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}

/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Returns zero if isolation fails fatally due to e.g. pending signal.
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in a middle of a THP page).
 */
unsigned long
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
							unsigned long end_pfn)
{
	unsigned long pfn, block_start_pfn, block_end_pfn;

	/* Scan block by block. First and last block may be incomplete */
	pfn = start_pfn;
	block_start_pfn = pfn & ~(pageblock_nr_pages - 1);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);

	for (; pfn < end_pfn; pfn = block_end_pfn,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			continue;

		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
							ISOLATE_UNEVICTABLE);

		if (!pfn)
			break;

		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
			break;
	}
	acct_isolated(cc->zone, cc);

	return pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION

/* Returns whether the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page)) {
		/*
		 * We are checking page_order without zone->lock taken. But
		 * the only small danger is that we skip a potentially suitable
		 * pageblock, so it's not worth to check order for valid range.
		 */
		if (page_order_unsafe(page) >= pageblock_order)
			return false;
	}

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Test whether the free scanner has reached the same or lower pageblock than
 * the migration scanner, and compaction should thus terminate.
 */
static inline bool compact_scanners_met(struct compact_control *cc)
{
	return (cc->free_pfn >> pageblock_order)
		<= (cc->migrate_pfn >> pageblock_order);
}

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct compact_control *cc)
{
	struct zone *zone = cc->zone;
	struct page *page;
	unsigned long block_start_pfn;	/* start of current pageblock */
	unsigned long isolate_start_pfn; /* exact pfn we start at */
	unsigned long block_end_pfn;	/* end of current pageblock */
	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, zone-cached value, or the end of the
	 * zone when isolating for the first time. For looping we also need
	 * this pfn aligned down to the pageblock boundary, because we do
	 * block_start_pfn -= pageblock_nr_pages in the for loop.
	 * For ending point, take care when isolating in last pageblock of a
	 * zone which ends in the middle of a pageblock.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
	isolate_start_pfn = cc->free_pfn;
	block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
						zone_end_pfn(zone));
	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; block_start_pfn >= low_pfn;
				block_end_pfn = block_start_pfn,
				block_start_pfn -= pageblock_nr_pages,
				isolate_start_pfn = block_start_pfn) {

		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check if we need
		 * to schedule, or even abort async compaction.
		 */
		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
		if (!page)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from. */
		isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, freelist, false);

		/*
		 * If we isolated enough freepages, or aborted due to async
		 * compaction being contended, terminate the loop.
		 * Remember where the free scanner should restart next time,
		 * which is where isolate_freepages_block() left off.
		 * But if it scanned the whole pageblock, isolate_start_pfn
		 * now points at block_end_pfn, which is the start of the next
		 * pageblock.
		 * In that case we will however want to restart at the start
		 * of the previous pageblock.
		 */
		if ((cc->nr_freepages >= cc->nr_migratepages)
							|| cc->contended) {
			if (isolate_start_pfn >= block_end_pfn)
				isolate_start_pfn =
					block_start_pfn - pageblock_nr_pages;
			break;
		} else {
			/*
			 * isolate_freepages_block() should not terminate
			 * prematurely unless contended, or isolated enough
			 */
			VM_BUG_ON(isolate_start_pfn < block_end_pfn);
		}
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	/*
	 * Record where the free scanner will restart next time. Either we
	 * broke from the loop and set isolate_start_pfn based on the last
	 * isolation, or we retreated to the start of the previous pageblock
	 * because the whole block was scanned.
	 */
	cc->free_pfn = isolate_start_pfn;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/*
	 * Isolate free pages if necessary, and if we are not aborting due to
	 * contention.
	 */
	if (list_empty(&cc->freepages)) {
		if (!cc->contended)
			isolate_freepages(cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * This is a migrate-callback that "frees" freepages back to the isolated
 * freelist.  All pages on the freelist are from the same zone, so there is no
 * special handling needed for NUMA.
 */
static void compaction_free(struct page *page, unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;

	list_add(&page->lru, &cc->freepages);
	cc->nr_freepages++;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Allow userspace to control policy on scanning the unevictable LRU for
 * compactable pages.
 */
int sysctl_compact_unevictable_allowed __read_mostly = 1;

/*
 * Isolate all pages that can be migrated from the first suitable block,
 * starting at the block pointed to by the migrate scanner pfn within
 * compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long block_start_pfn;
	unsigned long block_end_pfn;
	unsigned long low_pfn;
	unsigned long isolate_start_pfn;
	struct page *page;
	const isolate_mode_t isolate_mode =
		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
		(cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);

	/*
	 * Start at where we last stopped, or beginning of the zone as
	 * initialized by compact_zone()
	 */
	low_pfn = cc->migrate_pfn;
	block_start_pfn = cc->migrate_pfn & ~(pageblock_nr_pages - 1);
	if (block_start_pfn < zone->zone_start_pfn)
		block_start_pfn = zone->zone_start_pfn;

	/* Only scan within a pageblock boundary */
	block_end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);

	/*
	 * Iterate over whole pageblocks until we find the first suitable.
	 * Do not cross the free scanner.
	 */
	for (; block_end_pfn <= cc->free_pfn;
			low_pfn = block_end_pfn,
			block_start_pfn = block_end_pfn,
			block_end_pfn += pageblock_nr_pages) {

		/*
		 * This can potentially iterate a massively long zone with
		 * many pageblocks unsuitable, so periodically check if we
		 * need to schedule, or even abort async compaction.
		 */
		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
		if (!page)
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/*
		 * For async compaction, also only scan in MOVABLE blocks.
		 * Async compaction is optimistic to see if the minimum amount
		 * of work satisfies the allocation.
		 */
		if (cc->mode == MIGRATE_ASYNC &&
		    !migrate_async_suitable(get_pageblock_migratetype(page)))
			continue;

		/* Perform the isolation */
		isolate_start_pfn = low_pfn;
		low_pfn = isolate_migratepages_block(cc, low_pfn,
						block_end_pfn, isolate_mode);

		if (!low_pfn || cc->contended) {
			acct_isolated(zone, cc);
			return ISOLATE_ABORT;
		}

		/*
		 * Record where we could have freed pages by migration and not
		 * yet flushed them to buddy allocator.
		 * - this is the lowest page that could have been isolated and
		 * then freed by migration.
		 */
		if (cc->nr_migratepages && !cc->last_migrated_pfn)
			cc->last_migrated_pfn = isolate_start_pfn;

		/*
		 * Either we isolated something and proceed with migration. Or
		 * we failed and compact_zone should decide if we should
		 * continue or not.
		 */
		break;
	}

	acct_isolated(zone, cc);
	/* Record where migration scanner will be restarted. */
	cc->migrate_pfn = low_pfn;

	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
}

/*
 * order == -1 is expected when compacting via
 * /proc/sys/vm/compact_memory
 */
static inline bool is_via_compact_memory(int order)
{
	return order == -1;
}

static int __compact_finished(struct zone *zone, struct compact_control *cc,
			    const int migratetype)
{
	unsigned int order;
	unsigned long watermark;

	if (cc->contended || fatal_signal_pending(current))
		return COMPACT_CONTENDED;

	/* Compaction run completes if the migrate and free scanner meet */
	if (compact_scanners_met(cc)) {
		/* Let the next compaction start anew. */
		reset_cached_positions(zone);

		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kcompactd does not set the
		 * flag itself as the decision to be clear should be directly
		 * based on an allocation request.
		 */
		if (cc->direct_compaction)
			zone->compact_blockskip_flush = true;

		return COMPACT_COMPLETE;
	}

	if (is_via_compact_memory(cc->order))
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);

	if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx,
							cc->alloc_flags))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		struct free_area *area = &zone->free_area[order];
		bool can_steal;

		/* Job done if page is free of the right migratetype */
		if (!list_empty(&area->free_list[migratetype]))
			return COMPACT_PARTIAL;

#ifdef CONFIG_CMA
		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
		if (migratetype == MIGRATE_MOVABLE &&
			!list_empty(&area->free_list[MIGRATE_CMA]))
			return COMPACT_PARTIAL;
#endif
		/*
		 * Job done if allocation would steal freepages from
		 * other migratetype buddy lists.
		 */
		if (find_suitable_fallback(area, order, migratetype,
						true, &can_steal) != -1)
			return COMPACT_PARTIAL;
	}

	return COMPACT_NO_SUITABLE_PAGE;
}
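/*
 * Trace-emitting wrapper for __compact_finished(); the "no suitable page"
 * result is folded into COMPACT_CONTINUE for callers.
 */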
static int compact_finished(struct zone *zone, struct compact_control *cc,
			    const int migratetype)
{
	int ret;

	ret = __compact_finished(zone, cc, migratetype);
	trace_mm_compaction_finished(zone, cc->order, ret);
	if (ret == COMPACT_NO_SUITABLE_PAGE)
		ret = COMPACT_CONTINUE;

	return ret;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
static unsigned long __compaction_suitable(struct zone *zone, int order,
					int alloc_flags, int classzone_idx)
{
	int fragindex;
	unsigned long watermark;

	if (is_via_compact_memory(order))
		return COMPACT_CONTINUE;

	watermark = low_wmark_pages(zone);
	/*
	 * If watermarks for high-order allocation are already met, there
	 * should be no need for compaction at all.
	 */
	if (zone_watermark_ok(zone, order, watermark, classzone_idx,
								alloc_flags))
		return COMPACT_PARTIAL;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark += (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, classzone_idx, alloc_flags))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 would imply allocations might succeed depending on
	 * watermarks, but we already failed the high-order watermark check
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_NOT_SUITABLE_ZONE;

	return COMPACT_CONTINUE;
}

unsigned long compaction_suitable(struct zone *zone, int order,
					int alloc_flags, int classzone_idx)
{
	unsigned long ret;

	ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx);
	trace_mm_compaction_suitable(zone, order, ret);
	if (ret == COMPACT_NOT_SUITABLE_ZONE)
		ret = COMPACT_SKIPPED;

	return ret;
}

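/*
 * Main per-zone compaction loop: run the migrate and free scanners towards
 * each other, migrating pages until compact_finished() says we are done.
 */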
static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
	const bool sync = cc->mode != MIGRATE_ASYNC;

	ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
							cc->classzone_idx);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/*
	 * Clear pageblock skip if there were failures recently and compaction
	 * is about to be retried after being deferred.
	 */
	if (compaction_restarting(zone, cc->order))
		__reset_isolation_suitable(zone);

	/*
	 * Setup to move all movable pages to the end of the zone. Used cached
	 * information on where the scanners should start but check that it
	 * is initialised by ensuring the values are within zone boundaries.
	 */
	cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
	cc->free_pfn = zone->compact_cached_free_pfn;
	if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
		cc->free_pfn = round_down(end_pfn - 1, pageblock_nr_pages);
		zone->compact_cached_free_pfn = cc->free_pfn;
	}
	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
		cc->migrate_pfn = start_pfn;
		zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
		zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
	}
	cc->last_migrated_pfn = 0;

	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
				cc->free_pfn, end_pfn, sync);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc, migratetype)) ==
						COMPACT_CONTINUE) {
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_CONTENDED;
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			/*
			 * We haven't isolated and migrated anything, but
			 * pages freed by earlier migrations may still sit in
			 * per-cpu lists, so fall through to the drain check
			 * below.
			 */
			goto check_drain;
		case ISOLATE_SUCCESS:
			;
		}

		err = migrate_pages(&cc->migratepages, compaction_alloc,
				compaction_free, (unsigned long)cc, cc->mode,
				MR_COMPACTION);

		trace_mm_compaction_migratepages(cc->nr_migratepages, err,
							&cc->migratepages);

		/* All pages were either migrated or will be released */
		cc->nr_migratepages = 0;
		if (err) {
			putback_movable_pages(&cc->migratepages);
			/*
			 * migrate_pages() may return -ENOMEM when scanners meet
			 * and we want compact_finished() to detect it
			 */
			if (err == -ENOMEM && !compact_scanners_met(cc)) {
				ret = COMPACT_CONTENDED;
				goto out;
			}
		}

check_drain:
		/*
		 * Has the migration scanner moved away from the previous
		 * cc->order aligned block where we migrated from? If yes,
		 * flush the pages that were freed, so that they can merge and
		 * compact_finished() can detect immediately if allocation
		 * would succeed.
		 */
		if (cc->order > 0 && cc->last_migrated_pfn) {
			int cpu;
			unsigned long current_block_start =
				cc->migrate_pfn & ~((1UL << cc->order) - 1);

			if (cc->last_migrated_pfn < current_block_start) {
				cpu = get_cpu();
				lru_add_drain_cpu(cpu);
				drain_local_pages(zone);
				put_cpu();
				/* No more flushing until we migrate again */
				cc->last_migrated_pfn = 0;
			}
		}

	}

out:
	/*
	 * Release free pages and update where the free scanner should restart,
	 * so we don't leave any returned pages behind in the next attempt.
	 */
	if (cc->nr_freepages > 0) {
		unsigned long free_pfn = release_freepages(&cc->freepages);

		cc->nr_freepages = 0;
		VM_BUG_ON(free_pfn == 0);
		/* The cached pfn is always the first in a pageblock */
		free_pfn &= ~(pageblock_nr_pages-1);
		/*
		 * Only go back, not forward. The cached pfn might have been
		 * already reset to zone end in compact_finished()
		 */
		if (free_pfn > zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = free_pfn;
	}

	trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
				cc->free_pfn, end_pfn, sync, ret);

	if (ret == COMPACT_CONTENDED)
		ret = COMPACT_PARTIAL;

	return ret;
}

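/* Set up compact_control for direct compaction of one zone at given order */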
static unsigned long compact_zone_order(struct zone *zone, int order,
		gfp_t gfp_mask, enum migrate_mode mode, int *contended,
		int alloc_flags, int classzone_idx)
{
	unsigned long ret;
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.gfp_mask = gfp_mask,
		.zone = zone,
		.mode = mode,
		.alloc_flags = alloc_flags,
		.classzone_idx = classzone_idx,
		.direct_compaction = true,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	ret = compact_zone(zone, &cc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	*contended = cc.contended;
	return ret;
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @order: The order of the current allocation
 * @alloc_flags: The allocation flags of the current allocation
 * @ac: The context of current allocation
 * @mode: The migration mode for async, sync light, or sync migration
 * @contended: Return value that determines if compaction was aborted due to
 *	       need_resched() or lock contention
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
			int alloc_flags, const struct alloc_context *ac,
			enum migrate_mode mode, int *contended)
{
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_DEFERRED;
	int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */

	*contended = COMPACT_CONTENDED_NONE;

	/* Check if the GFP flags allow compaction */
	if (!order || !may_enter_fs || !may_perform_io)
		return COMPACT_SKIPPED;

	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode);

	/* Compact each zone in the zonelist */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
								ac->nodemask) {
		int status;
		int zone_contended;

		if (compaction_deferred(zone, order))
			continue;

		status = compact_zone_order(zone, order, gfp_mask, mode,
				&zone_contended, alloc_flags,
				ac->classzone_idx);
		rc = max(status, rc);
		/*
		 * It takes at least one zone that wasn't lock contended
		 * to clear all_zones_contended.
		 */
		all_zones_contended &= zone_contended;

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
					ac->classzone_idx, alloc_flags)) {
			/*
			 * We think the allocation will succeed in this zone,
			 * but it is not certain, hence the false. The caller
			 * will repeat this with true if allocation attempt
			 * fails, because it is likely that compaction has
			 * enough work to do.
			 */
			compaction_defer_reset(zone, order, false);
			/*
			 * It is possible that async compaction aborted due to
			 * need_resched() and the watermarks were ok thanks to
			 * somebody else freeing memory. The allocation can
			 * however still fail so we better signal the
			 * need_resched() contention anyway (this will not
			 * prevent the allocation attempt).
			 */
			if (zone_contended == COMPACT_CONTENDED_SCHED)
				*contended = COMPACT_CONTENDED_SCHED;

			goto break_loop;
		}

		if (mode != MIGRATE_ASYNC && status == COMPACT_COMPLETE) {
			/*
			 * We think that allocation won't succeed in this zone
			 * so we defer compaction there. If it ends up
			 * succeeding after all, it will be reset.
			 */
			defer_compaction(zone, order);
		}

		/*
		 * We might have stopped compacting due to need_resched() in
		 * async compaction, or due to a fatal signal detected. In that
		 * case do not try further zones and signal need_resched()
		 * contention.
		 */
		if ((zone_contended == COMPACT_CONTENDED_SCHED)
					|| fatal_signal_pending(current)) {
			*contended = COMPACT_CONTENDED_SCHED;
			goto break_loop;
		}

		continue;
break_loop:
		/*
		 * We might not have tried all the zones, so be conservative
		 * and assume they are not all lock contended.
		 */
		all_zones_contended = 0;
		break;
	}

	/*
	 * If at least one zone wasn't deferred or skipped, we report if all
	 * zones that were tried were lock contended.
	 */
	if (rc > COMPACT_SKIPPED && all_zones_contended)
		*contended = COMPACT_CONTENDED_LOCK;

	return rc;
}


/* Compact all zones within a node */
static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		/*
		 * When called via /proc/sys/vm/compact_memory
		 * this makes sure we compact the whole zone regardless of
		 * cached scanner positions.
		 */
		if (is_via_compact_memory(cc->order))
			__reset_isolation_suitable(zone);

		if (is_via_compact_memory(cc->order) ||
				!compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));

		if (is_via_compact_memory(cc->order))
			continue;

		if (zone_watermark_ok(zone, cc->order,
				low_wmark_pages(zone), 0, 0))
			compaction_defer_reset(zone, cc->order, false);
	}
}

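/* Asynchronously compact all zones of a node for the given order; a zero
 * order is a no-op. */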
void compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.mode = MIGRATE_ASYNC,
	};

	if (!order)
		return;

	__compact_pgdat(pgdat, &cc);
}

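/* Fully compact one node via sync migration, ignoring pageblock skip hints */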
static void compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
	};

	__compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/*
 * This is the entry point for compacting all nodes via
 * /proc/sys/vm/compact_memory
 */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		compact_nodes();

	return 0;
}

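/* sysctl handler for /proc/sys/vm/extfrag_threshold */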
int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
static ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

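/*
 * True when kcompactd has pending work (a wakeup recorded a nonzero order)
 * or the thread is being asked to stop; used as the wait condition.
 */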
static inline bool kcompactd_work_requested(pg_data_t *pgdat)
{
	return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
}

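/*
 * Check whether at least one zone up to the requested classzone is currently
 * suitable for compaction at the recorded order, so a wakeup is worthwhile.
 */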
static bool kcompactd_node_suitable(pg_data_t *pgdat)
{
	int zoneid;
	struct zone *zone;
	enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;

	for (zoneid = 0; zoneid <= classzone_idx; zoneid++) {
		zone = &pgdat->node_zones[zoneid];

		if (!populated_zone(zone))
			continue;

		if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
					classzone_idx) == COMPACT_CONTINUE)
			return true;
	}

	return false;
}

static void kcompactd_do_work(pg_data_t *pgdat)
{
	/*
	 * With no special task, compact all zones so that a page of requested
	 * order is allocatable.
	 */
	int zoneid;
	struct zone *zone;
	struct compact_control cc = {
		.order = pgdat->kcompactd_max_order,
		.classzone_idx = pgdat->kcompactd_classzone_idx,
		.mode = MIGRATE_SYNC_LIGHT,
		.ignore_skip_hint = true,
	};
	bool success = false;

	trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
							cc.classzone_idx);
	count_vm_event(KCOMPACTD_WAKE);

	for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) {
		int status;

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		if (compaction_deferred(zone, cc.order))
			continue;

		if (compaction_suitable(zone, cc.order, 0, zoneid) !=
							COMPACT_CONTINUE)
			continue;

		cc.nr_freepages = 0;
		cc.nr_migratepages = 0;
		cc.zone = zone;
		INIT_LIST_HEAD(&cc.freepages);
		INIT_LIST_HEAD(&cc.migratepages);

		if (kthread_should_stop())
			return;
		status = compact_zone(zone, &cc);

		if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone),
						cc.classzone_idx, 0)) {
			success = true;
			compaction_defer_reset(zone, cc.order, false);
		} else if (status == COMPACT_COMPLETE) {
			/*
			 * We use sync migration mode here, so we defer like
			 * sync direct compaction does.
			 */
			defer_compaction(zone, cc.order);
		}

		VM_BUG_ON(!list_empty(&cc.freepages));
		VM_BUG_ON(!list_empty(&cc.migratepages));
	}

	/*
	 * Regardless of success, we are done until woken up next. But remember
	 * the requested order/classzone_idx in case it was higher/tighter than
	 * our current ones
	 */
	if (pgdat->kcompactd_max_order <= cc.order)
		pgdat->kcompactd_max_order = 0;
	if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
		pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
}

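/*
 * Record the requested order/classzone and wake kcompactd if it is sleeping
 * and the node looks suitable for compaction.
 */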
void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
{
	if (!order)
		return;

	if (pgdat->kcompactd_max_order < order)
		pgdat->kcompactd_max_order = order;

	if (pgdat->kcompactd_classzone_idx > classzone_idx)
		pgdat->kcompactd_classzone_idx = classzone_idx;

	if (!waitqueue_active(&pgdat->kcompactd_wait))
		return;

	if (!kcompactd_node_suitable(pgdat))
		return;

	trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
							classzone_idx);
	wake_up_interruptible(&pgdat->kcompactd_wait);
}

/*
 * The background compaction daemon, started as a kernel thread
 * from the init process.
 */
static int kcompactd(void *p)
{
	pg_data_t *pgdat = (pg_data_t*)p;
	struct task_struct *tsk = current;

	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);

	set_freezable();

	pgdat->kcompactd_max_order = 0;
	pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;

	while (!kthread_should_stop()) {
		trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
		wait_event_freezable(pgdat->kcompactd_wait,
				kcompactd_work_requested(pgdat));

		kcompactd_do_work(pgdat);
	}

	return 0;
}

/*
 * This kcompactd start function will be called by init and node hot-add.
 * On node hot-add, kcompactd will be moved to the proper cpus if cpus are
 * hot-added.
 */
int kcompactd_run(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int ret = 0;

	if (pgdat->kcompactd)
		return 0;

	pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
	if (IS_ERR(pgdat->kcompactd)) {
		pr_err("Failed to start kcompactd on node %d\n", nid);
		ret = PTR_ERR(pgdat->kcompactd);
		pgdat->kcompactd = NULL;
	}
	return ret;
}

/*
 * Called by memory hotplug when all memory in a node is offlined. Caller must
 * hold mem_hotplug_begin/end().
 */
void kcompactd_stop(int nid)
{
	struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;

	if (kcompactd) {
		kthread_stop(kcompactd);
		NODE_DATA(nid)->kcompactd = NULL;
	}
}

/*
 * It's optimal to keep kcompactd threads on the same CPUs as their memory,
 * but not required for correctness. So if the last cpu in a node goes away,
 * we get changed to run anywhere: as the first one comes back, restore their
 * cpu bindings.
 */
static int cpu_callback(struct notifier_block *nfb, unsigned long action,
			void *hcpu)
{
	int nid;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
		for_each_node_state(nid, N_MEMORY) {
			pg_data_t *pgdat = NODE_DATA(nid);
			const struct cpumask *mask;

			mask = cpumask_of_node(pgdat->node_id);

			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
				/* One of our CPUs is back online, rebind */
				set_cpus_allowed_ptr(pgdat->kcompactd, mask);
		}
	}
	return NOTIFY_OK;
}

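/*
 * Start a kcompactd thread for each memory node and register the CPU-hotplug
 * notifier that restores their cpu bindings.
 */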
static int __init kcompactd_init(void)
{
	int nid;

	for_each_node_state(nid, N_MEMORY)
		kcompactd_run(nid);
	hotcpu_notifier(cpu_callback, 0);
	return 0;
}
subsys_initcall(kcompactd_init)

#endif /* CONFIG_COMPACTION */