// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/cpu.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/sched/signal.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/page_owner.h>
#include <linux/psi.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

#define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
#define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
#define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)

/*
 * Fragmentation score check interval for proactive compaction purposes.
 */
static const unsigned int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500;

/*
 * Page order with-respect-to which proactive compaction
 * calculates external fragmentation, which is used as
 * the "fragmentation score" of a node/zone.
 */
#if defined CONFIG_TRANSPARENT_HUGEPAGE
#define COMPACTION_HPAGE_ORDER	HPAGE_PMD_ORDER
#elif defined CONFIG_HUGETLBFS
#define COMPACTION_HPAGE_ORDER	HUGETLB_PAGE_ORDER
#else
#define COMPACTION_HPAGE_ORDER	(PMD_SHIFT - PAGE_SHIFT)
#endif

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);
		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}

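/*
 * Run the allocator's post-allocation hook on each isolated high-order free
 * page, split it into order-0 pages and put all resulting pages back on the
 * list.
 */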
static void split_map_pages(struct list_head *list)
{
	unsigned int i, order, nr_pages;
	struct page *page, *next;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);

		order = page_private(page);
		nr_pages = 1 << order;

		post_alloc_hook(page, order, __GFP_MOVABLE);
		if (order)
			split_page(page, order);

		for (i = 0; i < nr_pages; i++) {
			list_add(&page->lru, &tmp_list);
			page++;
		}
	}

	list_splice(&tmp_list, list);
}

#ifdef CONFIG_COMPACTION

int PageMovable(struct page *page)
{
	struct address_space *mapping;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (!__PageMovable(page))
		return 0;

	mapping = page_mapping(page);
	if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
		return 1;

	return 0;
}
EXPORT_SYMBOL(PageMovable);

void __SetPageMovable(struct page *page, struct address_space *mapping)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
	page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__SetPageMovable);

void __ClearPageMovable(struct page *page)
{
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	/*
	 * Clear registered address_space val with keeping PAGE_MAPPING_MOVABLE
	 * flag so that VM can catch up released page by driver after isolation.
	 * With it, driver can get the page status without lock.
	 */
	page->mapping = (void *)((unsigned long)page->mapping &
				PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__ClearPageMovable);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift, compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
 */
static void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;

	trace_mm_compaction_defer_compaction(zone, order);
}

/* Returns true if compaction should be skipped this time */
static bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered >= defer_limit) {
		zone->compact_considered = defer_limit;
		return false;
	}

	trace_mm_compaction_deferred(zone, order);

	return true;
}

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;

	trace_mm_compaction_defer_reset(zone, order);
}

/* Returns true if restarting compaction after many failures */
static bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}

/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

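/* Reset the cached scanner positions to the opposite ends of the zone. */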
static void reset_cached_positions(struct zone *zone)
{
	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
	zone->compact_cached_free_pfn =
				pageblock_start_pfn(zone_end_pfn(zone) - 1);
}

/*
 * Compound pages of >= pageblock_order should consistently be skipped until
 * released. It is always pointless to compact pages of such order (if they are
 * migratable), and the pageblocks they occupy cannot contain any free pages.
 */
static bool pageblock_skip_persistent(struct page *page)
{
	if (!PageCompound(page))
		return false;

	page = compound_head(page);

	if (compound_order(page) >= pageblock_order)
		return true;

	return false;
}

static bool
__reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
							bool check_target)
{
	struct page *page = pfn_to_online_page(pfn);
	struct page *block_page;
	struct page *end_page;
	unsigned long block_pfn;

	if (!page)
		return false;
	if (zone != page_zone(page))
		return false;
	if (pageblock_skip_persistent(page))
		return false;

	/*
	 * If skip is already cleared do no further checking once the
	 * restart points have been set.
	 */
	if (check_source && check_target && !get_pageblock_skip(page))
		return true;

	/*
	 * If clearing skip for the target scanner, do not select a
	 * non-movable pageblock as the starting point.
	 */
	if (!check_source && check_target &&
	    get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
		return false;

	/* Ensure the start of the pageblock or zone is online and valid */
	block_pfn = pageblock_start_pfn(pfn);
	block_pfn = max(block_pfn, zone->zone_start_pfn);
	block_page = pfn_to_online_page(block_pfn);
	if (block_page) {
		page = block_page;
		pfn = block_pfn;
	}

	/* Ensure the end of the pageblock or zone is online and valid */
	block_pfn = pageblock_end_pfn(pfn) - 1;
	block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
	end_page = pfn_to_online_page(block_pfn);
	if (!end_page)
		return false;

	/*
	 * Only clear the hint if a sample indicates there is either a
	 * free page or an LRU page in the block. One or other condition
	 * is necessary for the block to be a migration source/target.
	 */
	do {
		if (check_source && PageLRU(page)) {
			clear_pageblock_skip(page);
			return true;
		}

		if (check_target && PageBuddy(page)) {
			clear_pageblock_skip(page);
			return true;
		}

		page += (1 << PAGE_ALLOC_COSTLY_ORDER);
		pfn += (1 << PAGE_ALLOC_COSTLY_ORDER);
	} while (page <= end_page);

	return false;
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long migrate_pfn = zone->zone_start_pfn;
	unsigned long free_pfn = zone_end_pfn(zone) - 1;
	unsigned long reset_migrate = free_pfn;
	unsigned long reset_free = migrate_pfn;
	bool source_set = false;
	bool free_set = false;

	if (!zone->compact_blockskip_flush)
		return;

	zone->compact_blockskip_flush = false;

	/*
	 * Walk the zone and update pageblock skip information. Source looks
	 * for PageLRU while target looks for PageBuddy. When the scanner
	 * is found, both PageBuddy and PageLRU are checked as the pageblock
	 * is suitable as both source and target.
	 */
	for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages,
					free_pfn -= pageblock_nr_pages) {
		cond_resched();

		/* Update the migrate PFN */
		if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) &&
		    migrate_pfn < reset_migrate) {
			source_set = true;
			reset_migrate = migrate_pfn;
			zone->compact_init_migrate_pfn = reset_migrate;
			zone->compact_cached_migrate_pfn[0] = reset_migrate;
			zone->compact_cached_migrate_pfn[1] = reset_migrate;
		}

		/* Update the free PFN */
		if (__reset_isolation_pfn(zone, free_pfn, free_set, true) &&
		    free_pfn > reset_free) {
			free_set = true;
			reset_free = free_pfn;
			zone->compact_init_free_pfn = reset_free;
			zone->compact_cached_free_pfn = reset_free;
		}
	}

	/* Leave no distance if no suitable block was reset */
	if (reset_migrate >= reset_free) {
		zone->compact_cached_migrate_pfn[0] = migrate_pfn;
		zone->compact_cached_migrate_pfn[1] = migrate_pfn;
		zone->compact_cached_free_pfn = free_pfn;
	}
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * Sets the pageblock skip bit if it was clear. Note that this is a hint as
 * locks are not required for read/writers. Returns true if it was already set.
 */
static bool test_and_set_skip(struct compact_control *cc, struct page *page,
							unsigned long pfn)
{
	bool skip;

	/* Do no update if skip hint is being ignored */
	if (cc->ignore_skip_hint)
		return false;

	if (!IS_ALIGNED(pfn, pageblock_nr_pages))
		return false;

	skip = get_pageblock_skip(page);
	if (!skip && !cc->no_set_skip_hint)
		set_pageblock_skip(page);

	return skip;
}

static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
	struct zone *zone = cc->zone;

	pfn = pageblock_end_pfn(pfn);

	/* Set for isolation rather than compaction */
	if (cc->no_set_skip_hint)
		return;

	if (pfn > zone->compact_cached_migrate_pfn[0])
		zone->compact_cached_migrate_pfn[0] = pfn;
	if (cc->mode != MIGRATE_ASYNC &&
	    pfn > zone->compact_cached_migrate_pfn[1])
		zone->compact_cached_migrate_pfn[1] = pfn;
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long pfn)
{
	struct zone *zone = cc->zone;

	if (cc->no_set_skip_hint)
		return;

	if (!page)
		return;

	set_pageblock_skip(page);

	/* Update where async and sync compaction should restart */
	if (pfn < zone->compact_cached_free_pfn)
		zone->compact_cached_free_pfn = pfn;
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static inline bool pageblock_skip_persistent(struct page *page)
{
	return false;
}

static inline void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long pfn)
{
}

static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
}

static bool test_and_set_skip(struct compact_control *cc, struct page *page,
							unsigned long pfn)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, trylock and record if the
 * lock is contended. The lock will still be acquired but compaction will
 * abort when the current block is finished regardless of success rate.
 * Sync compaction acquires the lock.
 *
 * Always returns true which makes it easier to track lock state in callers.
 */
static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
	__acquires(lock)
{
	/* Track if the lock is contended in async mode */
	if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
		if (spin_trylock_irqsave(lock, *flags))
			return true;

		cc->contended = true;
	}

	spin_lock_irqsave(lock, *flags);
	return true;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * needed locks being taken.
 *
 * Returns true if compaction should abort due to fatal signal pending.
 * Returns false when compaction can continue.
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = true;
		return true;
	}

	cond_resched();

	return false;
}

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				unsigned int stride,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;
	unsigned int order;

	/* Strict mode is for isolation, speed is secondary */
	if (strict)
		stride = 1;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn += stride, cursor += stride) {
		int isolated;
		struct page *page = cursor;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort if fatal signal
		 * pending.
		 */
		if (!(blockpfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;

		/*
		 * For compound pages such as THP and hugetlbfs, we can save
		 * potentially a lot of iterations if we skip them at once.
		 * The check is racy, but we can consider only valid values
		 * and the only danger is skipping too much.
		 */
		if (PageCompound(page)) {
			const unsigned int order = compound_order(page);

			if (likely(order < MAX_ORDER)) {
				blockpfn += (1UL << order) - 1;
				cursor += (1UL << order) - 1;
			}
			goto isolate_fail;
		}

		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * If we already hold the lock, we can skip some rechecking.
		 * Note that if we hold the lock now, it was acquired in some
		 * previous iteration (or strict is true), so it is correct to
		 * skip the suitable migration target recheck as well.
		 */
		if (!locked) {
			locked = compact_lock_irqsave(&cc->zone->lock,
								&flags, cc);

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, will break it into order-0 pages */
		order = buddy_order(page);
		isolated = __isolate_free_page(page, order);
		if (!isolated)
			break;
		set_page_private(page, order);

		total_isolated += isolated;
		cc->nr_freepages += isolated;
		list_add_tail(&page->lru, freelist);

		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
			blockpfn += isolated;
			break;
		}

		/* Advance to the end of split page */
		blockpfn += isolated - 1;
		cursor += isolated - 1;
		continue;

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/*
	 * There is a tiny amount of risk a blockpfn is misplaced slightly
	 * beyond the end of the pageblock
	 */
	if (unlikely(blockpfn > end_pfn))
		blockpfn = end_pfn;

	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
					nr_scanned, total_isolated);

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	cc->total_free_scanned += nr_scanned;
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause function to
 * undo its actions and return zero.
 *
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in a middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn += isolated,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		block_end_pfn = min(block_end_pfn, end_pfn);

		/*
		 * pfn could pass the block_end_pfn if isolated freepage
		 * is more than pageblock order. In this case, we adjust
		 * scanning range to right one.
		 */
		if (pfn >= block_end_pfn) {
			block_start_pfn = pageblock_start_pfn(pfn);
			block_end_pfn = pageblock_end_pfn(pfn);
			block_end_pfn = min(block_end_pfn, end_pfn);
		}

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, &freelist, 0, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* __isolate_free_page() does not map the pages */
	split_map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(pg_data_t *pgdat)
{
	unsigned long active, inactive, isolated;

	inactive = node_page_state(pgdat, NR_INACTIVE_FILE) +
			node_page_state(pgdat, NR_INACTIVE_ANON);
	active = node_page_state(pgdat, NR_ACTIVE_FILE) +
			node_page_state(pgdat, NR_ACTIVE_ANON);
	isolated = node_page_state(pgdat, NR_ISOLATED_FILE) +
			node_page_state(pgdat, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}

/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN to isolate
 * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
 * @isolate_mode: Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
 * Returns errno, like -EAGAIN or -EINTR in case e.g signal pending or congestion,
 * -ENOMEM in case we could not allocate a page, or 0.
 * cc->migrate_pfn will contain the next pfn to scan.
 *
 * The pages are isolated on cc->migratepages list (not required to be empty),
 * and cc->nr_migratepages is updated accordingly.
 */
static int
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t isolate_mode)
{
	pg_data_t *pgdat = cc->zone->zone_pgdat;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	struct lruvec *locked = NULL;
	struct page *page = NULL, *valid_page = NULL;
	unsigned long start_pfn = low_pfn;
	bool skip_on_failure = false;
	unsigned long next_skip_pfn = 0;
	bool skip_updated = false;
	int ret = 0;

	cc->migrate_pfn = low_pfn;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(pgdat))) {
		/* stop isolation if there are still pages not migrated */
		if (cc->nr_migratepages)
			return -EAGAIN;

		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return -EAGAIN;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return -EINTR;
	}

	cond_resched();

	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
		skip_on_failure = true;
		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
	}

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {

		if (skip_on_failure && low_pfn >= next_skip_pfn) {
			/*
			 * We have isolated all migration candidates in the
			 * previous order-aligned block, and did not skip it due
			 * to failure. We should migrate the pages now and
			 * hopefully succeed compaction.
			 */
			if (nr_isolated)
				break;

			/*
			 * We failed to isolate in the previous order-aligned
			 * block. Set the new boundary to the end of the
			 * current block. Note we can't simply increase
			 * next_skip_pfn by 1 << order, as low_pfn might have
			 * been incremented by a higher number due to skipping
			 * a compound or a high-order buddy page in the
			 * previous loop iteration.
			 */
			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
		}

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort completely if
		 * a fatal signal is pending.
		 */
		if (!(low_pfn % SWAP_CLUSTER_MAX)) {
			if (locked) {
				unlock_page_lruvec_irqrestore(locked, flags);
				locked = NULL;
			}

			if (fatal_signal_pending(current)) {
				cc->contended = true;
				ret = -EINTR;

				goto fatal_pending;
			}

			cond_resched();
		}

		nr_scanned++;

		page = pfn_to_page(low_pfn);

		/*
		 * Check if the pageblock has already been marked skipped.
		 * Only the aligned PFN is checked as the caller isolates
		 * COMPACT_CLUSTER_MAX at a time so the second call must
		 * not falsely conclude that the block should be skipped.
		 */
		if (!valid_page && IS_ALIGNED(low_pfn, pageblock_nr_pages)) {
			if (!cc->ignore_skip_hint && get_pageblock_skip(page)) {
				low_pfn = end_pfn;
				page = NULL;
				goto isolate_abort;
			}
			valid_page = page;
		}

		if (PageHuge(page) && cc->alloc_contig) {
			ret = isolate_or_dissolve_huge_page(page, &cc->migratepages);

			/*
			 * Fail isolation in case isolate_or_dissolve_huge_page()
			 * reports an error. In case of -ENOMEM, abort right away.
			 */
			if (ret < 0) {
				/* Do not report -EBUSY down the chain */
				if (ret == -EBUSY)
					ret = 0;
				low_pfn += (1UL << compound_order(page)) - 1;
				goto isolate_fail;
			}

			if (PageHuge(page)) {
				/*
				 * Hugepage was successfully isolated and placed
				 * on the cc->migratepages list.
				 */
				low_pfn += compound_nr(page) - 1;
				goto isolate_success_no_list;
			}

			/*
			 * Ok, the hugepage was dissolved. Now these pages are
			 * Buddy and cannot be re-allocated because they are
			 * isolated. Fall-through as the check below handles
			 * Buddy pages.
			 */
		}

		/*
		 * Skip if free. We read page order here without zone lock
		 * which is generally unsafe, but the race window is small and
		 * the worst thing that can happen is that we skip some
		 * potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = buddy_order_unsafe(page);

			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order < MAX_ORDER)
				low_pfn += (1UL << freepage_order) - 1;
			continue;
		}

		/*
		 * Regardless of being on LRU, compound pages such as THP and
		 * hugetlbfs are not to be compacted unless we are attempting
		 * an allocation much larger than the huge page size (eg CMA).
		 * We can potentially save a lot of iterations if we skip them
		 * at once. The check is racy, but we can consider only valid
		 * values and the only danger is skipping too much.
		 */
		if (PageCompound(page) && !cc->alloc_contig) {
			const unsigned int order = compound_order(page);

			if (likely(order < MAX_ORDER))
				low_pfn += (1UL << order) - 1;
			goto isolate_fail;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU and non-lru movable pages.
		 * Skip any other type of page
		 */
		if (!PageLRU(page)) {
			/*
			 * __PageMovable can return false positive so we need
			 * to verify it under page_lock.
			 */
			if (unlikely(__PageMovable(page)) &&
					!PageIsolated(page)) {
				if (locked) {
					unlock_page_lruvec_irqrestore(locked, flags);
					locked = NULL;
				}

				if (!isolate_movable_page(page, isolate_mode))
					goto isolate_success;
			}

			goto isolate_fail;
		}

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
		    page_count(page) > page_mapcount(page))
			goto isolate_fail;

		/*
		 * Only allow to migrate anonymous pages in GFP_NOFS context
		 * because those do not depend on fs locks.
		 */
		if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
			goto isolate_fail;

		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		if (unlikely(!get_page_unless_zero(page)))
			goto isolate_fail;

		if (!__isolate_lru_page_prepare(page, isolate_mode))
			goto isolate_fail_put;

		/* Try isolate the page */
		if (!TestClearPageLRU(page))
			goto isolate_fail_put;

		lruvec = mem_cgroup_page_lruvec(page);

		/* If we already hold the lock, we can skip some rechecking */
		if (lruvec != locked) {
			if (locked)
				unlock_page_lruvec_irqrestore(locked, flags);

			compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
			locked = lruvec;

			lruvec_memcg_debug(lruvec, page);

			/* Try get exclusive access under lock */
			if (!skip_updated) {
				skip_updated = true;
				if (test_and_set_skip(cc, page, low_pfn))
					goto isolate_abort;
			}

			/*
			 * Page become compound since the non-locked check,
			 * and it's on LRU. It can only be a THP so the order
			 * is safe to read and it's 0 for tail pages.
			 */
			if (unlikely(PageCompound(page) && !cc->alloc_contig)) {
				low_pfn += compound_nr(page) - 1;
				SetPageLRU(page);
				goto isolate_fail_put;
			}
		}

		/* The whole page is taken off the LRU; skip the tail pages. */
		if (PageCompound(page))
			low_pfn += compound_nr(page) - 1;

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec);
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				thp_nr_pages(page));

isolate_success:
		list_add(&page->lru, &cc->migratepages);
isolate_success_no_list:
		cc->nr_migratepages += compound_nr(page);
		nr_isolated += compound_nr(page);

		/*
		 * Avoid isolating too much unless this block is being
		 * rescanned (e.g. dirty/writeback pages, parallel allocation)
		 * or a lock is contended. For contention, isolate quickly to
		 * potentially remove one source of contention.
		 */
		if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX &&
		    !cc->rescan && !cc->contended) {
			++low_pfn;
			break;
		}

		continue;

isolate_fail_put:
		/* Avoid potential deadlock in freeing page under lru_lock */
		if (locked) {
			unlock_page_lruvec_irqrestore(locked, flags);
			locked = NULL;
		}
		put_page(page);

isolate_fail:
		if (!skip_on_failure && ret != -ENOMEM)
			continue;

		/*
		 * We have isolated some pages, but then failed. Release them
		 * instead of migrating, as we cannot form the cc->order buddy
		 * page anyway.
		 */
		if (nr_isolated) {
			if (locked) {
				unlock_page_lruvec_irqrestore(locked, flags);
				locked = NULL;
			}
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			nr_isolated = 0;
		}

		if (low_pfn < next_skip_pfn) {
			low_pfn = next_skip_pfn - 1;
			/*
			 * The check near the loop beginning would have updated
			 * next_skip_pfn too, but this is a bit simpler.
			 */
			next_skip_pfn += 1UL << cc->order;
		}

		if (ret == -ENOMEM)
			break;
	}

	/*
	 * The PageBuddy() check could have potentially brought us outside
	 * the range to be scanned.
	 */
	if (unlikely(low_pfn > end_pfn))
		low_pfn = end_pfn;

	page = NULL;

isolate_abort:
	if (locked)
		unlock_page_lruvec_irqrestore(locked, flags);
	if (page) {
		SetPageLRU(page);
		put_page(page);
	}

	/*
	 * Update the cached scanner pfn once the pageblock has been scanned.
	 * Pages will either be migrated in which case there is no point
	 * scanning in the near future, or migration failed in which case the
	 * failure reason may persist. The block is marked for skipping if
	 * there were no pages isolated in the block or if the block is
	 * rescanned twice in a row.
	 */
	if (low_pfn == end_pfn && (!nr_isolated || cc->rescan)) {
		if (valid_page && !skip_updated)
			set_pageblock_skip(valid_page);
		update_cached_migrate(cc, low_pfn);
	}

	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
						nr_scanned, nr_isolated);

fatal_pending:
	cc->total_migrate_scanned += nr_scanned;
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	cc->migrate_pfn = low_pfn;

	return ret;
}

/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Returns -EAGAIN when contended, -EINTR in case of a signal pending, -ENOMEM
 * in case we could not allocate a page, or 0.
 */
int
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
							unsigned long end_pfn)
{
	unsigned long pfn, block_start_pfn, block_end_pfn;
	int ret = 0;

	/* Scan block by block. First and last block may be incomplete */
	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn = block_end_pfn,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			continue;

		ret = isolate_migratepages_block(cc, pfn, block_end_pfn,
						 ISOLATE_UNEVICTABLE);

		if (ret)
			break;

		if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX)
			break;
	}

	return ret;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION

static bool suitable_migration_source(struct compact_control *cc,
							struct page *page)
{
	int block_mt;

	if (pageblock_skip_persistent(page))
		return false;

	if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
		return true;

	block_mt = get_pageblock_migratetype(page);

	if (cc->migratetype == MIGRATE_MOVABLE)
		return is_migrate_movable(block_mt);
	else
		return block_mt == cc->migratetype;
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct compact_control *cc,
							struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page)) {
		/*
		 * We are checking page_order without zone->lock taken. But
		 * the only small danger is that we skip a potentially suitable
		 * pageblock, so it's not worth to check order for valid range.
		 */
		if (buddy_order_unsafe(page) >= pageblock_order)
			return false;
	}

	if (cc->ignore_block_suitable)
		return true;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (is_migrate_movable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

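/*
 * Limit on the number of freelist entries the fast scanners may examine.
 * The limit halves with each recent fast-search failure, so a struggling
 * fast search degrades gracefully towards the linear scanner.
 */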
static inline unsigned int
freelist_scan_limit(struct compact_control *cc)
{
	unsigned short shift = BITS_PER_LONG - 1;

	return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1;
}

/*
 * Test whether the free scanner has reached the same or lower pageblock than
 * the migration scanner, and compaction should thus terminate.
 */
static inline bool compact_scanners_met(struct compact_control *cc)
{
	return (cc->free_pfn >> pageblock_order)
		<= (cc->migrate_pfn >> pageblock_order);
}

/*
 * Used when scanning for a suitable migration target which scans freelists
 * in reverse. Reorders the list such as the unscanned pages are scanned
 * first on the next iteration, starting at a pageblock that was put last.
 */
static void
move_freelist_head(struct list_head *freelist, struct page *freepage)
{
	LIST_HEAD(sublist);

	if (!list_is_last(freelist, &freepage->lru)) {
		list_cut_before(&sublist, freelist, &freepage->lru);
		list_splice_tail(&sublist, freelist);
	}
}

/*
 * Similar to move_freelist_head except used by the migration scanner
 * when scanning forward. It's possible for these list operations to
 * move against each other if they search the free list exactly in
 * lockstep.
 */
static void
move_freelist_tail(struct list_head *freelist, struct page *freepage)
{
	LIST_HEAD(sublist);

	if (!list_is_first(freelist, &freepage->lru)) {
		list_cut_position(&sublist, freelist, &freepage->lru);
		list_splice_tail(&sublist, freelist);
	}
}

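/*
 * After the fast free scanner isolates a target page, opportunistically
 * isolate more free pages from the rest of its pageblock: first the range
 * before the target, then the range after it. Skipped for async direct
 * compaction, and the block is marked for skipping if it yields too little.
 */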
static void
fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated)
{
	unsigned long start_pfn, end_pfn;
	struct page *page;

	/* Do not search around if there are enough pages already */
	if (cc->nr_freepages >= cc->nr_migratepages)
		return;

	/* Minimise scanning during async compaction */
	if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC)
		return;

	/* Pageblock boundaries */
	start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);
	end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone));

	page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone);
	if (!page)
		return;

	/* Scan before */
	if (start_pfn != pfn) {
		isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false);
		if (cc->nr_freepages >= cc->nr_migratepages)
			return;
	}

	/* Scan after */
	start_pfn = pfn + nr_isolated;
	if (start_pfn < end_pfn)
		isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);

	/* Skip this pageblock in the future as it's full or nearly full */
	if (cc->nr_freepages < cc->nr_migratepages)
		set_pageblock_skip(page);
}

/* Search orders in round-robin fashion */
static int next_search_order(struct compact_control *cc, int order)
{
	order--;
	if (order < 0)
		order = cc->order - 1;

	/* Search wrapped around? */
	if (order == cc->search_order) {
		cc->search_order--;
		if (cc->search_order < 0)
			cc->search_order = cc->order - 1;
		return -1;
	}

	return order;
}

static unsigned long
fast_isolate_freepages(struct compact_control *cc)
{
	unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1);
	unsigned int nr_scanned = 0;
	unsigned long low_pfn, min_pfn, highest = 0;
	unsigned long nr_isolated = 0;
	unsigned long distance;
	struct page *page = NULL;
	bool scan_start = false;
	int order;

	/* Full compaction passes in a negative order */
	if (cc->order <= 0)
		return cc->free_pfn;

	/*
	 * If starting the scan, use a deeper search and use the highest
	 * PFN found if a suitable one is not found.
	 */
	if (cc->free_pfn >= cc->zone->compact_init_free_pfn) {
		limit = pageblock_nr_pages >> 1;
		scan_start = true;
	}

	/*
	 * Preferred point is in the top quarter of the scan space but take
	 * a pfn from the top half if the search is problematic.
	 */
	distance = (cc->free_pfn - cc->migrate_pfn);
	low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2));
	min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1));

	if (WARN_ON_ONCE(min_pfn > low_pfn))
		low_pfn = min_pfn;

	/*
	 * Search starts from the last successful isolation order or the next
	 * order to search after a previous failure
	 */
	cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);

	for (order = cc->search_order;
	     !page && order >= 0;
	     order = next_search_order(cc, order)) {
		struct free_area *area = &cc->zone->free_area[order];
		struct list_head *freelist;
		struct page *freepage;
		unsigned long flags;
		unsigned int order_scanned = 0;
		unsigned long high_pfn = 0;

		if (!area->nr_free)
			continue;

		spin_lock_irqsave(&cc->zone->lock, flags);
		freelist = &area->free_list[MIGRATE_MOVABLE];
		list_for_each_entry_reverse(freepage, freelist, lru) {
			unsigned long pfn;

			order_scanned++;
			nr_scanned++;
			pfn = page_to_pfn(freepage);

			if (pfn >= highest)
				highest = max(pageblock_start_pfn(pfn),
					      cc->zone->zone_start_pfn);

			if (pfn >= low_pfn) {
				cc->fast_search_fail = 0;
				cc->search_order = order;
				page = freepage;
				break;
			}

			if (pfn >= min_pfn && pfn > high_pfn) {
				high_pfn = pfn;

				/* Shorten the scan if a candidate is found */
				limit >>= 1;
			}

			if (order_scanned >= limit)
				break;
		}

		/* Use a minimum pfn if a preferred one was not found */
		if (!page && high_pfn) {
			page = pfn_to_page(high_pfn);

			/* Update freepage for the list reorder below */
			freepage = page;
		}

		/* Reorder so a future search skips recent pages */
		move_freelist_head(freelist, freepage);

		/* Isolate the page if available */
		if (page) {
			if (__isolate_free_page(page, order)) {
				set_page_private(page, order);
				nr_isolated = 1 << order;
				cc->nr_freepages += nr_isolated;
				list_add_tail(&page->lru, &cc->freepages);
				count_compact_events(COMPACTISOLATED, nr_isolated);
			} else {
				/* If isolation fails, abort the search */
				order = cc->search_order + 1;
				page = NULL;
			}
		}

		spin_unlock_irqrestore(&cc->zone->lock, flags);

		/*
		 * Smaller scan on next order so the total scan is related
		 * to freelist_scan_limit.
		 */
		if (order_scanned >= limit)
			limit = max(1U, limit >> 1);
	}

	if (!page) {
		cc->fast_search_fail++;
		if (scan_start) {
			/*
			 * Use the highest PFN found above min. If one was
			 * not found, be pessimistic for direct compaction
			 * and use the min mark.
			 */
			if (highest) {
				page = pfn_to_page(highest);
				cc->free_pfn = highest;
			} else {
				if (cc->direct_compaction && pfn_valid(min_pfn)) {
					page = pageblock_pfn_to_page(min_pfn,
						min(pageblock_end_pfn(min_pfn),
						    zone_end_pfn(cc->zone)),
						cc->zone);
					cc->free_pfn = min_pfn;
				}
			}
		}
	}

	if (highest && highest >= cc->zone->compact_cached_free_pfn) {
		highest -= pageblock_nr_pages;
		cc->zone->compact_cached_free_pfn = highest;
	}

	cc->total_free_scanned += nr_scanned;
	if (!page)
		return cc->free_pfn;

	low_pfn = page_to_pfn(page);
	fast_isolate_around(cc, low_pfn, nr_isolated);
	return low_pfn;
}

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct compact_control *cc)
{
	struct zone *zone = cc->zone;
	struct page *page;
	unsigned long block_start_pfn;	/* start of current pageblock */
	unsigned long isolate_start_pfn; /* pfn within current pageblock */
	unsigned long block_end_pfn;	/* end of current pageblock */
	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
	struct list_head *freelist = &cc->freepages;
	unsigned int stride;

	/* Try a small search of the free lists for a candidate */
	isolate_start_pfn = fast_isolate_freepages(cc);
	if (cc->nr_freepages)
		goto splitmap;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, zone-cached value, or the end of the
	 * zone when isolating for the first time. For looping we also need
	 * this pfn aligned down to the pageblock boundary, because we do
	 * block_start_pfn -= pageblock_nr_pages in the for loop.
	 * For ending point, take care when isolating in last pageblock of a
	 * zone which ends in the middle of a pageblock.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
	isolate_start_pfn = cc->free_pfn;
	block_start_pfn = pageblock_start_pfn(isolate_start_pfn);
	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
						zone_end_pfn(zone));
	low_pfn = pageblock_end_pfn(cc->migrate_pfn);
	stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; block_start_pfn >= low_pfn;
				block_end_pfn = block_start_pfn,
				block_start_pfn -= pageblock_nr_pages,
				isolate_start_pfn = block_start_pfn) {
		unsigned long nr_isolated;

		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check resched.
		 */
		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
			cond_resched();

		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
		if (!page)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(cc, page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from. */
		nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, freelist, stride, false);

		/* Update the skip hint if the full pageblock was scanned */
		if (isolate_start_pfn == block_end_pfn)
			update_pageblock_skip(cc, page, block_start_pfn);

		/* Are enough freepages isolated? */
		if (cc->nr_freepages >= cc->nr_migratepages) {
			if (isolate_start_pfn >= block_end_pfn) {
				/*
				 * Restart at previous pageblock if more
				 * freepages can be isolated next time.
				 */
				isolate_start_pfn =
					block_start_pfn - pageblock_nr_pages;
			}
			break;
		} else if (isolate_start_pfn < block_end_pfn) {
			/*
			 * If isolation failed early, do not continue
			 * needlessly.
			 */
			break;
		}

		/* Adjust stride depending on isolation */
		if (nr_isolated) {
			stride = 1;
			continue;
		}
		stride = min_t(unsigned int, COMPACT_CLUSTER_MAX, stride << 1);
	}

	/*
	 * Record where the free scanner will restart next time. Either we
	 * broke from the loop and set isolate_start_pfn based on the last
	 * call to isolate_freepages_block(), or we met the migration scanner
	 * and the loop terminated due to isolate_start_pfn < low_pfn
	 */
	cc->free_pfn = isolate_start_pfn;

splitmap:
	/* __isolate_free_page() does not map the pages */
	split_map_pages(freelist);
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * This is a migrate-callback that "frees" freepages back to the isolated
 * freelist.  All pages on the freelist are from the same zone, so there is no
 * special handling needed for NUMA.
 */
static void compaction_free(struct page *page, unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;

	list_add(&page->lru, &cc->freepages);
	cc->nr_freepages++;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Allow userspace to control policy on scanning the unevictable LRU for
 * compactable pages.
 */
#ifdef CONFIG_PREEMPT_RT
int sysctl_compact_unevictable_allowed __read_mostly = 0;
#else
int sysctl_compact_unevictable_allowed __read_mostly = 1;
#endif

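/*
 * Track the lowest pfn the fast migration scanner has restarted from, so a
 * failed fast search can fall back to a sensible linear starting point.
 */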
static inline void
update_fast_start_pfn(struct compact_control *cc, unsigned long pfn)
{
	if (cc->fast_start_pfn == ULONG_MAX)
		return;

	if (!cc->fast_start_pfn)
		cc->fast_start_pfn = pfn;

	cc->fast_start_pfn = min(cc->fast_start_pfn, pfn);
}

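/*
 * On fast-search failure, restart the migration scanner from the lowest pfn
 * cached by update_fast_start_pfn(), and mark the cache invalid (ULONG_MAX)
 * so it is not reused until reinitialised by compact_zone().
 */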
static inline unsigned long
reinit_migrate_pfn(struct compact_control *cc)
{
	if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX)
		return cc->migrate_pfn;

	cc->migrate_pfn = cc->fast_start_pfn;
	cc->fast_start_pfn = ULONG_MAX;

	return cc->migrate_pfn;
}

/*
 * Briefly search the free lists for a migration source that already has
 * some free pages to reduce the number of pages that need migration
 * before a pageblock is free.
 */
static unsigned long fast_find_migrateblock(struct compact_control *cc)
{
	unsigned int limit = freelist_scan_limit(cc);
	unsigned int nr_scanned = 0;
	unsigned long distance;
	unsigned long pfn = cc->migrate_pfn;
	unsigned long high_pfn;
	int order;
	bool found_block = false;

	/* Skip hints are relied on to avoid repeats on the fast search */
	if (cc->ignore_skip_hint)
		return pfn;

	/*
	 * If the migrate_pfn is not at the start of a zone or the start
	 * of a pageblock then assume this is a continuation of a previous
	 * scan restarted due to COMPACT_CLUSTER_MAX.
	 */
	if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn))
		return pfn;

	/*
	 * For smaller orders, just linearly scan as the number of pages
	 * to migrate should be relatively small and does not necessarily
	 * justify freeing up a large block for a small allocation.
	 */
	if (cc->order <= PAGE_ALLOC_COSTLY_ORDER)
		return pfn;

	/*
	 * Only allow kcompactd and direct requests for movable pages to
	 * quickly clear out a MOVABLE pageblock for allocation. This
	 * reduces the risk that a large movable pageblock is freed for
	 * an unmovable/reclaimable small allocation.
	 */
	if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE)
		return pfn;

	/*
	 * When starting the migration scanner, pick any pageblock within the
	 * first half of the search space. Otherwise try and pick a pageblock
	 * within the first eighth to reduce the chances that a migration
	 * target later becomes a source.
	 */
	distance = (cc->free_pfn - cc->migrate_pfn) >> 1;
	if (cc->migrate_pfn != cc->zone->zone_start_pfn)
		distance >>= 2;
	high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance);

	for (order = cc->order - 1;
	     order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit;
	     order--) {
		struct free_area *area = &cc->zone->free_area[order];
		struct list_head *freelist;
		unsigned long flags;
		struct page *freepage;

		if (!area->nr_free)
			continue;

		spin_lock_irqsave(&cc->zone->lock, flags);
		freelist = &area->free_list[MIGRATE_MOVABLE];
		list_for_each_entry(freepage, freelist, lru) {
			unsigned long free_pfn;

			if (nr_scanned++ >= limit) {
				move_freelist_tail(freelist, freepage);
				break;
			}

			free_pfn = page_to_pfn(freepage);
			if (free_pfn < high_pfn) {
				/*
				 * Avoid if skipped recently. Ideally it would
				 * move to the tail but even safe iteration of
				 * the list assumes an entry is deleted, not
				 * reordered.
				 */
				if (get_pageblock_skip(freepage))
					continue;

				/* Reorder so a future search skips recent pages */
				move_freelist_tail(freelist, freepage);

				update_fast_start_pfn(cc, free_pfn);
				pfn = pageblock_start_pfn(free_pfn);
				cc->fast_search_fail = 0;
				found_block = true;
				set_pageblock_skip(freepage);
				break;
			}
		}
		spin_unlock_irqrestore(&cc->zone->lock, flags);
	}

	cc->total_migrate_scanned += nr_scanned;

	/*
	 * If fast scanning failed then use a cached entry for a page block
	 * that had free pages as the basis for starting a linear scan.
	 */
	if (!found_block) {
		cc->fast_search_fail++;
		pfn = reinit_migrate_pfn(cc);
	}
	return pfn;
}

/*
 * Isolate all pages that can be migrated from the first suitable block,
 * starting at the block pointed to by the migrate scanner pfn within
 * compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
{
	unsigned long block_start_pfn;
	unsigned long block_end_pfn;
	unsigned long low_pfn;
	struct page *page;
	const isolate_mode_t isolate_mode =
		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
		(cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
	bool fast_find_block;

	/*
	 * Start at where we last stopped, or beginning of the zone as
	 * initialized by compact_zone(). The first failure will use
	 * the lowest PFN as the starting point for linear scanning.
	 */
	low_pfn = fast_find_migrateblock(cc);
	block_start_pfn = pageblock_start_pfn(low_pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;

	/*
	 * fast_find_migrateblock marks a pageblock skipped so to avoid
	 * the isolation_suitable check below, check whether the fast
	 * search was successful.
	 */
	fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;

	/* Only scan within a pageblock boundary */
	block_end_pfn = pageblock_end_pfn(low_pfn);

	/*
	 * Iterate over whole pageblocks until we find the first suitable.
	 * Do not cross the free scanner.
	 */
	for (; block_end_pfn <= cc->free_pfn;
			fast_find_block = false,
			cc->migrate_pfn = low_pfn = block_end_pfn,
			block_start_pfn = block_end_pfn,
			block_end_pfn += pageblock_nr_pages) {

		/*
		 * This can potentially iterate a massively long zone with
		 * many pageblocks unsuitable, so periodically check if we
		 * need to schedule.
		 */
		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
			cond_resched();

		page = pageblock_pfn_to_page(block_start_pfn,
						block_end_pfn, cc->zone);
		if (!page)
			continue;

		/*
		 * If isolation recently failed, do not retry. Only check the
		 * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock
		 * to be visited multiple times. Assume skip was checked
		 * before making it "skip" so other compaction instances do
		 * not scan the same block.
		 */
		if (IS_ALIGNED(low_pfn, pageblock_nr_pages) &&
		    !fast_find_block && !isolation_suitable(cc, page))
			continue;

		/*
		 * For async compaction, also only scan in MOVABLE blocks
		 * without huge pages. Async compaction is optimistic to see
		 * if the minimum amount of work satisfies the allocation.
		 * The cached PFN is updated as it's possible that all
		 * remaining blocks between source and target are unsuitable
		 * and the compaction scanners fail to meet.
		 */
		if (!suitable_migration_source(cc, page)) {
			update_cached_migrate(cc, block_end_pfn);
			continue;
		}

		/* Perform the isolation */
		if (isolate_migratepages_block(cc, low_pfn, block_end_pfn,
						isolate_mode))
			return ISOLATE_ABORT;

		/*
		 * Either we isolated something and proceed with migration. Or
		 * we failed and compact_zone should decide if we should
		 * continue or not.
		 */
		break;
	}

	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
}

/*
 * order == -1 is expected when compacting via
 * /proc/sys/vm/compact_memory
 */
static inline bool is_via_compact_memory(int order)
{
	return order == -1;
}

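/* Returns true if this node's kswapd thread exists and is runnable. */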
static bool kswapd_is_running(pg_data_t *pgdat)
{
	return pgdat->kswapd && task_is_running(pgdat->kswapd);
}

/*
 * A zone's fragmentation score is the external fragmentation wrt to the
 * COMPACTION_HPAGE_ORDER. It returns a value in the range [0, 100].
 */
static unsigned int fragmentation_score_zone(struct zone *zone)
{
	return extfrag_for_order(zone, COMPACTION_HPAGE_ORDER);
}

/*
 * A weighted zone's fragmentation score is the external fragmentation
 * wrt to the COMPACTION_HPAGE_ORDER scaled by the zone's size. It
 * returns a value in the range [0, 100].
 *
 * The scaling factor ensures that proactive compaction focuses on larger
 * zones like ZONE_NORMAL, rather than smaller, specialized zones like
 * ZONE_DMA32. For smaller zones, the score value remains close to zero,
 * and thus never exceeds the high threshold for proactive compaction.
 */
static unsigned int fragmentation_score_zone_weighted(struct zone *zone)
{
	unsigned long score;

	score = zone->present_pages * fragmentation_score_zone(zone);
	return div64_ul(score, zone->zone_pgdat->node_present_pages + 1);
}

/*
 * The per-node proactive (background) compaction process is started by its
 * corresponding kcompactd thread when the node's fragmentation score
 * exceeds the high threshold. The compaction process remains active till
 * the node's score falls below the low threshold, or one of the back-off
 * conditions is met.
 */
static unsigned int fragmentation_score_node(pg_data_t *pgdat)
{
	unsigned int score = 0;
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone;

		zone = &pgdat->node_zones[zoneid];
		score += fragmentation_score_zone_weighted(zone);
	}

	return score;
}

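/*
 * Derive the low/high fragmentation-score watermarks from the proactiveness
 * tunable: higher proactiveness gives lower thresholds, with the high
 * watermark fixed at 10 points above the low one (capped at 100).
 */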
static unsigned int fragmentation_score_wmark(pg_data_t *pgdat, bool low)
{
	unsigned int wmark_low;

	/*
	 * Cap the low watermark to avoid excessive compaction
	 * activity in case a user sets the proactiveness tunable
	 * close to 100 (maximum).
	 */
	wmark_low = max(100U - sysctl_compaction_proactiveness, 5U);
	return low ? wmark_low : min(wmark_low + 10, 100U);
}

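/*
 * Proactive compaction runs only when the node scores above the high
 * watermark, and backs off entirely while kswapd is active or the
 * proactiveness tunable is zero.
 */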
static bool should_proactive_compact_node(pg_data_t *pgdat)
{
	int wmark_high;

	if (!sysctl_compaction_proactiveness || kswapd_is_running(pgdat))
		return false;

	wmark_high = fragmentation_score_wmark(pgdat, false);
	return fragmentation_score_node(pgdat) > wmark_high;
}

static enum compact_result __compact_finished(struct compact_control *cc)
{
	unsigned int order;
	const int migratetype = cc->migratetype;
	int ret;

	/* Compaction run completes if the migrate and free scanner meet */
	if (compact_scanners_met(cc)) {
		/* Let the next compaction start anew. */
		reset_cached_positions(cc->zone);

		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kcompactd does not set the
		 * flag itself as the decision to be clear should be directly
		 * based on an allocation request.
		 */
		if (cc->direct_compaction)
			cc->zone->compact_blockskip_flush = true;

		if (cc->whole_zone)
			return COMPACT_COMPLETE;
		else
			return COMPACT_PARTIAL_SKIPPED;
	}

	if (cc->proactive_compaction) {
		int score, wmark_low;
		pg_data_t *pgdat;

		pgdat = cc->zone->zone_pgdat;
		if (kswapd_is_running(pgdat))
			return COMPACT_PARTIAL_SKIPPED;

		score = fragmentation_score_zone(cc->zone);
		wmark_low = fragmentation_score_wmark(pgdat, true);

		if (score > wmark_low)
			ret = COMPACT_CONTINUE;
		else
			ret = COMPACT_SUCCESS;

		goto out;
	}

	if (is_via_compact_memory(cc->order))
		return COMPACT_CONTINUE;

	/*
	 * Always finish scanning a pageblock to reduce the possibility of
	 * fallbacks in the future. This is particularly important when
	 * migration source is unmovable/reclaimable but it's not worth
	 * special casing.
	 */
	if (!IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	ret = COMPACT_NO_SUITABLE_PAGE;
	for (order = cc->order; order < MAX_ORDER; order++) {
		struct free_area *area = &cc->zone->free_area[order];
		bool can_steal;

		/* Job done if page is free of the right migratetype */
		if (!free_area_empty(area, migratetype))
			return COMPACT_SUCCESS;

#ifdef CONFIG_CMA
		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
		if (migratetype == MIGRATE_MOVABLE &&
			!free_area_empty(area, MIGRATE_CMA))
			return COMPACT_SUCCESS;
#endif
		/*
		 * Job done if allocation would steal freepages from
		 * other migratetype buddy lists.
		 */
		if (find_suitable_fallback(area, order, migratetype,
						true, &can_steal) != -1) {

			/* movable pages are OK in any pageblock */
			if (migratetype == MIGRATE_MOVABLE)
				return COMPACT_SUCCESS;

			/*
			 * We are stealing for a non-movable allocation. Make
			 * sure we finish compacting the current pageblock
			 * first (which is assured by the pageblock-aligned
			 * check above, or by async mode operating on whole
			 * pageblocks of the same migratetype) so it is as
			 * free as possible and we won't have to steal
			 * another one soon.
			 */
			if (cc->mode == MIGRATE_ASYNC ||
					IS_ALIGNED(cc->migrate_pfn,
							pageblock_nr_pages)) {
				return COMPACT_SUCCESS;
			}

			ret = COMPACT_CONTINUE;
			break;
		}
	}

out:
	if (cc->contended || fatal_signal_pending(current))
		ret = COMPACT_CONTENDED;

	return ret;
}

static enum compact_result compact_finished(struct compact_control *cc)
{
	int ret;

	ret = __compact_finished(cc);
	trace_mm_compaction_finished(cc->zone, cc->order, ret);
	if (ret == COMPACT_NO_SUITABLE_PAGE)
		ret = COMPACT_CONTINUE;

	return ret;
}

static enum compact_result __compaction_suitable(struct zone *zone, int order,
					unsigned int alloc_flags,
					int highest_zoneidx,
					unsigned long wmark_target)
{
	unsigned long watermark;

	if (is_via_compact_memory(order))
		return COMPACT_CONTINUE;

	watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
	/*
	 * If watermarks for high-order allocation are already met, there
	 * should be no need for compaction at all.
	 */
	if (zone_watermark_ok(zone, order, watermark, highest_zoneidx,
								alloc_flags))
		return COMPACT_SUCCESS;

	/*
	 * Watermarks for order-0 must be met for compaction to be able to
	 * isolate free pages for migration targets. This means that the
	 * watermark and alloc_flags have to match, or be more pessimistic than
	 * the check in __isolate_free_page(). We don't use the direct
	 * compactor's alloc_flags, as they are not relevant for freepage
	 * isolation. We however do use the direct compactor's highest_zoneidx
	 * to skip over zones where lowmem reserves would prevent allocation
	 * even if compaction succeeds.
	 * For costly orders, we require low watermark instead of min for
	 * compaction to proceed to increase its chances.
	 * ALLOC_CMA is used, as pages in CMA pageblocks are considered
	 * suitable migration targets
	 */
	watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
				low_wmark_pages(zone) : min_wmark_pages(zone);
	watermark += compact_gap(order);
	if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
						ALLOC_CMA, wmark_target))
		return COMPACT_SKIPPED;

	return COMPACT_CONTINUE;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *	COMPACT_SKIPPED  - If there are too few free pages for compaction
 *	COMPACT_SUCCESS  - If the allocation would succeed without compaction
 *	COMPACT_CONTINUE - If compaction should run now
 */
enum compact_result compaction_suitable(struct zone *zone, int order,
					unsigned int alloc_flags,
					int highest_zoneidx)
{
	enum compact_result ret;
	int fragindex;

	ret = __compaction_suitable(zone, order, alloc_flags, highest_zoneidx,
				    zone_page_state(zone, NR_FREE_PAGES));
	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 would imply allocations might succeed depending on
	 * watermarks, but we already failed the high-order watermark check
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation. Also
	 * ignore fragindex for non-costly orders where the alternative to
	 * a successful allocation is a much faster kernel reclaim.
	 */
	if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) {
		fragindex = fragmentation_index(zone, order);
		if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
			ret = COMPACT_NOT_SUITABLE_ZONE;
	}

	trace_mm_compaction_suitable(zone, order, ret);
	if (ret == COMPACT_NOT_SUITABLE_ZONE)
		ret = COMPACT_SKIPPED;

	return ret;
}

bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
		int alloc_flags)
{
	struct zone *zone;
	struct zoneref *z;

	/*
	 * Make sure at least one zone would pass __compaction_suitable if we
	 * continue retrying the reclaim.
	 */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
				ac->highest_zoneidx, ac->nodemask) {
		unsigned long available;
		enum compact_result compact_result;

		/*
		 * Do not consider all the reclaimable memory because we do not
		 * want to trash just for a single high order allocation which
		 * is even not guaranteed to appear even if __compaction_suitable
		 * is happy about the watermark check.
		 */
		available = zone_reclaimable_pages(zone) / order;
		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
		compact_result = __compaction_suitable(zone, order, alloc_flags,
				ac->highest_zoneidx, available);
		if (compact_result != COMPACT_SKIPPED)
			return true;
	}

	return false;
}

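/*
 * Compact a single zone: run the migrate and free scanners towards each
 * other until they meet, a suitable free page appears, or one of the abort
 * conditions (contention, fatal signal, captured page) fires.
 */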
static enum compact_result
compact_zone(struct compact_control *cc, struct capture_control *capc)
{
	enum compact_result ret;
	unsigned long start_pfn = cc->zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(cc->zone);
	unsigned long last_migrated_pfn;
	const bool sync = cc->mode != MIGRATE_ASYNC;
	bool update_cached;

	/*
	 * These counters track activities during zone compaction. Initialize
	 * them before compacting a new zone.
	 */
	cc->total_migrate_scanned = 0;
	cc->total_free_scanned = 0;
	cc->nr_migratepages = 0;
	cc->nr_freepages = 0;
	INIT_LIST_HEAD(&cc->freepages);
	INIT_LIST_HEAD(&cc->migratepages);

	cc->migratetype = gfp_migratetype(cc->gfp_mask);
	ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
							cc->highest_zoneidx);
	/* Compaction is likely to fail */
	if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
		return ret;

	/* huh, compaction_suitable is returning something unexpected */
	VM_BUG_ON(ret != COMPACT_CONTINUE);

	/*
	 * Clear pageblock skip if there were failures recently and compaction
	 * is about to be retried after being deferred.
	 */
	if (compaction_restarting(cc->zone, cc->order))
		__reset_isolation_suitable(cc->zone);

	/*
	 * Setup to move all movable pages to the end of the zone. Used cached
	 * information on where the scanners should start (unless we explicitly
	 * want to compact the whole zone), but check that it is initialised
	 * by ensuring the values are within zone boundaries.
	 */
	cc->fast_start_pfn = 0;
	if (cc->whole_zone) {
		cc->migrate_pfn = start_pfn;
		cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
	} else {
		cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync];
		cc->free_pfn = cc->zone->compact_cached_free_pfn;
		if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
			cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
			cc->zone->compact_cached_free_pfn = cc->free_pfn;
		}
		if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
			cc->migrate_pfn = start_pfn;
			cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
			cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
		}

		if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn)
			cc->whole_zone = true;
	}

	last_migrated_pfn = 0;

	/*
	 * Migrate has separate cached PFNs for ASYNC and SYNC* migration on
	 * the basis that some migrations will fail in ASYNC mode. However,
	 * if the cached PFNs match and pageblocks are skipped due to having
	 * no isolation candidates, then the sync state does not matter.
	 * Until a pageblock with isolation candidates is found, keep the
	 * cached PFNs in sync to avoid revisiting the same blocks.
	 */
	update_cached = !sync &&
		cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1];

	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
				cc->free_pfn, end_pfn, sync);

	/* lru_add_drain_all could be expensive with involving other CPUs */
	lru_add_drain();

	while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) {
		int err;
		unsigned long iteration_start_pfn = cc->migrate_pfn;

		/*
		 * Avoid multiple rescans which can happen if a page cannot be
		 * isolated (dirty/writeback in async mode) or if the migrated
		 * pages are being allocated before the pageblock is cleared.
		 * The first rescan will capture the entire pageblock for
		 * migration. If it fails, it'll be marked skip and scanning
		 * will proceed as normal.
		 */
		cc->rescan = false;
		if (pageblock_start_pfn(last_migrated_pfn) ==
		    pageblock_start_pfn(iteration_start_pfn)) {
			cc->rescan = true;
		}

		switch (isolate_migratepages(cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_CONTENDED;
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			if (update_cached) {
				cc->zone->compact_cached_migrate_pfn[1] =
					cc->zone->compact_cached_migrate_pfn[0];
			}

			/*
			 * We haven't isolated and migrated anything, but
			 * there might still be unflushed migrations from
			 * previous cc->order aligned block.
			 */
			goto check_drain;
		case ISOLATE_SUCCESS:
			update_cached = false;
			last_migrated_pfn = iteration_start_pfn;
		}

		err = migrate_pages(&cc->migratepages, compaction_alloc,
				compaction_free, (unsigned long)cc, cc->mode,
				MR_COMPACTION, NULL);

		trace_mm_compaction_migratepages(cc->nr_migratepages, err,
							&cc->migratepages);

		/* All pages were either migrated or will be released */
		cc->nr_migratepages = 0;
		if (err) {
			putback_movable_pages(&cc->migratepages);
			/*
			 * migrate_pages() may return -ENOMEM when scanners meet
			 * and we want compact_finished() to detect it
			 */
			if (err == -ENOMEM && !compact_scanners_met(cc)) {
				ret = COMPACT_CONTENDED;
				goto out;
			}
			/*
			 * We failed to migrate at least one page in the current
			 * order-aligned block, so skip the rest of it.
			 */
			if (cc->direct_compaction &&
						(cc->mode == MIGRATE_ASYNC)) {
				cc->migrate_pfn = block_end_pfn(
						cc->migrate_pfn - 1, cc->order);
				/* Draining pcplists is useless in this case */
				last_migrated_pfn = 0;
			}
		}

check_drain:
		/*
		 * Has the migration scanner moved away from the previous
		 * cc->order aligned block where we migrated from? If yes,
		 * flush the pages that were freed, so that they can merge and
		 * compact_finished() can detect immediately if allocation
		 * would succeed.
		 */
		if (cc->order > 0 && last_migrated_pfn) {
			unsigned long current_block_start =
				block_start_pfn(cc->migrate_pfn, cc->order);

			if (last_migrated_pfn < current_block_start) {
				lru_add_drain_cpu_zone(cc->zone);
				/* No more flushing until we migrate again */
				last_migrated_pfn = 0;
			}
		}

		/* Stop if a page has been captured */
		if (capc && capc->page) {
			ret = COMPACT_SUCCESS;
			break;
		}
	}

out:
	/*
	 * Release free pages and update where the free scanner should restart,
	 * so we don't leave any returned pages behind in the next attempt.
	 */
	if (cc->nr_freepages > 0) {
		unsigned long free_pfn = release_freepages(&cc->freepages);

		cc->nr_freepages = 0;
		VM_BUG_ON(free_pfn == 0);
		/* The cached pfn is always the first in a pageblock */
		free_pfn = pageblock_start_pfn(free_pfn);
		/*
		 * Only go back, not forward. The cached pfn might have been
		 * already reset to zone end in compact_finished()
		 */
		if (free_pfn > cc->zone->compact_cached_free_pfn)
			cc->zone->compact_cached_free_pfn = free_pfn;
	}

	count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
	count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);

	trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
				cc->free_pfn, end_pfn, sync, ret);

	return ret;
}

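/*
 * Run compact_zone() for a direct-compaction request, with a capture_control
 * hooked into current so a freed page of the right order can be captured
 * directly instead of being raced for by other allocators.
 */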
static enum compact_result compact_zone_order(struct zone *zone, int order,
		gfp_t gfp_mask, enum compact_priority prio,
		unsigned int alloc_flags, int highest_zoneidx,
		struct page **capture)
{
	enum compact_result ret;
	struct compact_control cc = {
		.order = order,
		.search_order = order,
		.gfp_mask = gfp_mask,
		.zone = zone,
		.mode = (prio == COMPACT_PRIO_ASYNC) ?
					MIGRATE_ASYNC :	MIGRATE_SYNC_LIGHT,
		.alloc_flags = alloc_flags,
		.highest_zoneidx = highest_zoneidx,
		.direct_compaction = true,
		.whole_zone = (prio == MIN_COMPACT_PRIORITY),
		.ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
		.ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
	};
	struct capture_control capc = {
		.cc = &cc,
		.page = NULL,
	};

	/*
	 * Make sure the structs are really initialized before we expose the
	 * capture control, in case we are interrupted and the interrupt
	 * handler frees a page.
	 */
	barrier();
	WRITE_ONCE(current->capture_control, &capc);

	ret = compact_zone(&cc, &capc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	/*
	 * Make sure we hide capture control first before we read the captured
	 * page pointer, otherwise an interrupt could free and capture a page
	 * and we would leak it.
	 */
	WRITE_ONCE(current->capture_control, NULL);
	*capture = READ_ONCE(capc.page);
	/*
	 * Technically, it is also possible that compaction is skipped but
	 * the page is still captured out of luck(IRQ came and freed the page).
	 * Returning COMPACT_SUCCESS in such cases helps in properly accounting
	 * the COMPACT[STALL|FAIL] when compaction is skipped.
	 */
	if (*capture)
		ret = COMPACT_SUCCESS;

	return ret;
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @order: The order of the current allocation
 * @alloc_flags: The allocation flags of the current allocation
 * @ac: The context of current allocation
 * @prio: Determines how hard direct compaction should try to succeed
 * @capture: Pointer to free page created by compaction will be stored here
 *
 * This is the main entry point for direct page compaction.
 */
enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
		unsigned int alloc_flags, const struct alloc_context *ac,
		enum compact_priority prio, struct page **capture)
{
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	enum compact_result rc = COMPACT_SKIPPED;

	/*
	 * Check if the GFP flags allow compaction - GFP_NOIO is really
	 * tricky context because the migration might require IO
	 */
	if (!may_perform_io)
		return COMPACT_SKIPPED;

	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);

	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
					ac->highest_zoneidx, ac->nodemask) {
		enum compact_result status;

		if (prio > MIN_COMPACT_PRIORITY
					&& compaction_deferred(zone, order)) {
			rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
			continue;
		}

		status = compact_zone_order(zone, order, gfp_mask, prio,
				alloc_flags, ac->highest_zoneidx, capture);
		rc = max(status, rc);

		/* The allocation should succeed, stop compacting */
		if (status == COMPACT_SUCCESS) {
			/*
			 * We think the allocation will succeed in this zone,
			 * but it is not certain, hence the false. The caller
			 * will repeat this with true if allocation indeed
			 * succeeds in this zone.
			 */
			compaction_defer_reset(zone, order, false);

			break;
		}

		if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE ||
					status == COMPACT_PARTIAL_SKIPPED))
			/*
			 * We think that allocation won't succeed in this zone
			 * so we defer compaction there. If it ends up
			 * succeeding after all, it will be reset.
			 */
			defer_compaction(zone, order);

		/*
		 * We might have stopped compacting due to need_resched() in
		 * async compaction, or due to a fatal signal detected. In that
		 * case do not try further zones
		 */
		if ((prio == COMPACT_PRIO_ASYNC && need_resched())
					|| fatal_signal_pending(current))
			break;
	}

	return rc;
}

/*
 * Compact all zones within a node till each zone's fragmentation score
 * reaches within proactive compaction thresholds (as determined by the
 * proactiveness tunable).
 *
 * It is possible that the function returns before reaching score targets
 * due to various back-off conditions, such as, contention on per-node or
 * per-zone locks.
 */
static void proactive_compact_node(pg_data_t *pgdat)
{
	int zoneid;
	struct zone *zone;
	struct compact_control cc = {
		.order = -1,
		.mode = MIGRATE_SYNC_LIGHT,
		.ignore_skip_hint = true,
		.whole_zone = true,
		.gfp_mask = GFP_KERNEL,
		.proactive_compaction = true,
	};

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc.zone = zone;

		compact_zone(&cc, NULL);

		VM_BUG_ON(!list_empty(&cc.freepages));
		VM_BUG_ON(!list_empty(&cc.migratepages));
	}
}

/* Compact all zones within a node */
static void compact_node(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int zoneid;
	struct zone *zone;
	struct compact_control cc = {
		.order = -1,
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
		.whole_zone = true,
		.gfp_mask = GFP_KERNEL,
	};

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc.zone = zone;

		compact_zone(&cc, NULL);

		VM_BUG_ON(!list_empty(&cc.freepages));
		VM_BUG_ON(!list_empty(&cc.migratepages));
	}
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);
}

/*
 * Tunable for proactive compaction. It determines how
 * aggressively the kernel should compact memory in the
 * background. It takes values in the range [0, 100].
 */
unsigned int __read_mostly sysctl_compaction_proactiveness = 20;

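/*
 * sysctl handler for vm.compaction_proactiveness; a write wakes each node's
 * kcompactd so the new setting takes effect immediately.
 */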
int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc, nid;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	if (write && sysctl_compaction_proactiveness) {
		for_each_online_node(nid) {
			pg_data_t *pgdat = NODE_DATA(nid);

			if (pgdat->proactive_compact_trigger)
				continue;

			pgdat->proactive_compact_trigger = true;
			wake_up_interruptible(&pgdat->kcompactd_wait);
		}
	}

	return 0;
}

/*
 * This is the entry point for compacting all nodes via
 * /proc/sys/vm/compact_memory
 */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		compact_nodes();

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
static ssize_t compact_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR_WO(compact);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

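/* Has kcompactd been asked to compact, run a proactive pass, or stop? */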
static inline bool kcompactd_work_requested(pg_data_t *pgdat)
{
	return pgdat->kcompactd_max_order > 0 || kthread_should_stop() ||
		pgdat->proactive_compact_trigger;
}

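/*
 * Check whether any populated zone on the node could make progress for the
 * pending kcompactd request; used to avoid pointless wakeups.
 */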
static bool kcompactd_node_suitable(pg_data_t *pgdat)
{
	int zoneid;
	struct zone *zone;
	enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx;

	for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) {
		zone = &pgdat->node_zones[zoneid];

		if (!populated_zone(zone))
			continue;

		if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
					highest_zoneidx) == COMPACT_CONTINUE)
			return true;
	}

	return false;
}

static void kcompactd_do_work(pg_data_t *pgdat)
{
	/*
	 * With no special task, compact all zones so that a page of requested
	 * order is allocatable.
	 */
	int zoneid;
	struct zone *zone;
	struct compact_control cc = {
		.order = pgdat->kcompactd_max_order,
		.search_order = pgdat->kcompactd_max_order,
		.highest_zoneidx = pgdat->kcompactd_highest_zoneidx,
		.mode = MIGRATE_SYNC_LIGHT,
		.ignore_skip_hint = false,
		.gfp_mask = GFP_KERNEL,
	};
	trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
							cc.highest_zoneidx);
	count_compact_event(KCOMPACTD_WAKE);

	for (zoneid = 0; zoneid <= cc.highest_zoneidx; zoneid++) {
		int status;

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		if (compaction_deferred(zone, cc.order))
			continue;

		if (compaction_suitable(zone, cc.order, 0, zoneid) !=
							COMPACT_CONTINUE)
			continue;

		if (kthread_should_stop())
			return;

		cc.zone = zone;
		status = compact_zone(&cc, NULL);

		if (status == COMPACT_SUCCESS) {
			compaction_defer_reset(zone, cc.order, false);
		} else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
			/*
			 * Buddy pages may become stranded on pcps that could
			 * otherwise coalesce on the zone's free area for
			 * order >= cc.order.  This is ratelimited by the
			 * upcoming deferral.
			 */
			drain_all_pages(zone);

			/*
			 * We use sync migration mode here, so we defer like
			 * sync direct compaction does.
			 */
			defer_compaction(zone, cc.order);
		}

		count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
				     cc.total_migrate_scanned);
		count_compact_events(KCOMPACTD_FREE_SCANNED,
				     cc.total_free_scanned);

		VM_BUG_ON(!list_empty(&cc.freepages));
		VM_BUG_ON(!list_empty(&cc.migratepages));
	}

	/*
	 * Regardless of success, we are done until woken up next. But
	 * remember the requested order/highest_zoneidx in case it was higher
	 * than our current ones
	 */
	if (pgdat->kcompactd_max_order <= cc.order)
		pgdat->kcompactd_max_order = 0;
	if (pgdat->kcompactd_highest_zoneidx >= cc.highest_zoneidx)
		pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1;
}

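/*
 * Record the pending request on the node and wake kcompactd if it is
 * sleeping and at least one zone could make progress for this order.
 */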
void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx)
{
	if (!order)
		return;

	if (pgdat->kcompactd_max_order < order)
		pgdat->kcompactd_max_order = order;

	if (pgdat->kcompactd_highest_zoneidx > highest_zoneidx)
		pgdat->kcompactd_highest_zoneidx = highest_zoneidx;

	/*
	 * Pairs with implicit barrier in wait_event_freezable()
	 * such that wakeups are not missed.
	 */
	if (!wq_has_sleeper(&pgdat->kcompactd_wait))
		return;

	if (!kcompactd_node_suitable(pgdat))
		return;

	trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
							highest_zoneidx);
	wake_up_interruptible(&pgdat->kcompactd_wait);
}

/*
 * The background compaction daemon, started as a kernel thread
 * from the init process.
 */
static int kcompactd(void *p)
{
	pg_data_t *pgdat = (pg_data_t *)p;
	struct task_struct *tsk = current;
	long default_timeout = msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC);
	long timeout = default_timeout;

	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);

	set_freezable();

	pgdat->kcompactd_max_order = 0;
	pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1;

	while (!kthread_should_stop()) {
		unsigned long pflags;

		/*
		 * Avoid the unnecessary wakeup for proactive compaction
		 * when it is disabled.
		 */
		if (!sysctl_compaction_proactiveness)
			timeout = MAX_SCHEDULE_TIMEOUT;
		trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
		if (wait_event_freezable_timeout(pgdat->kcompactd_wait,
			kcompactd_work_requested(pgdat), timeout) &&
			!pgdat->proactive_compact_trigger) {

			psi_memstall_enter(&pflags);
			kcompactd_do_work(pgdat);
			psi_memstall_leave(&pflags);
			/*
			 * Reset the timeout after an explicit wakeup so the
			 * next proactive check happens on the normal cadence.
			 */
			timeout = default_timeout;
			continue;
		}

		/*
		 * Start the proactive work with default timeout. Based
		 * on the fragmentation score, this timeout is updated.
		 */
		timeout = default_timeout;
		if (should_proactive_compact_node(pgdat)) {
			unsigned int prev_score, score;

			prev_score = fragmentation_score_node(pgdat);
			proactive_compact_node(pgdat);
			score = fragmentation_score_node(pgdat);
			/*
			 * Defer proactive compaction if the fragmentation
			 * score did not go down i.e. no progress made.
			 */
			if (unlikely(score >= prev_score))
				timeout =
				   default_timeout << COMPACT_MAX_DEFER_SHIFT;
		}
		if (unlikely(pgdat->proactive_compact_trigger))
			pgdat->proactive_compact_trigger = false;
	}

	return 0;
}

/*
 * This kcompactd start function will be called by init and node-hot-add.
 * On node-hot-add, kcompactd will moved to proper cpus if cpus are hot-added.
 */
int kcompactd_run(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int ret = 0;

	if (pgdat->kcompactd)
		return 0;

	pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
	if (IS_ERR(pgdat->kcompactd)) {
		pr_err("Failed to start kcompactd on node %d\n", nid);
		ret = PTR_ERR(pgdat->kcompactd);
		pgdat->kcompactd = NULL;
	}
	return ret;
}

/*
 * Called by memory hotplug when all memory in a node is offlined. Caller must
 * hold mem_hotplug_begin/end().
 */
void kcompactd_stop(int nid)
{
	struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;

	if (kcompactd) {
		kthread_stop(kcompactd);
		NODE_DATA(nid)->kcompactd = NULL;
	}
}

/*
 * It's optimal to keep kcompactd on the same CPUs as their memory, but
 * not required for correctness. So if the last cpu in a node goes
 * away, we get changed to run anywhere: as the first one comes back,
 * restore their cpu bindings.
 */
static int kcompactd_cpu_online(unsigned int cpu)
{
	int nid;

	for_each_node_state(nid, N_MEMORY) {
		pg_data_t *pgdat = NODE_DATA(nid);
		const struct cpumask *mask;

		mask = cpumask_of_node(pgdat->node_id);

		if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
			/* One of our CPUs online: restore mask */
			set_cpus_allowed_ptr(pgdat->kcompactd, mask);
	}
	return 0;
}

static int __init kcompactd_init(void)
{
	int nid;
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"mm/compaction:online",
					kcompactd_cpu_online, NULL);
	if (ret < 0) {
		pr_err("kcompactd: failed to register hotplug callbacks.\n");
		return ret;
	}

	for_each_node_state(nid, N_MEMORY)
		kcompactd_run(nid);
	return 0;
}
subsys_initcall(kcompactd_init)

#endif /* CONFIG_COMPACTION */