/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/cpu.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

#define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
#define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
#define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);
		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}

static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
		kasan_alloc_pages(page, 0);
	}
}

static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

#ifdef CONFIG_COMPACTION

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6
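
/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. compact_considered counts skipped attempts up to
 * 1 << compact_defer_shift, and compact_defer_shift itself is capped at
 * COMPACT_MAX_DEFER_SHIFT.
 */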
void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;

	trace_mm_compaction_defer_compaction(zone, order);
}

/* Returns true if compaction should be skipped this time */
bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	if (zone->compact_considered >= defer_limit)
		return false;

	trace_mm_compaction_deferred(zone, order);

	return true;
}
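
/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */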
void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;

	trace_mm_compaction_defer_reset(zone, order);
}

/* Returns true if restarting compaction after many failures */
bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}

/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

static void reset_cached_positions(struct zone *zone)
{
	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
	zone->compact_cached_free_pfn =
				pageblock_start_pfn(zone_end_pfn(zone) - 1);
}
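
/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */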
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long pfn;

	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (zone != page_zone(page))
			continue;

		clear_pageblock_skip(page);
	}

	reset_cached_positions(zone);
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished (migration scanner met free scanner) */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}
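
/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */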
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;
	unsigned long pfn;

	if (cc->ignore_skip_hint)
		return;

	if (!page)
		return;

	if (nr_isolated)
		return;

	set_pageblock_skip(page);

	pfn = page_to_pfn(page);

	/* Update where async and sync compaction should restart */
	if (migrate_scanner) {
		if (pfn > zone->compact_cached_migrate_pfn[0])
			zone->compact_cached_migrate_pfn[0] = pfn;
		if (cc->mode != MIGRATE_ASYNC &&
		    pfn > zone->compact_cached_migrate_pfn[1])
			zone->compact_cached_migrate_pfn[1] = pfn;
	} else {
		if (pfn < zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = pfn;
	}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */
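
/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, back out if the lock cannot
 * be taken immediately. For sync compaction, spin on the lock if needed.
 *
 * Returns true if the lock is held
 * Returns false if the lock is not held and compaction should abort
 */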
static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
{
	if (cc->mode == MIGRATE_ASYNC) {
		if (!spin_trylock_irqsave(lock, *flags)) {
			cc->contended = COMPACT_CONTENDED_LOCK;
			return false;
		}
	} else {
		spin_lock_irqsave(lock, *flags);
	}

	return true;
}
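
/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, async compaction
 * aborts. Sync compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to fatal signal pending, or
 *		async compaction due to need_resched()
 * Returns false when compaction can continue (sync compaction might have
 *		scheduled)
 */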
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = COMPACT_CONTENDED_SCHED;
		return true;
	}

	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = COMPACT_CONTENDED_SCHED;
			return true;
		}
		cond_resched();
	}

	return false;
}
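
/*
 * Aside from avoiding lock contention, compaction also periodically checks
 * need_resched() and either schedules in sync compaction or aborts async
 * compaction. This is similar to what compact_unlock_should_abort() does, but
 * is used where no lock is concerned.
 *
 * Returns false when no scheduling was needed, or sync compaction scheduled.
 * Returns true when async compaction should abort.
 */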
static inline bool compact_should_abort(struct compact_control *cc)
{
	/* async compaction aborts if contended */
	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = COMPACT_CONTENDED_SCHED;
			return true;
		}

		cond_resched();
	}

	return false;
}
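
/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */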
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort if fatal signal
		 * pending or async compaction detects need_resched()
		 */
		if (!(blockpfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;

		/*
		 * For compound pages such as THP and hugetlbfs, we can save
		 * potentially a lot of iterations if we skip them at once.
		 * The check is racy, but we can consider only valid values
		 * and the only danger is skipping too much.
		 */
		if (PageCompound(page)) {
			unsigned int comp_order = compound_order(page);

			if (likely(comp_order < MAX_ORDER)) {
				blockpfn += (1UL << comp_order) - 1;
				cursor += (1UL << comp_order) - 1;
			}

			goto isolate_fail;
		}

		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * If we already hold the lock, we can skip some rechecking.
		 */
		if (!locked) {
			/*
			 * The zone lock must be held to isolate freepages.
			 * Unfortunately this is a very coarse lock and can be
			 * heavily contended if there are parallel allocations
			 * or parallel compactions. For async compaction do not
			 * spin on the lock and we acquire the lock as late as
			 * possible.
			 */
			locked = compact_trylock_irqsave(&cc->zone->lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		if (!isolated)
			break;

		total_isolated += isolated;
		cc->nr_freepages += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}
		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
			blockpfn += isolated;
			break;
		}

		/* Advance to the end of split page */
		blockpfn += isolated - 1;
		cursor += isolated - 1;
		continue;

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/*
	 * There is a tiny amount of risk we may have advanced
	 * beyond the end of the pageblock.
	 */
	if (unlikely(blockpfn > end_pfn))
		blockpfn = end_pfn;

	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
					nr_scanned, total_isolated);

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}
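
/**
 * isolate_freepages_range() - isolate free pages.
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause function to
 * undo its actions and return zero.
 *
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater then end_pfn if end fell in a middle of
 * a free page).
 */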
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn += isolated,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		block_end_pfn = min(block_end_pfn, end_pfn);

		/*
		 * pfn could pass the block_end_pfn if isolated freepage
		 * is more than pageblock order. In this case, we adjust
		 * scanning range to right one.
		 */
		if (pfn >= block_end_pfn) {
			block_start_pfn = pageblock_start_pfn(pfn);
			block_end_pfn = pageblock_end_pfn(pfn);
			block_end_pfn = min(block_end_pfn, end_pfn);
		}

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
						block_end_pfn, &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n.  (Max order
		 * page may span two pageblocks).
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	if (list_empty(&cc->migratepages))
		return;

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
	mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}
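
/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN to isolate
 * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
 * @isolate_mode: Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
 * Returns zero if there is a fatal signal pending, otherwise PFN of the
 * first page that was not scanned (which may be both less, equal to or more
 * than end_pfn).
 *
 * The pages are isolated on cc->migratepages list (not required to be empty),
 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
 * is neither read nor updated.
 */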
static unsigned long
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t isolate_mode)
{
	struct zone *zone = cc->zone;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;
	unsigned long start_pfn = low_pfn;
	bool skip_on_failure = false;
	unsigned long next_skip_pfn = 0;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	if (compact_should_abort(cc))
		return 0;

	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
		skip_on_failure = true;
		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
	}

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {
		bool is_lru;

		if (skip_on_failure && low_pfn >= next_skip_pfn) {
			/*
			 * We have isolated all migration candidates in the
			 * previous order-aligned block, and did not skip it due
			 * to failure. We should migrate the pages now and
			 * hopefully succeed compaction.
			 */
			if (nr_isolated)
				break;

			/*
			 * We failed to isolate in the previous order-aligned
			 * block. Set the new boundary to the end of the
			 * current block. Note we can't simply increase
			 * next_skip_pfn by 1 << order, as low_pfn might have
			 * been incremented by a higher number due to skipping
			 * a compound or a high-order buddy page in the
			 * previous loop iteration.
			 */
			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
		}

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort async compaction
		 * if contended.
		 */
		if (!(low_pfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&zone->lru_lock, flags,
								&locked, cc))
			break;

		if (!pfn_valid_within(low_pfn))
			goto isolate_fail;
		nr_scanned++;

		page = pfn_to_page(low_pfn);

		if (!valid_page)
			valid_page = page;

		/*
		 * Skip if free. We read page order here without zone lock
		 * which is generally unsafe, but the race window is small and
		 * the worst thing that can happen is that we skip some
		 * potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order < MAX_ORDER)
				low_pfn += (1UL << freepage_order) - 1;
			continue;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU pages and balloon pages
		 * Skip any other type of page
		 */
		is_lru = PageLRU(page);
		if (!is_lru) {
			if (unlikely(balloon_page_movable(page))) {
				if (balloon_page_isolate(page)) {
					/* Successfully isolated */
					goto isolate_success;
				}
			}
		}

		/*
		 * Regardless of being on LRU, compound pages such as THP and
		 * hugetlbfs are not to be compacted. We can potentially save
		 * a lot of iterations if we skip them at once. The check is
		 * racy, but we can consider only valid values and the only
		 * danger is skipping too much.
		 */
		if (PageCompound(page)) {
			unsigned int comp_order = compound_order(page);

			if (likely(comp_order < MAX_ORDER))
				low_pfn += (1UL << comp_order) - 1;

			goto isolate_fail;
		}

		if (!is_lru)
			goto isolate_fail;

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
		    page_count(page) > page_mapcount(page))
			goto isolate_fail;

		/* If we already hold the lock, we can skip some rechecking */
		if (!locked) {
			locked = compact_trylock_irqsave(&zone->lru_lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck PageLRU and PageCompound under lock */
			if (!PageLRU(page))
				goto isolate_fail;

			/*
			 * Page become compound since the non-locked check,
			 * and it's on LRU. It can only be a THP so the order
			 * is safe to read and it's 0 for tail pages.
			 */
			if (unlikely(PageCompound(page))) {
				low_pfn += (1UL << compound_order(page)) - 1;
				goto isolate_fail;
			}
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try isolate the page */
		if (__isolate_lru_page(page, isolate_mode) != 0)
			goto isolate_fail;

		VM_BUG_ON_PAGE(PageCompound(page), page);

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));

isolate_success:
		list_add(&page->lru, &cc->migratepages);
		cc->nr_migratepages++;
		nr_isolated++;

		/*
		 * Record where we could have freed pages by migration and not
		 * yet flushed them to buddy allocator.
		 * - this is the lowest page that was isolated and likely be
		 * freed in the pending migration's case or to be dropped in
		 * some other case.
		 */
		if (!cc->last_migrated_pfn)
			cc->last_migrated_pfn = low_pfn;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}

		continue;
isolate_fail:
		if (!skip_on_failure)
			continue;

		/*
		 * We have isolated some pages, but then failed. Release them
		 * instead of migrating, as we cannot form the cc->order buddy
		 * page anyway.
		 */
		if (nr_isolated) {
			if (locked) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				locked = false;
			}
			acct_isolated(zone, cc);
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			cc->last_migrated_pfn = 0;
			nr_isolated = 0;
		}

		if (low_pfn < next_skip_pfn) {
			low_pfn = next_skip_pfn - 1;
			/*
			 * The check near the loop beginning would consider the
			 * next block.
			 */
			next_skip_pfn += 1UL << cc->order;
		}
	}

	/*
	 * The PageBuddy() check could have potentially brought us outside
	 * the range to be scanned.
	 */
	if (unlikely(low_pfn > end_pfn))
		low_pfn = end_pfn;

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	/*
	 * Update the pageblock-skip information and cached scanner pfn,
	 * if the whole pageblock was scanned without isolating any page.
	 */
	if (low_pfn == end_pfn)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
						nr_scanned, nr_isolated);

	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}
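
/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Returns zero if isolation fails fatally due to e.g. pending signal.
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in a middle of a THP page).
 */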
unsigned long
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
							unsigned long end_pfn)
{
	unsigned long pfn, block_start_pfn, block_end_pfn;

	/* Scan block by block. First and last block may be incomplete */
	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn = block_end_pfn,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			continue;

		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
							ISOLATE_UNEVICTABLE);

		if (!pfn)
			break;

		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
			break;
	}
	acct_isolated(cc->zone, cc);

	return pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
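
/* Returns true if the page is within a block suitable for migration to */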
static bool suitable_migration_target(struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page)) {
		/*
		 * We are checking page_order without zone->lock taken. But
		 * the only small danger is that we skip a potentially suitable
		 * pageblock, so it's not worth to check order for valid range.
		 */
		if (page_order_unsafe(page) >= pageblock_order)
			return false;
	}

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Test whether the free scanner has reached the same or lower pageblock than
 * the migration scanner, and compaction should thus terminate.
 */
static inline bool compact_scanners_met(struct compact_control *cc)
{
	return (cc->free_pfn >> pageblock_order)
		<= (cc->migrate_pfn >> pageblock_order);
}
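
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */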
static void isolate_freepages(struct compact_control *cc)
{
	struct zone *zone = cc->zone;
	struct page *page;
	unsigned long block_start_pfn;	/* start of current pageblock */
	unsigned long isolate_start_pfn; /* exact pfn we start at */
	unsigned long block_end_pfn;	/* end of current pageblock */
	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, zone-cached value, or the end of the
	 * zone when isolating for the first time. For looping we also need
	 * this pfn aligned down to the pageblock boundary, because we do
	 * block_start_pfn -= pageblock_nr_pages in the for loop.
	 * For ending point, take care when isolating in last pageblock of a
	 * zone which ends in the middle of a pageblock.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
	isolate_start_pfn = cc->free_pfn;
	block_start_pfn = pageblock_start_pfn(cc->free_pfn);
	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
						zone_end_pfn(zone));
	low_pfn = pageblock_end_pfn(cc->migrate_pfn);

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; block_start_pfn >= low_pfn;
				block_end_pfn = block_start_pfn,
				block_start_pfn -= pageblock_nr_pages,
				isolate_start_pfn = block_start_pfn) {
		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check if we
		 * need to schedule, or even abort async compaction.
		 */
		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
		if (!page)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from. */
		isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
					freelist, false);

		/*
		 * If we isolated enough freepages, or aborted due to lock
		 * contention, terminate.
		 */
		if ((cc->nr_freepages >= cc->nr_migratepages)
							|| cc->contended) {
			if (isolate_start_pfn >= block_end_pfn) {
				/*
				 * Restart at previous pageblock if more
				 * freepages can be isolated next time.
				 */
				isolate_start_pfn =
					block_start_pfn - pageblock_nr_pages;
			}
			break;
		} else if (isolate_start_pfn < block_end_pfn) {
			/*
			 * If isolation failed early, do not continue
			 * needlessly.
			 */
			break;
		}
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	/*
	 * Record where the free scanner will restart next time. Either we
	 * broke from the loop and set isolate_start_pfn based on the last
	 * isolation result, or we met the migration scanner and the loop
	 * terminated with block_start_pfn below low_pfn.
	 */
	cc->free_pfn = isolate_start_pfn;
}
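
/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */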
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/*
	 * Isolate free pages if necessary, and if we are not aborting due to
	 * contention.
	 */
	if (list_empty(&cc->freepages)) {
		if (!cc->contended)
			isolate_freepages(cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}
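
/*
 * This is a migrate-callback that "frees" freepages back to the isolated
 * freelist.  All pages on the freelist are from the same zone, so there is no
 * special handling needed for NUMA.
 */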
static void compaction_free(struct page *page, unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;

	list_add(&page->lru, &cc->freepages);
	cc->nr_freepages++;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Allow userspace to control policy on scanning the unevictable LRU for
 * compactable pages.
 */
int sysctl_compact_unevictable_allowed __read_mostly = 1;
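
/*
 * Isolate all pages that can be migrated from the first suitable block,
 * starting at the block pointed to by the migrate scanner pfn within
 * compact_control.
 */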
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long block_start_pfn;
	unsigned long block_end_pfn;
	unsigned long low_pfn;
	struct page *page;
	const isolate_mode_t isolate_mode =
		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
		(cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);

	/*
	 * Start at where we last stopped, or beginning of the zone as
	 * initialized by compact_zone()
	 */
	low_pfn = cc->migrate_pfn;
	block_start_pfn = pageblock_start_pfn(low_pfn);
	if (block_start_pfn < zone->zone_start_pfn)
		block_start_pfn = zone->zone_start_pfn;

	/* Only scan within a pageblock boundary */
	block_end_pfn = pageblock_end_pfn(low_pfn);

	/*
	 * Iterate over whole pageblocks until we find the first suitable.
	 * Do not cross the free scanner.
	 */
	for (; block_end_pfn <= cc->free_pfn;
			low_pfn = block_end_pfn,
			block_start_pfn = block_end_pfn,
			block_end_pfn += pageblock_nr_pages) {

		/*
		 * This can potentially iterate a massively long zone with
		 * many pageblocks unsuitable, so periodically check if we
		 * need to schedule, or even abort async compaction.
		 */
		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
		if (!page)
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/*
		 * For async compaction, also only scan in MOVABLE blocks.
		 * Async compaction is optimistic to see if the minimum amount
		 * of work satisfies the allocation.
		 */
		if (cc->mode == MIGRATE_ASYNC &&
		    !migrate_async_suitable(get_pageblock_migratetype(page)))
			continue;

		/* Perform the isolation */
		low_pfn = isolate_migratepages_block(cc, low_pfn,
						block_end_pfn, isolate_mode);

		if (!low_pfn || cc->contended) {
			acct_isolated(zone, cc);
			return ISOLATE_ABORT;
		}

		/*
		 * Either we isolated something and proceed with migration. Or
		 * we failed and compact_zone should decide if we should
		 * continue or not.
		 */
		break;
	}

	acct_isolated(zone, cc);
	/* Record where migration scanner will be restarted. */
	cc->migrate_pfn = low_pfn;

	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
}
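
/*
 * order == -1 is expected when compacting via
 * /proc/sys/vm/compact_memory
 */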
static inline bool is_via_compact_memory(int order)
{
	return order == -1;
}

static enum compact_result __compact_finished(struct zone *zone, struct compact_control *cc,
			    const int migratetype)
{
	unsigned int order;
	unsigned long watermark;

	if (cc->contended || fatal_signal_pending(current))
		return COMPACT_CONTENDED;

	/* Compaction run completes if the migrate and free scanner meet */
	if (compact_scanners_met(cc)) {
		/* Let the next compaction start anew. */
		reset_cached_positions(zone);

		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kcompactd does not set the
		 * flag itself as the decision to be clear should be directly
		 * based on an allocation request.
		 */
		if (cc->direct_compaction)
			zone->compact_blockskip_flush = true;

		if (cc->whole_zone)
			return COMPACT_COMPLETE;
		else
			return COMPACT_PARTIAL_SKIPPED;
	}

	if (is_via_compact_memory(cc->order))
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);

	if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx,
							cc->alloc_flags))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		struct free_area *area = &zone->free_area[order];
		bool can_steal;

		/* Job done if page is free of the right migratetype */
		if (!list_empty(&area->free_list[migratetype]))
			return COMPACT_PARTIAL;

#ifdef CONFIG_CMA
		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
		if (migratetype == MIGRATE_MOVABLE &&
			!list_empty(&area->free_list[MIGRATE_CMA]))
			return COMPACT_PARTIAL;
#endif
		/*
		 * Job done if allocation would steal freepages from
		 * other migratetype buddy lists.
		 */
		if (find_suitable_fallback(area, order, migratetype,
						true, &can_steal) != -1)
			return COMPACT_PARTIAL;
	}

	return COMPACT_NO_SUITABLE_PAGE;
}

static enum compact_result compact_finished(struct zone *zone,
			struct compact_control *cc,
			const int migratetype)
{
	int ret;

	ret = __compact_finished(zone, cc, migratetype);
	trace_mm_compaction_finished(zone, cc->order, ret);
	if (ret == COMPACT_NO_SUITABLE_PAGE)
		ret = COMPACT_CONTINUE;

	return ret;
}
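
/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */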
static enum compact_result __compaction_suitable(struct zone *zone, int order,
					unsigned int alloc_flags,
					int classzone_idx,
					unsigned long wmark_target)
{
	int fragindex;
	unsigned long watermark;

	if (is_via_compact_memory(order))
		return COMPACT_CONTINUE;

	watermark = low_wmark_pages(zone);
	/*
	 * If watermarks for high-order allocation are already met, there
	 * should be no need for compaction at all.
	 */
	if (zone_watermark_ok(zone, order, watermark, classzone_idx,
								alloc_flags))
		return COMPACT_PARTIAL;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark += (2UL << order);
	if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
				 alloc_flags, wmark_target))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 would imply allocations might succeed depending on
	 * watermarks, but we already failed the high-order watermark check
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_NOT_SUITABLE_ZONE;

	return COMPACT_CONTINUE;
}

enum compact_result compaction_suitable(struct zone *zone, int order,
					unsigned int alloc_flags,
					int classzone_idx)
{
	enum compact_result ret;

	ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx,
				    zone_page_state(zone, NR_FREE_PAGES));
	trace_mm_compaction_suitable(zone, order, ret);
	if (ret == COMPACT_NOT_SUITABLE_ZONE)
		ret = COMPACT_SKIPPED;

	return ret;
}

bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
		int alloc_flags)
{
	struct zone *zone;
	struct zoneref *z;

	/*
	 * Make sure at least one zone would pass __compaction_suitable if we
	 * continue retrying the reclaim.
	 */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
					ac->nodemask) {
		unsigned long available;
		enum compact_result compact_result;

		/*
		 * Do not consider all the reclaimable memory because we do not
		 * want to trash just for a single high order allocation which
		 * is even not guaranteed to appear even if __compaction_suitable
		 * is happy about the watermark check.
		 */
		available = zone_reclaimable_pages(zone) / order;
		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
		compact_result = __compaction_suitable(zone, order, alloc_flags,
				ac_classzone_idx(ac), available);
		if (compact_result != COMPACT_SKIPPED &&
				compact_result != COMPACT_NOT_SUITABLE_ZONE)
			return true;
	}

	return false;
}

static enum compact_result compact_zone(struct zone *zone, struct compact_control *cc)
{
	enum compact_result ret;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
	const bool sync = cc->mode != MIGRATE_ASYNC;

	ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
							cc->classzone_idx);
	/* Compaction is likely to fail */
	if (ret == COMPACT_PARTIAL || ret == COMPACT_SKIPPED)
		return ret;

	/* huh, compaction_suitable is returning something unexpected */
	VM_BUG_ON(ret != COMPACT_CONTINUE);

	/*
	 * Clear pageblock skip if there were failures recently and compaction
	 * is about to be retried after being deferred.
	 */
	if (compaction_restarting(zone, cc->order))
		__reset_isolation_suitable(zone);

	/*
	 * Setup to move all movable pages to the end of the zone. Used cached
	 * information on where the scanners should start but check that it
	 * is initialised by ensuring the values are within zone boundaries.
	 */
	cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
	cc->free_pfn = zone->compact_cached_free_pfn;
	if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
		cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
		zone->compact_cached_free_pfn = cc->free_pfn;
	}
	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
		cc->migrate_pfn = start_pfn;
		zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
		zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
	}

	if (cc->migrate_pfn == start_pfn)
		cc->whole_zone = true;

	cc->last_migrated_pfn = 0;

	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
				cc->free_pfn, end_pfn, sync);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc, migratetype)) ==
						COMPACT_CONTINUE) {
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_CONTENDED;
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			/*
			 * We haven't isolated and migrated anything, but
			 * there might be still pages on pcplists that would
			 * satisfy the allocation, so if drain is needed it is
			 * still done in this loop iteration.
			 */
			goto check_drain;
		case ISOLATE_SUCCESS:
			;
		}

		err = migrate_pages(&cc->migratepages, compaction_alloc,
				compaction_free, (unsigned long)cc, cc->mode,
				MR_COMPACTION);

		trace_mm_compaction_migratepages(cc->nr_migratepages, err,
							&cc->migratepages);

		/* All pages were either migrated or will be released */
		cc->nr_migratepages = 0;
		if (err) {
			putback_movable_pages(&cc->migratepages);
			/*
			 * migrate_pages() may return -ENOMEM when scanners meet
			 * and we want compact_finished() to detect it
			 */
			if (err == -ENOMEM && !compact_scanners_met(cc)) {
				ret = COMPACT_CONTENDED;
				goto out;
			}
			/*
			 * We failed to migrate at least one page in the current
			 * order-aligned block, so skip the rest of it.
			 */
			if (cc->direct_compaction &&
						(cc->mode == MIGRATE_ASYNC)) {
				cc->migrate_pfn = block_end_pfn(
						cc->migrate_pfn - 1, cc->order);
				/* Draining pcplists is useless in this case */
				cc->last_migrated_pfn = 0;

			}
		}

check_drain:
		/*
		 * Has the migration scanner moved away from the previous
		 * cc->order aligned block where we migrated from? If yes,
		 * flush the pages that were freed, so that they can merge and
		 * compact_finished() can detect immediately if allocation
		 * would succeed.
		 */
		if (cc->order > 0 && cc->last_migrated_pfn) {
			int cpu;
			unsigned long current_block_start =
				block_start_pfn(cc->migrate_pfn, cc->order);

			if (cc->last_migrated_pfn < current_block_start) {
				cpu = get_cpu();
				lru_add_drain_cpu(cpu);
				drain_local_pages(zone);
				put_cpu();
				/* No more flushing until we migrate again */
				cc->last_migrated_pfn = 0;
			}
		}

	}

out:
	/*
	 * Release free pages and update where the free scanner should restart,
	 * so we don't leave any returned pages behind in the next attempt.
	 */
	if (cc->nr_freepages > 0) {
		unsigned long free_pfn = release_freepages(&cc->freepages);

		cc->nr_freepages = 0;
		VM_BUG_ON(free_pfn == 0);
		/* The cached pfn is always the first in a pageblock */
		free_pfn = pageblock_start_pfn(free_pfn);
		/*
		 * Only go back, not forward. The cached pfn might have been
		 * already reset to zone end in compact_finished()
		 */
		if (free_pfn > zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = free_pfn;
	}

	trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
				cc->free_pfn, end_pfn, sync, ret);

	if (ret == COMPACT_CONTENDED)
		ret = COMPACT_PARTIAL;

	return ret;
}

static enum compact_result compact_zone_order(struct zone *zone, int order,
		gfp_t gfp_mask, enum migrate_mode mode, int *contended,
		unsigned int alloc_flags, int classzone_idx)
{
	enum compact_result ret;
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.gfp_mask = gfp_mask,
		.zone = zone,
		.mode = mode,
		.alloc_flags = alloc_flags,
		.classzone_idx = classzone_idx,
		.direct_compaction = true,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	ret = compact_zone(zone, &cc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	*contended = cc.contended;
	return ret;
}

int sysctl_extfrag_threshold = 500;
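
/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @order: The order of the current allocation
 * @alloc_flags: The allocation flags of the current allocation
 * @ac: The context of current allocation
 * @mode: The migration mode for async, sync light, or sync migration
 * @contended: Return value that determines if compaction was aborted due to
 *	       need_resched() or lock contention
 *
 * This is the main entry point for direct page compaction.
 */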
enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
		unsigned int alloc_flags, const struct alloc_context *ac,
		enum migrate_mode mode, int *contended)
{
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	enum compact_result rc = COMPACT_SKIPPED;
	int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */

	*contended = COMPACT_CONTENDED_NONE;

	/* Check if the GFP flags allow compaction */
	if (!order || !may_enter_fs || !may_perform_io)
		return COMPACT_SKIPPED;

	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode);

	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
								ac->nodemask) {
		enum compact_result status;
		int zone_contended;

		if (compaction_deferred(zone, order)) {
			rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
			continue;
		}

		status = compact_zone_order(zone, order, gfp_mask, mode,
				&zone_contended, alloc_flags,
				ac_classzone_idx(ac));
		rc = max(status, rc);
		/*
		 * It takes at least one zone that wasn't lock contended
		 * to clear all_zones_contended.
		 */
		all_zones_contended &= zone_contended;

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
					ac_classzone_idx(ac), alloc_flags)) {
			/*
			 * We think the allocation will succeed in this zone,
			 * but it is not certain, hence the false. The caller
			 * will repeat this with true if allocation indeed
			 * succeeds in this zone.
			 */
			compaction_defer_reset(zone, order, false);
			/*
			 * It is possible that async compaction aborted due to
			 * need_resched() and the watermarks were ok thanks to
			 * somebody else freeing memory. The allocation can
			 * however still fail so we better signal the
			 * need_resched() contention anyway (this will not
			 * prevent the allocation attempt).
			 */
			if (zone_contended == COMPACT_CONTENDED_SCHED)
				*contended = COMPACT_CONTENDED_SCHED;

			goto break_loop;
		}

		if (mode != MIGRATE_ASYNC && (status == COMPACT_COMPLETE ||
					status == COMPACT_PARTIAL_SKIPPED)) {
			/*
			 * We think that allocation won't succeed in this zone
			 * so we defer compaction there. If it ends up
			 * succeeding after all, it will be reset.
			 */
			defer_compaction(zone, order);
		}

		/*
		 * We might have stopped compacting due to need_resched() in
		 * async compaction, or due to a fatal signal detected. In that
		 * case do not try further zones and signal need_resched()
		 * contention.
		 */
		if ((zone_contended == COMPACT_CONTENDED_SCHED)
					|| fatal_signal_pending(current)) {
			*contended = COMPACT_CONTENDED_SCHED;
			goto break_loop;
		}

		continue;
break_loop:
		/*
		 * We might not have tried all the zones, so we don't
		 * mark all zones contended.
		 */
		all_zones_contended = 0;
		break;
	}

	/*
	 * If at least one zone wasn't deferred or skipped, we report if all
	 * zones that were tried were lock contended.
	 */
	if (rc > COMPACT_INACTIVE && all_zones_contended)
		*contended = COMPACT_CONTENDED_LOCK;

	return rc;
}

/* Compact all zones within a node */
static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		/*
		 * When called via /proc/sys/vm/compact_memory
		 * this makes sure we compact the whole zone regardless of
		 * cached scanner positions.
		 */
		if (is_via_compact_memory(cc->order))
			__reset_isolation_suitable(zone);

		if (is_via_compact_memory(cc->order) ||
				!compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));

		if (is_via_compact_memory(cc->order))
			continue;

		if (zone_watermark_ok(zone, cc->order,
				low_wmark_pages(zone), 0, 0))
			compaction_defer_reset(zone, cc->order, false);
	}
}

void compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.mode = MIGRATE_ASYNC,
	};

	if (!order)
		return;

	__compact_pgdat(pgdat, &cc);
}

static void compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
	};

	__compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/*
 * This is the entry point for compacting all nodes via
 * /proc/sys/vm/compact_memory
 */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		compact_nodes();

	return 0;
}

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
static ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

static inline bool kcompactd_work_requested(pg_data_t *pgdat)
{
	return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
}

static bool kcompactd_node_suitable(pg_data_t *pgdat)
{
	int zoneid;
	struct zone *zone;
	enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;

	for (zoneid = 0; zoneid <= classzone_idx; zoneid++) {
		zone = &pgdat->node_zones[zoneid];

		if (!populated_zone(zone))
			continue;

		if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
					classzone_idx) == COMPACT_CONTINUE)
			return true;
	}

	return false;
}

static void kcompactd_do_work(pg_data_t *pgdat)
{
	/*
	 * With no special task, compact all zones so that a page of requested
	 * order is allocatable.
	 */
	int zoneid;
	struct zone *zone;
	struct compact_control cc = {
		.order = pgdat->kcompactd_max_order,
		.classzone_idx = pgdat->kcompactd_classzone_idx,
		.mode = MIGRATE_SYNC_LIGHT,
		.ignore_skip_hint = true,
	};
	bool success = false;

	trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
							cc.classzone_idx);
	count_vm_event(KCOMPACTD_WAKE);

	for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) {
		int status;

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		if (compaction_deferred(zone, cc.order))
			continue;

		if (compaction_suitable(zone, cc.order, 0, zoneid) !=
							COMPACT_CONTINUE)
			continue;

		cc.nr_freepages = 0;
		cc.nr_migratepages = 0;
		cc.zone = zone;
		INIT_LIST_HEAD(&cc.freepages);
		INIT_LIST_HEAD(&cc.migratepages);

		if (kthread_should_stop())
			return;
		status = compact_zone(zone, &cc);

		if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone),
						cc.classzone_idx, 0)) {
			success = true;
			compaction_defer_reset(zone, cc.order, false);
		} else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
			/*
			 * We use sync migration mode here, so we defer like
			 * sync direct compaction does.
			 */
			defer_compaction(zone, cc.order);
		}

		VM_BUG_ON(!list_empty(&cc.freepages));
		VM_BUG_ON(!list_empty(&cc.migratepages));
	}

	/*
	 * Regardless of success, we are done until woken up next. But remember
	 * the requested order/classzone_idx in case it was higher/tighter than
	 * our current ones
	 */
	if (pgdat->kcompactd_max_order <= cc.order)
		pgdat->kcompactd_max_order = 0;
	if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
		pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
}

void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
{
	if (!order)
		return;

	if (pgdat->kcompactd_max_order < order)
		pgdat->kcompactd_max_order = order;

	if (pgdat->kcompactd_classzone_idx > classzone_idx)
		pgdat->kcompactd_classzone_idx = classzone_idx;

	if (!waitqueue_active(&pgdat->kcompactd_wait))
		return;

	if (!kcompactd_node_suitable(pgdat))
		return;

	trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
							classzone_idx);
	wake_up_interruptible(&pgdat->kcompactd_wait);
}
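
/*
 * The background compaction daemon, started as a kernel thread
 * from the init process.
 */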
static int kcompactd(void *p)
{
	pg_data_t *pgdat = (pg_data_t*)p;
	struct task_struct *tsk = current;

	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);

	set_freezable();

	pgdat->kcompactd_max_order = 0;
	pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;

	while (!kthread_should_stop()) {
		trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
		wait_event_freezable(pgdat->kcompactd_wait,
				kcompactd_work_requested(pgdat));

		kcompactd_do_work(pgdat);
	}

	return 0;
}
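
/*
 * This kcompactd start function will be called by init and node-hot-add.
 * On node-hot-add, kcompactd will be moved to the proper cpus if cpus are
 * hot-added.
 */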
int kcompactd_run(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int ret = 0;

	if (pgdat->kcompactd)
		return 0;

	pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
	if (IS_ERR(pgdat->kcompactd)) {
		pr_err("Failed to start kcompactd on node %d\n", nid);
		ret = PTR_ERR(pgdat->kcompactd);
		pgdat->kcompactd = NULL;
	}
	return ret;
}
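
/*
 * Called by memory hotplug when all memory in a node is offlined. Caller must
 * hold mem_hotplug_begin/end().
 */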
void kcompactd_stop(int nid)
{
	struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;

	if (kcompactd) {
		kthread_stop(kcompactd);
		NODE_DATA(nid)->kcompactd = NULL;
	}
}
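
/*
 * It's optimal to keep kcompactd on the same CPUs as their memory, but
 * not required for correctness. So if the last cpu in a node goes
 * away, we get changed to run anywhere: as the first one comes back,
 * restore their cpu bindings.
 */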
static int cpu_callback(struct notifier_block *nfb, unsigned long action,
			void *hcpu)
{
	int nid;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
		for_each_node_state(nid, N_MEMORY) {
			pg_data_t *pgdat = NODE_DATA(nid);
			const struct cpumask *mask;

			mask = cpumask_of_node(pgdat->node_id);

			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
				/* One of our CPUs online: restore mask */
				set_cpus_allowed_ptr(pgdat->kcompactd, mask);
		}
	}
	return NOTIFY_OK;
}

static int __init kcompactd_init(void)
{
	int nid;

	for_each_node_state(nid, N_MEMORY)
		kcompactd_run(nid);
	hotcpu_notifier(cpu_callback, 0);
	return 0;
}
subsys_initcall(kcompactd_init)

#endif /* CONFIG_COMPACTION */