/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}
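
/*
 * Run the arch allocation hook and map the pages into the kernel direct
 * mapping; split_free_page() does not do this for us.
 */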
static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}
}
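
/* Returns true if the migratetype is suitable for async compaction (movable or CMA) */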
static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

#ifdef CONFIG_COMPACTION

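/* Returns true if the pageblock should be scanned for pages to isolate. */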
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long pfn;

	zone->compact_cached_migrate_pfn = start_pfn;
	zone->compact_cached_free_pfn = end_pfn;
	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (zone != page_zone(page))
			continue;

		clear_pageblock_skip(page);
	}
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;

	if (cc->ignore_skip_hint)
		return;

	if (!page)
		return;

	if (!nr_isolated) {
		unsigned long pfn = page_to_pfn(page);
		set_pageblock_skip(page);

		/* Update where compaction should restart */
		if (migrate_scanner) {
			if (!cc->finished_update_migrate &&
			    pfn > zone->compact_cached_migrate_pfn)
				zone->compact_cached_migrate_pfn = pfn;
		} else {
			if (!cc->finished_update_free &&
			    pfn < zone->compact_cached_free_pfn)
				zone->compact_cached_free_pfn = pfn;
		}
	}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif

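/* Returns true if the lock is heavily contended or we need to reschedule */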
static inline bool should_release_lock(spinlock_t *lock)
{
	return need_resched() || spin_is_contended(lock);
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. Check if the process needs to be scheduled or
 * if the lock is contended. For async compaction, back out in the event
 * of contention.
 *
 * Returns true if the lock is held.
 * Returns false if the lock is released and compaction should abort
 */
static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
				      bool locked, struct compact_control *cc)
{
	if (should_release_lock(lock)) {
		if (locked) {
			spin_unlock_irqrestore(lock, *flags);
			locked = false;
		}

		/* async aborts if taking too long or contended */
		if (!cc->sync) {
			cc->contended = true;
			return false;
		}

		cond_resched();
	}

	if (!locked)
		spin_lock_irqsave(lock, *flags);
	return true;
}
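
/*
 * Acquire the lock for the first time in a scan; like
 * compact_checklock_irqsave() it backs out instead of spinning when
 * async compaction hits contention.
 */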
static inline bool compact_trylock_irqsave(spinlock_t *lock,
			unsigned long *flags, struct compact_control *cc)
{
	return compact_checklock_irqsave(lock, flags, false, cc);
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return false;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long blockpfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long flags;
	bool locked = false;
	bool checked_pageblock = false;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;
		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * The zone lock must be held to isolate freepages.
		 * Unfortunately this is a very coarse lock and can be
		 * heavily contended if there are parallel allocations
		 * or parallel compactions. For async compaction do not
		 * spin on the lock and we acquire the lock as late as
		 * possible.
		 */
		locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
								locked, cc);
		if (!locked)
			break;

		/* Recheck this is a suitable migration target under lock */
		if (!strict && !checked_pageblock) {
			/*
			 * We need to check suitability of pageblock only once
			 * and this isolate_freepages_block() is called with
			 * pageblock range, so just check once is sufficient.
			 */
			checked_pageblock = true;
			if (!suitable_migration_target(page))
				break;
		}

		/* Recheck this is a buddy page under lock */
		if (!PageBuddy(page))
			goto isolate_fail;

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
			continue;
		}

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause function to
 * undo its actions and return zero.
 *
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in a middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn;
	LIST_HEAD(freelist);

	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
		if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
			break;

		/*
		 * On subsequent iterations ALIGN() is actually not needed,
		 * but we keep it so as not to complicate the code.
		 */
		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
						   &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n.  (Max order
		 * page may span two pageblocks.)
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	/* If locked we can use the interrupt unsafe versions */
	if (locked) {
		__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	} else {
		mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	}
}

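/* Similar to reclaim, but different enough that they don't share logic */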
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}

/**
 * isolate_migratepages_range() - isolate all migrate-able pages in range.
 * @zone:	Zone pages are in.
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN of the range.
 * @end_pfn:	The one-past-the-last PFN of the range.
 * @unevictable: true if it allows to isolate unevictable pages
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). Returns zero if compaction must abort before any
 * scanning took place (async compaction while too many pages are
 * already isolated, or a fatal signal while throttling); otherwise
 * returns the PFN at which the scan stopped, which equals end_pfn when
 * the whole range was scanned.
 *
 * The isolated pages are placed on cc->migratepages (which is not
 * required to be empty), and cc->nr_migratepages is updated accordingly.
 */
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
		unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
{
	unsigned long last_pageblock_nr = 0, pageblock_nr;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	struct lruvec *lruvec;
	unsigned long flags;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;
	bool skipped_async_unsuitable = false;
	const isolate_mode_t mode = (!cc->sync ? ISOLATE_ASYNC_MIGRATE : 0) |
				    (unevictable ? ISOLATE_UNEVICTABLE : 0);

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just fail */
		if (!cc->sync)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	/* Time to isolate some pages for migration */
	cond_resched();
	for (; low_pfn < end_pfn; low_pfn++) {
		/* give a chance to irqs before checking need_resched() */
		if (locked && !(low_pfn % SWAP_CLUSTER_MAX)) {
			if (should_release_lock(&zone->lru_lock)) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				locked = false;
			}
		}

		/*
		 * migrate_pfn does not necessarily start aligned to a
		 * pageblock. Ensure that pfn_valid is called when moving
		 * into a new MAX_ORDER_NR_PAGES range in case of large
		 * memory holes within the zone
		 */
		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
			if (!pfn_valid(low_pfn)) {
				low_pfn += MAX_ORDER_NR_PAGES - 1;
				continue;
			}
		}

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		/*
		 * Get the page and ensure the page is within the same zone.
		 * See the comment in isolate_freepages about overlapping
		 * nodes. It is deliberate that the new zone lock is not taken
		 * as memory compaction should not move pages between nodes.
		 */
		page = pfn_to_page(low_pfn);
		if (page_zone(page) != zone)
			continue;

		if (!valid_page)
			valid_page = page;

		/* If isolation recently failed, do not retry */
		pageblock_nr = low_pfn >> pageblock_order;
		if (last_pageblock_nr != pageblock_nr) {
			int mt;

			last_pageblock_nr = pageblock_nr;
			if (!isolation_suitable(cc, page))
				goto next_pageblock;

			/*
			 * For async migration, also only scan in MOVABLE
			 * blocks. Async migration is optimized to not
			 * disrupt the allocator.
			 */
			mt = get_pageblock_migratetype(page);
			if (!cc->sync && !migrate_async_suitable(mt)) {
				cc->finished_update_migrate = true;
				skipped_async_unsuitable = true;
				goto next_pageblock;
			}
		}

		/*
		 * Skip if free. page_order cannot be used without zone->lock
		 * as nothing prevents parallel allocations or buddy merging.
		 */
		if (PageBuddy(page))
			continue;

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU pages and balloon pages
		 * without locking
		 */
		if (!PageLRU(page)) {
			if (unlikely(balloon_page_movable(page))) {
				if (locked && balloon_page_isolate(page)) {
					/* Successfully isolated */
					goto isolate_success;
				}
			}
			continue;
		}

		/*
		 * PageLRU is set. lru_lock normally excludes isolation
		 * splitting and collapsing (collapsing has already happened
		 * if PageLRU is set) but the lock is not necessarily taken
		 * here and it is wasteful to take it just to check transhuge.
		 * Check TransHuge without lock and skip the whole pageblock if
		 * it's either a transhuge or hugetlbfs page, as calling
		 * compound_order() without preventing THP from splitting the
		 * page underneath us may return surprising results.
		 */
		if (PageTransHuge(page)) {
			if (!locked)
				goto next_pageblock;
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
		    page_count(page) > page_mapcount(page))
			continue;

		/* Check if it is ok to still hold the lock */
		locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
								locked, cc);
		if (!locked || fatal_signal_pending(current))
			break;

		/* Recheck PageLRU and PageTransHuge under lock */
		if (!PageLRU(page))
			continue;
		if (PageTransHuge(page)) {
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try isolate the page */
		if (__isolate_lru_page(page, mode) != 0)
			continue;

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));

isolate_success:
		cc->finished_update_migrate = true;
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}

		continue;

next_pageblock:
		low_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages) - 1;
	}

	acct_isolated(zone, locked, cc);

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	/*
	 * Update the pageblock-skip information and cached scanner pfn,
	 * if the whole pageblock was scanned without isolating any page.
	 * This is not done when pageblock was skipped due to being unsuitable
	 * for async compaction, so that eventual sync compaction can detect
	 * if pageblock is used for THP allocations.
	 */
	if (low_pfn == end_pfn && !skipped_async_unsuitable)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long high_pfn, low_pfn, pfn, z_end_pfn;
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * scanned from (or the end of the zone if starting). The low point
	 * is the end of the pageblock the migration scanner is using.
	 */
	pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);

	/*
	 * Take care that if the migration scanner is at the end of the zone
	 * that the free scanner does not accidentally move to the next zone
	 * in the next isolation cycle.
	 */
	high_pfn = min(low_pfn, pfn);

	z_end_pfn = zone_end_pfn(zone);

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
					pfn -= pageblock_nr_pages) {
		unsigned long isolated;
		unsigned long end_pfn;

		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check if we need
		 * to schedule.
		 */
		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 * node0 node1 node0
		 * i.e. it's possible that all pages within a zones range of
		 * pages do not belong to a single zone.
		 */
		page = pfn_to_page(pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from */
		isolated = 0;

		/*
		 * As pfn may not start aligned, pfn+pageblock_nr_pages
		 * may cross a MAX_ORDER_NR_PAGES boundary and miss
		 * a pfn_valid check. Ensure isolate_freepages_block()
		 * only scans within a pageblock
		 */
		end_pfn = min(pfn + pageblock_nr_pages, z_end_pfn);
		isolated = isolate_freepages_block(cc, pfn, end_pfn,
						   freelist, false);
		nr_freepages += isolated;

		/*
		 * Record the highest PFN we isolated pages from. When next
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator
		 */
		if (isolated) {
			cc->finished_update_free = true;
			high_pfn = max(high_pfn, pfn);
		}
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	/*
	 * If we crossed the migrate scanner, we want to keep it that way
	 * so that compact_finished() may detect this
	 */
	if (pfn < low_pfn)
		cc->free_pfn = max(pfn, zone->zone_start_pfn);
	else
		cc->free_pfn = high_pfn;
	cc->nr_freepages = nr_freepages;
}
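
/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block compaction is currently
 * performing a migration of in compact_control.
 */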
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/* Isolate free pages if necessary */
	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}
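
/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */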
static void update_nr_listpages(struct compact_control *cc)
{
	int nr_migratepages = 0;
	int nr_freepages = 0;
	struct page *page;

	list_for_each_entry(page, &cc->migratepages, lru)
		nr_migratepages++;
	list_for_each_entry(page, &cc->freepages, lru)
		nr_freepages++;

	cc->nr_migratepages = nr_migratepages;
	cc->nr_freepages = nr_freepages;
}
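
/* possible outcome of isolate_migratepages */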
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return ISOLATE_NONE;
	}

	/* Perform the isolation */
	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
	if (!low_pfn || cc->contended)
		return ISOLATE_ABORT;

	cc->migrate_pfn = low_pfn;

	return ISOLATE_SUCCESS;
}
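
/*
 * Decide whether compaction of this zone is finished: either the free and
 * migrate scanners have met, or a page of the requested order has become
 * available for the direct compactor.
 */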
static int compact_finished(struct zone *zone,
			    struct compact_control *cc)
{
	unsigned int order;
	unsigned long watermark;

	if (fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanner meet */
	if (cc->free_pfn <= cc->migrate_pfn) {
		/* Let the next compaction start anew. */
		zone->compact_cached_migrate_pfn = zone->zone_start_pfn;
		zone->compact_cached_free_pfn = zone_end_pfn(zone);

		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kswapd does not set the
		 * flag itself as the decision to be clear should be directly
		 * based on an allocation request.
		 */
		if (!current_is_kswapd())
			zone->compact_blockskip_flush = true;

		return COMPACT_COMPLETE;
	}

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);
	watermark += (1 << cc->order);

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		struct free_area *area = &zone->free_area[order];

		/* Job done if page is free of the right migratetype */
		if (!list_empty(&area->free_list[cc->migratetype]))
			return COMPACT_PARTIAL;

		/* Job done if allocation would set block type */
		if (cc->order >= pageblock_order && area->nr_free)
			return COMPACT_PARTIAL;
	}

	return COMPACT_CONTINUE;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
		return COMPACT_CONTINUE;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark = low_wmark_pages(zone) + (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 implies allocations might succeed depending on
	 * watermarks
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_SKIPPED;

	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
	    0, 0))
		return COMPACT_PARTIAL;

	return COMPACT_CONTINUE;
}

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);

	ret = compaction_suitable(zone, cc->order);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/*
	 * Clear pageblock skip if there were failures recently and compaction
	 * is about to be retried after being deferred. kswapd does not do
	 * this reset as it'll reset the cached information when going to sleep.
	 */
	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
		__reset_isolation_suitable(zone);

	/*
	 * Setup to move all movable pages to the end of the zone. Used cached
	 * information on where the scanners should start but check that it
	 * is initialised by ensuring the values are within zone boundaries.
	 */
	cc->migrate_pfn = zone->compact_cached_migrate_pfn;
	cc->free_pfn = zone->compact_cached_free_pfn;
	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
		cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
		zone->compact_cached_free_pfn = cc->free_pfn;
	}
	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
		cc->migrate_pfn = start_pfn;
		zone->compact_cached_migrate_pfn = cc->migrate_pfn;
	}

	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		unsigned long nr_migrate, nr_remaining;
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_PARTIAL;
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			continue;
		case ISOLATE_SUCCESS:
			;
		}

		nr_migrate = cc->nr_migratepages;
		err = migrate_pages(&cc->migratepages, compaction_alloc,
				(unsigned long)cc,
				cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC,
				MR_COMPACTION);
		update_nr_listpages(cc);
		nr_remaining = cc->nr_migratepages;

		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
						nr_remaining);

		/* Release isolated pages not migrated */
		if (err) {
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			/*
			 * migrate_pages() may return -ENOMEM when scanners meet
			 * and we want compact_finished() to detect it
			 */
			if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
				ret = COMPACT_PARTIAL;
				goto out;
			}
		}
	}

out:
	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	trace_mm_compaction_end(ret);

	return ret;
}
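
/* Compact a single zone on behalf of a direct-compacting allocation */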
static unsigned long compact_zone_order(struct zone *zone,
				 int order, gfp_t gfp_mask,
				 bool sync, bool *contended)
{
	unsigned long ret;
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
		.sync = sync,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	ret = compact_zone(zone, &cc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	*contended = cc.contended;
	return ret;
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @sync: Whether migration is synchronous or not
 * @contended: Return value that is true if compaction was aborted due to lock contention
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync, bool *contended)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_SKIPPED;
	int alloc_flags = 0;

	/* Check if the GFP flags allow compaction */
	if (!order || !may_enter_fs || !may_perform_io)
		return rc;

	count_compact_event(COMPACTSTALL);

#ifdef CONFIG_CMA
	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	/* Compact each zone in the zonelist */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
								nodemask) {
		int status;

		status = compact_zone_order(zone, order, gfp_mask, sync,
						contended);
		rc = max(status, rc);

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
				      alloc_flags))
			break;
	}

	return rc;
}

/* Compact all zones within a node */
static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		if (cc->order > 0) {
			if (zone_watermark_ok(zone, cc->order,
						low_wmark_pages(zone), 0, 0))
				compaction_defer_reset(zone, cc->order, false);
			/* Currently async compaction is never deferred. */
			else if (cc->sync)
				defer_compaction(zone, cc->order);
		}

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));
	}
}
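
/* Compact all populated zones within a node, called from kswapd */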
void compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.sync = false,
	};

	if (!order)
		return;

	__compact_pgdat(pgdat, &cc);
}
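
/* Compact an entire node synchronously, ignoring pageblock skip hints */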
static void compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.sync = true,
		.ignore_skip_hint = true,
	};

	__compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		compact_nodes();

	return 0;
}

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
static ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#endif /* CONFIG_COMPACTION */