linux/mm/compaction.c
   1/*
   2 * linux/mm/compaction.c
   3 *
   4 * Memory compaction for the reduction of external fragmentation. Note that
   5 * this heavily depends upon page migration to do all the real heavy
   6 * lifting
   7 *
   8 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
   9 */
  10#include <linux/cpu.h>
  11#include <linux/swap.h>
  12#include <linux/migrate.h>
  13#include <linux/compaction.h>
  14#include <linux/mm_inline.h>
  15#include <linux/backing-dev.h>
  16#include <linux/sysctl.h>
  17#include <linux/sysfs.h>
  18#include <linux/balloon_compaction.h>
  19#include <linux/page-isolation.h>
  20#include <linux/kasan.h>
  21#include <linux/kthread.h>
  22#include <linux/freezer.h>
  23#include "internal.h"
  24
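     /*
      * Compaction VM event accounting. When CONFIG_COMPACTION is not set,
      * these helpers compile away to no-ops so callers need no #ifdefs.
      */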
  25#ifdef CONFIG_COMPACTION
  26static inline void count_compact_event(enum vm_event_item item)
  27{
  28        count_vm_event(item);
  29}
  30
  31static inline void count_compact_events(enum vm_event_item item, long delta)
  32{
  33        count_vm_events(item, delta);
  34}
  35#else
  36#define count_compact_event(item) do { } while (0)
  37#define count_compact_events(item, delta) do { } while (0)
  38#endif
  39
  40#if defined CONFIG_COMPACTION || defined CONFIG_CMA
  41
  42#define CREATE_TRACE_POINTS
  43#include <trace/events/compaction.h>
  44
  45#define block_start_pfn(pfn, order)     round_down(pfn, 1UL << (order))
  46#define block_end_pfn(pfn, order)       ALIGN((pfn) + 1, 1UL << (order))
  47#define pageblock_start_pfn(pfn)        block_start_pfn(pfn, pageblock_order)
  48#define pageblock_end_pfn(pfn)          block_end_pfn(pfn, pageblock_order)
  49
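     /*
      * Return isolated pages on @freelist to the buddy allocator and
      * report the highest PFN that was freed.
      */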
  50static unsigned long release_freepages(struct list_head *freelist)
  51{
  52        struct page *page, *next;
  53        unsigned long high_pfn = 0;
  54
  55        list_for_each_entry_safe(page, next, freelist, lru) {
  56                unsigned long pfn = page_to_pfn(page);
  57                list_del(&page->lru);
  58                __free_page(page);
  59                if (pfn > high_pfn)
  60                        high_pfn = pfn;
  61        }
  62
  63        return high_pfn;
  64}
  65
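     /*
      * split_free_page() does not map the pages; apply the
      * arch_alloc_page(), kernel_map_pages() and kasan_alloc_pages()
      * hooks that a normal page allocation would.
      */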
  66static void map_pages(struct list_head *list)
  67{
  68        struct page *page;
  69
  70        list_for_each_entry(page, list, lru) {
  71                arch_alloc_page(page, 0);
  72                kernel_map_pages(page, 1, 1);
  73                kasan_alloc_pages(page, 0);
  74        }
  75}
  76
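     /*
      * Pageblock migratetypes that compaction treats as suitable for
      * migration: MIGRATE_MOVABLE and MIGRATE_CMA.
      */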
  77static inline bool migrate_async_suitable(int migratetype)
  78{
  79        return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
  80}
  81
  82#ifdef CONFIG_COMPACTION
  83
  84/* Do not skip compaction more than 64 times */
  85#define COMPACT_MAX_DEFER_SHIFT 6
  86
  87/*
  88 * Compaction is deferred when compaction fails to result in a page
   89 * allocation success. 1 << compact_defer_shift compactions are skipped up
   90 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
  91 */
  92void defer_compaction(struct zone *zone, int order)
  93{
  94        zone->compact_considered = 0;
  95        zone->compact_defer_shift++;
  96
  97        if (order < zone->compact_order_failed)
  98                zone->compact_order_failed = order;
  99
 100        if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
 101                zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
 102
 103        trace_mm_compaction_defer_compaction(zone, order);
 104}
 105
 106/* Returns true if compaction should be skipped this time */
 107bool compaction_deferred(struct zone *zone, int order)
 108{
 109        unsigned long defer_limit = 1UL << zone->compact_defer_shift;
 110
 111        if (order < zone->compact_order_failed)
 112                return false;
 113
 114        /* Avoid possible overflow */
 115        if (++zone->compact_considered > defer_limit)
 116                zone->compact_considered = defer_limit;
 117
 118        if (zone->compact_considered >= defer_limit)
 119                return false;
 120
 121        trace_mm_compaction_deferred(zone, order);
 122
 123        return true;
 124}
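     /*
      * Example: starting from a reset state, three consecutive failures
      * leave compact_defer_shift at 3, so compaction_deferred() keeps
      * returning true until the zone has been considered 1 << 3 = 8
      * times; a successful allocation then resets both counters via
      * compaction_defer_reset().
      */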
 125
 126/*
  127 * Update defer tracking counters after successful compaction of the given order,
 128 * which means an allocation either succeeded (alloc_success == true) or is
 129 * expected to succeed.
 130 */
 131void compaction_defer_reset(struct zone *zone, int order,
 132                bool alloc_success)
 133{
 134        if (alloc_success) {
 135                zone->compact_considered = 0;
 136                zone->compact_defer_shift = 0;
 137        }
 138        if (order >= zone->compact_order_failed)
 139                zone->compact_order_failed = order + 1;
 140
 141        trace_mm_compaction_defer_reset(zone, order);
 142}
 143
 144/* Returns true if restarting compaction after many failures */
 145bool compaction_restarting(struct zone *zone, int order)
 146{
 147        if (order < zone->compact_order_failed)
 148                return false;
 149
 150        return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
 151                zone->compact_considered >= 1UL << zone->compact_defer_shift;
 152}
 153
 154/* Returns true if the pageblock should be scanned for pages to isolate. */
 155static inline bool isolation_suitable(struct compact_control *cc,
 156                                        struct page *page)
 157{
 158        if (cc->ignore_skip_hint)
 159                return true;
 160
 161        return !get_pageblock_skip(page);
 162}
 163
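     /*
      * Put the cached migrate scanner positions back at the start of the
      * zone and the cached free scanner position at the last pageblock.
      */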
 164static void reset_cached_positions(struct zone *zone)
 165{
 166        zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
 167        zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
 168        zone->compact_cached_free_pfn =
 169                                pageblock_start_pfn(zone_end_pfn(zone) - 1);
 170}
 171
 172/*
 173 * This function is called to clear all cached information on pageblocks that
 174 * should be skipped for page isolation when the migrate and free page scanner
 175 * meet.
 176 */
 177static void __reset_isolation_suitable(struct zone *zone)
 178{
 179        unsigned long start_pfn = zone->zone_start_pfn;
 180        unsigned long end_pfn = zone_end_pfn(zone);
 181        unsigned long pfn;
 182
 183        zone->compact_blockskip_flush = false;
 184
 185        /* Walk the zone and mark every pageblock as suitable for isolation */
 186        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
 187                struct page *page;
 188
 189                cond_resched();
 190
 191                if (!pfn_valid(pfn))
 192                        continue;
 193
 194                page = pfn_to_page(pfn);
 195                if (zone != page_zone(page))
 196                        continue;
 197
 198                clear_pageblock_skip(page);
 199        }
 200
 201        reset_cached_positions(zone);
 202}
 203
 204void reset_isolation_suitable(pg_data_t *pgdat)
 205{
 206        int zoneid;
 207
 208        for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
 209                struct zone *zone = &pgdat->node_zones[zoneid];
 210                if (!populated_zone(zone))
 211                        continue;
 212
 213                /* Only flush if a full compaction finished recently */
 214                if (zone->compact_blockskip_flush)
 215                        __reset_isolation_suitable(zone);
 216        }
 217}
 218
 219/*
 220 * If no pages were isolated then mark this pageblock to be skipped in the
 221 * future. The information is later cleared by __reset_isolation_suitable().
 222 */
 223static void update_pageblock_skip(struct compact_control *cc,
 224                        struct page *page, unsigned long nr_isolated,
 225                        bool migrate_scanner)
 226{
 227        struct zone *zone = cc->zone;
 228        unsigned long pfn;
 229
 230        if (cc->ignore_skip_hint)
 231                return;
 232
 233        if (!page)
 234                return;
 235
 236        if (nr_isolated)
 237                return;
 238
 239        set_pageblock_skip(page);
 240
 241        pfn = page_to_pfn(page);
 242
 243        /* Update where async and sync compaction should restart */
 244        if (migrate_scanner) {
 245                if (pfn > zone->compact_cached_migrate_pfn[0])
 246                        zone->compact_cached_migrate_pfn[0] = pfn;
 247                if (cc->mode != MIGRATE_ASYNC &&
 248                    pfn > zone->compact_cached_migrate_pfn[1])
 249                        zone->compact_cached_migrate_pfn[1] = pfn;
 250        } else {
 251                if (pfn < zone->compact_cached_free_pfn)
 252                        zone->compact_cached_free_pfn = pfn;
 253        }
 254}
 255#else
 256static inline bool isolation_suitable(struct compact_control *cc,
 257                                        struct page *page)
 258{
 259        return true;
 260}
 261
 262static void update_pageblock_skip(struct compact_control *cc,
 263                        struct page *page, unsigned long nr_isolated,
 264                        bool migrate_scanner)
 265{
 266}
 267#endif /* CONFIG_COMPACTION */
 268
 269/*
 270 * Compaction requires the taking of some coarse locks that are potentially
 271 * very heavily contended. For async compaction, back out if the lock cannot
 272 * be taken immediately. For sync compaction, spin on the lock if needed.
 273 *
 274 * Returns true if the lock is held
 275 * Returns false if the lock is not held and compaction should abort
 276 */
 277static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
 278                                                struct compact_control *cc)
 279{
 280        if (cc->mode == MIGRATE_ASYNC) {
 281                if (!spin_trylock_irqsave(lock, *flags)) {
 282                        cc->contended = COMPACT_CONTENDED_LOCK;
 283                        return false;
 284                }
 285        } else {
 286                spin_lock_irqsave(lock, *flags);
 287        }
 288
 289        return true;
 290}
 291
 292/*
 293 * Compaction requires the taking of some coarse locks that are potentially
 294 * very heavily contended. The lock should be periodically unlocked to avoid
 295 * having disabled IRQs for a long time, even when there is nobody waiting on
 296 * the lock. It might also be that allowing the IRQs will result in
 297 * need_resched() becoming true. If scheduling is needed, async compaction
 298 * aborts. Sync compaction schedules.
 299 * Either compaction type will also abort if a fatal signal is pending.
 300 * In either case if the lock was locked, it is dropped and not regained.
 301 *
  302 * Returns true if compaction should abort due to a pending fatal signal, or
  303 *              for async compaction due to need_resched()
 304 * Returns false when compaction can continue (sync compaction might have
 305 *              scheduled)
 306 */
 307static bool compact_unlock_should_abort(spinlock_t *lock,
 308                unsigned long flags, bool *locked, struct compact_control *cc)
 309{
 310        if (*locked) {
 311                spin_unlock_irqrestore(lock, flags);
 312                *locked = false;
 313        }
 314
 315        if (fatal_signal_pending(current)) {
 316                cc->contended = COMPACT_CONTENDED_SCHED;
 317                return true;
 318        }
 319
 320        if (need_resched()) {
 321                if (cc->mode == MIGRATE_ASYNC) {
 322                        cc->contended = COMPACT_CONTENDED_SCHED;
 323                        return true;
 324                }
 325                cond_resched();
 326        }
 327
 328        return false;
 329}
 330
 331/*
 332 * Aside from avoiding lock contention, compaction also periodically checks
 333 * need_resched() and either schedules in sync compaction or aborts async
 334 * compaction. This is similar to what compact_unlock_should_abort() does, but
 335 * is used where no lock is concerned.
 336 *
 337 * Returns false when no scheduling was needed, or sync compaction scheduled.
 338 * Returns true when async compaction should abort.
 339 */
 340static inline bool compact_should_abort(struct compact_control *cc)
 341{
 342        /* async compaction aborts if contended */
 343        if (need_resched()) {
 344                if (cc->mode == MIGRATE_ASYNC) {
 345                        cc->contended = COMPACT_CONTENDED_SCHED;
 346                        return true;
 347                }
 348
 349                cond_resched();
 350        }
 351
 352        return false;
 353}
 354
 355/*
  356 * Isolate free pages onto a private freelist. If @strict is true, abort and
  357 * return 0 on any invalid PFNs or non-free pages inside the pageblock
 358 * (even though it may still end up isolating some pages).
 359 */
 360static unsigned long isolate_freepages_block(struct compact_control *cc,
 361                                unsigned long *start_pfn,
 362                                unsigned long end_pfn,
 363                                struct list_head *freelist,
 364                                bool strict)
 365{
 366        int nr_scanned = 0, total_isolated = 0;
 367        struct page *cursor, *valid_page = NULL;
 368        unsigned long flags = 0;
 369        bool locked = false;
 370        unsigned long blockpfn = *start_pfn;
 371
 372        cursor = pfn_to_page(blockpfn);
 373
 374        /* Isolate free pages. */
 375        for (; blockpfn < end_pfn; blockpfn++, cursor++) {
 376                int isolated, i;
 377                struct page *page = cursor;
 378
 379                /*
 380                 * Periodically drop the lock (if held) regardless of its
  381                 * contention, to give IRQs a chance to run. Abort if a fatal
  382                 * signal is pending or async compaction detects need_resched().
 383                 */
 384                if (!(blockpfn % SWAP_CLUSTER_MAX)
 385                    && compact_unlock_should_abort(&cc->zone->lock, flags,
 386                                                                &locked, cc))
 387                        break;
 388
 389                nr_scanned++;
 390                if (!pfn_valid_within(blockpfn))
 391                        goto isolate_fail;
 392
 393                if (!valid_page)
 394                        valid_page = page;
 395
 396                /*
 397                 * For compound pages such as THP and hugetlbfs, we can save
 398                 * potentially a lot of iterations if we skip them at once.
 399                 * The check is racy, but we can consider only valid values
 400                 * and the only danger is skipping too much.
 401                 */
 402                if (PageCompound(page)) {
 403                        unsigned int comp_order = compound_order(page);
 404
 405                        if (likely(comp_order < MAX_ORDER)) {
 406                                blockpfn += (1UL << comp_order) - 1;
 407                                cursor += (1UL << comp_order) - 1;
 408                        }
 409
 410                        goto isolate_fail;
 411                }
 412
 413                if (!PageBuddy(page))
 414                        goto isolate_fail;
 415
 416                /*
 417                 * If we already hold the lock, we can skip some rechecking.
  418                 * Note that if we hold the lock now, it was already taken in
  419                 * some previous iteration (or strict is true),
 420                 * so it is correct to skip the suitable migration target
 421                 * recheck as well.
 422                 */
 423                if (!locked) {
 424                        /*
 425                         * The zone lock must be held to isolate freepages.
 426                         * Unfortunately this is a very coarse lock and can be
 427                         * heavily contended if there are parallel allocations
 428                         * or parallel compactions. For async compaction do not
 429                         * spin on the lock and we acquire the lock as late as
 430                         * possible.
 431                         */
 432                        locked = compact_trylock_irqsave(&cc->zone->lock,
 433                                                                &flags, cc);
 434                        if (!locked)
 435                                break;
 436
 437                        /* Recheck this is a buddy page under lock */
 438                        if (!PageBuddy(page))
 439                                goto isolate_fail;
 440                }
 441
 442                /* Found a free page, break it into order-0 pages */
 443                isolated = split_free_page(page);
 444                if (!isolated)
 445                        break;
 446
 447                total_isolated += isolated;
 448                cc->nr_freepages += isolated;
 449                for (i = 0; i < isolated; i++) {
 450                        list_add(&page->lru, freelist);
 451                        page++;
 452                }
 453                if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
 454                        blockpfn += isolated;
 455                        break;
 456                }
 457                /* Advance to the end of split page */
 458                blockpfn += isolated - 1;
 459                cursor += isolated - 1;
 460                continue;
 461
 462isolate_fail:
 463                if (strict)
 464                        break;
 465                else
 466                        continue;
 467
 468        }
 469
 470        if (locked)
 471                spin_unlock_irqrestore(&cc->zone->lock, flags);
 472
 473        /*
 474         * There is a tiny chance that we have read bogus compound_order(),
 475         * so be careful to not go outside of the pageblock.
 476         */
 477        if (unlikely(blockpfn > end_pfn))
 478                blockpfn = end_pfn;
 479
 480        trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
 481                                        nr_scanned, total_isolated);
 482
 483        /* Record how far we have got within the block */
 484        *start_pfn = blockpfn;
 485
 486        /*
 487         * If strict isolation is requested by CMA then check that all the
 488         * pages requested were isolated. If there were any failures, 0 is
 489         * returned and CMA will fail.
 490         */
 491        if (strict && blockpfn < end_pfn)
 492                total_isolated = 0;
 493
 494        /* Update the pageblock-skip if the whole pageblock was scanned */
 495        if (blockpfn == end_pfn)
 496                update_pageblock_skip(cc, valid_page, total_isolated, false);
 497
 498        count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
 499        if (total_isolated)
 500                count_compact_events(COMPACTISOLATED, total_isolated);
 501        return total_isolated;
 502}
 503
 504/**
 505 * isolate_freepages_range() - isolate free pages.
 506 * @start_pfn: The first PFN to start isolating.
 507 * @end_pfn:   The one-past-last PFN.
 508 *
 509 * Non-free pages, invalid PFNs, or zone boundaries within the
  510 * [start_pfn, end_pfn) range are considered errors and cause the function
  511 * to undo its actions and return zero.
  512 *
  513 * Otherwise, the function returns the one-past-the-last PFN of the
  514 * isolated pages (which may be greater than end_pfn if the end fell in
  515 * the middle of a free page).
 516 */
 517unsigned long
 518isolate_freepages_range(struct compact_control *cc,
 519                        unsigned long start_pfn, unsigned long end_pfn)
 520{
 521        unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
 522        LIST_HEAD(freelist);
 523
 524        pfn = start_pfn;
 525        block_start_pfn = pageblock_start_pfn(pfn);
 526        if (block_start_pfn < cc->zone->zone_start_pfn)
 527                block_start_pfn = cc->zone->zone_start_pfn;
 528        block_end_pfn = pageblock_end_pfn(pfn);
 529
 530        for (; pfn < end_pfn; pfn += isolated,
 531                                block_start_pfn = block_end_pfn,
 532                                block_end_pfn += pageblock_nr_pages) {
  533                /* Protect pfn from being changed by isolate_freepages_block */
 534                unsigned long isolate_start_pfn = pfn;
 535
 536                block_end_pfn = min(block_end_pfn, end_pfn);
 537
 538                /*
  539                 * pfn could pass block_end_pfn if the isolated free page
  540                 * is larger than pageblock order. In this case, adjust the
  541                 * scanning range to the correct block.
 542                 */
 543                if (pfn >= block_end_pfn) {
 544                        block_start_pfn = pageblock_start_pfn(pfn);
 545                        block_end_pfn = pageblock_end_pfn(pfn);
 546                        block_end_pfn = min(block_end_pfn, end_pfn);
 547                }
 548
 549                if (!pageblock_pfn_to_page(block_start_pfn,
 550                                        block_end_pfn, cc->zone))
 551                        break;
 552
 553                isolated = isolate_freepages_block(cc, &isolate_start_pfn,
 554                                                block_end_pfn, &freelist, true);
 555
 556                /*
 557                 * In strict mode, isolate_freepages_block() returns 0 if
  558                 * there are any holes in the block (i.e. invalid PFNs or
 559                 * non-free pages).
 560                 */
 561                if (!isolated)
 562                        break;
 563
 564                /*
 565                 * If we managed to isolate pages, it is always (1 << n) *
 566                 * pageblock_nr_pages for some non-negative n.  (Max order
 567                 * page may span two pageblocks).
 568                 */
 569        }
 570
 571        /* split_free_page does not map the pages */
 572        map_pages(&freelist);
 573
 574        if (pfn < end_pfn) {
 575                /* Loop terminated early, cleanup. */
 576                release_freepages(&freelist);
 577                return 0;
 578        }
 579
 580        /* We don't use freelists for anything. */
 581        return pfn;
 582}
 583
 584/* Update the number of anon and file isolated pages in the zone */
 585static void acct_isolated(struct zone *zone, struct compact_control *cc)
 586{
 587        struct page *page;
 588        unsigned int count[2] = { 0, };
 589
 590        if (list_empty(&cc->migratepages))
 591                return;
 592
 593        list_for_each_entry(page, &cc->migratepages, lru)
 594                count[!!page_is_file_cache(page)]++;
 595
 596        mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
 597        mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
 598}
 599
 600/* Similar to reclaim, but different enough that they don't share logic */
 601static bool too_many_isolated(struct zone *zone)
 602{
 603        unsigned long active, inactive, isolated;
 604
 605        inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
 606                                        zone_page_state(zone, NR_INACTIVE_ANON);
 607        active = zone_page_state(zone, NR_ACTIVE_FILE) +
 608                                        zone_page_state(zone, NR_ACTIVE_ANON);
 609        isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
 610                                        zone_page_state(zone, NR_ISOLATED_ANON);
 611
 612        return isolated > (inactive + active) / 2;
 613}
 614
 615/**
 616 * isolate_migratepages_block() - isolate all migrate-able pages within
 617 *                                a single pageblock
 618 * @cc:         Compaction control structure.
 619 * @low_pfn:    The first PFN to isolate
 620 * @end_pfn:    The one-past-the-last PFN to isolate, within same pageblock
 621 * @isolate_mode: Isolation mode to be used.
 622 *
 623 * Isolate all pages that can be migrated from the range specified by
 624 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
 625 * Returns zero if there is a fatal signal pending, otherwise PFN of the
  626 * first page that was not scanned (which may be less than, equal to, or greater
 627 * than end_pfn).
 628 *
 629 * The pages are isolated on cc->migratepages list (not required to be empty),
 630 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
 631 * is neither read nor updated.
 632 */
 633static unsigned long
 634isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 635                        unsigned long end_pfn, isolate_mode_t isolate_mode)
 636{
 637        struct zone *zone = cc->zone;
 638        unsigned long nr_scanned = 0, nr_isolated = 0;
 639        struct lruvec *lruvec;
 640        unsigned long flags = 0;
 641        bool locked = false;
 642        struct page *page = NULL, *valid_page = NULL;
 643        unsigned long start_pfn = low_pfn;
 644        bool skip_on_failure = false;
 645        unsigned long next_skip_pfn = 0;
 646
 647        /*
 648         * Ensure that there are not too many pages isolated from the LRU
 649         * list by either parallel reclaimers or compaction. If there are,
 650         * delay for some time until fewer pages are isolated
 651         */
 652        while (unlikely(too_many_isolated(zone))) {
 653                /* async migration should just abort */
 654                if (cc->mode == MIGRATE_ASYNC)
 655                        return 0;
 656
 657                congestion_wait(BLK_RW_ASYNC, HZ/10);
 658
 659                if (fatal_signal_pending(current))
 660                        return 0;
 661        }
 662
 663        if (compact_should_abort(cc))
 664                return 0;
 665
 666        if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
 667                skip_on_failure = true;
 668                next_skip_pfn = block_end_pfn(low_pfn, cc->order);
 669        }
 670
 671        /* Time to isolate some pages for migration */
 672        for (; low_pfn < end_pfn; low_pfn++) {
 673                bool is_lru;
 674
 675                if (skip_on_failure && low_pfn >= next_skip_pfn) {
 676                        /*
 677                         * We have isolated all migration candidates in the
 678                         * previous order-aligned block, and did not skip it due
 679                         * to failure. We should migrate the pages now and
 680                         * hopefully succeed compaction.
 681                         */
 682                        if (nr_isolated)
 683                                break;
 684
 685                        /*
 686                         * We failed to isolate in the previous order-aligned
 687                         * block. Set the new boundary to the end of the
 688                         * current block. Note we can't simply increase
 689                         * next_skip_pfn by 1 << order, as low_pfn might have
 690                         * been incremented by a higher number due to skipping
 691                         * a compound or a high-order buddy page in the
 692                         * previous loop iteration.
 693                         */
 694                        next_skip_pfn = block_end_pfn(low_pfn, cc->order);
 695                }
 696
 697                /*
 698                 * Periodically drop the lock (if held) regardless of its
  699                 * contention, to give IRQs a chance to run. Abort async compaction
 700                 * if contended.
 701                 */
 702                if (!(low_pfn % SWAP_CLUSTER_MAX)
 703                    && compact_unlock_should_abort(&zone->lru_lock, flags,
 704                                                                &locked, cc))
 705                        break;
 706
 707                if (!pfn_valid_within(low_pfn))
 708                        goto isolate_fail;
 709                nr_scanned++;
 710
 711                page = pfn_to_page(low_pfn);
 712
 713                if (!valid_page)
 714                        valid_page = page;
 715
 716                /*
 717                 * Skip if free. We read page order here without zone lock
 718                 * which is generally unsafe, but the race window is small and
 719                 * the worst thing that can happen is that we skip some
 720                 * potential isolation targets.
 721                 */
 722                if (PageBuddy(page)) {
 723                        unsigned long freepage_order = page_order_unsafe(page);
 724
 725                        /*
 726                         * Without lock, we cannot be sure that what we got is
 727                         * a valid page order. Consider only values in the
 728                         * valid order range to prevent low_pfn overflow.
 729                         */
 730                        if (freepage_order > 0 && freepage_order < MAX_ORDER)
 731                                low_pfn += (1UL << freepage_order) - 1;
 732                        continue;
 733                }
 734
 735                /*
 736                 * Check may be lockless but that's ok as we recheck later.
  737                 * It's possible to migrate LRU pages and balloon pages;
  738                 * skip any other type of page.
 739                 */
 740                is_lru = PageLRU(page);
 741                if (!is_lru) {
 742                        if (unlikely(balloon_page_movable(page))) {
 743                                if (balloon_page_isolate(page)) {
 744                                        /* Successfully isolated */
 745                                        goto isolate_success;
 746                                }
 747                        }
 748                }
 749
 750                /*
 751                 * Regardless of being on LRU, compound pages such as THP and
 752                 * hugetlbfs are not to be compacted. We can potentially save
 753                 * a lot of iterations if we skip them at once. The check is
 754                 * racy, but we can consider only valid values and the only
 755                 * danger is skipping too much.
 756                 */
 757                if (PageCompound(page)) {
 758                        unsigned int comp_order = compound_order(page);
 759
 760                        if (likely(comp_order < MAX_ORDER))
 761                                low_pfn += (1UL << comp_order) - 1;
 762
 763                        goto isolate_fail;
 764                }
 765
 766                if (!is_lru)
 767                        goto isolate_fail;
 768
 769                /*
 770                 * Migration will fail if an anonymous page is pinned in memory,
 771                 * so avoid taking lru_lock and isolating it unnecessarily in an
 772                 * admittedly racy check.
 773                 */
 774                if (!page_mapping(page) &&
 775                    page_count(page) > page_mapcount(page))
 776                        goto isolate_fail;
 777
 778                /* If we already hold the lock, we can skip some rechecking */
 779                if (!locked) {
 780                        locked = compact_trylock_irqsave(&zone->lru_lock,
 781                                                                &flags, cc);
 782                        if (!locked)
 783                                break;
 784
 785                        /* Recheck PageLRU and PageCompound under lock */
 786                        if (!PageLRU(page))
 787                                goto isolate_fail;
 788
 789                        /*
  790                         * The page became compound since the non-locked check,
 791                         * and it's on LRU. It can only be a THP so the order
 792                         * is safe to read and it's 0 for tail pages.
 793                         */
 794                        if (unlikely(PageCompound(page))) {
 795                                low_pfn += (1UL << compound_order(page)) - 1;
 796                                goto isolate_fail;
 797                        }
 798                }
 799
 800                lruvec = mem_cgroup_page_lruvec(page, zone);
 801
 802                /* Try isolate the page */
 803                if (__isolate_lru_page(page, isolate_mode) != 0)
 804                        goto isolate_fail;
 805
 806                VM_BUG_ON_PAGE(PageCompound(page), page);
 807
 808                /* Successfully isolated */
 809                del_page_from_lru_list(page, lruvec, page_lru(page));
 810
 811isolate_success:
 812                list_add(&page->lru, &cc->migratepages);
 813                cc->nr_migratepages++;
 814                nr_isolated++;
 815
 816                /*
  817                 * Record where we could have freed pages by migration and
  818                 * not yet flushed them to the buddy allocator; this is the
  819                 * lowest page that was isolated and will likely then be
  820                 * freed by migration.
 821                 */
 822                if (!cc->last_migrated_pfn)
 823                        cc->last_migrated_pfn = low_pfn;
 824
 825                /* Avoid isolating too much */
 826                if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
 827                        ++low_pfn;
 828                        break;
 829                }
 830
 831                continue;
 832isolate_fail:
 833                if (!skip_on_failure)
 834                        continue;
 835
 836                /*
 837                 * We have isolated some pages, but then failed. Release them
 838                 * instead of migrating, as we cannot form the cc->order buddy
 839                 * page anyway.
 840                 */
 841                if (nr_isolated) {
 842                        if (locked) {
 843                                spin_unlock_irqrestore(&zone->lru_lock, flags);
 844                                locked = false;
 845                        }
 846                        acct_isolated(zone, cc);
 847                        putback_movable_pages(&cc->migratepages);
 848                        cc->nr_migratepages = 0;
 849                        cc->last_migrated_pfn = 0;
 850                        nr_isolated = 0;
 851                }
 852
 853                if (low_pfn < next_skip_pfn) {
 854                        low_pfn = next_skip_pfn - 1;
 855                        /*
 856                         * The check near the loop beginning would have updated
 857                         * next_skip_pfn too, but this is a bit simpler.
 858                         */
 859                        next_skip_pfn += 1UL << cc->order;
 860                }
 861        }
 862
 863        /*
 864         * The PageBuddy() check could have potentially brought us outside
 865         * the range to be scanned.
 866         */
 867        if (unlikely(low_pfn > end_pfn))
 868                low_pfn = end_pfn;
 869
 870        if (locked)
 871                spin_unlock_irqrestore(&zone->lru_lock, flags);
 872
 873        /*
 874         * Update the pageblock-skip information and cached scanner pfn,
 875         * if the whole pageblock was scanned without isolating any page.
 876         */
 877        if (low_pfn == end_pfn)
 878                update_pageblock_skip(cc, valid_page, nr_isolated, true);
 879
 880        trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
 881                                                nr_scanned, nr_isolated);
 882
 883        count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
 884        if (nr_isolated)
 885                count_compact_events(COMPACTISOLATED, nr_isolated);
 886
 887        return low_pfn;
 888}
 889
 890/**
 891 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 892 * @cc:        Compaction control structure.
 893 * @start_pfn: The first PFN to start isolating.
 894 * @end_pfn:   The one-past-last PFN.
 895 *
 896 * Returns zero if isolation fails fatally due to e.g. pending signal.
  897 * Otherwise, the function returns the one-past-the-last PFN of the isolated
  898 * pages (which may be greater than end_pfn if the end fell in the middle of a THP).
 899 */
 900unsigned long
 901isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
 902                                                        unsigned long end_pfn)
 903{
 904        unsigned long pfn, block_start_pfn, block_end_pfn;
 905
 906        /* Scan block by block. First and last block may be incomplete */
 907        pfn = start_pfn;
 908        block_start_pfn = pageblock_start_pfn(pfn);
 909        if (block_start_pfn < cc->zone->zone_start_pfn)
 910                block_start_pfn = cc->zone->zone_start_pfn;
 911        block_end_pfn = pageblock_end_pfn(pfn);
 912
 913        for (; pfn < end_pfn; pfn = block_end_pfn,
 914                                block_start_pfn = block_end_pfn,
 915                                block_end_pfn += pageblock_nr_pages) {
 916
 917                block_end_pfn = min(block_end_pfn, end_pfn);
 918
 919                if (!pageblock_pfn_to_page(block_start_pfn,
 920                                        block_end_pfn, cc->zone))
 921                        continue;
 922
 923                pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
 924                                                        ISOLATE_UNEVICTABLE);
 925
 926                if (!pfn)
 927                        break;
 928
 929                if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
 930                        break;
 931        }
 932        acct_isolated(cc->zone, cc);
 933
 934        return pfn;
 935}
 936
 937#endif /* CONFIG_COMPACTION || CONFIG_CMA */
 938#ifdef CONFIG_COMPACTION
 939
  940/* Returns true if the page is within a block suitable as a migration target */
 941static bool suitable_migration_target(struct page *page)
 942{
 943        /* If the page is a large free page, then disallow migration */
 944        if (PageBuddy(page)) {
 945                /*
 946                 * We are checking page_order without zone->lock taken. But
 947                 * the only small danger is that we skip a potentially suitable
  948                 * pageblock, so it's not worth checking the order for a valid range.
 949                 */
 950                if (page_order_unsafe(page) >= pageblock_order)
 951                        return false;
 952        }
 953
 954        /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
 955        if (migrate_async_suitable(get_pageblock_migratetype(page)))
 956                return true;
 957
 958        /* Otherwise skip the block */
 959        return false;
 960}
 961
 962/*
 963 * Test whether the free scanner has reached the same or lower pageblock than
 964 * the migration scanner, and compaction should thus terminate.
 965 */
 966static inline bool compact_scanners_met(struct compact_control *cc)
 967{
 968        return (cc->free_pfn >> pageblock_order)
 969                <= (cc->migrate_pfn >> pageblock_order);
 970}
 971
 972/*
 973 * Based on information in the current compact_control, find blocks
 974 * suitable for isolating free pages from and then isolate them.
 975 */
 976static void isolate_freepages(struct compact_control *cc)
 977{
 978        struct zone *zone = cc->zone;
 979        struct page *page;
 980        unsigned long block_start_pfn;  /* start of current pageblock */
 981        unsigned long isolate_start_pfn; /* exact pfn we start at */
 982        unsigned long block_end_pfn;    /* end of current pageblock */
 983        unsigned long low_pfn;       /* lowest pfn scanner is able to scan */
 984        struct list_head *freelist = &cc->freepages;
 985
 986        /*
 987         * Initialise the free scanner. The starting point is where we last
 988         * successfully isolated from, zone-cached value, or the end of the
 989         * zone when isolating for the first time. For looping we also need
 990         * this pfn aligned down to the pageblock boundary, because we do
 991         * block_start_pfn -= pageblock_nr_pages in the for loop.
  992         * For the ending point, take care when isolating in the last
  993         * pageblock of a zone which ends in the middle of a pageblock.
 994         * The low boundary is the end of the pageblock the migration scanner
 995         * is using.
 996         */
 997        isolate_start_pfn = cc->free_pfn;
 998        block_start_pfn = pageblock_start_pfn(cc->free_pfn);
 999        block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
1000                                                zone_end_pfn(zone));
1001        low_pfn = pageblock_end_pfn(cc->migrate_pfn);
1002
1003        /*
1004         * Isolate free pages until enough are available to migrate the
1005         * pages on cc->migratepages. We stop searching if the migrate
1006         * and free page scanners meet or enough free pages are isolated.
1007         */
1008        for (; block_start_pfn >= low_pfn;
1009                                block_end_pfn = block_start_pfn,
1010                                block_start_pfn -= pageblock_nr_pages,
1011                                isolate_start_pfn = block_start_pfn) {
1012                /*
1013                 * This can iterate a massively long zone without finding any
1014                 * suitable migration targets, so periodically check if we need
1015                 * to schedule, or even abort async compaction.
1016                 */
1017                if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
1018                                                && compact_should_abort(cc))
1019                        break;
1020
1021                page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1022                                                                        zone);
1023                if (!page)
1024                        continue;
1025
1026                /* Check the block is suitable for migration */
1027                if (!suitable_migration_target(page))
1028                        continue;
1029
1030                /* If isolation recently failed, do not retry */
1031                if (!isolation_suitable(cc, page))
1032                        continue;
1033
1034                /* Found a block suitable for isolating free pages from. */
1035                isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
1036                                        freelist, false);
1037
1038                /*
1039                 * If we isolated enough freepages, or aborted due to lock
1040                 * contention, terminate.
1041                 */
1042                if ((cc->nr_freepages >= cc->nr_migratepages)
1043                                                        || cc->contended) {
1044                        if (isolate_start_pfn >= block_end_pfn) {
1045                                /*
1046                                 * Restart at previous pageblock if more
1047                                 * freepages can be isolated next time.
1048                                 */
1049                                isolate_start_pfn =
1050                                        block_start_pfn - pageblock_nr_pages;
1051                        }
1052                        break;
1053                } else if (isolate_start_pfn < block_end_pfn) {
1054                        /*
1055                         * If isolation failed early, do not continue
1056                         * needlessly.
1057                         */
1058                        break;
1059                }
1060        }
1061
1062        /* split_free_page does not map the pages */
1063        map_pages(freelist);
1064
1065        /*
1066         * Record where the free scanner will restart next time. Either we
1067         * broke from the loop and set isolate_start_pfn based on the last
1068         * call to isolate_freepages_block(), or we met the migration scanner
1069         * and the loop terminated due to isolate_start_pfn < low_pfn
1070         */
1071        cc->free_pfn = isolate_start_pfn;
1072}
1073
1074/*
1075 * This is a migrate-callback that "allocates" freepages by taking pages
1076 * from the isolated freelists in the block we are migrating to.
1077 */
1078static struct page *compaction_alloc(struct page *migratepage,
1079                                        unsigned long data,
1080                                        int **result)
1081{
1082        struct compact_control *cc = (struct compact_control *)data;
1083        struct page *freepage;
1084
1085        /*
1086         * Isolate free pages if necessary, and if we are not aborting due to
1087         * contention.
1088         */
1089        if (list_empty(&cc->freepages)) {
1090                if (!cc->contended)
1091                        isolate_freepages(cc);
1092
1093                if (list_empty(&cc->freepages))
1094                        return NULL;
1095        }
1096
1097        freepage = list_entry(cc->freepages.next, struct page, lru);
1098        list_del(&freepage->lru);
1099        cc->nr_freepages--;
1100
1101        return freepage;
1102}
1103
1104/*
1105 * This is a migrate-callback that "frees" freepages back to the isolated
1106 * freelist.  All pages on the freelist are from the same zone, so there is no
1107 * special handling needed for NUMA.
1108 */
1109static void compaction_free(struct page *page, unsigned long data)
1110{
1111        struct compact_control *cc = (struct compact_control *)data;
1112
1113        list_add(&page->lru, &cc->freepages);
1114        cc->nr_freepages++;
1115}
1116
1117/* possible outcome of isolate_migratepages */
1118typedef enum {
1119        ISOLATE_ABORT,          /* Abort compaction now */
1120        ISOLATE_NONE,           /* No pages isolated, continue scanning */
1121        ISOLATE_SUCCESS,        /* Pages isolated, migrate */
1122} isolate_migrate_t;
1123
1124/*
1125 * Allow userspace to control policy on scanning the unevictable LRU for
1126 * compactable pages.
1127 */
1128int sysctl_compact_unevictable_allowed __read_mostly = 1;
1129
1130/*
1131 * Isolate all pages that can be migrated from the first suitable block,
1132 * starting at the block pointed to by the migrate scanner pfn within
1133 * compact_control.
1134 */
1135static isolate_migrate_t isolate_migratepages(struct zone *zone,
1136                                        struct compact_control *cc)
1137{
1138        unsigned long block_start_pfn;
1139        unsigned long block_end_pfn;
1140        unsigned long low_pfn;
1141        struct page *page;
1142        const isolate_mode_t isolate_mode =
1143                (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
1144                (cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);
1145
1146        /*
 1147         * Start at where we last stopped, or at the beginning of the zone as
1148         * initialized by compact_zone()
1149         */
1150        low_pfn = cc->migrate_pfn;
1151        block_start_pfn = pageblock_start_pfn(low_pfn);
1152        if (block_start_pfn < zone->zone_start_pfn)
1153                block_start_pfn = zone->zone_start_pfn;
1154
1155        /* Only scan within a pageblock boundary */
1156        block_end_pfn = pageblock_end_pfn(low_pfn);
1157
1158        /*
 1159         * Iterate over whole pageblocks until we find the first suitable one.
1160         * Do not cross the free scanner.
1161         */
1162        for (; block_end_pfn <= cc->free_pfn;
1163                        low_pfn = block_end_pfn,
1164                        block_start_pfn = block_end_pfn,
1165                        block_end_pfn += pageblock_nr_pages) {
1166
1167                /*
1168                 * This can potentially iterate a massively long zone with
1169                 * many pageblocks unsuitable, so periodically check if we
1170                 * need to schedule, or even abort async compaction.
1171                 */
1172                if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
1173                                                && compact_should_abort(cc))
1174                        break;
1175
1176                page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1177                                                                        zone);
1178                if (!page)
1179                        continue;
1180
1181                /* If isolation recently failed, do not retry */
1182                if (!isolation_suitable(cc, page))
1183                        continue;
1184
1185                /*
1186                 * For async compaction, also only scan in MOVABLE blocks.
 1187                 * Async compaction is optimistic and checks whether the minimum
 1188                 * amount of work satisfies the allocation.
1189                 */
1190                if (cc->mode == MIGRATE_ASYNC &&
1191                    !migrate_async_suitable(get_pageblock_migratetype(page)))
1192                        continue;
1193
1194                /* Perform the isolation */
1195                low_pfn = isolate_migratepages_block(cc, low_pfn,
1196                                                block_end_pfn, isolate_mode);
1197
1198                if (!low_pfn || cc->contended) {
1199                        acct_isolated(zone, cc);
1200                        return ISOLATE_ABORT;
1201                }
1202
1203                /*
 1204                 * Either we isolated something and proceed with migration, or
 1205                 * we failed and compact_zone should decide whether we should
 1206                 * continue or not.
1207                 */
1208                break;
1209        }
1210
1211        acct_isolated(zone, cc);
1212        /* Record where migration scanner will be restarted. */
1213        cc->migrate_pfn = low_pfn;
1214
1215        return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
1216}
1217
1218/*
1219 * order == -1 is expected when compacting via
1220 * /proc/sys/vm/compact_memory
1221 */
1222static inline bool is_via_compact_memory(int order)
1223{
1224        return order == -1;
1225}
1226
1227static enum compact_result __compact_finished(struct zone *zone, struct compact_control *cc,
1228                            const int migratetype)
1229{
1230        unsigned int order;
1231        unsigned long watermark;
1232
1233        if (cc->contended || fatal_signal_pending(current))
1234                return COMPACT_CONTENDED;
1235
1236        /* Compaction run completes if the migrate and free scanner meet */
1237        if (compact_scanners_met(cc)) {
1238                /* Let the next compaction start anew. */
1239                reset_cached_positions(zone);
1240
1241                /*
1242                 * Mark that the PG_migrate_skip information should be cleared
1243                 * by kswapd when it goes to sleep. kcompactd does not set the
 1244                 * flag itself as the decision to clear it should be directly
1245                 * based on an allocation request.
1246                 */
1247                if (cc->direct_compaction)
1248                        zone->compact_blockskip_flush = true;
1249
1250                if (cc->whole_zone)
1251                        return COMPACT_COMPLETE;
1252                else
1253                        return COMPACT_PARTIAL_SKIPPED;
1254        }
1255
1256        if (is_via_compact_memory(cc->order))
1257                return COMPACT_CONTINUE;
1258
1259        /* Compaction run is not finished if the watermark is not met */
1260        watermark = low_wmark_pages(zone);
1261
1262        if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx,
1263                                                        cc->alloc_flags))
1264                return COMPACT_CONTINUE;
1265
1266        /* Direct compactor: Is a suitable page free? */
1267        for (order = cc->order; order < MAX_ORDER; order++) {
1268                struct free_area *area = &zone->free_area[order];
1269                bool can_steal;
1270
1271                /* Job done if page is free of the right migratetype */
1272                if (!list_empty(&area->free_list[migratetype]))
1273                        return COMPACT_PARTIAL;
1274
1275#ifdef CONFIG_CMA
1276                /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
1277                if (migratetype == MIGRATE_MOVABLE &&
1278                        !list_empty(&area->free_list[MIGRATE_CMA]))
1279                        return COMPACT_PARTIAL;
1280#endif
1281                /*
1282                 * Job done if allocation would steal freepages from
1283                 * other migratetype buddy lists.
1284                 */
1285                if (find_suitable_fallback(area, order, migratetype,
1286                                                true, &can_steal) != -1)
1287                        return COMPACT_PARTIAL;
1288        }
1289
1290        return COMPACT_NO_SUITABLE_PAGE;
1291}
1292
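     /*
      * Trace the raw result of __compact_finished() and fold the
      * tracing-only COMPACT_NO_SUITABLE_PAGE value into COMPACT_CONTINUE
      * for callers.
      */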
1293static enum compact_result compact_finished(struct zone *zone,
1294                        struct compact_control *cc,
1295                        const int migratetype)
1296{
1297        int ret;
1298
1299        ret = __compact_finished(zone, cc, migratetype);
1300        trace_mm_compaction_finished(zone, cc->order, ret);
1301        if (ret == COMPACT_NO_SUITABLE_PAGE)
1302                ret = COMPACT_CONTINUE;
1303
1304        return ret;
1305}
1306
1307/*
1308 * compaction_suitable: Is this suitable to run compaction on this zone now?
1309 * Returns
1310 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
1311 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
1312 *   COMPACT_CONTINUE - If compaction should run now
1313 */
1314static enum compact_result __compaction_suitable(struct zone *zone, int order,
1315                                        unsigned int alloc_flags,
1316                                        int classzone_idx,
1317                                        unsigned long wmark_target)
1318{
1319        int fragindex;
1320        unsigned long watermark;
1321
1322        if (is_via_compact_memory(order))
1323                return COMPACT_CONTINUE;
1324
1325        watermark = low_wmark_pages(zone);
1326        /*
1327         * If watermarks for high-order allocation are already met, there
1328         * should be no need for compaction at all.
1329         */
1330        if (zone_watermark_ok(zone, order, watermark, classzone_idx,
1331                                                                alloc_flags))
1332                return COMPACT_PARTIAL;
1333
1334        /*
1335         * Watermarks for order-0 must be met for compaction. Note the 2UL.
1336         * This is because during migration, copies of pages need to be
 1337         * allocated and, for a short time, the footprint is higher.
1338         */
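             /* e.g. an order-3 request adds 2UL << 3 = 16 pages of headroom */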
1339        watermark += (2UL << order);
1340        if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
1341                                 alloc_flags, wmark_target))
1342                return COMPACT_SKIPPED;
1343
1344        /*
1345         * fragmentation index determines if allocation failures are due to
1346         * low memory or external fragmentation
1347         *
1348         * index of -1000 would imply allocations might succeed depending on
 1349         * watermarks, but we already failed the high-order watermark check;
 1350         * index towards 0 implies failure is due to lack of memory;
 1351         * index towards 1000 implies failure is due to fragmentation.
1352         *
1353         * Only compact if a failure would be due to fragmentation.
1354         */
1355        fragindex = fragmentation_index(zone, order);
1356        if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
1357                return COMPACT_NOT_SUITABLE_ZONE;
1358
1359        return COMPACT_CONTINUE;
1360}
1361
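     /*
      * Evaluate suitability against the zone's current NR_FREE_PAGES and
      * fold the tracing-only COMPACT_NOT_SUITABLE_ZONE result into
      * COMPACT_SKIPPED for callers.
      */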
1362enum compact_result compaction_suitable(struct zone *zone, int order,
1363                                        unsigned int alloc_flags,
1364                                        int classzone_idx)
1365{
1366        enum compact_result ret;
1367
1368        ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx,
1369                                    zone_page_state(zone, NR_FREE_PAGES));
1370        trace_mm_compaction_suitable(zone, order, ret);
1371        if (ret == COMPACT_NOT_SUITABLE_ZONE)
1372                ret = COMPACT_SKIPPED;
1373
1374        return ret;
1375}
1376
1377bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
1378                int alloc_flags)
1379{
1380        struct zone *zone;
1381        struct zoneref *z;
1382
1383        /*
1384         * Make sure at least one zone would pass __compaction_suitable()
1385         * if we continue retrying the reclaim.
1386         */
1387        for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
1388                                        ac->nodemask) {
1389                unsigned long available;
1390                enum compact_result compact_result;
1391
1392                /*
1393                 * Do not consider all the reclaimable memory because we do not
1394                 * want to thrash just for a single high-order allocation, which
1395                 * is not even guaranteed to appear even if __compaction_suitable
1396                 * is happy with the watermark check.
1397                 */
1398                available = zone_reclaimable_pages(zone) / order;
1399                available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
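                    /*
                     * The estimate is thus the zone's free pages plus a
                     * 1/order fraction of its reclaimable pages.
                     */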
1400                compact_result = __compaction_suitable(zone, order, alloc_flags,
1401                                ac_classzone_idx(ac), available);
1402                if (compact_result != COMPACT_SKIPPED &&
1403                                compact_result != COMPACT_NOT_SUITABLE_ZONE)
1404                        return true;
1405        }
1406
1407        return false;
1408}
1409
1410static enum compact_result compact_zone(struct zone *zone, struct compact_control *cc)
1411{
1412        enum compact_result ret;
1413        unsigned long start_pfn = zone->zone_start_pfn;
1414        unsigned long end_pfn = zone_end_pfn(zone);
1415        const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
1416        const bool sync = cc->mode != MIGRATE_ASYNC;
1417
1418        ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
1419                                                        cc->classzone_idx);
1420        /* Compaction is likely to fail */
1421        if (ret == COMPACT_PARTIAL || ret == COMPACT_SKIPPED)
1422                return ret;
1423
1424        /* huh, compaction_suitable is returning something unexpected */
1425        VM_BUG_ON(ret != COMPACT_CONTINUE);
1426
1427        /*
1428         * Clear pageblock skip if there were failures recently and compaction
1429         * is about to be retried after being deferred.
1430         */
1431        if (compaction_restarting(zone, cc->order))
1432                __reset_isolation_suitable(zone);
1433
1434        /*
1435         * Set up to move all movable pages to the end of the zone. Use cached
1436         * information on where the scanners should start, but check that it
1437         * is initialised by ensuring the values are within zone boundaries.
1438         */
1439        cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
1440        cc->free_pfn = zone->compact_cached_free_pfn;
1441        if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
1442                cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
1443                zone->compact_cached_free_pfn = cc->free_pfn;
1444        }
1445        if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
1446                cc->migrate_pfn = start_pfn;
1447                zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
1448                zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
1449        }
1450
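            /*
             * If the migration scanner starts at the first pfn of the zone,
             * this attempt covers the whole zone.
             */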
1451        if (cc->migrate_pfn == start_pfn)
1452                cc->whole_zone = true;
1453
1454        cc->last_migrated_pfn = 0;
1455
1456        trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
1457                                cc->free_pfn, end_pfn, sync);
1458
1459        migrate_prep_local();
1460
1461        while ((ret = compact_finished(zone, cc, migratetype)) ==
1462                                                COMPACT_CONTINUE) {
1463                int err;
1464
1465                switch (isolate_migratepages(zone, cc)) {
1466                case ISOLATE_ABORT:
1467                        ret = COMPACT_CONTENDED;
1468                        putback_movable_pages(&cc->migratepages);
1469                        cc->nr_migratepages = 0;
1470                        goto out;
1471                case ISOLATE_NONE:
1472                        /*
1473                         * We haven't isolated and migrated anything, but
1474                         * there might still be unflushed migrations from
1475                         * previous cc->order aligned block.
1476                         */
1477                        goto check_drain;
1478                case ISOLATE_SUCCESS:
1479                        ;
1480                }
1481
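                    /*
                     * Migrate the isolated pages. Free target pages are
                     * supplied by compaction_alloc() and any unused targets
                     * are handed back through compaction_free().
                     */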
1482                err = migrate_pages(&cc->migratepages, compaction_alloc,
1483                                compaction_free, (unsigned long)cc, cc->mode,
1484                                MR_COMPACTION);
1485
1486                trace_mm_compaction_migratepages(cc->nr_migratepages, err,
1487                                                        &cc->migratepages);
1488
1489                /* All pages were either migrated or will be released */
1490                cc->nr_migratepages = 0;
1491                if (err) {
1492                        putback_movable_pages(&cc->migratepages);
1493                        /*
1494                         * migrate_pages() may return -ENOMEM when scanners meet
1495                         * and we want compact_finished() to detect it
1496                         */
1497                        if (err == -ENOMEM && !compact_scanners_met(cc)) {
1498                                ret = COMPACT_CONTENDED;
1499                                goto out;
1500                        }
1501                        /*
1502                         * We failed to migrate at least one page in the current
1503                         * order-aligned block, so skip the rest of it.
1504                         */
1505                        if (cc->direct_compaction &&
1506                                                (cc->mode == MIGRATE_ASYNC)) {
1507                                cc->migrate_pfn = block_end_pfn(
1508                                                cc->migrate_pfn - 1, cc->order);
1509                                /* Draining pcplists is useless in this case */
1510                                cc->last_migrated_pfn = 0;
1511
1512                        }
1513                }
1514
1515check_drain:
1516                /*
1517                 * Has the migration scanner moved away from the previous
1518                 * cc->order aligned block where we migrated from? If yes,
1519                 * flush the pages that were freed, so that they can merge and
1520                 * compact_finished() can detect immediately if allocation
1521                 * would succeed.
1522                 */
1523                if (cc->order > 0 && cc->last_migrated_pfn) {
1524                        int cpu;
1525                        unsigned long current_block_start =
1526                                block_start_pfn(cc->migrate_pfn, cc->order);
1527
1528                        if (cc->last_migrated_pfn < current_block_start) {
1529                                cpu = get_cpu();
1530                                lru_add_drain_cpu(cpu);
1531                                drain_local_pages(zone);
1532                                put_cpu();
1533                                /* No more flushing until we migrate again */
1534                                cc->last_migrated_pfn = 0;
1535                        }
1536                }
1537
1538        }
1539
1540out:
1541        /*
1542         * Release free pages and update where the free scanner should restart,
1543         * so we don't leave any returned pages behind in the next attempt.
1544         */
1545        if (cc->nr_freepages > 0) {
1546                unsigned long free_pfn = release_freepages(&cc->freepages);
1547
1548                cc->nr_freepages = 0;
1549                VM_BUG_ON(free_pfn == 0);
1550                /* The cached pfn is always the first in a pageblock */
1551                free_pfn = pageblock_start_pfn(free_pfn);
1552                /*
1553                 * Only go back, not forward. The cached pfn might already
1554                 * have been reset to the zone end in compact_finished().
1555                 */
1556                if (free_pfn > zone->compact_cached_free_pfn)
1557                        zone->compact_cached_free_pfn = free_pfn;
1558        }
1559
1560        trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
1561                                cc->free_pfn, end_pfn, sync, ret);
1562
1563        if (ret == COMPACT_CONTENDED)
1564                ret = COMPACT_PARTIAL;
1565
1566        return ret;
1567}
1568
1569static enum compact_result compact_zone_order(struct zone *zone, int order,
1570                gfp_t gfp_mask, enum migrate_mode mode, int *contended,
1571                unsigned int alloc_flags, int classzone_idx)
1572{
1573        enum compact_result ret;
1574        struct compact_control cc = {
1575                .nr_freepages = 0,
1576                .nr_migratepages = 0,
1577                .order = order,
1578                .gfp_mask = gfp_mask,
1579                .zone = zone,
1580                .mode = mode,
1581                .alloc_flags = alloc_flags,
1582                .classzone_idx = classzone_idx,
1583                .direct_compaction = true,
1584        };
1585        INIT_LIST_HEAD(&cc.freepages);
1586        INIT_LIST_HEAD(&cc.migratepages);
1587
1588        ret = compact_zone(zone, &cc);
1589
1590        VM_BUG_ON(!list_empty(&cc.freepages));
1591        VM_BUG_ON(!list_empty(&cc.migratepages));
1592
1593        *contended = cc.contended;
1594        return ret;
1595}
1596
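    /*
     * Threshold for the fragmentation index check in __compaction_suitable();
     * tunable at runtime through sysctl_extfrag_handler() below.
     */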
1597int sysctl_extfrag_threshold = 500;
1598
1599/**
1600 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
1601 * @gfp_mask: The GFP mask of the current allocation
1602 * @order: The order of the current allocation
1603 * @alloc_flags: The allocation flags of the current allocation
1604 * @ac: The context of current allocation
1605 * @mode: The migration mode for async, sync light, or sync migration
1606 * @contended: Return value that determines if compaction was aborted due to
1607 *             need_resched() or lock contention
1608 *
1609 * This is the main entry point for direct page compaction.
1610 */
1611enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
1612                unsigned int alloc_flags, const struct alloc_context *ac,
1613                enum migrate_mode mode, int *contended)
1614{
1615        int may_enter_fs = gfp_mask & __GFP_FS;
1616        int may_perform_io = gfp_mask & __GFP_IO;
1617        struct zoneref *z;
1618        struct zone *zone;
1619        enum compact_result rc = COMPACT_SKIPPED;
1620        int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */
1621
1622        *contended = COMPACT_CONTENDED_NONE;
1623
1624        /* Check if the GFP flags allow compaction */
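            /*
             * Order-0 requests never need compaction, and page migration may
             * need to perform filesystem or I/O work, so bail out when the
             * GFP mask does not allow __GFP_FS or __GFP_IO.
             */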
1625        if (!order || !may_enter_fs || !may_perform_io)
1626                return COMPACT_SKIPPED;
1627
1628        trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode);
1629
1630        /* Compact each zone in the list */
1631        for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
1632                                                                ac->nodemask) {
1633                enum compact_result status;
1634                int zone_contended;
1635
1636                if (compaction_deferred(zone, order)) {
1637                        rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
1638                        continue;
1639                }
1640
1641                status = compact_zone_order(zone, order, gfp_mask, mode,
1642                                &zone_contended, alloc_flags,
1643                                ac_classzone_idx(ac));
1644                rc = max(status, rc);
1645                /*
1646                 * It takes at least one zone that wasn't lock contended
1647                 * to clear all_zones_contended.
1648                 */
1649                all_zones_contended &= zone_contended;
1650
1651                /* If a normal allocation would succeed, stop compacting */
1652                if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
1653                                        ac_classzone_idx(ac), alloc_flags)) {
1654                        /*
1655                         * We think the allocation will succeed in this zone,
1656                         * but it is not certain, hence the false. The caller
1657                         * will repeat this with true if allocation indeed
1658                         * succeeds in this zone.
1659                         */
1660                        compaction_defer_reset(zone, order, false);
1661                        /*
1662                         * It is possible that async compaction aborted due to
1663                         * need_resched() and the watermarks were ok thanks to
1664                         * somebody else freeing memory. The allocation can
1665                         * however still fail so we better signal the
1666                         * need_resched() contention anyway (this will not
1667                         * prevent the allocation attempt).
1668                         */
1669                        if (zone_contended == COMPACT_CONTENDED_SCHED)
1670                                *contended = COMPACT_CONTENDED_SCHED;
1671
1672                        goto break_loop;
1673                }
1674
1675                if (mode != MIGRATE_ASYNC && (status == COMPACT_COMPLETE ||
1676                                        status == COMPACT_PARTIAL_SKIPPED)) {
1677                        /*
1678                         * We think that allocation won't succeed in this zone
1679                         * so we defer compaction there. If it ends up
1680                         * succeeding after all, it will be reset.
1681                         */
1682                        defer_compaction(zone, order);
1683                }
1684
1685                /*
1686                 * We might have stopped compacting due to need_resched() in
1687                 * async compaction, or because a fatal signal was detected. In
1688                 * that case do not try further zones and signal need_resched()
1689                 * contention.
1690                 */
1691                if ((zone_contended == COMPACT_CONTENDED_SCHED)
1692                                        || fatal_signal_pending(current)) {
1693                        *contended = COMPACT_CONTENDED_SCHED;
1694                        goto break_loop;
1695                }
1696
1697                continue;
1698break_loop:
1699                /*
1700                 * We might not have tried all the zones, so be conservative
1701                 * and assume they are not all lock contended.
1702                 */
1703                all_zones_contended = 0;
1704                break;
1705        }
1706
1707        /*
1708         * If at least one zone wasn't deferred or skipped, we report if all
1709         * zones that were tried were lock contended.
1710         */
1711        if (rc > COMPACT_INACTIVE && all_zones_contended)
1712                *contended = COMPACT_CONTENDED_LOCK;
1713
1714        return rc;
1715}
1716
1717
1718/* Compact all zones within a node */
1719static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
1720{
1721        int zoneid;
1722        struct zone *zone;
1723
1724        for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
1725
1726                zone = &pgdat->node_zones[zoneid];
1727                if (!populated_zone(zone))
1728                        continue;
1729
1730                cc->nr_freepages = 0;
1731                cc->nr_migratepages = 0;
1732                cc->zone = zone;
1733                INIT_LIST_HEAD(&cc->freepages);
1734                INIT_LIST_HEAD(&cc->migratepages);
1735
1736                /*
1737                 * When called via /proc/sys/vm/compact_memory
1738                 * this makes sure we compact the whole zone regardless of
1739                 * cached scanner positions.
1740                 */
1741                if (is_via_compact_memory(cc->order))
1742                        __reset_isolation_suitable(zone);
1743
1744                if (is_via_compact_memory(cc->order) ||
1745                                !compaction_deferred(zone, cc->order))
1746                        compact_zone(zone, cc);
1747
1748                VM_BUG_ON(!list_empty(&cc->freepages));
1749                VM_BUG_ON(!list_empty(&cc->migratepages));
1750
1751                if (is_via_compact_memory(cc->order))
1752                        continue;
1753
1754                if (zone_watermark_ok(zone, cc->order,
1755                                low_wmark_pages(zone), 0, 0))
1756                        compaction_defer_reset(zone, cc->order, false);
1757        }
1758}
1759
1760void compact_pgdat(pg_data_t *pgdat, int order)
1761{
1762        struct compact_control cc = {
1763                .order = order,
1764                .mode = MIGRATE_ASYNC,
1765        };
1766
1767        if (!order)
1768                return;
1769
1770        __compact_pgdat(pgdat, &cc);
1771}
1772
1773static void compact_node(int nid)
1774{
1775        struct compact_control cc = {
1776                .order = -1,
1777                .mode = MIGRATE_SYNC,
1778                .ignore_skip_hint = true,
1779        };
1780
1781        __compact_pgdat(NODE_DATA(nid), &cc);
1782}
1783
1784/* Compact all nodes in the system */
1785static void compact_nodes(void)
1786{
1787        int nid;
1788
1789        /* Flush pending updates to the LRU lists */
1790        lru_add_drain_all();
1791
1792        for_each_online_node(nid)
1793                compact_node(nid);
1794}
1795
1796/* The written value is actually unused; all memory is compacted. */
1797int sysctl_compact_memory;
1798
1799/*
1800 * This is the entry point for compacting all nodes via
1801 * /proc/sys/vm/compact_memory
1802 */
1803int sysctl_compaction_handler(struct ctl_table *table, int write,
1804                        void __user *buffer, size_t *length, loff_t *ppos)
1805{
1806        if (write)
1807                compact_nodes();
1808
1809        return 0;
1810}
1811
1812int sysctl_extfrag_handler(struct ctl_table *table, int write,
1813                        void __user *buffer, size_t *length, loff_t *ppos)
1814{
1815        proc_dointvec_minmax(table, write, buffer, length, ppos);
1816
1817        return 0;
1818}
1819
1820#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
1821static ssize_t sysfs_compact_node(struct device *dev,
1822                        struct device_attribute *attr,
1823                        const char *buf, size_t count)
1824{
1825        int nid = dev->id;
1826
1827        if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
1828                /* Flush pending updates to the LRU lists */
1829                lru_add_drain_all();
1830
1831                compact_node(nid);
1832        }
1833
1834        return count;
1835}
1836static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
1837
1838int compaction_register_node(struct node *node)
1839{
1840        return device_create_file(&node->dev, &dev_attr_compact);
1841}
1842
1843void compaction_unregister_node(struct node *node)
1844{
1845        return device_remove_file(&node->dev, &dev_attr_compact);
1846}
1847#endif /* CONFIG_SYSFS && CONFIG_NUMA */
1848
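    /*
     * Wait condition for kcompactd(); kthread_should_stop() is included so
     * that kcompactd_stop() can terminate a sleeping thread.
     */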
1849static inline bool kcompactd_work_requested(pg_data_t *pgdat)
1850{
1851        return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
1852}
1853
1854static bool kcompactd_node_suitable(pg_data_t *pgdat)
1855{
1856        int zoneid;
1857        struct zone *zone;
1858        enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;
1859
1860        for (zoneid = 0; zoneid <= classzone_idx; zoneid++) {
1861                zone = &pgdat->node_zones[zoneid];
1862
1863                if (!populated_zone(zone))
1864                        continue;
1865
1866                if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
1867                                        classzone_idx) == COMPACT_CONTINUE)
1868                        return true;
1869        }
1870
1871        return false;
1872}
1873
1874static void kcompactd_do_work(pg_data_t *pgdat)
1875{
1876        /*
1877         * With no special task, compact all zones so that a page of requested
1878         * order is allocatable.
1879         */
1880        int zoneid;
1881        struct zone *zone;
1882        struct compact_control cc = {
1883                .order = pgdat->kcompactd_max_order,
1884                .classzone_idx = pgdat->kcompactd_classzone_idx,
1885                .mode = MIGRATE_SYNC_LIGHT,
1886                .ignore_skip_hint = true,
1887
1888        };
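            /*
             * As set up above, kcompactd compacts with lightweight sync
             * migration and ignores the per-pageblock skip hints.
             */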
1889        bool success = false;
1890
1891        trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
1892                                                        cc.classzone_idx);
1893        count_vm_event(KCOMPACTD_WAKE);
1894
1895        for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) {
1896                int status;
1897
1898                zone = &pgdat->node_zones[zoneid];
1899                if (!populated_zone(zone))
1900                        continue;
1901
1902                if (compaction_deferred(zone, cc.order))
1903                        continue;
1904
1905                if (compaction_suitable(zone, cc.order, 0, zoneid) !=
1906                                                        COMPACT_CONTINUE)
1907                        continue;
1908
1909                cc.nr_freepages = 0;
1910                cc.nr_migratepages = 0;
1911                cc.zone = zone;
1912                INIT_LIST_HEAD(&cc.freepages);
1913                INIT_LIST_HEAD(&cc.migratepages);
1914
1915                if (kthread_should_stop())
1916                        return;
1917                status = compact_zone(zone, &cc);
1918
1919                if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone),
1920                                                cc.classzone_idx, 0)) {
1921                        success = true;
1922                        compaction_defer_reset(zone, cc.order, false);
1923                } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
1924                        /*
1925                         * We use sync migration mode here, so we defer like
1926                         * sync direct compaction does.
1927                         */
1928                        defer_compaction(zone, cc.order);
1929                }
1930
1931                VM_BUG_ON(!list_empty(&cc.freepages));
1932                VM_BUG_ON(!list_empty(&cc.migratepages));
1933        }
1934
1935        /*
1936         * Regardless of success, we are done until woken up next. But remember
1937         * the requested order/classzone_idx in case it was higher/tighter than
1938         * our current ones
1939         */
1940        if (pgdat->kcompactd_max_order <= cc.order)
1941                pgdat->kcompactd_max_order = 0;
1942        if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
1943                pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
1944}
1945
1946void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
1947{
1948        if (!order)
1949                return;
1950
1951        if (pgdat->kcompactd_max_order < order)
1952                pgdat->kcompactd_max_order = order;
1953
1954        if (pgdat->kcompactd_classzone_idx > classzone_idx)
1955                pgdat->kcompactd_classzone_idx = classzone_idx;
1956
1957        if (!waitqueue_active(&pgdat->kcompactd_wait))
1958                return;
1959
1960        if (!kcompactd_node_suitable(pgdat))
1961                return;
1962
1963        trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
1964                                                        classzone_idx);
1965        wake_up_interruptible(&pgdat->kcompactd_wait);
1966}
1967
1968/*
1969 * The background compaction daemon, started as a kernel thread
1970 * from the init process.
1971 */
1972static int kcompactd(void *p)
1973{
1974        pg_data_t *pgdat = (pg_data_t *)p;
1975        struct task_struct *tsk = current;
1976
1977        const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1978
1979        if (!cpumask_empty(cpumask))
1980                set_cpus_allowed_ptr(tsk, cpumask);
1981
1982        set_freezable();
1983
1984        pgdat->kcompactd_max_order = 0;
1985        pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
1986
1987        while (!kthread_should_stop()) {
1988                trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
1989                wait_event_freezable(pgdat->kcompactd_wait,
1990                                kcompactd_work_requested(pgdat));
1991
1992                kcompactd_do_work(pgdat);
1993        }
1994
1995        return 0;
1996}
1997
1998/*
1999 * This kcompactd start function will be called by init and node-hot-add.
2000 * On node-hot-add, kcompactd will be moved to the proper cpus if cpus are hot-added.
2001 */
2002int kcompactd_run(int nid)
2003{
2004        pg_data_t *pgdat = NODE_DATA(nid);
2005        int ret = 0;
2006
2007        if (pgdat->kcompactd)
2008                return 0;
2009
2010        pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
2011        if (IS_ERR(pgdat->kcompactd)) {
2012                pr_err("Failed to start kcompactd on node %d\n", nid);
2013                ret = PTR_ERR(pgdat->kcompactd);
2014                pgdat->kcompactd = NULL;
2015        }
2016        return ret;
2017}
2018
2019/*
2020 * Called by memory hotplug when all memory in a node is offlined. Caller must
2021 * hold mem_hotplug_begin/end().
2022 */
2023void kcompactd_stop(int nid)
2024{
2025        struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
2026
2027        if (kcompactd) {
2028                kthread_stop(kcompactd);
2029                NODE_DATA(nid)->kcompactd = NULL;
2030        }
2031}
2032
2033/*
2034 * It's optimal to keep kcompactd threads on the same CPUs as their
2035 * memory, but that is not required for correctness. So if the last cpu
2036 * in a node goes away, they are allowed to run anywhere; when the first
2037 * one comes back, their cpu bindings are restored.
2038 */
2039static int cpu_callback(struct notifier_block *nfb, unsigned long action,
2040                        void *hcpu)
2041{
2042        int nid;
2043
2044        if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
2045                for_each_node_state(nid, N_MEMORY) {
2046                        pg_data_t *pgdat = NODE_DATA(nid);
2047                        const struct cpumask *mask;
2048
2049                        mask = cpumask_of_node(pgdat->node_id);
2050
2051                        if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
2052                                /* One of our CPUs online: restore mask */
2053                                set_cpus_allowed_ptr(pgdat->kcompactd, mask);
2054                }
2055        }
2056        return NOTIFY_OK;
2057}
2058
2059static int __init kcompactd_init(void)
2060{
2061        int nid;
2062
2063        for_each_node_state(nid, N_MEMORY)
2064                kcompactd_run(nid);
2065        hotcpu_notifier(cpu_callback, 0);
2066        return 0;
2067}
2068subsys_initcall(kcompactd_init)
2069
2070#endif /* CONFIG_COMPACTION */
2071