// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

static int set_migratetype_isolate(struct page *page, int migratetype,
                                bool skip_hwpoisoned_pages)
{
        struct zone *zone;
        unsigned long flags, pfn;
        struct memory_isolate_notify arg;
        int notifier_ret;
        int ret = -EBUSY;

        zone = page_zone(page);

        spin_lock_irqsave(&zone->lock, flags);

        pfn = page_to_pfn(page);
        arg.start_pfn = pfn;
        arg.nr_pages = pageblock_nr_pages;
        arg.pages_found = 0;

        /*
         * It may be possible to isolate a pageblock even if the
         * migratetype is not MIGRATE_MOVABLE. The memory isolation
         * notifier chain is used by balloon drivers to report the
         * number of pages in a range that are held by the balloon
         * driver in order to shrink memory. If all the pages are
         * accounted for by balloons, are free, or are on the LRU,
         * isolation can continue. Later, when for example the memory
         * hotplug notifier runs, the pages reported as "can be
         * isolated" should be isolated (freed) by the balloon driver
         * through the memory notifier chain.
         */
        notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
        notifier_ret = notifier_to_errno(notifier_ret);
        if (notifier_ret)
                goto out;
        /*
         * FIXME: memory hotplug doesn't call shrink_slab() by itself
         * yet, so we only check for movable pages here.
         */
        if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
                                 skip_hwpoisoned_pages))
                ret = 0;

        /*
         * Here, "immobile" means pages that are not on the LRU. If the
         * number of immobile pages exceeds the number of removable-by-driver
         * pages reported by the notifier, isolation fails.
         */

out:
        if (!ret) {
                unsigned long nr_pages;
                int mt = get_pageblock_migratetype(page);

                set_pageblock_migratetype(page, MIGRATE_ISOLATE);
                zone->nr_isolate_pageblock++;
                nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
                                                NULL);

                __mod_zone_freepage_state(zone, -nr_pages, mt);
        }

        spin_unlock_irqrestore(&zone->lock, flags);
        if (!ret)
                drain_all_pages(zone);
        return ret;
}
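
/*
 * For reference, a minimal sketch of the MEM_ISOLATE_COUNT handler a
 * balloon driver might register through register_memory_isolate_notifier()
 * (hypothetical code; balloon_count_pages_in_range() is an assumed driver
 * helper, not a real function):
 *
 *	static int balloon_isolate_notify(struct notifier_block *nb,
 *					  unsigned long action, void *data)
 *	{
 *		struct memory_isolate_notify *arg = data;
 *
 *		if (action == MEM_ISOLATE_COUNT)
 *			arg->pages_found +=
 *				balloon_count_pages_in_range(arg->start_pfn,
 *							     arg->nr_pages);
 *		return NOTIFY_OK;
 *	}
 */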

static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
        struct zone *zone;
        unsigned long flags, nr_pages;
        bool isolated_page = false;
        unsigned int order;
        unsigned long pfn, buddy_pfn;
        struct page *buddy;

        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        if (!is_migrate_isolate_page(page))
                goto out;

        /*
         * Because a free page of order >= pageblock_order on an isolated
         * pageblock is restricted from merging due to the freepage
         * counting problem, an unmerged free buddy page may exist here.
         * move_freepages_block() does not handle merging, so we need
         * another approach: isolating the page and freeing it again
         * lets the buddy allocator merge it.
         */
        if (PageBuddy(page)) {
                order = page_order(page);
                if (order >= pageblock_order) {
                        pfn = page_to_pfn(page);
                        buddy_pfn = __find_buddy_pfn(pfn, order);
                        buddy = page + (buddy_pfn - pfn);

                        if (pfn_valid_within(buddy_pfn) &&
                            !is_migrate_isolate_page(buddy)) {
                                __isolate_free_page(page, order);
                                isolated_page = true;
                        }
                }
        }

        /*
         * If we isolated a free page of order >= pageblock_order above,
         * there should be no other free pages in the pageblock, so the
         * costly scan for free pages to move can be skipped.
         */
        if (!isolated_page) {
                nr_pages = move_freepages_block(zone, page, migratetype, NULL);
                __mod_zone_freepage_state(zone, nr_pages, migratetype);
        }
        set_pageblock_migratetype(page, migratetype);
        zone->nr_isolate_pageblock--;
out:
        spin_unlock_irqrestore(&zone->lock, flags);
        if (isolated_page) {
                post_alloc_hook(page, order, __GFP_MOVABLE);
                __free_pages(page, order);
        }
}
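
/*
 * Illustrative example of the merge problem above (assuming
 * pageblock_order == 9): while a pageblock is isolated, an order-9 page
 * freed in it is not merged with its free order-9 buddy in the adjacent
 * non-isolated pageblock, since merging across an isolated pageblock
 * would break the freepage accounting. On unisolation, merely moving
 * that page to the @migratetype free list would leave it unmerged;
 * re-freeing it through __free_pages() lets the buddy allocator merge
 * it up to order 10 and beyond.
 */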

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
        int i;

        for (i = 0; i < nr_pages; i++) {
                struct page *page;

                if (!pfn_valid_within(pfn + i))
                        continue;
                page = pfn_to_online_page(pfn + i);
                if (!page)
                        continue;
                return page;
        }
        return NULL;
}

/*
 * start_isolate_page_range() -- set the page-allocation-type of a range
 * of pages to MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to set in error recovery.
 *
 * Setting the page-allocation-type to MIGRATE_ISOLATE means that free pages
 * in the range will never be allocated. Any free pages in the range, and any
 * pages freed into it in the future, will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Returns 0 on success and -EBUSY if any part of the range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                             unsigned migratetype, bool skip_hwpoisoned_pages)
{
        unsigned long pfn;
        unsigned long undo_pfn;
        struct page *page;

        BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
        BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page &&
                    set_migratetype_isolate(page, migratetype, skip_hwpoisoned_pages)) {
                        undo_pfn = pfn;
                        goto undo;
                }
        }
        return 0;
undo:
        for (pfn = start_pfn;
             pfn < undo_pfn;
             pfn += pageblock_nr_pages) {
                struct page *page = pfn_to_online_page(pfn);
                if (!page)
                        continue;
                unset_migratetype_isolate(page, migratetype);
        }

        return -EBUSY;
}
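
/*
 * A minimal sketch of the intended calling protocol, in the style of a
 * memory-hotplug or alloc_contig_range caller (hypothetical code; the
 * migration step is elided):
 *
 *	if (start_isolate_page_range(start_pfn, end_pfn,
 *				     MIGRATE_MOVABLE, true))
 *		return -EBUSY;
 *	... migrate in-use pages out of [start_pfn, end_pfn) ...
 *	drain_all_pages(zone);
 *	if (test_pages_isolated(start_pfn, end_pfn, true)) {
 *		undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 *		return -EBUSY;
 *	}
 *	... [start_pfn, end_pfn) is now free and cannot be allocated from ...
 */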

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                            unsigned migratetype)
{
        unsigned long pfn;
        struct page *page;

        BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
        BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (!page || !is_migrate_isolate_page(page))
                        continue;
                unset_migratetype_isolate(page, migratetype);
        }
        return 0;
}
/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held by the caller.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
                                  bool skip_hwpoisoned_pages)
{
        struct page *page;

        while (pfn < end_pfn) {
                if (!pfn_valid_within(pfn)) {
                        pfn++;
                        continue;
                }
                page = pfn_to_page(pfn);
                if (PageBuddy(page))
                        /*
                         * If the page is on a free list, it has to be on
                         * the correct MIGRATE_ISOLATE free list. There is no
                         * simple way to verify that as VM_BUG_ON(), though.
                         */
                        pfn += 1 << page_order(page);
                else if (skip_hwpoisoned_pages && PageHWPoison(page))
                        /* A HWPoisoned page cannot also be PageBuddy */
                        pfn++;
                else
                        break;
        }

        return pfn;
}
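
/*
 * Example of how the scan above advances (illustrative numbers only):
 * if pfn 0x1000 holds a free order-3 page, the scan jumps straight to
 * pfn 0x1008; a hwpoisoned page (with skip_hwpoisoned_pages set)
 * advances the scan by one pfn; any other kind of page stops the scan,
 * and the caller treats the range as not isolated.
 */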

/* Caller should ensure that requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
                        bool skip_hwpoisoned_pages)
{
        unsigned long pfn, flags;
        struct page *page;
        struct zone *zone;

        /*
         * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
         * are not necessarily aligned to pageblock_nr_pages. So just check
         * the migratetype of each pageblock first.
         */
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page && !is_migrate_isolate_page(page))
                        break;
        }
        page = __first_valid_page(start_pfn, end_pfn - start_pfn);
        if ((pfn < end_pfn) || !page)
                return -EBUSY;
        /* Check all pages are free or marked as ISOLATED */
        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
                                                skip_hwpoisoned_pages);
        spin_unlock_irqrestore(&zone->lock, flags);

        trace_test_pages_isolated(start_pfn, end_pfn, pfn);

        return pfn < end_pfn ? -EBUSY : 0;
}

struct page *alloc_migrate_target(struct page *page, unsigned long private,
                                  int **resultp)
{
        return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
}

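/*
 * alloc_migrate_target() is intended to be passed as the new_page_t
 * callback of migrate_pages() when emptying an isolated range. A minimal
 * sketch of such a caller (hypothetical; &pagelist is assumed to hold the
 * pages taken off the range, and the reason code varies by kernel
 * version):
 *
 *	ret = migrate_pages(&pagelist, alloc_migrate_target, NULL, 0,
 *			    MIGRATE_SYNC, MR_CMA);
 */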