/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include "internal.h"

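/*
 * Mark the pageblock containing @page as MIGRATE_ISOLATE if no unmovable
 * pages (beyond those accounted for by the isolation notifier) are found
 * in it. On success the pageblock's free pages are moved to the
 * MIGRATE_ISOLATE free list and the zone's freepage counters are adjusted.
 * Takes zone->lock internally; returns 0 on success, -EBUSY otherwise.
 */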
int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
{
        struct zone *zone;
        unsigned long flags, pfn;
        struct memory_isolate_notify arg;
        int notifier_ret;
        int ret = -EBUSY;

        zone = page_zone(page);

        spin_lock_irqsave(&zone->lock, flags);

        pfn = page_to_pfn(page);
        arg.start_pfn = pfn;
        arg.nr_pages = pageblock_nr_pages;
        arg.pages_found = 0;

        /*
         * It may be possible to isolate a pageblock even if the
         * migratetype is not MIGRATE_MOVABLE. The memory isolation
         * notifier chain is used by balloon drivers to return the
         * number of pages in a range that are held by the balloon
         * driver to shrink memory. If all the pages are accounted for
         * by balloons, are free, or are on the LRU, isolation can continue.
         * Later, for example, when the memory hotplug notifier runs, these
         * pages reported as "can be isolated" should be isolated (freed)
         * by the balloon driver through the memory notifier chain.
         */
        notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
        notifier_ret = notifier_to_errno(notifier_ret);
        if (notifier_ret)
                goto out;
        /*
         * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
         * We just check MOVABLE pages.
         */
        if (!has_unmovable_pages(zone, page, arg.pages_found,
                                 skip_hwpoisoned_pages))
                ret = 0;

        /*
         * Here, "immobile" means not-on-LRU pages. If the number of immobile
         * pages exceeds the removable-by-driver pages reported by the
         * notifier, we fail.
         */

out:
        if (!ret) {
                unsigned long nr_pages;
                int migratetype = get_pageblock_migratetype(page);

                set_pageblock_migratetype(page, MIGRATE_ISOLATE);
                zone->nr_isolate_pageblock++;
                nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);

                __mod_zone_freepage_state(zone, -nr_pages, migratetype);
        }

        spin_unlock_irqrestore(&zone->lock, flags);
        if (!ret)
                drain_all_pages(zone);
        return ret;
}

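/*
 * Revert a pageblock from MIGRATE_ISOLATE back to @migratetype and return
 * its free pages to the normal free lists, merging a high-order isolated
 * buddy page back into the allocator when one is present.
 */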
void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
        struct zone *zone;
        unsigned long flags, nr_pages;
        struct page *isolated_page = NULL;
        unsigned int order;
        unsigned long page_idx, buddy_idx;
        struct page *buddy;

        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                goto out;

        /*
         * Because a free page with order >= pageblock_order on an isolated
         * pageblock is restricted from merging due to the freepage counting
         * problem, it is possible that a free buddy page exists here.
         * move_freepages_block() does not handle merging, so we need another
         * approach: isolating the page and freeing it again will cause the
         * pages to be merged.
         */
        if (PageBuddy(page)) {
                order = page_order(page);
                if (order >= pageblock_order) {
                        page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
                        buddy_idx = __find_buddy_index(page_idx, order);
                        buddy = page + (buddy_idx - page_idx);

                        if (pfn_valid_within(page_to_pfn(buddy)) &&
                            !is_migrate_isolate_page(buddy)) {
                                __isolate_free_page(page, order);
                                kernel_map_pages(page, (1 << order), 1);
                                set_page_refcounted(page);
                                isolated_page = page;
                        }
                }
        }

        /*
         * If we isolated a free page of order >= pageblock_order, there
         * should be no other free pages in the range, so we can skip the
         * costly pageblock scan for moving free pages.
         */
        if (!isolated_page) {
                nr_pages = move_freepages_block(zone, page, migratetype);
                __mod_zone_freepage_state(zone, nr_pages, migratetype);
        }
        set_pageblock_migratetype(page, migratetype);
        zone->nr_isolate_pageblock--;
out:
        spin_unlock_irqrestore(&zone->lock, flags);
        if (isolated_page)
                __free_pages(isolated_page, order);
}

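/*
 * Return the first valid page in [pfn, pfn + nr_pages), or NULL if the
 * whole range falls in a memory hole.
 */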
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
        int i;
        for (i = 0; i < nr_pages; i++)
                if (pfn_valid_within(pfn + i))
                        break;
        if (unlikely(i == nr_pages))
                return NULL;
        return pfn_to_page(pfn + i);
}

/*
 * start_isolate_page_range() -- mark the page-allocation type of a range
 * of pages as MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to set in error recovery.
 *
 * Setting the page-allocation type to MIGRATE_ISOLATE means that free pages
 * in the range will never be allocated. Any free pages, and pages freed in
 * the future, will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Returns 0 on success and -EBUSY if any part of the range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                             unsigned migratetype, bool skip_hwpoisoned_pages)
{
        unsigned long pfn;
        unsigned long undo_pfn;
        struct page *page;

        BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
        BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page &&
                    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
                        undo_pfn = pfn;
                        goto undo;
                }
        }
        return 0;
undo:
        for (pfn = start_pfn;
             pfn < undo_pfn;
             pfn += pageblock_nr_pages)
                unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

        return -EBUSY;
}
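
/*
 * Typical caller sequence (a hedged sketch, not part of this file; the
 * exact steps belong to callers such as memory-offline or
 * alloc_contig_range()-style code): isolate a pageblock-aligned range,
 * migrate the in-use pages out of it, verify the range is fully free,
 * then undo the isolation on the way out:
 *
 *      if (start_isolate_page_range(start, end, MIGRATE_MOVABLE, false))
 *              return -EBUSY;
 *      ... migrate or reclaim pages still in use in [start, end) ...
 *      ret = test_pages_isolated(start, end, false);
 *      undo_isolate_page_range(start, end, MIGRATE_MOVABLE);
 */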

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                            unsigned migratetype)
{
        unsigned long pfn;
        struct page *page;
        BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
        BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                        continue;
                unset_migratetype_isolate(page, migratetype);
        }
        return 0;
}

/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
                                  bool skip_hwpoisoned_pages)
{
        struct page *page;

        while (pfn < end_pfn) {
                if (!pfn_valid_within(pfn)) {
                        pfn++;
                        continue;
                }
                page = pfn_to_page(pfn);
                if (PageBuddy(page)) {
                        /*
                         * If a race between isolation and allocation happens,
                         * some free pages could be on the MIGRATE_MOVABLE list
                         * although the pageblock's migration type is
                         * MIGRATE_ISOLATE. Catch it and move the page onto the
                         * MIGRATE_ISOLATE list.
                         */
                        if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) {
                                struct page *end_page;

                                end_page = page + (1 << page_order(page)) - 1;
                                move_freepages(page_zone(page), page, end_page,
                                                MIGRATE_ISOLATE);
                        }
                        pfn += 1 << page_order(page);
                } else if (page_count(page) == 0 &&
                        get_freepage_migratetype(page) == MIGRATE_ISOLATE)
                        pfn += 1;
                else if (skip_hwpoisoned_pages && PageHWPoison(page)) {
                        /*
                         * An HWPoisoned page may not be in the buddy
                         * system, and its page_count() may not be 0.
                         */
                        pfn++;
                        continue;
                } else
                        break;
        }
        if (pfn < end_pfn)
                return 0;
        return 1;
}

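/*
 * Check that every pageblock in [start_pfn, end_pfn) is MIGRATE_ISOLATE and
 * that every page in the range is free (or HWPoisoned, when
 * @skip_hwpoisoned_pages is true). Returns 0 if so and -EBUSY otherwise.
 */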
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
                        bool skip_hwpoisoned_pages)
{
        unsigned long pfn, flags;
        struct page *page;
        struct zone *zone;
        int ret;

        /*
         * Note: pageblock_nr_pages != MAX_ORDER_NR_PAGES, so chunks of free
         * pages are not necessarily aligned to pageblock_nr_pages.
         * Just check the migratetype of each pageblock first.
         */
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                        break;
        }
        page = __first_valid_page(start_pfn, end_pfn - start_pfn);
        if ((pfn < end_pfn) || !page)
                return -EBUSY;
        /* Check that all pages are free or marked as ISOLATED */
        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
                                                skip_hwpoisoned_pages);
        spin_unlock_irqrestore(&zone->lock, flags);
        return ret ? 0 : -EBUSY;
}

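/*
 * Allocation callback for migrating pages off an isolated range: return a
 * new destination page for @page. Hugepages are allocated on another node;
 * all other pages get a movable GFP_USER page.
 */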
struct page *alloc_migrate_target(struct page *page, unsigned long private,
                                  int **resultp)
{
        gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;

        /*
         * TODO: allocate a destination hugepage from the nearest neighbor
         * node, in accordance with the memory policy of the user process,
         * if possible. For now, as a simple work-around, we use the next
         * node as the destination.
         */
        if (PageHuge(page)) {
                nodemask_t src = nodemask_of_node(page_to_nid(page));
                nodemask_t dst;
                nodes_complement(dst, src);
                return alloc_huge_page_node(page_hstate(compound_head(page)),
                                            next_node(page_to_nid(page), dst));
        }

        if (PageHighMem(page))
                gfp_mask |= __GFP_HIGHMEM;

        return alloc_page(gfp_mask);
}
 315