/*
 * mm/balloon_compaction.c
 *
 * Common interface for making balloon pages movable by compaction.
 *
 * Copyright (C) 2012, Red Hat, Inc.  Rafael Aquini <aquini@redhat.com>
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/balloon_compaction.h>

/*
 * balloon_page_alloc - allocates a new page for insertion into the balloon
 *			page list.
 *
 * Driver must call it to properly allocate a new enlisted balloon page.
 * Driver must call balloon_page_enqueue before definitively removing it from
 * the guest system.  This function returns the page address for the recently
 * allocated page or NULL in the case we fail to allocate a new page this turn.
 */
struct page *balloon_page_alloc(void)
{
	struct page *page = alloc_page(balloon_mapping_gfp_mask() |
				       __GFP_NOMEMALLOC | __GFP_NORETRY);
	return page;
}
EXPORT_SYMBOL_GPL(balloon_page_alloc);

  30/*
  31 * balloon_page_enqueue - allocates a new page and inserts it into the balloon
  32 *                        page list.
  33 * @b_dev_info: balloon device decriptor where we will insert a new page to
  34 * @page: new page to enqueue - allocated using balloon_page_alloc.
  35 *
  36 * Driver must call it to properly enqueue a new allocated balloon page
  37 * before definetively removing it from the guest system.
  38 * This function returns the page address for the recently enqueued page or
  39 * NULL in the case we fail to allocate a new page this turn.
  40 */
  41void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
  42                          struct page *page)
  43{
  44        unsigned long flags;
  45
  46        /*
  47         * Block others from accessing the 'page' when we get around to
  48         * establishing additional references. We should be the only one
  49         * holding a reference to the 'page' at this point.
  50         */
  51        BUG_ON(!trylock_page(page));
  52        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
  53        balloon_page_insert(b_dev_info, page);
  54        __count_vm_event(BALLOON_INFLATE);
  55        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
  56        unlock_page(page);
  57}
  58EXPORT_SYMBOL_GPL(balloon_page_enqueue);
  59
  60/*
  61 * balloon_page_dequeue - removes a page from balloon's page list and returns
  62 *                        the its address to allow the driver release the page.
  63 * @b_dev_info: balloon device decriptor where we will grab a page from.
  64 *
  65 * Driver must call it to properly de-allocate a previous enlisted balloon page
  66 * before definetively releasing it back to the guest system.
  67 * This function returns the page address for the recently dequeued page or
  68 * NULL in the case we find balloon's page list temporarily empty due to
  69 * compaction isolated pages.
  70 */
  71struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
  72{
  73        struct page *page, *tmp;
  74        unsigned long flags;
  75        bool dequeued_page;
  76
  77        dequeued_page = false;
  78        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
  79        list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
  80                /*
  81                 * Block others from accessing the 'page' while we get around
  82                 * establishing additional references and preparing the 'page'
  83                 * to be released by the balloon driver.
  84                 */
  85                if (trylock_page(page)) {
  86#ifdef CONFIG_BALLOON_COMPACTION
  87                        if (!PagePrivate(page)) {
  88                                /* raced with isolation */
  89                                unlock_page(page);
  90                                continue;
  91                        }
  92#endif
  93                        balloon_page_delete(page);
  94                        __count_vm_event(BALLOON_DEFLATE);
  95                        unlock_page(page);
  96                        dequeued_page = true;
  97                        break;
  98                }
  99        }
 100        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 101
 102        if (!dequeued_page) {
 103                /*
 104                 * If we are unable to dequeue a balloon page because the page
 105                 * list is empty and there is no isolated pages, then something
 106                 * went out of track and some balloon pages are lost.
 107                 * BUG() here, otherwise the balloon driver may get stuck into
 108                 * an infinite loop while attempting to release all its pages.
 109                 */
 110                spin_lock_irqsave(&b_dev_info->pages_lock, flags);
 111                if (unlikely(list_empty(&b_dev_info->pages) &&
 112                             !b_dev_info->isolated_pages))
 113                        BUG();
 114                spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 115                page = NULL;
 116        }
 117        return page;
 118}
 119EXPORT_SYMBOL_GPL(balloon_page_dequeue);

#ifdef CONFIG_BALLOON_COMPACTION

 123static inline void __isolate_balloon_page(struct page *page)
 124{
 125        struct balloon_dev_info *b_dev_info = balloon_page_device(page);
 126        unsigned long flags;
 127
 128        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
 129        ClearPagePrivate(page);
 130        list_del(&page->lru);
 131        b_dev_info->isolated_pages++;
 132        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 133}
 134
 135static inline void __putback_balloon_page(struct page *page)
 136{
 137        struct balloon_dev_info *b_dev_info = balloon_page_device(page);
 138        unsigned long flags;
 139
 140        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
 141        SetPagePrivate(page);
 142        list_add(&page->lru, &b_dev_info->pages);
 143        b_dev_info->isolated_pages--;
 144        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 145}
 146
 147/* __isolate_lru_page() counterpart for a ballooned page */
 148bool balloon_page_isolate(struct page *page)
 149{
 150        /*
 151         * Avoid burning cycles with pages that are yet under __free_pages(),
 152         * or just got freed under us.
 153         *
 154         * In case we 'win' a race for a balloon page being freed under us and
 155         * raise its refcount preventing __free_pages() from doing its job
 156         * the put_page() at the end of this block will take care of
 157         * release this page, thus avoiding a nasty leakage.
 158         */
 159        if (likely(get_page_unless_zero(page))) {
 160                /*
 161                 * As balloon pages are not isolated from LRU lists, concurrent
 162                 * compaction threads can race against page migration functions
 163                 * as well as race against the balloon driver releasing a page.
 164                 *
 165                 * In order to avoid having an already isolated balloon page
 166                 * being (wrongly) re-isolated while it is under migration,
 167                 * or to avoid attempting to isolate pages being released by
 168                 * the balloon driver, lets be sure we have the page lock
 169                 * before proceeding with the balloon page isolation steps.
 170                 */
 171                if (likely(trylock_page(page))) {
 172                        /*
 173                         * A ballooned page, by default, has PagePrivate set.
 174                         * Prevent concurrent compaction threads from isolating
 175                         * an already isolated balloon page by clearing it.
 176                         */
 177                        if (balloon_page_movable(page)) {
 178                                __isolate_balloon_page(page);
 179                                unlock_page(page);
 180                                return true;
 181                        }
 182                        unlock_page(page);
 183                }
 184                put_page(page);
 185        }
 186        return false;
 187}
 188
 189/* putback_lru_page() counterpart for a ballooned page */
 190void balloon_page_putback(struct page *page)
 191{
 192        /*
 193         * 'lock_page()' stabilizes the page and prevents races against
 194         * concurrent isolation threads attempting to re-isolate it.
 195         */
 196        lock_page(page);
 197
 198        if (__is_movable_balloon_page(page)) {
 199                __putback_balloon_page(page);
 200                /* drop the extra ref count taken for page isolation */
 201                put_page(page);
 202        } else {
 203                WARN_ON(1);
 204                dump_page(page, "not movable balloon page");
 205        }
 206        unlock_page(page);
 207}
 208
 209/* move_to_new_page() counterpart for a ballooned page */
 210int balloon_page_migrate(struct page *newpage,
 211                         struct page *page, enum migrate_mode mode)
 212{
 213        struct balloon_dev_info *balloon = balloon_page_device(page);
 214        int rc = -EAGAIN;
 215
 216        /*
 217         * We can not easily support the no copy case here so ignore it as it
 218         * is unlikely to be use with ballon pages. See include/linux/hmm.h for
 219         * user of the MIGRATE_SYNC_NO_COPY mode.
 220         */
 221        if (mode == MIGRATE_SYNC_NO_COPY)
 222                return -EINVAL;
 223
 224        /*
 225         * Block others from accessing the 'newpage' when we get around to
 226         * establishing additional references. We should be the only one
 227         * holding a reference to the 'newpage' at this point.
 228         */
 229        BUG_ON(!trylock_page(newpage));
 230
 231        if (WARN_ON(!__is_movable_balloon_page(page))) {
 232                dump_page(page, "not movable balloon page");
 233                unlock_page(newpage);
 234                return rc;
 235        }
 236
 237        if (balloon && balloon->migratepage)
 238                rc = balloon->migratepage(balloon, newpage, page, mode);
 239
 240        unlock_page(newpage);
 241        return rc;
 242}
#endif /* CONFIG_BALLOON_COMPACTION */