   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * mm/balloon_compaction.c
   4 *
   5 * Common interface for making balloon pages movable by compaction.
   6 *
   7 * Copyright (C) 2012, Red Hat, Inc.  Rafael Aquini <aquini@redhat.com>
   8 */
   9#include <linux/mm.h>
  10#include <linux/slab.h>
  11#include <linux/export.h>
  12#include <linux/balloon_compaction.h>
  13
  14/*
  15 * balloon_page_alloc - allocates a new page for insertion into the balloon
  16 *                        page list.
  17 *
  18 * Driver must call it to properly allocate a new enlisted balloon page.
  19 * Driver must call balloon_page_enqueue before definitively removing it from
  20 * the guest system.  This function returns the page address for the recently
  21 * allocated page or NULL in the case we fail to allocate a new page this turn.
  22 */
  23struct page *balloon_page_alloc(void)
  24{
  25        struct page *page = alloc_page(balloon_mapping_gfp_mask() |
  26                                       __GFP_NOMEMALLOC | __GFP_NORETRY);
  27        return page;
  28}
  29EXPORT_SYMBOL_GPL(balloon_page_alloc);
  30
/*
 * balloon_page_enqueue - inserts a new page into the balloon page list.
 * @b_dev_info: balloon device descriptor where we will insert a new page to
 * @page: new page to enqueue - allocated using balloon_page_alloc.
 *
 * Driver must call it to properly enqueue a new allocated balloon page
 * before definitively removing it from the guest system.
 */
void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
			  struct page *page)
{
	unsigned long flags;

	/*
	 * Block others from accessing the 'page' when we get around to
	 * establishing additional references. We should be the only one
	 * holding a reference to the 'page' at this point.
	 */
	BUG_ON(!trylock_page(page));
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	balloon_page_insert(b_dev_info, page);
	__count_vm_event(BALLOON_INFLATE);
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
	unlock_page(page);
}
EXPORT_SYMBOL_GPL(balloon_page_enqueue);
  60
/*
 * balloon_page_dequeue - removes a page from balloon's page list and returns
 *			  its address to allow the driver to release the page.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
 *
 * Driver must call it to properly de-allocate a previously enlisted balloon
 * page before definitively releasing it back to the guest system.
 * This function returns the page address for the recently dequeued page or
 * NULL in the case we find balloon's page list temporarily empty due to
 * compaction isolated pages.
 */
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
	struct page *page, *tmp;
	unsigned long flags;
	bool dequeued_page;

	dequeued_page = false;
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
		/*
		 * Block others from accessing the 'page' while we get around
		 * establishing additional references and preparing the 'page'
		 * to be released by the balloon driver.
		 */
		if (trylock_page(page)) {
#ifdef CONFIG_BALLOON_COMPACTION
			if (PageIsolated(page)) {
				/* raced with isolation */
				unlock_page(page);
				continue;
			}
#endif
			balloon_page_delete(page);
			__count_vm_event(BALLOON_DEFLATE);
			unlock_page(page);
			dequeued_page = true;
			break;
		}
	}
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

	if (!dequeued_page) {
		/*
		 * If we are unable to dequeue a balloon page because the page
		 * list is empty and there are no isolated pages, then something
		 * went out of track and some balloon pages are lost.
		 * BUG() here, otherwise the balloon driver may get stuck in
		 * an infinite loop while attempting to release all its pages.
		 */
		spin_lock_irqsave(&b_dev_info->pages_lock, flags);
		if (unlikely(list_empty(&b_dev_info->pages) &&
			     !b_dev_info->isolated_pages))
			BUG();
		spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
		page = NULL;
	}
	return page;
}
EXPORT_SYMBOL_GPL(balloon_page_dequeue);
 121
 122#ifdef CONFIG_BALLOON_COMPACTION
 123
 124bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
 125
 126{
 127        struct balloon_dev_info *b_dev_info = balloon_page_device(page);
 128        unsigned long flags;
 129
 130        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
 131        list_del(&page->lru);
 132        b_dev_info->isolated_pages++;
 133        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 134
 135        return true;
 136}
 137
 138void balloon_page_putback(struct page *page)
 139{
 140        struct balloon_dev_info *b_dev_info = balloon_page_device(page);
 141        unsigned long flags;
 142
 143        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
 144        list_add(&page->lru, &b_dev_info->pages);
 145        b_dev_info->isolated_pages--;
 146        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 147}
 148

/* move_to_new_page() counterpart for a ballooned page */
int balloon_page_migrate(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	struct balloon_dev_info *balloon = balloon_page_device(page);

	/*
	 * We can not easily support the no copy case here so ignore it as it
	 * is unlikely to be used with balloon pages. See include/linux/hmm.h
	 * for a user of the MIGRATE_SYNC_NO_COPY mode.
	 */
	if (mode == MIGRATE_SYNC_NO_COPY)
		return -EINVAL;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	/* Delegate the actual transfer to the balloon driver's callback. */
	return balloon->migratepage(balloon, newpage, page, mode);
}
 170
/*
 * Address-space operations wired into ballooned pages so the core page
 * migration code can isolate, migrate and put back balloon pages via the
 * callbacks defined above.
 */
const struct address_space_operations balloon_aops = {
	.migratepage = balloon_page_migrate,
	.isolate_page = balloon_page_isolate,
	.putback_page = balloon_page_putback,
};
EXPORT_SYMBOL_GPL(balloon_aops);
 177
 178#endif /* CONFIG_BALLOON_COMPACTION */
 179