linux/mm/balloon_compaction.c
   1/*
   2 * mm/balloon_compaction.c
   3 *
   4 * Common interface for making balloon pages movable by compaction.
   5 *
   6 * Copyright (C) 2012, Red Hat, Inc.  Rafael Aquini <aquini@redhat.com>
   7 */
   8#include <linux/mm.h>
   9#include <linux/slab.h>
  10#include <linux/export.h>
  11#include <linux/balloon_compaction.h>
  12
  13/*
  14 * balloon_page_alloc - allocates a new page for insertion into the balloon
  15 *                        page list.
  16 *
  17 * Driver must call it to properly allocate a new enlisted balloon page.
  18 * Driver must call balloon_page_enqueue before definitively removing it from
  19 * the guest system.  This function returns the page address for the recently
  20 * allocated page or NULL in the case we fail to allocate a new page this turn.
  21 */
  22struct page *balloon_page_alloc(void)
  23{
  24        struct page *page = alloc_page(balloon_mapping_gfp_mask() |
  25                                       __GFP_NOMEMALLOC | __GFP_NORETRY);
  26        return page;
  27}
  28EXPORT_SYMBOL_GPL(balloon_page_alloc);
  29
/*
 * balloon_page_enqueue - inserts a new page into the balloon page list.
 * @b_dev_info: balloon device descriptor where we will insert a new page to
 * @page: new page to enqueue - allocated using balloon_page_alloc.
 *
 * Driver must call it to properly enqueue a new allocated balloon page
 * before definitively removing it from the guest system.
 */
void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
			  struct page *page)
{
	unsigned long flags;

	/*
	 * Block others from accessing the 'page' when we get around to
	 * establishing additional references. We should be the only one
	 * holding a reference to the 'page' at this point.
	 */
	BUG_ON(!trylock_page(page));
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	balloon_page_insert(b_dev_info, page);
	/* Account the inflation while still holding pages_lock. */
	__count_vm_event(BALLOON_INFLATE);
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
	unlock_page(page);
}
EXPORT_SYMBOL_GPL(balloon_page_enqueue);
  59
/*
 * balloon_page_dequeue - removes a page from balloon's page list and returns
 *                        its address to allow the driver to release the page.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
 *
 * Driver must call it to properly de-allocate a previously enlisted balloon
 * page before definitively releasing it back to the guest system.
 * This function returns the page address for the recently dequeued page or
 * NULL in the case we find balloon's page list temporarily empty due to
 * compaction isolated pages.
 */
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
	struct page *page, *tmp;
	unsigned long flags;
	bool dequeued_page;

	dequeued_page = false;
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
		/*
		 * Block others from accessing the 'page' while we get around
		 * establishing additional references and preparing the 'page'
		 * to be released by the balloon driver.
		 */
		if (trylock_page(page)) {
#ifdef CONFIG_BALLOON_COMPACTION
			if (PageIsolated(page)) {
				/* raced with isolation */
				unlock_page(page);
				continue;
			}
#endif
			/* Detach and account the deflation under pages_lock. */
			balloon_page_delete(page);
			__count_vm_event(BALLOON_DEFLATE);
			unlock_page(page);
			dequeued_page = true;
			/* One page per call: stop after the first success. */
			break;
		}
	}
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

	if (!dequeued_page) {
		/*
		 * If we are unable to dequeue a balloon page because the page
		 * list is empty and there is no isolated pages, then something
		 * went out of track and some balloon pages are lost.
		 * BUG() here, otherwise the balloon driver may get stuck into
		 * an infinite loop while attempting to release all its pages.
		 */
		spin_lock_irqsave(&b_dev_info->pages_lock, flags);
		if (unlikely(list_empty(&b_dev_info->pages) &&
			     !b_dev_info->isolated_pages))
			BUG();
		spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
		/* List was (temporarily) empty: tell the caller to retry. */
		page = NULL;
	}
	return page;
}
EXPORT_SYMBOL_GPL(balloon_page_dequeue);
 120
 121#ifdef CONFIG_BALLOON_COMPACTION
 122
 123bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
 124
 125{
 126        struct balloon_dev_info *b_dev_info = balloon_page_device(page);
 127        unsigned long flags;
 128
 129        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
 130        list_del(&page->lru);
 131        b_dev_info->isolated_pages++;
 132        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 133
 134        return true;
 135}
 136
 137void balloon_page_putback(struct page *page)
 138{
 139        struct balloon_dev_info *b_dev_info = balloon_page_device(page);
 140        unsigned long flags;
 141
 142        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
 143        list_add(&page->lru, &b_dev_info->pages);
 144        b_dev_info->isolated_pages--;
 145        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 146}
 147
 148
/* move_to_new_page() counterpart for a ballooned page */
int balloon_page_migrate(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	struct balloon_dev_info *balloon = balloon_page_device(page);

	/*
	 * We cannot easily support the no-copy case here, so ignore it as it
	 * is unlikely to be used with balloon pages. See include/linux/hmm.h
	 * for users of the MIGRATE_SYNC_NO_COPY mode.
	 */
	if (mode == MIGRATE_SYNC_NO_COPY)
		return -EINVAL;

	/* Migration core must hand us both pages locked. */
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	/* Delegate the actual data transfer to the balloon driver. */
	return balloon->migratepage(balloon, newpage, page, mode);
}
 169
/*
 * Address-space operations shared by all balloon drivers so the core
 * compaction/migration code can isolate, migrate and put back their pages.
 */
const struct address_space_operations balloon_aops = {
	.migratepage = balloon_page_migrate,
	.isolate_page = balloon_page_isolate,
	.putback_page = balloon_page_putback,
};
EXPORT_SYMBOL_GPL(balloon_aops);
 176
 177#endif /* CONFIG_BALLOON_COMPACTION */
 178