linux/drivers/xen/unpopulated-alloc.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/slab.h>

#include <asm/page.h>

#include <xen/page.h>
#include <xen/xen.h>

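/*
 * Unpopulated page frames are kept on a simple LIFO free list: @page_list
 * is the head, the links are threaded through each page's zone_device_data
 * field, and @list_count tracks how many frames are currently available.
 * Both are protected by @list_lock.
 */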
static DEFINE_MUTEX(list_lock);
static struct page *page_list;
static unsigned int list_count;

static int fill_list(unsigned int nr_pages)
{
        struct dev_pagemap *pgmap;
        struct resource *res;
        void *vaddr;
        unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
        int ret = -ENOMEM;

        res = kzalloc(sizeof(*res), GFP_KERNEL);
        if (!res)
                return -ENOMEM;

        res->name = "Xen scratch";
        res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

        ret = allocate_resource(&iomem_resource, res,
                                alloc_pages * PAGE_SIZE, 0, -1,
                                PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
        if (ret < 0) {
                pr_err("Cannot allocate new IOMEM resource\n");
                goto err_resource;
        }

        pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
        if (!pgmap) {
                ret = -ENOMEM;
                goto err_pgmap;
        }

        pgmap->type = MEMORY_DEVICE_GENERIC;
        pgmap->range = (struct range) {
                .start = res->start,
                .end = res->end,
        };
        pgmap->nr_range = 1;
        pgmap->owner = res;

#ifdef CONFIG_XEN_HAVE_PVMMU
        /*
         * memremap will build page tables for the new memory, so the
         * p2m must contain invalid entries to ensure that the correct
         * non-present PTEs will be written.
         *
         * If a failure occurs, the original (identity) p2m entries
         * are not restored since this region is now known not to
         * conflict with any devices.
         */
        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                xen_pfn_t pfn = PFN_DOWN(res->start);

                for (i = 0; i < alloc_pages; i++) {
                        if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
                                pr_warn("set_phys_to_machine() failed, no memory added\n");
                                ret = -ENOMEM;
                                goto err_memremap;
                        }
                }
        }
#endif

        vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
        if (IS_ERR(vaddr)) {
                pr_err("Cannot remap memory range\n");
                ret = PTR_ERR(vaddr);
                goto err_memremap;
        }

        for (i = 0; i < alloc_pages; i++) {
                struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);

                BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
                pg->zone_device_data = page_list;
                page_list = pg;
                list_count++;
        }

        return 0;

err_memremap:
        kfree(pgmap);
err_pgmap:
        release_resource(res);
err_resource:
        kfree(res);
        return ret;
}

/**
 * xen_alloc_unpopulated_pages - alloc unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages returned
 * @return 0 on success, error otherwise
 */
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
        unsigned int i;
        int ret = 0;

        mutex_lock(&list_lock);
        if (list_count < nr_pages) {
                ret = fill_list(nr_pages - list_count);
                if (ret)
                        goto out;
        }

        for (i = 0; i < nr_pages; i++) {
                struct page *pg = page_list;

                BUG_ON(!pg);
                page_list = pg->zone_device_data;
                list_count--;
                pages[i] = pg;

#ifdef CONFIG_XEN_HAVE_PVMMU
                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                        ret = xen_alloc_p2m_entry(page_to_pfn(pg));
                        if (ret < 0) {
                                unsigned int j;

                                /*
                                 * Undo the allocation: put every page claimed
                                 * so far (including this one) back on the
                                 * free list before returning the error.
                                 */
                                for (j = 0; j <= i; j++) {
                                        pages[j]->zone_device_data = page_list;
                                        page_list = pages[j];
                                        list_count++;
                                }
                                goto out;
                        }
                }
#endif
        }

out:
        mutex_unlock(&list_lock);
        return ret;
}
EXPORT_SYMBOL(xen_alloc_unpopulated_pages);
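
/*
 * Illustrative only: a minimal sketch of how a caller might use this API,
 * kept as a comment so it is not built.  The names "my_setup", "my_teardown"
 * and "my_pages" are made up for this example and are not part of this file
 * or of any real in-tree user.
 *
 *      static struct page *my_pages[16];
 *
 *      static int my_setup(void)
 *      {
 *              int ret = xen_alloc_unpopulated_pages(ARRAY_SIZE(my_pages),
 *                                                    my_pages);
 *              if (ret)
 *                      return ret;     // e.g. -ENOMEM if fill_list() failed
 *              // ... map foreign or granted frames into my_pages here ...
 *              return 0;
 *      }
 *
 *      static void my_teardown(void)
 *      {
 *              // Every page obtained above must eventually be handed back.
 *              xen_free_unpopulated_pages(ARRAY_SIZE(my_pages), my_pages);
 *      }
 */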

/**
 * xen_free_unpopulated_pages - return unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
        unsigned int i;

        mutex_lock(&list_lock);
        for (i = 0; i < nr_pages; i++) {
                pages[i]->zone_device_data = page_list;
                page_list = pages[i];
                list_count++;
        }
        mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(xen_free_unpopulated_pages);

#ifdef CONFIG_XEN_PV
static int __init init(void)
{
        unsigned int i;

        if (!xen_domain())
                return -ENODEV;

        if (!xen_pv_domain())
                return 0;

        /*
         * Initialize with pages from the extra memory regions (see
         * arch/x86/xen/setup.c).
         */
        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
                unsigned int j;

                for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
                        struct page *pg =
                                pfn_to_page(xen_extra_mem[i].start_pfn + j);

                        pg->zone_device_data = page_list;
                        page_list = pg;
                        list_count++;
                }
        }

        return 0;
}
subsys_initcall(init);
#endif