linux/arch/powerpc/kvm/book3s_hv_cma.c
/*
 * Contiguous Memory Allocator for ppc KVM hash pagetable based on CMA
 * for DMA mapping framework
 *
 * Copyright IBM Corporation, 2013
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 */
#define pr_fmt(fmt) "kvm_cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <linux/memblock.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include "book3s_hv_cma.h"

struct kvm_cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
};
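
/*
 * Bookkeeping sketch, with assumed values: each bit in @bitmap covers
 * one KVM_CMA_CHUNK_ORDER-sized chunk rather than one page, which keeps
 * the bitmap small for large reservations.  For example, assuming
 * 256 KiB chunks (KVM_CMA_CHUNK_ORDER = 18; see book3s_hv_cma.h), a
 * 128 MiB area needs only 128 MiB / 256 KiB = 512 bits, i.e. 64 bytes
 * of bitmap.
 */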

static DEFINE_MUTEX(kvm_cma_mutex);
static struct kvm_cma kvm_cma_area;

/**
 * kvm_cma_declare_contiguous() - reserve area for contiguous memory handling
 *				  for kvm hash pagetable
 * @size:  Size of the reserved memory.
 * @alignment:  Alignment for the contiguous memory area
 *
 * This function reserves memory for the kvm cma area. It should be
 * called by arch code while the early allocator (memblock or bootmem)
 * is still active.
 */
long __init kvm_cma_declare_contiguous(phys_addr_t size, phys_addr_t alignment)
{
	long base_pfn;
	phys_addr_t addr;
	struct kvm_cma *cma = &kvm_cma_area;

	pr_debug("%s(size %lx)\n", __func__, (unsigned long)size);

	if (!size)
		return -EINVAL;
	/*
	 * Sanitise input arguments.
	 * We should be pageblock aligned for CMA.
	 */
	alignment = max(alignment, (phys_addr_t)(PAGE_SIZE << pageblock_order));
	size = ALIGN(size, alignment);
	/*
	 * Reserve memory.
	 * Use __memblock_alloc_base() since
	 * memblock_alloc_base() panic()s on failure.
	 */
	addr = __memblock_alloc_base(size, alignment, 0);
	if (!addr) {
		base_pfn = -ENOMEM;
		goto err;
	}
	base_pfn = PFN_DOWN(addr);

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma->base_pfn = base_pfn;
	cma->count    = size >> PAGE_SHIFT;
	pr_info("CMA: reserved %lu MiB\n", (unsigned long)size / SZ_1M);
	return 0;
err:
	pr_err("CMA: failed to reserve %lu MiB\n", (unsigned long)size / SZ_1M);
	return base_pfn;
}
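
/*
 * Illustrative usage sketch, not part of the original file: early arch
 * setup code is expected to call kvm_cma_declare_contiguous() while
 * memblock is still active.  The function name and the 128 MiB size
 * below are hypothetical; the real caller lives in book3s_hv_builtin.c.
 */
static void __init kvm_cma_reserve_sketch(void)
{
	phys_addr_t selected_size = 128ul << 20;	/* assumed size */
	phys_addr_t align_size = PAGE_SIZE << pageblock_order;

	if (kvm_cma_declare_contiguous(selected_size, align_size) < 0)
		pr_err("failed to reserve %lu MiB for hash pagetable\n",
		       (unsigned long)(selected_size >> 20));
}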

/**
 * kvm_alloc_cma() - allocate pages from contiguous area
 * @nr_pages: Requested number of pages.
 * @align_pages: Requested alignment in number of pages
 *
 * This function allocates a memory buffer for the hash pagetable.
 */
struct page *kvm_alloc_cma(unsigned long nr_pages, unsigned long align_pages)
{
	int ret;
	struct page *page = NULL;
	struct kvm_cma *cma = &kvm_cma_area;
	unsigned long chunk_count, nr_chunk;
	unsigned long mask, pfn, pageno, start = 0;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %lu, align pages %lu)\n", __func__,
		 (void *)cma, nr_pages, align_pages);

	if (!nr_pages)
		return NULL;
	/*
	 * Align the mask with the chunk size: each bitmap bit tracks one
	 * chunk of pages, so sizes and alignments are expected to be
	 * multiples of the chunk size.
	 */
	VM_BUG_ON(!is_power_of_2(align_pages));
	mask = (align_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT)) - 1;
	BUILD_BUG_ON(PAGE_SHIFT > KVM_CMA_CHUNK_ORDER);

	chunk_count = cma->count >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
	nr_chunk = nr_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
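	/*
	 * Worked example with assumed values: with KVM_CMA_CHUNK_ORDER =
	 * 18 (256 KiB chunks; see book3s_hv_cma.h) and 4 KiB pages
	 * (PAGE_SHIFT = 12), one bitmap bit covers 2^6 = 64 pages, so a
	 * request for 4096 pages (16 MiB) needs nr_chunk = 4096 >> 6 =
	 * 64 bits.
	 */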

	mutex_lock(&kvm_cma_mutex);
	for (;;) {
		pageno = bitmap_find_next_zero_area(cma->bitmap, chunk_count,
						    start, nr_chunk, mask);
		if (pageno >= chunk_count)
			break;

		pfn = cma->base_pfn + (pageno << (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT));
		ret = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA);
		if (ret == 0) {
			bitmap_set(cma->bitmap, pageno, nr_chunk);
			page = pfn_to_page(pfn);
			memset(pfn_to_kaddr(pfn), 0, nr_pages << PAGE_SHIFT);
			break;
		} else if (ret != -EBUSY) {
			break;
		}
		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}
	mutex_unlock(&kvm_cma_mutex);
	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
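
/*
 * Illustrative usage sketch, not part of the original file: allocating
 * a chunk-aligned hash pagetable from the reserved area.  The helper
 * name is hypothetical; the real consumer is the HPT allocation path in
 * book3s_hv_builtin.c.
 */
static struct page *kvm_alloc_hpt_sketch(unsigned long nr_pages)
{
	/* align_pages must be a power of two (VM_BUG_ON above). */
	return kvm_alloc_cma(nr_pages,
			     1ul << (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT));
}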

/**
 * kvm_release_cma() - release allocated pages for hash pagetable
 * @pages: Allocated pages.
 * @nr_pages: Number of allocated pages.
 *
 * This function releases memory allocated by kvm_alloc_cma().
 * It returns false when the provided pages do not belong to the
 * contiguous area, and true otherwise.
 */
bool kvm_release_cma(struct page *pages, unsigned long nr_pages)
{
	unsigned long pfn;
	unsigned long nr_chunk;
	struct kvm_cma *cma = &kvm_cma_area;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p count %lu)\n", __func__, (void *)pages, nr_pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + nr_pages > cma->base_pfn + cma->count);
	nr_chunk = nr_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	mutex_lock(&kvm_cma_mutex);
	bitmap_clear(cma->bitmap,
		     (pfn - cma->base_pfn) >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT),
		     nr_chunk);
	free_contig_range(pfn, nr_pages);
	mutex_unlock(&kvm_cma_mutex);

	return true;
}
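
/*
 * Illustrative usage sketch, not part of the original file: a release
 * path pairing with kvm_alloc_cma().  Because kvm_release_cma() returns
 * false for pages outside the reserved range, a caller that mixes CMA
 * and buddy allocations can fall back to free_pages().  The helper name
 * is hypothetical.
 */
static void kvm_free_hpt_sketch(struct page *page, unsigned long nr_pages)
{
	if (!kvm_release_cma(page, nr_pages))
		free_pages((unsigned long)page_address(page),
			   get_order(nr_pages << PAGE_SHIFT));
}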

static int __init kvm_cma_activate_area(unsigned long base_pfn,
					unsigned long count)
{
	unsigned long pfn = base_pfn;
	unsigned int i = count >> pageblock_order;
	struct zone *zone;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));
	do {
		unsigned int j;
		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range() requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA reserved
			 * range to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				return -EINVAL;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);
	return 0;
}
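
/*
 * Worked example with assumed values: with 4 KiB pages and
 * pageblock_order = 9 (2 MiB pageblocks), activating a 128 MiB
 * reservation takes 64 trips through the outer loop above, with
 * init_cma_reserved_pageblock() handing one pageblock at a time back
 * to the buddy allocator as MIGRATE_CMA.
 */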

static int __init kvm_cma_init_reserved_areas(void)
{
	int bitmap_size, ret;
	unsigned long chunk_count;
	struct kvm_cma *cma = &kvm_cma_area;

	pr_debug("%s()\n", __func__);
	if (!cma->count)
		return 0;
	chunk_count = cma->count >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
	bitmap_size = BITS_TO_LONGS(chunk_count) * sizeof(long);
	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!cma->bitmap)
		return -ENOMEM;

	ret = kvm_cma_activate_area(cma->base_pfn, cma->count);
	if (ret)
		goto error;
	return 0;

error:
	kfree(cma->bitmap);
	return ret;
}
core_initcall(kvm_cma_init_reserved_areas);