linux/mm/cma.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *      Marek Szyprowski <m.szyprowski@samsung.com>
 *      Michal Nazarewicz <mina86@mina86.com>
 *      Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *      Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
        return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
        return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
        return cma->name ? cma->name : "(undefined)";
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
                                             unsigned int align_order)
{
        if (align_order <= cma->order_per_bit)
                return 0;
        return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
                                               unsigned int align_order)
{
        return (cma->base_pfn & ((1UL << align_order) - 1))
                >> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
                                              unsigned long pages)
{
        return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

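/*
 * Worked example (editorial note, not part of the original source): with
 * order_per_bit = 2, each bitmap bit covers 2^2 = 4 pages. A request for
 * count = 10 pages therefore needs cma_bitmap_pages_to_bits() =
 * ALIGN(10, 4) >> 2 = 12 >> 2 = 3 bits, and an allocation with
 * align_order = 4 (16 pages) gets cma_bitmap_aligned_mask() =
 * (1 << (4 - 2)) - 1 = 3, i.e. the bitmap search position is rounded up
 * to a multiple of 4 bits. The numbers are illustrative only.
 */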
static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
                             unsigned int count)
{
        unsigned long bitmap_no, bitmap_count;

        bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
        bitmap_count = cma_bitmap_pages_to_bits(cma, count);

        mutex_lock(&cma->lock);
        bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
        mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
        int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
        unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
        unsigned i = cma->count >> pageblock_order;
        struct zone *zone;

        cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

        if (!cma->bitmap) {
                cma->count = 0;
                return -ENOMEM;
        }

        WARN_ON_ONCE(!pfn_valid(pfn));
        zone = page_zone(pfn_to_page(pfn));

        do {
                unsigned j;

                base_pfn = pfn;
                for (j = pageblock_nr_pages; j; --j, pfn++) {
                        WARN_ON_ONCE(!pfn_valid(pfn));
                        /*
                         * alloc_contig_range requires the pfn range
                         * specified to be in the same zone. Make this
                         * simple by forcing the entire CMA resv range
                         * to be in the same zone.
                         */
                        if (page_zone(pfn_to_page(pfn)) != zone)
                                goto not_in_zone;
                }
                init_cma_reserved_pageblock(pfn_to_page(base_pfn));
        } while (--i);

        mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
        INIT_HLIST_HEAD(&cma->mem_head);
        spin_lock_init(&cma->mem_head_lock);
#endif

        return 0;

not_in_zone:
        pr_err("CMA area %s could not be activated\n", cma->name);
        kfree(cma->bitmap);
        cma->count = 0;
        return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
        int i;

        for (i = 0; i < cma_area_count; i++) {
                int ret = cma_activate_area(&cma_areas[i]);

                if (ret)
                        return ret;
        }

        return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit in the bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
                                 unsigned int order_per_bit,
                                 const char *name,
                                 struct cma **res_cma)
{
        struct cma *cma;
        phys_addr_t alignment;

        /* Sanity checks */
        if (cma_area_count == ARRAY_SIZE(cma_areas)) {
                pr_err("Not enough slots for CMA reserved regions!\n");
                return -ENOSPC;
        }

        if (!size || !memblock_is_region_reserved(base, size))
                return -EINVAL;

        /* ensure minimal alignment required by mm core */
        alignment = PAGE_SIZE <<
                        max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

        /* alignment should be aligned with order_per_bit */
        if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
                return -EINVAL;

        if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
                return -EINVAL;

        /*
         * Each reserved area must be initialised later, when more kernel
         * subsystems (like slab allocator) are available.
         */
        cma = &cma_areas[cma_area_count];
        if (name) {
                cma->name = name;
        } else {
                cma->name = kasprintf(GFP_KERNEL, "cma%d\n", cma_area_count);
                if (!cma->name)
                        return -ENOMEM;
        }
        cma->base_pfn = PFN_DOWN(base);
        cma->count = size >> PAGE_SHIFT;
        cma->order_per_bit = order_per_bit;
        *res_cma = cma;
        cma_area_count++;
        totalcma_pages += (size / PAGE_SIZE);

        return 0;
}

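/*
 * Example (editorial sketch, not part of the original file): a typical caller
 * is a reserved-memory setup hook that hands an already memblock-reserved
 * range over to CMA. The identifiers "my_rmem_cma_setup" and "my_cma" are
 * hypothetical; struct reserved_mem comes from <linux/of_reserved_mem.h>.
 *
 *      static struct cma *my_cma;
 *
 *      static int __init my_rmem_cma_setup(struct reserved_mem *rmem)
 *      {
 *              return cma_init_reserved_mem(rmem->base, rmem->size, 0,
 *                                           rmem->name, &my_cma);
 *      }
 */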
/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit in the bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
                        phys_addr_t size, phys_addr_t limit,
                        phys_addr_t alignment, unsigned int order_per_bit,
                        bool fixed, const char *name, struct cma **res_cma)
{
        phys_addr_t memblock_end = memblock_end_of_DRAM();
        phys_addr_t highmem_start;
        int ret = 0;

        /*
         * We can't use __pa(high_memory) directly, since high_memory
         * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
         * complain. Find the boundary by adding one to the last valid
         * address.
         */
        highmem_start = __pa(high_memory - 1) + 1;
        pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
                __func__, &size, &base, &limit, &alignment);

        if (cma_area_count == ARRAY_SIZE(cma_areas)) {
                pr_err("Not enough slots for CMA reserved regions!\n");
                return -ENOSPC;
        }

        if (!size)
                return -EINVAL;

        if (alignment && !is_power_of_2(alignment))
                return -EINVAL;

        /*
         * Sanitise input arguments.
         * Pages at both ends of the CMA area could be merged into adjacent
         * unmovable migratetype pages by the page allocator's buddy algorithm.
         * In that case you could not get contiguous memory, which is not what
         * we want.
         */
        alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
                          max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
        base = ALIGN(base, alignment);
        size = ALIGN(size, alignment);
        limit &= ~(alignment - 1);

        if (!base)
                fixed = false;

        /* size should be aligned with order_per_bit */
        if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
                return -EINVAL;

        /*
         * If allocating at a fixed base the request region must not cross the
         * low/high memory boundary.
         */
        if (fixed && base < highmem_start && base + size > highmem_start) {
                ret = -EINVAL;
                pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
                        &base, &highmem_start);
                goto err;
        }

        /*
         * If the limit is unspecified or above the memblock end, its effective
         * value will be the memblock end. Set it explicitly to simplify further
         * checks.
         */
        if (limit == 0 || limit > memblock_end)
                limit = memblock_end;

        /* Reserve memory */
        if (fixed) {
                if (memblock_is_region_reserved(base, size) ||
                    memblock_reserve(base, size) < 0) {
                        ret = -EBUSY;
                        goto err;
                }
        } else {
                phys_addr_t addr = 0;

                /*
                 * All pages in the reserved area must come from the same zone.
                 * If the requested region crosses the low/high memory boundary,
                 * try allocating from high memory first and fall back to low
                 * memory in case of failure.
                 */
                if (base < highmem_start && limit > highmem_start) {
                        addr = memblock_phys_alloc_range(size, alignment,
                                                         highmem_start, limit);
                        limit = highmem_start;
                }

                if (!addr) {
                        addr = memblock_phys_alloc_range(size, alignment, base,
                                                         limit);
                        if (!addr) {
                                ret = -ENOMEM;
                                goto err;
                        }
                }

                /*
                 * kmemleak scans/reads tracked objects for pointers to other
                 * objects but this address isn't mapped and accessible
                 */
                kmemleak_ignore_phys(addr);
                base = addr;
        }
        ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
        if (ret)
                goto free_mem;

        pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
                &base);
        return 0;

free_mem:
        memblock_free(base, size);
err:
        pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
        return ret;
}

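/*
 * Example (editorial sketch, not part of the original file): arch or early
 * boot code reserving a 16 MiB area anywhere in memory, with default
 * alignment and order_per_bit = 0. The identifiers "my_arch_reserve_cma" and
 * "my_cma" are hypothetical.
 *
 *      static struct cma *my_cma;
 *
 *      void __init my_arch_reserve_cma(void)
 *      {
 *              if (cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false,
 *                                         "my_area", &my_cma))
 *                      pr_warn("my_area: CMA reservation failed\n");
 *      }
 */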
#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
        unsigned long next_zero_bit, next_set_bit, nr_zero;
        unsigned long start = 0;
        unsigned long nr_part, nr_total = 0;
        unsigned long nbits = cma_bitmap_maxno(cma);

        mutex_lock(&cma->lock);
        pr_info("number of available pages: ");
        for (;;) {
                next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
                if (next_zero_bit >= nbits)
                        break;
                next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
                nr_zero = next_set_bit - next_zero_bit;
                nr_part = nr_zero << cma->order_per_bit;
                pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
                        next_zero_bit);
                nr_total += nr_part;
                start = next_zero_bit + nr_zero;
        }
        pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
        mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing a message about a failed allocation.
 *
 * This function allocates a contiguous run of pages from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
                       bool no_warn)
{
        unsigned long mask, offset;
        unsigned long pfn = -1;
        unsigned long start = 0;
        unsigned long bitmap_maxno, bitmap_no, bitmap_count;
        size_t i;
        struct page *page = NULL;
        int ret = -ENOMEM;

        if (!cma || !cma->count)
                return NULL;

        pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
                 count, align);

        if (!count)
                return NULL;

        mask = cma_bitmap_aligned_mask(cma, align);
        offset = cma_bitmap_aligned_offset(cma, align);
        bitmap_maxno = cma_bitmap_maxno(cma);
        bitmap_count = cma_bitmap_pages_to_bits(cma, count);

        if (bitmap_count > bitmap_maxno)
                return NULL;

        for (;;) {
                mutex_lock(&cma->lock);
                bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
                                bitmap_maxno, start, bitmap_count, mask,
                                offset);
                if (bitmap_no >= bitmap_maxno) {
                        mutex_unlock(&cma->lock);
                        break;
                }
                bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
                /*
                 * It's safe to drop the lock here. We've marked this region for
                 * our exclusive use. If the migration fails we will take the
                 * lock again and unmark it.
                 */
                mutex_unlock(&cma->lock);

                pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
                mutex_lock(&cma_mutex);
                ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
                                     GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
                mutex_unlock(&cma_mutex);
                if (ret == 0) {
                        page = pfn_to_page(pfn);
                        break;
                }

                cma_clear_bitmap(cma, pfn, count);
                if (ret != -EBUSY)
                        break;

                pr_debug("%s(): memory range at %p is busy, retrying\n",
                         __func__, pfn_to_page(pfn));
                /* try again with a bit different memory target */
                start = bitmap_no + mask + 1;
        }

        trace_cma_alloc(pfn, page, count, align);

        /*
         * CMA can allocate multiple page blocks, which results in different
         * blocks being marked with different tags. Reset the tags to ignore
         * those page blocks.
         */
        if (page) {
                for (i = 0; i < count; i++)
                        page_kasan_tag_reset(page + i);
        }

        if (ret && !no_warn) {
                pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
                        __func__, count, ret);
                cma_debug_show_areas(cma);
        }

        pr_debug("%s(): returned %p\n", __func__, page);
        return page;
}

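/*
 * Example (editorial sketch, not part of the original file): a driver
 * allocating eight pages with no extra alignment (align = 0) from a
 * previously declared area, then releasing them. "my_cma" is hypothetical.
 *
 *      struct page *page = cma_alloc(my_cma, 8, 0, false);
 *
 *      if (page) {
 *              ... use the pages ...
 *              cma_release(my_cma, page, 8);
 *      }
 */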
/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
        unsigned long pfn;

        if (!cma || !pages)
                return false;

        pr_debug("%s(page %p)\n", __func__, (void *)pages);

        pfn = page_to_pfn(pages);

        if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
                return false;

        VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

        free_contig_range(pfn, count);
        cma_clear_bitmap(cma, pfn, count);
        trace_cma_release(pfn, pages, count);

        return true;
}

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
        int i;

        for (i = 0; i < cma_area_count; i++) {
                int ret = it(&cma_areas[i], data);

                if (ret)
                        return ret;
        }

        return 0;
}

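/*
 * Example (editorial sketch, not part of the original file): iterating over
 * all registered areas, e.g. to log their base and size. The callback
 * "cma_report_one" is hypothetical; it uses only accessors defined above.
 *
 *      static int cma_report_one(struct cma *cma, void *data)
 *      {
 *              phys_addr_t base = cma_get_base(cma);
 *
 *              pr_info("%s: base %pa, size %lu bytes\n",
 *                      cma_get_name(cma), &base, cma_get_size(cma));
 *              return 0;
 *      }
 *
 *      cma_for_each_area(cma_report_one, NULL);
 */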