linux/mm/memblock.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

#define INIT_MEMBLOCK_REGIONS			128
#define INIT_PHYSMEM_REGIONS			4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS		INIT_MEMBLOCK_REGIONS
#endif
/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   the ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmem`` - describes the actual physical memory available during
 *   boot regardless of the possible restrictions and memory hot(un)plug;
 *   the ``physmem`` type is only available on some architectures.
 *
 * Each region is represented by struct memblock_region that
 * defines the region extents, its attributes and NUMA node id on NUMA
 * systems. Every memory type is described by the struct memblock_type
 * which contains an array of memory regions along with
 * the allocator metadata. The "memory" and "reserved" types are nicely
 * wrapped with struct memblock. This structure is statically
 * initialized at build time. The region arrays are initially sized to
 * %INIT_MEMBLOCK_REGIONS for "memory" and %INIT_MEMBLOCK_RESERVED_REGIONS
 * for "reserved". The region array for "physmem" is initially sized to
 * %INIT_PHYSMEM_REGIONS.
 * The memblock_allow_resize() enables automatic resizing of the region
 * arrays during addition of new regions. This feature should be used
 * with care so that memory allocated for the region array will not
 * overlap with areas that should be reserved, for example initrd.
 *
 * The early architecture setup should tell memblock what the physical
 * memory layout is by using memblock_add() or memblock_add_node()
 * functions. The first function does not assign the region to a NUMA
 * node and it is appropriate for UMA systems. Yet, it is possible to
 * use it on NUMA systems as well and assign the region to a NUMA node
 * later in the setup process using memblock_set_node(). The
 * memblock_add_node() performs such an assignment directly.
 *
 * Once memblock is set up, the memory can be allocated using one of the
 * API variants:
 *
 * * memblock_phys_alloc*() - these functions return the **physical**
 *   address of the allocated memory
 * * memblock_alloc*() - these functions return the **virtual** address
 *   of the allocated memory.
 *
 * Note that both API variants use implicit assumptions about allowed
 * memory ranges and fallback methods. Consult the documentation
 * of the memblock_alloc_internal() and memblock_alloc_range_nid()
 * functions for a more elaborate description.
 *
 * As the system boot progresses, the architecture specific mem_init()
 * function frees all the memory to the buddy page allocator.
 *
 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
 * memblock data structures (except "physmem") will be discarded after the
 * system initialization completes.
 */

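/*
 * A minimal, hypothetical usage sketch (illustration only, not part of
 * the upstream file): an early architecture setup might register RAM and
 * carve out reservations roughly like this, assuming a single bank at
 * 0x80000000 and hypothetical dtb_phys/dtb_size describing a device tree
 * blob that must survive early allocations:
 *
 *	memblock_add(0x80000000, SZ_1G);		// describe available RAM
 *	memblock_reserve(dtb_phys, dtb_size);		// keep the DTB intact
 *	memblock_allow_resize();			// all reservations known
 *	ptr = memblock_alloc(size, SMP_CACHE_BYTES);	// early allocation
 */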
#ifndef CONFIG_NUMA
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name		= "reserved",

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
struct memblock_type physmem = {
	.regions		= memblock_physmem_init_regions,
	.cnt			= 1,	/* empty dummy entry */
	.max			= INIT_PHYSMEM_REGIONS,
	.name			= "physmem",
};
#endif

/*
 * Keep a pointer to &memblock.memory in the text section to use it in
 * __next_mem_range() and its helpers.
 * For architectures that do not keep memblock data after init, this
 * pointer will be reset to NULL at memblock_discard().
 */
static __refdata struct memblock_type *memblock_memory = &memblock.memory;

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])

#define memblock_dbg(fmt, ...)						\
	do {								\
		if (memblock_debug)					\
			pr_info(fmt, ##__VA_ARGS__);			\
	} while (0)

static int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}

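/*
 * For example (hypothetical values): with base = PHYS_ADDR_MAX - 0x1000,
 * memblock_cap_size() clamps a requested *size of 0x4000 down to 0x1000,
 * so that base + *size can never wrap around the physical address space.
 */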
/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	memblock_cap_size(base, &size);

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}

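/*
 * The checks above use half-open interval semantics: regions that merely
 * touch do not overlap. E.g. [0x1000, 0x2000) and [0x2000, 0x3000) are
 * disjoint, while [0x1000, 0x2000) and [0x1fff, 0x3000) overlap.
 */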
/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_KASAN)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);

	if (memblock_bottom_up())
		return __memblock_find_range_bottom_up(start, end, size, align,
						       nid, flags);
	else
		return __memblock_find_range_top_down(start, end, size, align,
						      nid, flags);
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					    NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		__memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		__memblock_free_late(addr, size);
	}

	memblock_memory = NULL;
}
#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE
	 * so that we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]\n",
			type->name, type->max * 2, &addr, &new_end);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free_ptr(old_array, old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock.  Otherwise, we
	 * needn't do it
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}

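/*
 * For instance, two adjacent regions [0x1000, 0x2000) and [0x2000, 0x3000)
 * with the same node id and flags collapse into a single [0x1000, 0x3000)
 * entry, while a flag mismatch (say, MEMBLOCK_MIRROR set on only one of
 * them) keeps them separate.
 */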
/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 * @flags:	flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_NUMA
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

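/*
 * Worked example (hypothetical addresses): given existing regions
 * [0x1000, 0x2000) and [0x3000, 0x4000), adding [0x1800, 0x3800) only
 * inserts the uncovered gap [0x2000, 0x3000); the rest of the new range
 * is already present. The merge pass then collapses the three adjacent
 * regions into a single [0x1000, 0x4000) entry.
 */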
/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See the memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				       int nid)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] nid=%d %pS\n", __func__,
		     &base, &end, nid, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See the memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}

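/*
 * For example (hypothetical): isolating [0x1800, 0x2800) within a single
 * region [0x1000, 0x3000) splits it into [0x1000, 0x1800),
 * [0x1800, 0x2800) and [0x2800, 0x3000); *start_rgn and *end_rgn then
 * delimit the middle piece, ready for removal or a flag update.
 */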
static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}

/**
 * memblock_free_ptr - free boot memory allocation
 * @ptr: starting address of the boot memory allocation
 * @size: size of the boot memory block in bytes
 *
 * Free a boot memory block previously allocated by the memblock_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init_memblock memblock_free_ptr(void *ptr, size_t size)
{
	if (ptr)
		memblock_free(__pa(ptr), size);
}

/**
 * memblock_free - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free a boot memory block previously allocated by the memblock_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
}
#endif

/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag
 *
 * Return: 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++) {
		struct memblock_region *r = &type->regions[i];

		if (set)
			r->flags |= flag;
		else
			r->flags &= ~flag;
	}

	memblock_merge_regions(type);
	return 0;
}

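/*
 * The helpers below are thin wrappers around memblock_setclr_flag().
 * A hypothetical caller marking a DIMM that may be unplugged later:
 *
 *	memblock_mark_hotplug(0x100000000ULL, SZ_4G);
 */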
/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the
 * direct mapping of the physical memory. These regions will still be
 * covered by the memory map. The struct page representing NOMAP memory
 * frames in the memory map will be PageReserved()
 *
 * Note: if the memory being marked %MEMBLOCK_NOMAP was allocated from
 * memblock, the caller must inform kmemleak to ignore that memory
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

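/*
 * A hypothetical use: keeping firmware-owned frames out of the linear map
 * while still covering them with struct pages (fw_base/fw_size would come
 * from a firmware table in this sketch):
 *
 *	memblock_mark_nomap(fw_base, fw_size);
 */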
/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}

static bool should_skip_region(struct memblock_type *type,
			       struct memblock_region *m,
			       int nid, int flags)
{
	int m_nid = memblock_get_region_node(m);

	/* we never skip regions when iterating memblock.reserved or physmem */
	if (type != memblock_memory)
		return false;

	/* only memory regions are associated with nodes, check it */
	if (nid != NUMA_NO_NODE && nid != m_nid)
		return true;

	/* skip hotpluggable memory regions if needed */
	if (movable_node_is_enabled() && memblock_is_hotpluggable(m) &&
	    !(flags & MEMBLOCK_HOTPLUG))
		return true;

	/* if we want mirror memory skip non-mirror memory regions */
	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
		return true;

	/* skip nomap memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
		return true;

	return false;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

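/*
 * __next_mem_range() is normally used through its iterator wrappers.
 * A sketch that logs every free range on node 0 (illustration only):
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, 0, MEMBLOCK_NONE, &start, &end, NULL)
 *		pr_info("free: [%pa-%pa)\n", &start, &end);
 */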
/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  enum memblock_flags flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;
	int r_nid;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];
		r_nid = memblock_get_region_node(r);

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r_nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r_nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
#ifdef CONFIG_NUMA
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
#endif
	return 0;
}

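/*
 * A hypothetical late NUMA assignment for memory that was added earlier
 * with plain memblock_add() (two 2G nodes in this sketch):
 *
 *	memblock_set_node(0, SZ_2G, &memblock.memory, 0);
 *	memblock_set_node(SZ_2G, SZ_2G, &memblock.memory, 1);
 */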
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/**
 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
 *
 * @idx: pointer to u64 loop variable
 * @zone: zone in which all of the memory blocks reside
 * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
 *
 * This function is meant to be a zone/pfn specific wrapper for the
 * for_each_mem_range type iterators. Specifically they are used in the
 * deferred memory init routines and as such we were duplicating much of
 * this logic throughout the code. So instead of having it in multiple
 * locations it seemed like it would make more sense to centralize this to
 * one new iterator that does everything they need.
 */
void __init_memblock
__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
			     unsigned long *out_spfn, unsigned long *out_epfn)
{
	int zone_nid = zone_to_nid(zone);
	phys_addr_t spa, epa;
	int nid;

	__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
			 &memblock.memory, &memblock.reserved,
			 &spa, &epa, &nid);

	while (*idx != U64_MAX) {
		unsigned long epfn = PFN_DOWN(epa);
		unsigned long spfn = PFN_UP(spa);

		/*
		 * Verify the end is at least past the start of the zone and
		 * that we have at least one PFN to initialize.
		 */
		if (zone->zone_start_pfn < epfn && spfn < epfn) {
			/* if we went too far just stop searching */
			if (zone_end_pfn(zone) <= spfn) {
				*idx = U64_MAX;
				break;
			}

			if (out_spfn)
				*out_spfn = max(zone->zone_start_pfn, spfn);
			if (out_epfn)
				*out_epfn = min(zone_end_pfn(zone), epfn);

			return;
		}

		__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
				 &memblock.memory, &memblock.reserved,
				 &spa, &epa, &nid);
	}

	/* signal end of iteration */
	if (out_spfn)
		*out_spfn = ULONG_MAX;
	if (out_epfn)
		*out_epfn = 0;
}

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * memblock_alloc_range_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (phys address)
 * @end: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
 *
 * If the specified node can not hold the requested memory and @exact_nid
 * is false, the allocation falls back to any node in the system.
 *
 * For systems with memory mirroring, the allocation is attempted first
 * from the regions with mirroring enabled and then retried from any
 * memory region.
 * In addition, the function sets min_count to 0 using kmemleak_alloc_phys()
 * for the allocated boot memory block, so that it is never reported as a leak.
1340 *
1341 * Return:
1342 * Physical address of allocated memory block on success, %0 on failure.
1343 */
1344phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
1345                                        phys_addr_t align, phys_addr_t start,
1346                                        phys_addr_t end, int nid,
1347                                        bool exact_nid)
1348{
1349        enum memblock_flags flags = choose_memblock_flags();
1350        phys_addr_t found;
1351
1352        if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
1353                nid = NUMA_NO_NODE;
1354
1355        if (!align) {
1356                /* Can't use WARNs this early in boot on powerpc */
1357                dump_stack();
1358                align = SMP_CACHE_BYTES;
1359        }
1360
1361again:
1362        found = memblock_find_in_range_node(size, align, start, end, nid,
1363                                            flags);
1364        if (found && !memblock_reserve(found, size))
1365                goto done;
1366
1367        if (nid != NUMA_NO_NODE && !exact_nid) {
1368                found = memblock_find_in_range_node(size, align, start,
1369                                                    end, NUMA_NO_NODE,
1370                                                    flags);
1371                if (found && !memblock_reserve(found, size))
1372                        goto done;
1373        }
1374
1375        if (flags & MEMBLOCK_MIRROR) {
1376                flags &= ~MEMBLOCK_MIRROR;
1377                pr_warn("Could not allocate %pap bytes of mirrored memory\n",
1378                        &size);
1379                goto again;
1380        }
1381
1382        return 0;
1383
1384done:
1385        /* Skip kmemleak for kasan_init() due to high volume. */
1386        if (end != MEMBLOCK_ALLOC_KASAN)
1387                /*
1388                 * The min_count is set to 0 so that memblock allocated
1389                 * blocks are never reported as leaks. This is because many
1390                 * of these blocks are only referred via the physical
1391                 * address which is not looked up by kmemleak.
1392                 */
1393                kmemleak_alloc_phys(found, size, 0, 0);
1394
1395        return found;
1396}
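
/*
 * Example (sketch only): a caller that prefers node @nid but can live
 * with memory from any node passes exact_nid == false and relies on
 * the fallback order above (requested node -> any node -> retry with
 * MEMBLOCK_MIRROR cleared). SZ_1M is assumed from <linux/sizes.h>:
 *
 *	phys_addr_t pa = memblock_alloc_range_nid(SZ_1M, SMP_CACHE_BYTES,
 *						  0, MEMBLOCK_ALLOC_ACCESSIBLE,
 *						  nid, false);
 */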
1397
1398/**
1399 * memblock_phys_alloc_range - allocate a memory block inside specified range
1400 * @size: size of memory block to be allocated in bytes
1401 * @align: alignment of the region and block's size
1402 * @start: the lower bound of the memory region to allocate (physical address)
1403 * @end: the upper bound of the memory region to allocate (physical address)
1404 *
1405 * Allocate @size bytes in the range between @start and @end.
1406 *
1407 * Return: physical address of the allocated memory block on success,
1408 * %0 on failure.
1409 */
1410phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
1411                                             phys_addr_t align,
1412                                             phys_addr_t start,
1413                                             phys_addr_t end)
1414{
1415        memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
1416                     __func__, (u64)size, (u64)align, &start, &end,
1417                     (void *)_RET_IP_);
1418        return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
1419                                        false);
1420}
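
/*
 * Example (sketch only): restricting an early physical allocation to
 * the low 4GiB, e.g. for data that must stay 32-bit addressable;
 * SZ_64K and SZ_4G are assumed from <linux/sizes.h>:
 *
 *	phys_addr_t table = memblock_phys_alloc_range(SZ_64K, SZ_64K,
 *						      0, SZ_4G);
 */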
1421
1422/**
1423 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
1424 * @size: size of memory block to be allocated in bytes
1425 * @align: alignment of the region and block's size
1426 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1427 *
1428 * Allocates a memory block from the specified NUMA node. If the node
1429 * has no available memory, it attempts to allocate from any node in the
1430 * system.
1431 *
1432 * Return: physical address of the allocated memory block on success,
1433 * %0 on failure.
1434 */
1435phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
1436{
1437        return memblock_alloc_range_nid(size, align, 0,
1438                                        MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
1439}
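
/*
 * Example (sketch only): a page-sized per-node allocation that may
 * silently come from another node if @nid has no free memory:
 *
 *	phys_addr_t pa = memblock_phys_alloc_try_nid(PAGE_SIZE,
 *						     PAGE_SIZE, nid);
 */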
1440
1441/**
1442 * memblock_alloc_internal - allocate boot memory block
1443 * @size: size of memory block to be allocated in bytes
1444 * @align: alignment of the region and block's size
1445 * @min_addr: the lower bound of the memory region to allocate (phys address)
1446 * @max_addr: the upper bound of the memory region to allocate (phys address)
1447 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1448 * @exact_nid: control the allocation fall back to other nodes
1449 *
1450 * Allocates memory block using memblock_alloc_range_nid() and
1451 * converts the returned physical address to virtual.
1452 *
1453 * The @min_addr limit is dropped if it cannot be satisfied and the allocation
1454 * will fall back to memory below @min_addr. Other constraints, such
1455 * as the node and mirrored memory, are handled again in
1456 * memblock_alloc_range_nid().
1457 *
1458 * Return:
1459 * Virtual address of allocated memory block on success, NULL on failure.
1460 */
1461static void * __init memblock_alloc_internal(
1462                                phys_addr_t size, phys_addr_t align,
1463                                phys_addr_t min_addr, phys_addr_t max_addr,
1464                                int nid, bool exact_nid)
1465{
1466        phys_addr_t alloc;
1467
1468        /*
1469         * Detect any accidental use of these APIs after slab is ready, as at
1470         * this moment memblock may be deinitialized already and its
1471         * internal data may be destroyed (after execution of memblock_free_all)
1472         */
1473        if (WARN_ON_ONCE(slab_is_available()))
1474                return kzalloc_node(size, GFP_NOWAIT, nid);
1475
1476        if (max_addr > memblock.current_limit)
1477                max_addr = memblock.current_limit;
1478
1479        alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
1480                                        exact_nid);
1481
1482        /* retry allocation without lower limit */
1483        if (!alloc && min_addr)
1484                alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
1485                                                exact_nid);
1486
1487        if (!alloc)
1488                return NULL;
1489
1490        return phys_to_virt(alloc);
1491}
1492
1493/**
1494 * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
1495 * without zeroing memory
1496 * @size: size of memory block to be allocated in bytes
1497 * @align: alignment of the region and block's size
1498 * @min_addr: the lower bound of the memory region from where the allocation
1499 *        is preferred (phys address)
1500 * @max_addr: the upper bound of the memory region from where the allocation
1501 *            is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1502 *            allocate only from memory limited by memblock.current_limit value
1503 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1504 *
1505 * Public function, provides additional debug information (including caller
1506 * info), if enabled. Does not zero allocated memory.
1507 *
1508 * Return:
1509 * Virtual address of allocated memory block on success, NULL on failure.
1510 */
1511void * __init memblock_alloc_exact_nid_raw(
1512                        phys_addr_t size, phys_addr_t align,
1513                        phys_addr_t min_addr, phys_addr_t max_addr,
1514                        int nid)
1515{
1516        memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1517                     __func__, (u64)size, (u64)align, nid, &min_addr,
1518                     &max_addr, (void *)_RET_IP_);
1519
1520        return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
1521                                       true);
1522}
1523
1524/**
1525 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
1526 * memory and without panicking
1527 * @size: size of memory block to be allocated in bytes
1528 * @align: alignment of the region and block's size
1529 * @min_addr: the lower bound of the memory region from where the allocation
1530 *        is preferred (phys address)
1531 * @max_addr: the upper bound of the memory region from where the allocation
1532 *            is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1533 *            allocate only from memory limited by memblock.current_limit value
1534 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1535 *
1536 * Public function, provides additional debug information (including caller
1537 * info), if enabled. Does not zero allocated memory, does not panic if request
1538 * cannot be satisfied.
1539 *
1540 * Return:
1541 * Virtual address of allocated memory block on success, NULL on failure.
1542 */
1543void * __init memblock_alloc_try_nid_raw(
1544                        phys_addr_t size, phys_addr_t align,
1545                        phys_addr_t min_addr, phys_addr_t max_addr,
1546                        int nid)
1547{
1548        memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1549                     __func__, (u64)size, (u64)align, nid, &min_addr,
1550                     &max_addr, (void *)_RET_IP_);
1551
1552        return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
1553                                       false);
1554}
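
/*
 * Usage note: the *_raw variants return memory with undefined
 * contents; a caller that needs zeroed memory must clear it itself,
 * e.g. (sketch only):
 *
 *	ptr = memblock_alloc_try_nid_raw(size, SMP_CACHE_BYTES, 0,
 *					 MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 *	if (ptr)
 *		memset(ptr, 0, size);
 */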
1555
1556/**
1557 * memblock_alloc_try_nid - allocate boot memory block
1558 * @size: size of memory block to be allocated in bytes
1559 * @align: alignment of the region and block's size
1560 * @min_addr: the lower bound of the memory region from where the allocation
1561 *        is preferred (phys address)
1562 * @max_addr: the upper bound of the memory region from where the allocation
1563 *            is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1564 *            allocate only from memory limited by memblock.current_limit value
1565 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1566 *
1567 * Public function, provides additional debug information (including caller
1568 * info), if enabled. This function zeroes the allocated memory.
1569 *
1570 * Return:
1571 * Virtual address of allocated memory block on success, NULL on failure.
1572 */
1573void * __init memblock_alloc_try_nid(
1574                        phys_addr_t size, phys_addr_t align,
1575                        phys_addr_t min_addr, phys_addr_t max_addr,
1576                        int nid)
1577{
1578        void *ptr;
1579
1580        memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1581                     __func__, (u64)size, (u64)align, nid, &min_addr,
1582                     &max_addr, (void *)_RET_IP_);
1583        ptr = memblock_alloc_internal(size, align,
1584                                           min_addr, max_addr, nid, false);
1585        if (ptr)
1586                memset(ptr, 0, size);
1587
1588        return ptr;
1589}
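
/*
 * Usage note: memblock_alloc() in <linux/memblock.h> wraps this
 * function with NUMA_NO_NODE and MEMBLOCK_ALLOC_ACCESSIBLE, so the
 * typical "zeroed boot memory from anywhere" case is simply:
 *
 *	void *buf = memblock_alloc(size, SMP_CACHE_BYTES);
 */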
1590
1591/**
1592 * __memblock_free_late - free pages directly to buddy allocator
1593 * @base: phys starting address of the boot memory block
1594 * @size: size of the boot memory block in bytes
1595 *
1596 * This is only useful when the memblock allocator has already been torn
1597 * down, but we are still initializing the system.  Pages are released directly
1598 * to the buddy allocator.
1599 */
1600void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
1601{
1602        phys_addr_t cursor, end;
1603
1604        end = base + size - 1;
1605        memblock_dbg("%s: [%pa-%pa] %pS\n",
1606                     __func__, &base, &end, (void *)_RET_IP_);
1607        kmemleak_free_part_phys(base, size);
1608        cursor = PFN_UP(base);
1609        end = PFN_DOWN(base + size);
1610
1611        for (; cursor < end; cursor++) {
1612                memblock_free_pages(pfn_to_page(cursor), cursor, 0);
1613                totalram_pages_inc();
1614        }
1615}
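
/*
 * Example (sketch only): returning an early reservation to the page
 * allocator after memblock has been torn down, for a hypothetical
 * base/size pair; the PFN_UP/PFN_DOWN rounding above means partial
 * pages at either end of the range are not freed:
 *
 *	__memblock_free_late(base, size);
 */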
1616
1617/*
1618 * Remaining API functions
1619 */
1620
1621phys_addr_t __init_memblock memblock_phys_mem_size(void)
1622{
1623        return memblock.memory.total_size;
1624}
1625
1626phys_addr_t __init_memblock memblock_reserved_size(void)
1627{
1628        return memblock.reserved.total_size;
1629}
1630
1631/* lowest address */
1632phys_addr_t __init_memblock memblock_start_of_DRAM(void)
1633{
1634        return memblock.memory.regions[0].base;
1635}
1636
1637phys_addr_t __init_memblock memblock_end_of_DRAM(void)
1638{
1639        int idx = memblock.memory.cnt - 1;
1640
1641        return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
1642}
1643
1644static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
1645{
1646        phys_addr_t max_addr = PHYS_ADDR_MAX;
1647        struct memblock_region *r;
1648
1649        /*
1650         * Translate the memory @limit size into the max address within one of
1651         * the memory memblock regions. If @limit exceeds the total size of
1652         * those regions, max_addr keeps its original value, PHYS_ADDR_MAX.
1653         */
1654        for_each_mem_region(r) {
1655                if (limit <= r->size) {
1656                        max_addr = r->base + limit;
1657                        break;
1658                }
1659                limit -= r->size;
1660        }
1661
1662        return max_addr;
1663}
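
/*
 * Worked example: with memory regions [0x0, 0x80000000) and
 * [0x100000000, 0x180000000) and limit = 3G, the first region
 * consumes 2G of the limit, so the loop stops in the second region
 * with max_addr = 0x100000000 + 1G = 0x140000000.
 */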
1664
1665void __init memblock_enforce_memory_limit(phys_addr_t limit)
1666{
1667        phys_addr_t max_addr;
1668
1669        if (!limit)
1670                return;
1671
1672        max_addr = __find_max_addr(limit);
1673
1674        /* @limit exceeds the total size of the memory, do nothing */
1675        if (max_addr == PHYS_ADDR_MAX)
1676                return;
1677
1678        /* truncate both memory and reserved regions */
1679        memblock_remove_range(&memblock.memory, max_addr,
1680                              PHYS_ADDR_MAX);
1681        memblock_remove_range(&memblock.reserved, max_addr,
1682                              PHYS_ADDR_MAX);
1683}
1684
1685void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
1686{
1687        int start_rgn, end_rgn;
1688        int i, ret;
1689
1690        if (!size)
1691                return;
1692
1693        if (!memblock_memory->total_size) {
1694                pr_warn("%s: No memory registered yet\n", __func__);
1695                return;
1696        }
1697
1698        ret = memblock_isolate_range(&memblock.memory, base, size,
1699                                                &start_rgn, &end_rgn);
1700        if (ret)
1701                return;
1702
1703        /* remove all the MAP regions */
1704        for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
1705                if (!memblock_is_nomap(&memblock.memory.regions[i]))
1706                        memblock_remove_region(&memblock.memory, i);
1707
1708        for (i = start_rgn - 1; i >= 0; i--)
1709                if (!memblock_is_nomap(&memblock.memory.regions[i]))
1710                        memblock_remove_region(&memblock.memory, i);
1711
1712        /* truncate the reserved regions */
1713        memblock_remove_range(&memblock.reserved, 0, base);
1714        memblock_remove_range(&memblock.reserved,
1715                        base + size, PHYS_ADDR_MAX);
1716}
1717
1718void __init memblock_mem_limit_remove_map(phys_addr_t limit)
1719{
1720        phys_addr_t max_addr;
1721
1722        if (!limit)
1723                return;
1724
1725        max_addr = __find_max_addr(limit);
1726
1727        /* @limit exceeds the total size of the memory, do nothing */
1728        if (max_addr == PHYS_ADDR_MAX)
1729                return;
1730
1731        memblock_cap_memory_range(0, max_addr);
1732}
1733
1734static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
1735{
1736        unsigned int left = 0, right = type->cnt;
1737
1738        do {
1739                unsigned int mid = (right + left) / 2;
1740
1741                if (addr < type->regions[mid].base)
1742                        right = mid;
1743                else if (addr >= (type->regions[mid].base +
1744                                  type->regions[mid].size))
1745                        left = mid + 1;
1746                else
1747                        return mid;
1748        } while (left < right);
1749        return -1;
1750}
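
/*
 * The regions of a memblock_type are sorted by base and do not
 * overlap, which is what makes the binary search above valid. For
 * example, with regions [0x1000, 0x2000) and [0x8000, 0x9000),
 * memblock_search() returns 0 for addr == 0x1800 and -1 for
 * addr == 0x3000.
 */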
1751
1752bool __init_memblock memblock_is_reserved(phys_addr_t addr)
1753{
1754        return memblock_search(&memblock.reserved, addr) != -1;
1755}
1756
1757bool __init_memblock memblock_is_memory(phys_addr_t addr)
1758{
1759        return memblock_search(&memblock.memory, addr) != -1;
1760}
1761
1762bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
1763{
1764        int i = memblock_search(&memblock.memory, addr);
1765
1766        if (i == -1)
1767                return false;
1768        return !memblock_is_nomap(&memblock.memory.regions[i]);
1769}
1770
1771int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
1772                         unsigned long *start_pfn, unsigned long *end_pfn)
1773{
1774        struct memblock_type *type = &memblock.memory;
1775        int mid = memblock_search(type, PFN_PHYS(pfn));
1776
1777        if (mid == -1)
1778                return -1;
1779
1780        *start_pfn = PFN_DOWN(type->regions[mid].base);
1781        *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);
1782
1783        return memblock_get_region_node(&type->regions[mid]);
1784}
1785
1786/**
1787 * memblock_is_region_memory - check if a region is a subset of memory
1788 * @base: base of region to check
1789 * @size: size of region to check
1790 *
1791 * Check if the region [@base, @base + @size) is a subset of a memory block.
1792 *
1793 * Return:
1794 * true if the region is a subset of a memory block, false otherwise.
1795 */
1796bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
1797{
1798        int idx = memblock_search(&memblock.memory, base);
1799        phys_addr_t end = base + memblock_cap_size(base, &size);
1800
1801        if (idx == -1)
1802                return false;
1803        return (memblock.memory.regions[idx].base +
1804                 memblock.memory.regions[idx].size) >= end;
1805}
1806
1807/**
1808 * memblock_is_region_reserved - check if a region intersects reserved memory
1809 * @base: base of region to check
1810 * @size: size of region to check
1811 *
1812 * Check if the region [@base, @base + @size) intersects a reserved
1813 * memory block.
1814 *
1815 * Return:
1816 * True if they intersect, false if not.
1817 */
1818bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
1819{
1820        return memblock_overlaps_region(&memblock.reserved, base, size);
1821}
1822
1823void __init_memblock memblock_trim_memory(phys_addr_t align)
1824{
1825        phys_addr_t start, end, orig_start, orig_end;
1826        struct memblock_region *r;
1827
1828        for_each_mem_region(r) {
1829                orig_start = r->base;
1830                orig_end = r->base + r->size;
1831                start = round_up(orig_start, align);
1832                end = round_down(orig_end, align);
1833
1834                if (start == orig_start && end == orig_end)
1835                        continue;
1836
1837                if (start < end) {
1838                        r->base = start;
1839                        r->size = end - start;
1840                } else {
1841                        memblock_remove_region(&memblock.memory,
1842                                               r - memblock.memory.regions);
1843                        r--;
1844                }
1845        }
1846}
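
/*
 * Worked example: trimming to 8KiB alignment turns [0x1000, 0x9000)
 * into [0x2000, 0x8000); a region smaller than one aligned chunk,
 * e.g. [0x1000, 0x2000), rounds to an empty range and is removed.
 */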
1847
1848void __init_memblock memblock_set_current_limit(phys_addr_t limit)
1849{
1850        memblock.current_limit = limit;
1851}
1852
1853phys_addr_t __init_memblock memblock_get_current_limit(void)
1854{
1855        return memblock.current_limit;
1856}
1857
1858static void __init_memblock memblock_dump(struct memblock_type *type)
1859{
1860        phys_addr_t base, end, size;
1861        enum memblock_flags flags;
1862        int idx;
1863        struct memblock_region *rgn;
1864
1865        pr_info(" %s.cnt  = 0x%lx\n", type->name, type->cnt);
1866
1867        for_each_memblock_type(idx, type, rgn) {
1868                char nid_buf[32] = "";
1869
1870                base = rgn->base;
1871                size = rgn->size;
1872                end = base + size - 1;
1873                flags = rgn->flags;
1874#ifdef CONFIG_NUMA
1875                if (memblock_get_region_node(rgn) != MAX_NUMNODES)
1876                        snprintf(nid_buf, sizeof(nid_buf), " on node %d",
1877                                 memblock_get_region_node(rgn));
1878#endif
1879                pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
1880                        type->name, idx, &base, &end, &size, nid_buf, flags);
1881        }
1882}
1883
1884static void __init_memblock __memblock_dump_all(void)
1885{
1886        pr_info("MEMBLOCK configuration:\n");
1887        pr_info(" memory size = %pa reserved size = %pa\n",
1888                &memblock.memory.total_size,
1889                &memblock.reserved.total_size);
1890
1891        memblock_dump(&memblock.memory);
1892        memblock_dump(&memblock.reserved);
1893#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
1894        memblock_dump(&physmem);
1895#endif
1896}
1897
1898void __init_memblock memblock_dump_all(void)
1899{
1900        if (memblock_debug)
1901                __memblock_dump_all();
1902}
1903
1904void __init memblock_allow_resize(void)
1905{
1906        memblock_can_resize = 1;
1907}
1908
1909static int __init early_memblock(char *p)
1910{
1911        if (p && strstr(p, "debug"))
1912                memblock_debug = 1;
1913        return 0;
1914}
1915early_param("memblock", early_memblock);
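
/*
 * Usage note: booting with "memblock=debug" on the kernel command
 * line sets memblock_debug, enabling the memblock_dbg() traces in the
 * allocation paths above and the dump from memblock_dump_all().
 */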
1916
1917static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
1918{
1919        struct page *start_pg, *end_pg;
1920        phys_addr_t pg, pgend;
1921
1922        /*
1923         * Convert start_pfn/end_pfn to a struct page pointer.
1924         */
1925        start_pg = pfn_to_page(start_pfn - 1) + 1;
1926        end_pg = pfn_to_page(end_pfn - 1) + 1;
1927
1928        /*
1929         * Convert to physical addresses, and round start upwards and end
1930         * downwards.
1931         */
1932        pg = PAGE_ALIGN(__pa(start_pg));
1933        pgend = __pa(end_pg) & PAGE_MASK;
1934
1935        /*
1936         * If there are free pages between these, free the section of the
1937         * memmap array.
1938         */
1939        if (pg < pgend)
1940                memblock_free(pg, pgend - pg);
1941}
1942
1943/*
1944 * The mem_map array can get very big.  Free the unused area of the memory map.
1945 */
1946static void __init free_unused_memmap(void)
1947{
1948        unsigned long start, end, prev_end = 0;
1949        int i;
1950
1951        if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
1952            IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
1953                return;
1954
1955        /*
1956         * This relies on each bank being in address order.
1957         * The banks are sorted previously in bootmem_init().
1958         */
1959        for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
1960#ifdef CONFIG_SPARSEMEM
1961                /*
1962                 * Take care not to free memmap entries that don't exist
1963                 * due to SPARSEMEM sections which aren't present.
1964                 */
1965                start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
1966#endif
1967                /*
1968                 * Align down here since many operations in the VM subsystem
1969                 * presume that there are no holes in the memory map inside
1970                 * a pageblock.
1971                 */
1972                start = round_down(start, pageblock_nr_pages);
1973
1974                /*
1975                 * If we had a previous bank, and there is a space
1976                 * between the current bank and the previous, free it.
1977                 */
1978                if (prev_end && prev_end < start)
1979                        free_memmap(prev_end, start);
1980
1981                /*
1982                 * Align up here since many operations in the VM subsystem
1983                 * presume that there are no holes in the memory map inside
1984                 * a pageblock.
1985                 */
1986                prev_end = ALIGN(end, pageblock_nr_pages);
1987        }
1988
1989#ifdef CONFIG_SPARSEMEM
1990        if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
1991                prev_end = ALIGN(end, pageblock_nr_pages);
1992                free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
1993        }
1994#endif
1995}
1996
1997static void __init __free_pages_memory(unsigned long start, unsigned long end)
1998{
1999        int order;
2000
2001        while (start < end) {
2002                order = min(MAX_ORDER - 1UL, __ffs(start));
2003
2004                while (start + (1UL << order) > end)
2005                        order--;
2006
2007                memblock_free_pages(pfn_to_page(start), start, order);
2008
2009                start += (1UL << order);
2010        }
2011}
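
/*
 * Worked example: for start = 3 and end = 16 (and a typical
 * MAX_ORDER), the loop frees pfn 3 at order 0 (__ffs(3) == 0),
 * pfns 4-7 at order 2 and pfns 8-15 at order 3, so every freed
 * block stays naturally aligned to its order.
 */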
2012
2013static unsigned long __init __free_memory_core(phys_addr_t start,
2014                                 phys_addr_t end)
2015{
2016        unsigned long start_pfn = PFN_UP(start);
2017        unsigned long end_pfn = min_t(unsigned long,
2018                                      PFN_DOWN(end), max_low_pfn);
2019
2020        if (start_pfn >= end_pfn)
2021                return 0;
2022
2023        __free_pages_memory(start_pfn, end_pfn);
2024
2025        return end_pfn - start_pfn;
2026}
2027
2028static void __init memmap_init_reserved_pages(void)
2029{
2030        struct memblock_region *region;
2031        phys_addr_t start, end;
2032        u64 i;
2033
2034        /* initialize struct pages for the reserved regions */
2035        for_each_reserved_mem_range(i, &start, &end)
2036                reserve_bootmem_region(start, end);
2037
2038        /* and also treat struct pages for the NOMAP regions as PageReserved */
2039        for_each_mem_region(region) {
2040                if (memblock_is_nomap(region)) {
2041                        start = region->base;
2042                        end = start + region->size;
2043                        reserve_bootmem_region(start, end);
2044                }
2045        }
2046}
2047
2048static unsigned long __init free_low_memory_core_early(void)
2049{
2050        unsigned long count = 0;
2051        phys_addr_t start, end;
2052        u64 i;
2053
2054        memblock_clear_hotplug(0, -1);
2055
2056        memmap_init_reserved_pages();
2057
2058        /*
2059         * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
2060         * because in some cases, such as when Node 0 has no RAM installed,
2061         * low memory will be on Node 1.
2062         */
2063        for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
2064                                NULL)
2065                count += __free_memory_core(start, end);
2066
2067        return count;
2068}
2069
2070static int reset_managed_pages_done __initdata;
2071
2072void reset_node_managed_pages(pg_data_t *pgdat)
2073{
2074        struct zone *z;
2075
2076        for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
2077                atomic_long_set(&z->managed_pages, 0);
2078}
2079
2080void __init reset_all_zones_managed_pages(void)
2081{
2082        struct pglist_data *pgdat;
2083
2084        if (reset_managed_pages_done)
2085                return;
2086
2087        for_each_online_pgdat(pgdat)
2088                reset_node_managed_pages(pgdat);
2089
2090        reset_managed_pages_done = 1;
2091}
2092
2093/**
2094 * memblock_free_all - release free pages to the buddy allocator
2095 */
2096void __init memblock_free_all(void)
2097{
2098        unsigned long pages;
2099
2100        free_unused_memmap();
2101        reset_all_zones_managed_pages();
2102
2103        pages = free_low_memory_core_early();
2104        totalram_pages_add(pages);
2105}
2106
2107#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)
2108
2109static int memblock_debug_show(struct seq_file *m, void *private)
2110{
2111        struct memblock_type *type = m->private;
2112        struct memblock_region *reg;
2113        int i;
2114        phys_addr_t end;
2115
2116        for (i = 0; i < type->cnt; i++) {
2117                reg = &type->regions[i];
2118                end = reg->base + reg->size - 1;
2119
2120                seq_printf(m, "%4d: ", i);
2121                seq_printf(m, "%pa..%pa\n", &reg->base, &end);
2122        }
2123        return 0;
2124}
2125DEFINE_SHOW_ATTRIBUTE(memblock_debug);
2126
2127static int __init memblock_init_debugfs(void)
2128{
2129        struct dentry *root = debugfs_create_dir("memblock", NULL);
2130
2131        debugfs_create_file("memory", 0444, root,
2132                            &memblock.memory, &memblock_debug_fops);
2133        debugfs_create_file("reserved", 0444, root,
2134                            &memblock.reserved, &memblock_debug_fops);
2135#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
2136        debugfs_create_file("physmem", 0444, root, &physmem,
2137                            &memblock_debug_fops);
2138#endif
2139
2140        return 0;
2141}
2142__initcall(memblock_init_debugfs);
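
/*
 * Usage note: with CONFIG_DEBUG_FS and CONFIG_ARCH_KEEP_MEMBLOCK
 * enabled, the region tables can be inspected at run time, e.g.
 * (illustrative output):
 *
 *	# cat /sys/kernel/debug/memblock/memory
 *	   0: 0x0000000080000000..0x00000000ffffffff
 */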
2143
2144#endif /* CONFIG_DEBUG_FS */
2145