linux/mm/memblock.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

#define INIT_MEMBLOCK_REGIONS			128
#define INIT_PHYSMEM_REGIONS			4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS		INIT_MEMBLOCK_REGIONS
#endif
/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   the ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmap`` - describes the actual physical memory regardless of
 *   the possible restrictions; the ``physmap`` type is only available
 *   on some architectures.
 *
 * Each region is represented by :c:type:`struct memblock_region` that
 * defines the region extents, its attributes and NUMA node id on NUMA
 * systems. Every memory type is described by the :c:type:`struct
 * memblock_type` which contains an array of memory regions along with
 * the allocator metadata. The memory types are nicely wrapped with
 * :c:type:`struct memblock`. This structure is statically initialized
 * at build time. The region arrays for the "memory" and "reserved"
 * types are initially sized to %INIT_MEMBLOCK_REGIONS and for the
 * "physmap" type to %INIT_PHYSMEM_REGIONS.
 * The :c:func:`memblock_allow_resize` function enables automatic
 * resizing of the region arrays during addition of new regions. This
 * feature should be used with care so that memory allocated for the
 * region array will not overlap with areas that should be reserved,
 * for example initrd.
 *
 * The early architecture setup should tell memblock what the physical
 * memory layout is by using :c:func:`memblock_add` or
 * :c:func:`memblock_add_node` functions. The first function does not
 * assign the region to a NUMA node and it is appropriate for UMA
 * systems. Yet, it is possible to use it on NUMA systems as well and
 * assign the region to a NUMA node later in the setup process using
 * :c:func:`memblock_set_node`. The :c:func:`memblock_add_node`
 * function performs such an assignment directly.
 *
 * Once memblock is set up the memory can be allocated using one of the
 * API variants:
 *
 * * :c:func:`memblock_phys_alloc*` - these functions return the
 *   **physical** address of the allocated memory
 * * :c:func:`memblock_alloc*` - these functions return the **virtual**
 *   address of the allocated memory.
 *
 * Note that both API variants use implicit assumptions about allowed
 * memory ranges and the fallback methods. Consult the documentation
 * of :c:func:`memblock_alloc_internal` and
 * :c:func:`memblock_alloc_range_nid` functions for a more elaborate
 * description.
 *
 * As the system boot progresses, the architecture specific
 * :c:func:`mem_init` function frees all the memory to the buddy page
 * allocator.
 *
 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
 * memblock data structures will be discarded after the system
 * initialization completes.
 */
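
/*
 * Illustrative sketch (not part of the kernel sources): how an early
 * architecture setup might use the API described above. The function
 * name, addresses and sizes are hypothetical.
 */
static void __init __maybe_unused memblock_overview_example(void)
{
	void *buf;

	/* describe RAM: 1G at 2G, on node 0 */
	memblock_add_node(0x80000000, 0x40000000, 0);
	/* keep the firmware table area out of the allocator */
	memblock_reserve(0x80000000, 0x100000);
	/* all reservations are known now, the region arrays may grow */
	memblock_allow_resize();
	/* early allocation: returns a zeroed buffer, virtual address */
	buf = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES);
	if (!buf)
		panic("%s: allocation failed\n", __func__);
}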

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name		= "reserved",

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
	.physmem.name		= "physmem",
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, @start should be greater
 * than the end of the kernel image. Otherwise, it will be trimmed to
 * that end. The reason is that we want the bottom-up allocation to land
 * just above the kernel image, so it is highly likely that the
 * allocated memory and the kernel will reside on the same node.
 *
 * If the bottom-up allocation fails, the allocation is retried
 * top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_KASAN)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * a failure happens.
		 */
		WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
			  "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}
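
/*
 * Illustrative sketch: with bottom-up mode enabled (as the
 * "movable_node" setup does), the finder above first searches just
 * over the kernel image. Purely hypothetical usage:
 */
static void __init __maybe_unused memblock_bottom_up_example(void)
{
	phys_addr_t addr;

	memblock_set_bottom_up(true);
	/* likely lands right above _end, on the kernel's node */
	addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!addr)
		pr_warn("%s: allocation failed\n", __func__);
	memblock_set_bottom_up(false);
}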

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		__memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		__memblock_free_late(addr, size);
	}
}
#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new one aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
			type->name, type->max * 2, &addr, &new_end);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock.  Otherwise,
	 * we needn't do it.
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}
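
/*
 * Illustrative sketch: two back-to-back additions with the same node
 * and flags are collapsed by the merge above into one region.
 * Hypothetical addresses.
 */
static void __init __maybe_unused memblock_merge_example(void)
{
	memblock_add(0x1000000, 0x1000000);	/* [16M, 32M) */
	memblock_add(0x2000000, 0x1000000);	/* [32M, 48M) */
	/* memblock.memory now contains a single region [16M, 48M) */
}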

/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 * @flags:	flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}
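
/*
 * Illustrative sketch: overlapping additions are legal; only the parts
 * not already covered are inserted and neighbours are then merged, so
 * the two calls below leave a single [16M, 40M) region. Hypothetical
 * addresses.
 */
static void __init __maybe_unused memblock_overlap_add_example(void)
{
	memblock_add(0x1000000, 0x1000000);	/* [16M, 32M) */
	memblock_add(0x1800000, 0x1000000);	/* [24M, 40M), overlaps */
}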

/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				       int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_add: [%pa-%pa] %pS\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}
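
/*
 * Illustrative sketch: removing the middle of a region exercises the
 * splitting done by memblock_isolate_range() above - one region
 * becomes two. Hypothetical addresses.
 */
static void __init __maybe_unused memblock_split_example(void)
{
	memblock_add(0x1000000, 0x3000000);	/* [16M, 64M) */
	memblock_remove(0x2000000, 0x1000000);	/* carve out [32M, 48M) */
	/* memblock.memory now holds [16M, 32M) and [48M, 64M) */
}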

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_remove: [%pa-%pa] %pS\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}

/**
 * memblock_free - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free a boot memory block previously allocated by the memblock_alloc_xx()
 * API. The freed memory will not be released to the buddy allocator.
 */
int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("   memblock_free: [%pa-%pa] %pS\n",
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}
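
/*
 * Illustrative sketch: a temporary early buffer is handed back with
 * memblock_free(); it is dropped from "reserved" but, as noted above,
 * not released to the buddy allocator. Hypothetical usage.
 */
static void __init __maybe_unused memblock_free_example(void)
{
	phys_addr_t addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);

	if (addr)
		memblock_free(addr, PAGE_SIZE);
}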

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_reserve: [%pa-%pa] %pS\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears
 * the flag.
 *
 * Return: 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++) {
		struct memblock_region *r = &type->regions[i];

		if (set)
			r->flags |= flag;
		else
			r->flags &= ~flag;
	}

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}
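
/*
 * Illustrative sketch: firmware-owned ranges are typically marked
 * MEMBLOCK_NOMAP so they stay out of the linear map while remaining
 * known to memblock (EFI setup code does something similar).
 * Hypothetical addresses.
 */
static void __init __maybe_unused memblock_nomap_example(void)
{
	memblock_add(0xf0000000, 0x10000);	/* firmware tables */
	memblock_mark_nomap(0xf0000000, 0x10000);
}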

/**
 * __next_reserved_mem_region - next function for for_each_reserved_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterate over all reserved memory regions.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end)
{
	struct memblock_type *type = &memblock.reserved;

	if (*idx < type->cnt) {
		struct memblock_region *r = &type->regions[*idx];
		phys_addr_t base = r->base;
		phys_addr_t size = r->size;

		if (out_start)
			*out_start = base;
		if (out_end)
			*out_end = base + size - 1;

		*idx += 1;
		return;
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
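
/*
 * Illustrative sketch: dumping the reserved regions with the iterator
 * built on __next_reserved_mem_region(). Note that, per the helper
 * above, the reported end address is inclusive.
 */
static void __init __maybe_unused memblock_dump_reserved_example(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_reserved_mem_region(i, &start, &end)
		pr_info("reserved: [%pa-%pa]\n", &start, &end);
}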

static bool should_skip_region(struct memblock_region *m, int nid, int flags)
{
	int m_nid = memblock_get_region_node(m);

	/* only memory regions are associated with nodes, check it */
	if (nid != NUMA_NO_NODE && nid != m_nid)
		return true;

	/* skip hotpluggable memory regions if needed */
	if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
		return true;

	/* if we want mirror memory skip non-mirror memory regions */
	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
		return true;

	/* skip nomap memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
		return true;

	return false;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid,
				      enum memblock_flags flags,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		if (should_skip_region(m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
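
/*
 * Illustrative sketch: walking the free ranges (memory minus reserved)
 * with the for_each_free_mem_range() iterator built on
 * __next_mem_range(). Hypothetical dump.
 */
static void __init __maybe_unused memblock_dump_free_example(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&start, &end, NULL)
		pr_info("free: [%pa-%pa]\n", &start, &end);
}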

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  enum memblock_flags flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
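
/*
 * Illustrative sketch: the UMA-style flow from the overview - register
 * a range without a node first, then attach the node id once it is
 * known. Hypothetical addresses and node.
 */
static void __init __maybe_unused memblock_set_node_example(void)
{
	memblock_add(0x80000000, 0x40000000);
	memblock_set_node(0x80000000, 0x40000000, &memblock.memory, 1);
}
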
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/**
 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
 *
 * @idx: pointer to u64 loop variable
 * @zone: zone in which all of the memory blocks reside
 * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
 *
 * This function is meant to be a zone/pfn specific wrapper for the
 * for_each_mem_range type iterators. Specifically it is used by the
 * deferred memory init routines, which used to duplicate much of this
 * logic throughout the code, so the logic is centralized here in one
 * iterator that does everything they need.
 */
void __init_memblock
__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
			     unsigned long *out_spfn, unsigned long *out_epfn)
{
	int zone_nid = zone_to_nid(zone);
	phys_addr_t spa, epa;
	int nid;

	__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
			 &memblock.memory, &memblock.reserved,
			 &spa, &epa, &nid);

	while (*idx != U64_MAX) {
		unsigned long epfn = PFN_DOWN(epa);
		unsigned long spfn = PFN_UP(spa);

		/*
		 * Verify the end is at least past the start of the zone and
		 * that we have at least one PFN to initialize.
		 */
		if (zone->zone_start_pfn < epfn && spfn < epfn) {
			/* if we went too far just stop searching */
			if (zone_end_pfn(zone) <= spfn) {
				*idx = U64_MAX;
				break;
			}

			if (out_spfn)
				*out_spfn = max(zone->zone_start_pfn, spfn);
			if (out_epfn)
				*out_epfn = min(zone_end_pfn(zone), epfn);

			return;
		}

		__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
				 &memblock.memory, &memblock.reserved,
				 &spa, &epa, &nid);
	}

	/* signal end of iteration */
	if (out_spfn)
		*out_spfn = ULONG_MAX;
	if (out_epfn)
		*out_epfn = 0;
}

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * memblock_alloc_range_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (phys address)
 * @end: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
 *
 * If the specified node cannot hold the requested memory the
 * allocation falls back to any node in the system.
 *
 * For systems with memory mirroring, the allocation is attempted first
 * from the regions with mirroring enabled and then retried from any
 * memory region.
 *
 * In addition, the function sets min_count to 0 using
 * kmemleak_alloc_phys() for the allocated boot memory block, so that it
 * is never reported as a leak.
 *
 * Return:
 * Physical address of allocated memory block on success, %0 on failure.
 */
static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid)
{
	enum memblock_flags flags = choose_memblock_flags();
	phys_addr_t found;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (!align) {
		/* Can't use WARNs this early in boot on powerpc */
		dump_stack();
		align = SMP_CACHE_BYTES;
	}

	if (end > memblock.current_limit)
		end = memblock.current_limit;

again:
	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size))
		goto done;

	if (nid != NUMA_NO_NODE) {
		found = memblock_find_in_range_node(size, align, start,
						    end, NUMA_NO_NODE,
						    flags);
		if (found && !memblock_reserve(found, size))
			goto done;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return 0;

done:
	/* Skip kmemleak for kasan_init() due to high volume. */
	if (end != MEMBLOCK_ALLOC_KASAN)
		/*
		 * The min_count is set to 0 so that memblock allocated
		 * blocks are never reported as leaks. This is because many
		 * of these blocks are only referred via the physical
		 * address which is not looked up by kmemleak.
		 */
		kmemleak_alloc_phys(found, size, 0, 0);

	return found;
}
1398
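/*
 * Editorial note (not part of the original source): when
 * choose_memblock_flags() selects MEMBLOCK_MIRROR, the retry ladder
 * above degrades gracefully: the requested @nid is tried first, then
 * any node, and only then is the mirror requirement dropped (with a
 * warning) before the whole search is repeated.
 */
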
1399/**
1400 * memblock_phys_alloc_range - allocate a memory block inside specified range
1401 * @size: size of memory block to be allocated in bytes
1402 * @align: alignment of the region and block's size
1403 * @start: the lower bound of the memory region to allocate (physical address)
1404 * @end: the upper bound of the memory region to allocate (physical address)
1405 *
1406 * Allocate @size bytes between @start and @end.
1407 *
1408 * Return: physical address of the allocated memory block on success,
1409 * %0 on failure.
1410 */
1411phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
1412                                             phys_addr_t align,
1413                                             phys_addr_t start,
1414                                             phys_addr_t end)
1415{
1416        return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE);
1417}
1418
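/*
 * Editorial sketch (not part of the original source): a typical use is
 * carving out a physically contiguous buffer in a restricted range very
 * early in boot, e.g. 2 MiB below 4 GiB for a DMA-limited device.  The
 * variable name is hypothetical.
 *
 *	phys_addr_t buf;
 *
 *	buf = memblock_phys_alloc_range(SZ_2M, SZ_2M, 0, SZ_4G);
 *	if (!buf)
 *		pr_warn("low buffer allocation failed\n");
 */
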
1419/**
1420 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
1421 * @size: size of memory block to be allocated in bytes
1422 * @align: alignment of the region and block's size
1423 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1424 *
1425 * Allocates a memory block from the specified NUMA node. If the node
1426 * has no available memory, the allocation falls back to any node in the
1427 * system.
1428 *
1429 * Return: physical address of the allocated memory block on success,
1430 * %0 on failure.
1431 */
1432phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
1433{
1434        return memblock_alloc_range_nid(size, align, 0,
1435                                        MEMBLOCK_ALLOC_ACCESSIBLE, nid);
1436}
1437
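/*
 * Editorial sketch (not part of the original source): per-node early
 * allocations commonly look like this; if @nid has no usable memory the
 * call transparently falls back to any node, as documented above.
 *
 *	phys_addr_t pa;
 *
 *	pa = memblock_phys_alloc_try_nid(sizeof(pg_data_t),
 *					 SMP_CACHE_BYTES, nid);
 *	if (!pa)
 *		panic("cannot allocate node data\n");
 */
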
1438/**
1439 * memblock_alloc_internal - allocate boot memory block
1440 * @size: size of memory block to be allocated in bytes
1441 * @align: alignment of the region and block's size
1442 * @min_addr: the lower bound of the memory region to allocate (phys address)
1443 * @max_addr: the upper bound of the memory region to allocate (phys address)
1444 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1445 *
1446 * Allocates memory block using memblock_alloc_range_nid() and
1447 * converts the returned physical address to virtual.
1448 *
1449 * The @min_addr limit is dropped if it cannot be satisfied and the allocation
1450 * will fall back to memory below @min_addr. Other constraints, such
1451 * as node and mirrored memory, will be handled again in
1452 * memblock_alloc_range_nid().
1453 *
1454 * Return:
1455 * Virtual address of allocated memory block on success, NULL on failure.
1456 */
1457static void * __init memblock_alloc_internal(
1458                                phys_addr_t size, phys_addr_t align,
1459                                phys_addr_t min_addr, phys_addr_t max_addr,
1460                                int nid)
1461{
1462        phys_addr_t alloc;
1463
1464        /*
1465         * Detect any accidental use of these APIs after slab is ready, as at
1466         * this moment memblock may already be deinitialized and its
1467         * internal data may have been destroyed (after memblock_free_all() runs)
1468         */
1469        if (WARN_ON_ONCE(slab_is_available()))
1470                return kzalloc_node(size, GFP_NOWAIT, nid);
1471
1472        alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid);
1473
1474        /* retry allocation without lower limit */
1475        if (!alloc && min_addr)
1476                alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid);
1477
1478        if (!alloc)
1479                return NULL;
1480
1481        return phys_to_virt(alloc);
1482}
1483
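/*
 * Editorial note (not part of the original source): the slab fallback
 * above means a caller that slips past the end of boot still gets
 * usable, zeroed memory from kzalloc_node(), but only after the
 * WARN_ON_ONCE() flags the misuse.  Such call sites should still be
 * converted to the slab allocator once it is available.
 */
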
1484/**
1485 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
1486 * memory and without panicking
1487 * @size: size of memory block to be allocated in bytes
1488 * @align: alignment of the region and block's size
1489 * @min_addr: the lower bound of the memory region from where the allocation
1490 *        is preferred (phys address)
1491 * @max_addr: the upper bound of the memory region from where the allocation
1492 *            is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1493 *            allocate only from memory limited by memblock.current_limit value
1494 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1495 *
1496 * Public function, provides additional debug information (including caller
1497 * info), if enabled. Does not zero the allocated memory and does not panic
1498 * if the request cannot be satisfied.
1499 *
1500 * Return:
1501 * Virtual address of allocated memory block on success, NULL on failure.
1502 */
1503void * __init memblock_alloc_try_nid_raw(
1504                        phys_addr_t size, phys_addr_t align,
1505                        phys_addr_t min_addr, phys_addr_t max_addr,
1506                        int nid)
1507{
1508        void *ptr;
1509
1510        memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1511                     __func__, (u64)size, (u64)align, nid, &min_addr,
1512                     &max_addr, (void *)_RET_IP_);
1513
1514        ptr = memblock_alloc_internal(size, align,
1515                                           min_addr, max_addr, nid);
1516        if (ptr && size > 0)
1517                page_init_poison(ptr, size);
1518
1519        return ptr;
1520}
1521
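/*
 * Editorial sketch (not part of the original source): the _raw variant
 * suits callers that will initialize every byte themselves, e.g. a large
 * memory map array, so the memset can be skipped.  The variable names
 * are hypothetical.
 *
 *	struct page *map;
 *
 *	map = memblock_alloc_try_nid_raw(map_size, PAGE_SIZE, 0,
 *					 MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 */
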
1522/**
1523 * memblock_alloc_try_nid - allocate boot memory block
1524 * @size: size of memory block to be allocated in bytes
1525 * @align: alignment of the region and block's size
1526 * @min_addr: the lower bound of the memory region from where the allocation
1527 *        is preferred (phys address)
1528 * @max_addr: the upper bound of the memory region from where the allocation
1529 *            is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1530 *            allocate only from memory limited by memblock.current_limit value
1531 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1532 *
1533 * Public function, provides additional debug information (including caller
1534 * info), if enabled. This function zeroes the allocated memory.
1535 *
1536 * Return:
1537 * Virtual address of allocated memory block on success, NULL on failure.
1538 */
1539void * __init memblock_alloc_try_nid(
1540                        phys_addr_t size, phys_addr_t align,
1541                        phys_addr_t min_addr, phys_addr_t max_addr,
1542                        int nid)
1543{
1544        void *ptr;
1545
1546        memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1547                     __func__, (u64)size, (u64)align, nid, &min_addr,
1548                     &max_addr, (void *)_RET_IP_);
1549        ptr = memblock_alloc_internal(size, align,
1550                                           min_addr, max_addr, nid);
1551        if (ptr)
1552                memset(ptr, 0, size);
1553
1554        return ptr;
1555}
1556
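/*
 * Editorial note (not part of the original source): most callers reach
 * this function through the memblock_alloc() wrapper in this kernel's
 * memblock.h, which passes %MEMBLOCK_ALLOC_ACCESSIBLE and
 * %NUMA_NO_NODE, e.g.:
 *
 *	ptr = memblock_alloc(size, SMP_CACHE_BYTES);
 */
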
1557/**
1558 * __memblock_free_late - free pages directly to buddy allocator
1559 * @base: phys starting address of the boot memory block
1560 * @size: size of the boot memory block in bytes
1561 *
1562 * This is only useful when the memblock allocator has already been torn
1563 * down, but we are still initializing the system.  Pages are released directly
1564 * to the buddy allocator.
1565 */
1566void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
1567{
1568        phys_addr_t cursor, end;
1569
1570        end = base + size - 1;
1571        memblock_dbg("%s: [%pa-%pa] %pS\n",
1572                     __func__, &base, &end, (void *)_RET_IP_);
1573        kmemleak_free_part_phys(base, size);
1574        cursor = PFN_UP(base);
1575        end = PFN_DOWN(base + size);
1576
1577        for (; cursor < end; cursor++) {
1578                memblock_free_pages(pfn_to_page(cursor), cursor, 0);
1579                totalram_pages_inc();
1580        }
1581}
1582
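/*
 * Editorial sketch (not part of the original source): callers normally
 * go through the memblock_free_late() wrapper in memblock.h.  Typical
 * use is returning a firmware region that had to stay reserved past
 * memblock_free_all(); the names below are hypothetical.
 *
 *	memblock_free_late(fw_table_base, fw_table_size);
 */
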
1583/*
1584 * Remaining API functions
1585 */
1586
1587phys_addr_t __init_memblock memblock_phys_mem_size(void)
1588{
1589        return memblock.memory.total_size;
1590}
1591
1592phys_addr_t __init_memblock memblock_reserved_size(void)
1593{
1594        return memblock.reserved.total_size;
1595}
1596
1597phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
1598{
1599        unsigned long pages = 0;
1600        struct memblock_region *r;
1601        unsigned long start_pfn, end_pfn;
1602
1603        for_each_memblock(memory, r) {
1604                start_pfn = memblock_region_memory_base_pfn(r);
1605                end_pfn = memblock_region_memory_end_pfn(r);
1606                start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
1607                end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
1608                pages += end_pfn - start_pfn;
1609        }
1610
1611        return PFN_PHYS(pages);
1612}
1613
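/*
 * Editorial sketch (not part of the original source): a typical question
 * answered with memblock_mem_size() is "how much memory lies below a
 * physical boundary", e.g. below 4 GiB:
 *
 *	phys_addr_t lowmem = memblock_mem_size(PFN_DOWN(SZ_4G));
 */
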
1614/* lowest address */
1615phys_addr_t __init_memblock memblock_start_of_DRAM(void)
1616{
1617        return memblock.memory.regions[0].base;
1618}
1619
1620phys_addr_t __init_memblock memblock_end_of_DRAM(void)
1621{
1622        int idx = memblock.memory.cnt - 1;
1623
1624        return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
1625}
1626
1627static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
1628{
1629        phys_addr_t max_addr = PHYS_ADDR_MAX;
1630        struct memblock_region *r;
1631
1632        /*
1633         * Translate the memory @limit size into the max address within one of
1634         * the memory memblock regions. If @limit exceeds the total size of
1635         * those regions, max_addr keeps its original value, PHYS_ADDR_MAX.
1636         */
1637        for_each_memblock(memory, r) {
1638                if (limit <= r->size) {
1639                        max_addr = r->base + limit;
1640                        break;
1641                }
1642                limit -= r->size;
1643        }
1644
1645        return max_addr;
1646}
1647
1648void __init memblock_enforce_memory_limit(phys_addr_t limit)
1649{
1650        phys_addr_t max_addr = PHYS_ADDR_MAX;
1651
1652        if (!limit)
1653                return;
1654
1655        max_addr = __find_max_addr(limit);
1656
1657        /* @limit exceeds the total size of the memory, do nothing */
1658        if (max_addr == PHYS_ADDR_MAX)
1659                return;
1660
1661        /* truncate both memory and reserved regions */
1662        memblock_remove_range(&memblock.memory, max_addr,
1663                              PHYS_ADDR_MAX);
1664        memblock_remove_range(&memblock.reserved, max_addr,
1665                              PHYS_ADDR_MAX);
1666}
1667
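/*
 * Editorial note (not part of the original source): this is the usual
 * backend for the "mem=" command line option mentioned in the overview;
 * an architecture passes the parsed limit straight through, e.g.:
 *
 *	memblock_enforce_memory_limit(memory_limit);
 */
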
1668void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
1669{
1670        int start_rgn, end_rgn;
1671        int i, ret;
1672
1673        if (!size)
1674                return;
1675
1676        ret = memblock_isolate_range(&memblock.memory, base, size,
1677                                                &start_rgn, &end_rgn);
1678        if (ret)
1679                return;
1680
1681        /* remove all mapped (!MEMBLOCK_NOMAP) regions outside [base, base + size) */
1682        for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
1683                if (!memblock_is_nomap(&memblock.memory.regions[i]))
1684                        memblock_remove_region(&memblock.memory, i);
1685
1686        for (i = start_rgn - 1; i >= 0; i--)
1687                if (!memblock_is_nomap(&memblock.memory.regions[i]))
1688                        memblock_remove_region(&memblock.memory, i);
1689
1690        /* truncate the reserved regions */
1691        memblock_remove_range(&memblock.reserved, 0, base);
1692        memblock_remove_range(&memblock.reserved,
1693                        base + size, PHYS_ADDR_MAX);
1694}
1695
1696void __init memblock_mem_limit_remove_map(phys_addr_t limit)
1697{
1698        phys_addr_t max_addr;
1699
1700        if (!limit)
1701                return;
1702
1703        max_addr = __find_max_addr(limit);
1704
1705        /* @limit exceeds the total size of the memory, do nothing */
1706        if (max_addr == PHYS_ADDR_MAX)
1707                return;
1708
1709        memblock_cap_memory_range(0, max_addr);
1710}
1711
1712static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
1713{
1714        unsigned int left = 0, right = type->cnt;
1715
1716        do {
1717                unsigned int mid = (right + left) / 2;
1718
1719                if (addr < type->regions[mid].base)
1720                        right = mid;
1721                else if (addr >= (type->regions[mid].base +
1722                                  type->regions[mid].size))
1723                        left = mid + 1;
1724                else
1725                        return mid;
1726        } while (left < right);
1727        return -1;
1728}
1729
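/*
 * Editorial note (not part of the original source): the search above is
 * valid because the insertion paths keep each region array sorted by
 * base and free of overlaps, so a plain binary search either lands in a
 * region or proves @addr falls in a hole (-1).
 */
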
1730bool __init_memblock memblock_is_reserved(phys_addr_t addr)
1731{
1732        return memblock_search(&memblock.reserved, addr) != -1;
1733}
1734
1735bool __init_memblock memblock_is_memory(phys_addr_t addr)
1736{
1737        return memblock_search(&memblock.memory, addr) != -1;
1738}
1739
1740bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
1741{
1742        int i = memblock_search(&memblock.memory, addr);
1743
1744        if (i == -1)
1745                return false;
1746        return !memblock_is_nomap(&memblock.memory.regions[i]);
1747}
1748
1749#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1750int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
1751                         unsigned long *start_pfn, unsigned long *end_pfn)
1752{
1753        struct memblock_type *type = &memblock.memory;
1754        int mid = memblock_search(type, PFN_PHYS(pfn));
1755
1756        if (mid == -1)
1757                return -1;
1758
1759        *start_pfn = PFN_DOWN(type->regions[mid].base);
1760        *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);
1761
1762        return type->regions[mid].nid;
1763}
1764#endif
1765
1766/**
1767 * memblock_is_region_memory - check if a region is a subset of memory
1768 * @base: base of region to check
1769 * @size: size of region to check
1770 *
1771 * Check if the region [@base, @base + @size) is a subset of a memory block.
1772 *
1773 * Return:
1774 * true if the region is a subset of a memory block, false otherwise.
1775 */
1776bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
1777{
1778        int idx = memblock_search(&memblock.memory, base);
1779        phys_addr_t end = base + memblock_cap_size(base, &size);
1780
1781        if (idx == -1)
1782                return false;
1783        return (memblock.memory.regions[idx].base +
1784                 memblock.memory.regions[idx].size) >= end;
1785}
1786
1787/**
1788 * memblock_is_region_reserved - check if a region intersects reserved memory
1789 * @base: base of region to check
1790 * @size: size of region to check
1791 *
1792 * Check if the region [@base, @base + @size) intersects a reserved
1793 * memory block.
1794 *
1795 * Return:
1796 * True if they intersect, false if not.
1797 */
1798bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
1799{
1800        memblock_cap_size(base, &size);
1801        return memblock_overlaps_region(&memblock.reserved, base, size);
1802}
1803
1804void __init_memblock memblock_trim_memory(phys_addr_t align)
1805{
1806        phys_addr_t start, end, orig_start, orig_end;
1807        struct memblock_region *r;
1808
1809        for_each_memblock(memory, r) {
1810                orig_start = r->base;
1811                orig_end = r->base + r->size;
1812                start = round_up(orig_start, align);
1813                end = round_down(orig_end, align);
1814
1815                if (start == orig_start && end == orig_end)
1816                        continue;
1817
1818                if (start < end) {
1819                        r->base = start;
1820                        r->size = end - start;
1821                } else {
1822                        memblock_remove_region(&memblock.memory,
1823                                               r - memblock.memory.regions);
1824                        r--;
1825                }
1826        }
1827}
1828
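/*
 * Editorial sketch (not part of the original source): a typical caller is
 * architecture setup code trimming partial pages off firmware-reported
 * ranges before they can reach the page allocator, e.g.:
 *
 *	memblock_trim_memory(PAGE_SIZE);
 */
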
1829void __init_memblock memblock_set_current_limit(phys_addr_t limit)
1830{
1831        memblock.current_limit = limit;
1832}
1833
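/*
 * Editorial sketch (not part of the original source): architectures
 * typically lower the limit early so that %MEMBLOCK_ALLOC_ACCESSIBLE
 * allocations stay within memory that is already mapped
 * (mapped_top below is a hypothetical bound):
 *
 *	memblock_set_current_limit(mapped_top);
 */
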
1834phys_addr_t __init_memblock memblock_get_current_limit(void)
1835{
1836        return memblock.current_limit;
1837}
1838
1839static void __init_memblock memblock_dump(struct memblock_type *type)
1840{
1841        phys_addr_t base, end, size;
1842        enum memblock_flags flags;
1843        int idx;
1844        struct memblock_region *rgn;
1845
1846        pr_info(" %s.cnt  = 0x%lx\n", type->name, type->cnt);
1847
1848        for_each_memblock_type(idx, type, rgn) {
1849                char nid_buf[32] = "";
1850
1851                base = rgn->base;
1852                size = rgn->size;
1853                end = base + size - 1;
1854                flags = rgn->flags;
1855#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1856                if (memblock_get_region_node(rgn) != MAX_NUMNODES)
1857                        snprintf(nid_buf, sizeof(nid_buf), " on node %d",
1858                                 memblock_get_region_node(rgn));
1859#endif
1860                pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
1861                        type->name, idx, &base, &end, &size, nid_buf, flags);
1862        }
1863}
1864
1865void __init_memblock __memblock_dump_all(void)
1866{
1867        pr_info("MEMBLOCK configuration:\n");
1868        pr_info(" memory size = %pa reserved size = %pa\n",
1869                &memblock.memory.total_size,
1870                &memblock.reserved.total_size);
1871
1872        memblock_dump(&memblock.memory);
1873        memblock_dump(&memblock.reserved);
1874#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
1875        memblock_dump(&memblock.physmem);
1876#endif
1877}
1878
1879void __init memblock_allow_resize(void)
1880{
1881        memblock_can_resize = 1;
1882}
1883
1884static int __init early_memblock(char *p)
1885{
1886        if (p && strstr(p, "debug"))
1887                memblock_debug = 1;
1888        return 0;
1889}
1890early_param("memblock", early_memblock);
1891
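/*
 * Editorial note (not part of the original source): as the parser above
 * shows, booting with "memblock=debug" on the kernel command line sets
 * memblock_debug and so enables the memblock_dbg() output used
 * throughout this file.
 */
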
1892static void __init __free_pages_memory(unsigned long start, unsigned long end)
1893{
1894        int order;
1895
1896        while (start < end) {
1897                order = min(MAX_ORDER - 1UL, __ffs(start));
1898
1899                while (start + (1UL << order) > end)
1900                        order--;
1901
1902                memblock_free_pages(pfn_to_page(start), start, order);
1903
1904                start += (1UL << order);
1905        }
1906}
1907
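/*
 * Editorial illustration (not part of the original source): the loop
 * above frees the largest naturally aligned blocks it can.  For pfns
 * start = 0x3, end = 0x10 it frees order 0 at 0x3, order 2 at 0x4 and
 * order 3 at 0x8 (assuming MAX_ORDER > 3): __ffs(start) caps the order
 * by the alignment of start, and the inner loop shrinks it so a block
 * never runs past end.
 */
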
1908static unsigned long __init __free_memory_core(phys_addr_t start,
1909                                 phys_addr_t end)
1910{
1911        unsigned long start_pfn = PFN_UP(start);
1912        unsigned long end_pfn = min_t(unsigned long,
1913                                      PFN_DOWN(end), max_low_pfn);
1914
1915        if (start_pfn >= end_pfn)
1916                return 0;
1917
1918        __free_pages_memory(start_pfn, end_pfn);
1919
1920        return end_pfn - start_pfn;
1921}
1922
1923static unsigned long __init free_low_memory_core_early(void)
1924{
1925        unsigned long count = 0;
1926        phys_addr_t start, end;
1927        u64 i;
1928
1929        memblock_clear_hotplug(0, -1);
1930
1931        for_each_reserved_mem_region(i, &start, &end)
1932                reserve_bootmem_region(start, end);
1933
1934        /*
1935         * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
1936         * because in some cases, e.g. when Node 0 has no RAM installed,
1937         * low RAM will be on Node 1.
1938         */
1939        for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
1940                                NULL)
1941                count += __free_memory_core(start, end);
1942
1943        return count;
1944}
1945
1946static int reset_managed_pages_done __initdata;
1947
1948void reset_node_managed_pages(pg_data_t *pgdat)
1949{
1950        struct zone *z;
1951
1952        for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
1953                atomic_long_set(&z->managed_pages, 0);
1954}
1955
1956void __init reset_all_zones_managed_pages(void)
1957{
1958        struct pglist_data *pgdat;
1959
1960        if (reset_managed_pages_done)
1961                return;
1962
1963        for_each_online_pgdat(pgdat)
1964                reset_node_managed_pages(pgdat);
1965
1966        reset_managed_pages_done = 1;
1967}
1968
1969/**
1970 * memblock_free_all - release free pages to the buddy allocator
1971 *
1972 * Return: the number of pages actually released.
1973 */
1974unsigned long __init memblock_free_all(void)
1975{
1976        unsigned long pages;
1977
1978        reset_all_zones_managed_pages();
1979
1980        pages = free_low_memory_core_early();
1981        totalram_pages_add(pages);
1982
1983        return pages;
1984}
1985
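/*
 * Editorial sketch (not part of the original source): architectures call
 * this once from their mem_init(); afterwards the buddy allocator owns
 * all free memory and the memblock_alloc*() APIs must not be used.
 *
 *	void __init mem_init(void)
 *	{
 *		...
 *		memblock_free_all();
 *	}
 */
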
1986#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)
1987
1988static int memblock_debug_show(struct seq_file *m, void *private)
1989{
1990        struct memblock_type *type = m->private;
1991        struct memblock_region *reg;
1992        int i;
1993        phys_addr_t end;
1994
1995        for (i = 0; i < type->cnt; i++) {
1996                reg = &type->regions[i];
1997                end = reg->base + reg->size - 1;
1998
1999                seq_printf(m, "%4d: ", i);
2000                seq_printf(m, "%pa..%pa\n", &reg->base, &end);
2001        }
2002        return 0;
2003}
2004DEFINE_SHOW_ATTRIBUTE(memblock_debug);
2005
2006static int __init memblock_init_debugfs(void)
2007{
2008        struct dentry *root = debugfs_create_dir("memblock", NULL);
2009
2010        debugfs_create_file("memory", 0444, root,
2011                            &memblock.memory, &memblock_debug_fops);
2012        debugfs_create_file("reserved", 0444, root,
2013                            &memblock.reserved, &memblock_debug_fops);
2014#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
2015        debugfs_create_file("physmem", 0444, root,
2016                            &memblock.physmem, &memblock_debug_fops);
2017#endif
2018
2019        return 0;
2020}
2021__initcall(memblock_init_debugfs);
2022
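/*
 * Editorial sketch (not part of the original source): with these files in
 * place, the current layout can be inspected at run time; output follows
 * the format produced by memblock_debug_show() above, e.g.:
 *
 *	# cat /sys/kernel/debug/memblock/memory
 *	   0: 0x0000000000001000..0x000000003fffffff
 */
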
2023#endif /* CONFIG_DEBUG_FS && CONFIG_ARCH_KEEP_MEMBLOCK */
2024