linux/mm/memblock.c
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.     June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
        .memory.regions         = memblock_memory_init_regions,
        .memory.cnt             = 1,    /* empty dummy entry */
        .memory.max             = INIT_MEMBLOCK_REGIONS,
        .memory.name            = "memory",

        .reserved.regions       = memblock_reserved_init_regions,
        .reserved.cnt           = 1,    /* empty dummy entry */
        .reserved.max           = INIT_MEMBLOCK_REGIONS,
        .reserved.name          = "reserved",

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
        .physmem.regions        = memblock_physmem_init_regions,
        .physmem.cnt            = 1,    /* empty dummy entry */
        .physmem.max            = INIT_PHYSMEM_REGIONS,
        .physmem.name           = "physmem",
#endif

        .bottom_up              = false,
        .current_limit          = MEMBLOCK_ALLOC_ANYWHERE,
};
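
/*
 * Minimal usage sketch (illustrative; the addresses and sizes are
 * hypothetical, not from any particular platform). Early arch setup
 * registers RAM and carves out firmware/kernel ranges before the first
 * allocation is attempted:
 *
 *      memblock_add(0x80000000, SZ_512M);
 *      memblock_reserve(0x80080000, SZ_2M);
 *      pa = memblock_alloc(SZ_4K, SMP_CACHE_BYTES);
 */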

int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

ulong __init_memblock choose_memblock_flags(void)
{
        return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
        return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}
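
/*
 * For example (hypothetical values), with a 32-bit phys_addr_t a caller
 * passing base = 0xfffff000 and *size = 0x2000 gets *size capped to
 * 0xffffffff - 0xfffff000 = 0xfff, so that base + *size can no longer
 * wrap around.
 */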

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
                                       phys_addr_t base2, phys_addr_t size2)
{
        return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
                                        phys_addr_t base, phys_addr_t size)
{
        unsigned long i;

        for (i = 0; i < type->cnt; i++)
                if (memblock_addrs_overlap(base, size, type->regions[i].base,
                                           type->regions[i].size))
                        break;
        return i < type->cnt;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
                                phys_addr_t size, phys_addr_t align, int nid,
                                ulong flags)
{
        phys_addr_t this_start, this_end, cand;
        u64 i;

        for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
                this_start = clamp(this_start, start, end);
                this_end = clamp(this_end, start, end);

                cand = round_up(this_start, align);
                if (cand < this_end && this_end - cand >= size)
                        return cand;
        }

        return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
                               phys_addr_t size, phys_addr_t align, int nid,
                               ulong flags)
{
        phys_addr_t this_start, this_end, cand;
        u64 i;

        for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
                                        NULL) {
                this_start = clamp(this_start, start, end);
                this_end = clamp(this_end, start, end);

                if (this_end < size)
                        continue;

                cand = round_down(this_end - size, align);
                if (cand >= this_start)
                        return cand;
        }

        return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, @start should be greater than
 * the end of the kernel image. Otherwise, it will be trimmed. The
 * reason is that we want the bottom-up allocation to sit just above the
 * kernel image, so that it is highly likely that the allocated memory
 * and the kernel will reside on the same node.
 *
 * If the bottom-up allocation fails, the search falls back to top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
                                        phys_addr_t align, phys_addr_t start,
                                        phys_addr_t end, int nid, ulong flags)
{
        phys_addr_t kernel_end, ret;

        /* pump up @end */
        if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
                end = memblock.current_limit;

        /* avoid allocating the first page */
        start = max_t(phys_addr_t, start, PAGE_SIZE);
        end = max(start, end);
        kernel_end = __pa_symbol(_end);

        /*
         * try bottom-up allocation only when bottom-up mode
         * is set and @end is above the kernel image.
         */
        if (memblock_bottom_up() && end > kernel_end) {
                phys_addr_t bottom_up_start;

                /* make sure we will allocate above the kernel */
                bottom_up_start = max(start, kernel_end);

                /* ok, try bottom-up allocation first */
                ret = __memblock_find_range_bottom_up(bottom_up_start, end,
                                                      size, align, nid, flags);
                if (ret)
                        return ret;

                /*
                 * we always limit bottom-up allocation above the kernel,
                 * but top-down allocation doesn't have the limit, so
                 * retrying top-down allocation may succeed when bottom-up
                 * allocation failed.
                 *
                 * bottom-up allocation is expected to fail very rarely,
                 * so we use WARN_ONCE() here to see the stack trace if
                 * a failure happens.
                 */
                WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
        }

        return __memblock_find_range_top_down(start, end, size, align, nid,
                                              flags);
}
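
/*
 * Usage sketch (illustrative; the node id and sizes are hypothetical):
 * callers that need node-local early memory search a window and then
 * reserve the result themselves:
 *
 *      phys_addr_t pa = memblock_find_in_range_node(SZ_1M, SZ_2M, 0,
 *                                      MEMBLOCK_ALLOC_ACCESSIBLE, 1,
 *                                      MEMBLOCK_NONE);
 *      if (pa)
 *              memblock_reserve(pa, SZ_1M);
 */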

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
                                        phys_addr_t end, phys_addr_t size,
                                        phys_addr_t align)
{
        phys_addr_t ret;
        ulong flags = choose_memblock_flags();

again:
        ret = memblock_find_in_range_node(size, align, start, end,
                                            NUMA_NO_NODE, flags);

        if (!ret && (flags & MEMBLOCK_MIRROR)) {
                pr_warn("Could not allocate %pap bytes of mirrored memory\n",
                        &size);
                flags &= ~MEMBLOCK_MIRROR;
                goto again;
        }

        return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
        type->total_size -= type->regions[r].size;
        memmove(&type->regions[r], &type->regions[r + 1],
                (type->cnt - (r + 1)) * sizeof(type->regions[r]));
        type->cnt--;

        /* Special case for empty arrays */
        if (type->cnt == 0) {
                WARN_ON(type->total_size != 0);
                type->cnt = 1;
                type->regions[0].base = 0;
                type->regions[0].size = 0;
                type->regions[0].flags = 0;
                memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
        }
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
        phys_addr_t addr, size;

        if (memblock.reserved.regions != memblock_reserved_init_regions) {
                addr = __pa(memblock.reserved.regions);
                size = PAGE_ALIGN(sizeof(struct memblock_region) *
                                  memblock.reserved.max);
                __memblock_free_late(addr, size);
        }

        if (memblock.memory.regions != memblock_memory_init_regions) {
                addr = __pa(memblock.memory.regions);
                size = PAGE_ALIGN(sizeof(struct memblock_region) *
                                  memblock.memory.max);
                __memblock_free_late(addr, size);
        }
}
#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size)
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
                                                phys_addr_t new_area_start,
                                                phys_addr_t new_area_size)
{
        struct memblock_region *new_array, *old_array;
        phys_addr_t old_alloc_size, new_alloc_size;
        phys_addr_t old_size, new_size, addr;
        int use_slab = slab_is_available();
        int *in_slab;

        /* We don't allow resizing until we know about the reserved regions
         * of memory that aren't suitable for allocation
         */
        if (!memblock_can_resize)
                return -1;

        /* Calculate new doubled size */
        old_size = type->max * sizeof(struct memblock_region);
        new_size = old_size << 1;
        /*
         * We need to allocate the new array aligned to PAGE_SIZE,
         * so that we can free it completely later.
         */
        old_alloc_size = PAGE_ALIGN(old_size);
        new_alloc_size = PAGE_ALIGN(new_size);

        /* Retrieve the slab flag */
        if (type == &memblock.memory)
                in_slab = &memblock_memory_in_slab;
        else
                in_slab = &memblock_reserved_in_slab;

        /* Try to find some space for it.
         *
         * WARNING: We assume that either slab_is_available() is true and we
         * use the slab, or we use MEMBLOCK for allocations. That means that
         * this is unsafe to use when bootmem is currently active (unless
         * bootmem itself is implemented on top of MEMBLOCK, which isn't the
         * case yet)
         *
         * This should however not be an issue for now, as we currently only
         * call into MEMBLOCK while it's still active, or much later when slab
         * is active for memory hotplug operations
         */
        if (use_slab) {
                new_array = kmalloc(new_size, GFP_KERNEL);
                addr = new_array ? __pa(new_array) : 0;
        } else {
                /* only exclude range when trying to double reserved.regions */
                if (type != &memblock.reserved)
                        new_area_start = new_area_size = 0;

                addr = memblock_find_in_range(new_area_start + new_area_size,
                                                memblock.current_limit,
                                                new_alloc_size, PAGE_SIZE);
                if (!addr && new_area_size)
                        addr = memblock_find_in_range(0,
                                min(new_area_start, memblock.current_limit),
                                new_alloc_size, PAGE_SIZE);

                new_array = addr ? __va(addr) : NULL;
        }
        if (!addr) {
                pr_err("memblock: Failed to double %s array from %ld to %ld entries!\n",
                       type->name, type->max, type->max * 2);
                return -1;
        }

        memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
                        type->name, type->max * 2, (u64)addr,
                        (u64)addr + new_size - 1);

        /*
         * Found space, we now need to move the array over before we add the
         * reserved region since it may be our reserved array itself that is
         * full.
         */
        memcpy(new_array, type->regions, old_size);
        memset(new_array + type->max, 0, old_size);
        old_array = type->regions;
        type->regions = new_array;
        type->max <<= 1;

        /* Free old array. We needn't free it if the array is the static one */
        if (*in_slab)
                kfree(old_array);
        else if (old_array != memblock_memory_init_regions &&
                 old_array != memblock_reserved_init_regions)
                memblock_free(__pa(old_array), old_alloc_size);

        /*
         * Reserve the new array if it comes from memblock. Otherwise, we
         * needn't do it
         */
        if (!use_slab)
                BUG_ON(memblock_reserve(addr, new_alloc_size));

        /* Update slab flag */
        *in_slab = use_slab;

        return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
        int i = 0;

        /* cnt never goes below 1 */
        while (i < type->cnt - 1) {
                struct memblock_region *this = &type->regions[i];
                struct memblock_region *next = &type->regions[i + 1];

                if (this->base + this->size != next->base ||
                    memblock_get_region_node(this) !=
                    memblock_get_region_node(next) ||
                    this->flags != next->flags) {
                        BUG_ON(this->base + this->size > next->base);
                        i++;
                        continue;
                }

                this->size += next->size;
                /* move forward from next + 1, index of which is i + 2 */
                memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
                type->cnt--;
        }
}

/**
 * memblock_insert_region - insert new memblock region
 * @type:       memblock type to insert into
 * @idx:        index for the insertion point
 * @base:       base address of the new region
 * @size:       size of the new region
 * @nid:        node id of the new region
 * @flags:      flags of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
                                                   int idx, phys_addr_t base,
                                                   phys_addr_t size,
                                                   int nid, unsigned long flags)
{
        struct memblock_region *rgn = &type->regions[idx];

        BUG_ON(type->cnt >= type->max);
        memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
        rgn->base = base;
        rgn->size = size;
        rgn->flags = flags;
        memblock_set_region_node(rgn, nid);
        type->cnt++;
        type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
                                phys_addr_t base, phys_addr_t size,
                                int nid, unsigned long flags)
{
        bool insert = false;
        phys_addr_t obase = base;
        phys_addr_t end = base + memblock_cap_size(base, &size);
        int idx, nr_new;
        struct memblock_region *rgn;

        if (!size)
                return 0;

        /* special case for empty array */
        if (type->regions[0].size == 0) {
                WARN_ON(type->cnt != 1 || type->total_size);
                type->regions[0].base = base;
                type->regions[0].size = size;
                type->regions[0].flags = flags;
                memblock_set_region_node(&type->regions[0], nid);
                type->total_size = size;
                return 0;
        }
repeat:
        /*
         * The following is executed twice.  Once with %false @insert and
         * then with %true.  The first counts the number of regions needed
         * to accommodate the new area.  The second actually inserts them.
         */
        base = obase;
        nr_new = 0;

        for_each_memblock_type(type, rgn) {
                phys_addr_t rbase = rgn->base;
                phys_addr_t rend = rbase + rgn->size;

                if (rbase >= end)
                        break;
                if (rend <= base)
                        continue;
                /*
                 * @rgn overlaps.  If it separates the lower part of new
                 * area, insert that portion.
                 */
                if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
                        WARN_ON(nid != memblock_get_region_node(rgn));
#endif
                        WARN_ON(flags != rgn->flags);
                        nr_new++;
                        if (insert)
                                memblock_insert_region(type, idx++, base,
                                                       rbase - base, nid,
                                                       flags);
                }
                /* area below @rend is dealt with, forget about it */
                base = min(rend, end);
        }

        /* insert the remaining portion */
        if (base < end) {
                nr_new++;
                if (insert)
                        memblock_insert_region(type, idx, base, end - base,
                                               nid, flags);
        }

        if (!nr_new)
                return 0;

        /*
         * If this was the first round, resize array and repeat for actual
         * insertions; otherwise, merge and return.
         */
        if (!insert) {
                while (type->cnt + nr_new > type->max)
                        if (memblock_double_array(type, obase, size) < 0)
                                return -ENOMEM;
                insert = true;
                goto repeat;
        } else {
                memblock_merge_regions(type);
                return 0;
        }
}
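
/*
 * Worked example (hypothetical ranges): starting from a type holding
 * only [0x1000, 0x2000),
 *
 *      memblock_add_range(type, 0x1800, 0x1000, nid, 0);
 *
 * only inserts the non-overlapping tail [0x2000, 0x2800), and
 * memblock_merge_regions() then leaves a single region
 * [0x1000, 0x2800) - overlaps never disturb existing regions.
 */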

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
                                       int nid)
{
        return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
        phys_addr_t end = base + size - 1;

        memblock_dbg("memblock_add: [%pa-%pa] %pF\n",
                     &base, &end, (void *)_RET_IP_);

        return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
                                        phys_addr_t base, phys_addr_t size,
                                        int *start_rgn, int *end_rgn)
{
        phys_addr_t end = base + memblock_cap_size(base, &size);
        int idx;
        struct memblock_region *rgn;

        *start_rgn = *end_rgn = 0;

        if (!size)
                return 0;

        /* we'll create at most two more regions */
        while (type->cnt + 2 > type->max)
                if (memblock_double_array(type, base, size) < 0)
                        return -ENOMEM;

        for_each_memblock_type(type, rgn) {
                phys_addr_t rbase = rgn->base;
                phys_addr_t rend = rbase + rgn->size;

                if (rbase >= end)
                        break;
                if (rend <= base)
                        continue;

                if (rbase < base) {
                        /*
                         * @rgn intersects from below.  Split and continue
                         * to process the next region - the new top half.
                         */
                        rgn->base = base;
                        rgn->size -= base - rbase;
                        type->total_size -= base - rbase;
                        memblock_insert_region(type, idx, rbase, base - rbase,
                                               memblock_get_region_node(rgn),
                                               rgn->flags);
                } else if (rend > end) {
                        /*
                         * @rgn intersects from above.  Split and redo the
                         * current region - the new bottom half.
                         */
                        rgn->base = end;
                        rgn->size -= end - rbase;
                        type->total_size -= end - rbase;
                        memblock_insert_region(type, idx--, rbase, end - rbase,
                                               memblock_get_region_node(rgn),
                                               rgn->flags);
                } else {
                        /* @rgn is fully contained, record it */
                        if (!*end_rgn)
                                *start_rgn = idx;
                        *end_rgn = idx + 1;
                }
        }

        return 0;
}
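
/*
 * Worked example (hypothetical ranges): isolating [0x1000, 0x3000) in a
 * type holding a single region [0x0, 0x4000) splits it into
 *
 *      [0x0, 0x1000) [0x1000, 0x3000) [0x3000, 0x4000)
 *
 * and returns *start_rgn = 1, *end_rgn = 2, so callers such as
 * memblock_remove_range() can operate on exactly the middle region.
 */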

static int __init_memblock memblock_remove_range(struct memblock_type *type,
                                          phys_addr_t base, phys_addr_t size)
{
        int start_rgn, end_rgn;
        int i, ret;

        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
        if (ret)
                return ret;

        for (i = end_rgn - 1; i >= start_rgn; i--)
                memblock_remove_region(type, i);
        return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
        return memblock_remove_range(&memblock.memory, base, size);
}


int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
        phys_addr_t end = base + size - 1;

        memblock_dbg("   memblock_free: [%pa-%pa] %pF\n",
                     &base, &end, (void *)_RET_IP_);

        kmemleak_free_part_phys(base, size);
        return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
        phys_addr_t end = base + size - 1;

        memblock_dbg("memblock_reserve: [%pa-%pa] %pF\n",
                     &base, &end, (void *)_RET_IP_);

        return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_setclr_flag - set or clear a flag on a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set (1) or clear (0) @flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag
 *
 * Return 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
                                phys_addr_t size, int set, int flag)
{
        struct memblock_type *type = &memblock.memory;
        int i, ret, start_rgn, end_rgn;

        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
        if (ret)
                return ret;

        for (i = start_rgn; i < end_rgn; i++)
                if (set)
                        memblock_set_region_flags(&type->regions[i], flag);
                else
                        memblock_clear_region_flags(&type->regions[i], flag);

        memblock_merge_regions(type);
        return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
        return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
        return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
        system_has_some_mirror = true;

        return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
        return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
        return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}
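
/*
 * Usage sketch (illustrative; base/size are hypothetical): firmware
 * parsing code marks attributes on memory that has already been added,
 * e.g. for a range reported as mirrored:
 *
 *      memblock_add(base, size);
 *      memblock_mark_mirror(base, size);
 *
 * after which choose_memblock_flags() returns MEMBLOCK_MIRROR and
 * allocations prefer mirrored regions.
 */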

/**
 * __next_reserved_mem_region - next function for for_each_reserved_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterate over all reserved memory regions.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
                                           phys_addr_t *out_start,
                                           phys_addr_t *out_end)
{
        struct memblock_type *type = &memblock.reserved;

        if (*idx < type->cnt) {
                struct memblock_region *r = &type->regions[*idx];
                phys_addr_t base = r->base;
                phys_addr_t size = r->size;

                if (out_start)
                        *out_start = base;
                if (out_end)
                        *out_end = base + size;

                *idx += 1;
                return;
        }

        /* signal end of iteration */
        *idx = ULLONG_MAX;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *      0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *      0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
                                      struct memblock_type *type_a,
                                      struct memblock_type *type_b,
                                      phys_addr_t *out_start,
                                      phys_addr_t *out_end, int *out_nid)
{
        int idx_a = *idx & 0xffffffff;
        int idx_b = *idx >> 32;

        if (WARN_ONCE(nid == MAX_NUMNODES,
        "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
                nid = NUMA_NO_NODE;

        for (; idx_a < type_a->cnt; idx_a++) {
                struct memblock_region *m = &type_a->regions[idx_a];

                phys_addr_t m_start = m->base;
                phys_addr_t m_end = m->base + m->size;
                int         m_nid = memblock_get_region_node(m);

                /* only memory regions are associated with nodes, check it */
                if (nid != NUMA_NO_NODE && nid != m_nid)
                        continue;

                /* skip hotpluggable memory regions if needed */
                if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
                        continue;

                /* if we want mirror memory skip non-mirror memory regions */
                if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
                        continue;

                /* skip nomap memory unless we were asked for it explicitly */
                if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
                        continue;

                if (!type_b) {
                        if (out_start)
                                *out_start = m_start;
                        if (out_end)
                                *out_end = m_end;
                        if (out_nid)
                                *out_nid = m_nid;
                        idx_a++;
                        *idx = (u32)idx_a | (u64)idx_b << 32;
                        return;
                }

                /* scan areas before each reservation */
                for (; idx_b < type_b->cnt + 1; idx_b++) {
                        struct memblock_region *r;
                        phys_addr_t r_start;
                        phys_addr_t r_end;

                        r = &type_b->regions[idx_b];
                        r_start = idx_b ? r[-1].base + r[-1].size : 0;
                        r_end = idx_b < type_b->cnt ?
                                r->base : ULLONG_MAX;

                        /*
                         * if idx_b advanced past idx_a,
                         * break out to advance idx_a
                         */
                        if (r_start >= m_end)
                                break;
                        /* if the two regions intersect, we're done */
                        if (m_start < r_end) {
                                if (out_start)
                                        *out_start =
                                                max(m_start, r_start);
                                if (out_end)
                                        *out_end = min(m_end, r_end);
                                if (out_nid)
                                        *out_nid = m_nid;
                                /*
                                 * The region which ends first is
                                 * advanced for the next iteration.
                                 */
                                if (m_end <= r_end)
                                        idx_a++;
                                else
                                        idx_b++;
                                *idx = (u32)idx_a | (u64)idx_b << 32;
                                return;
                        }
                }
        }

        /* signal end of iteration */
        *idx = ULLONG_MAX;
}
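
/*
 * Worked example of the index encoding (hypothetical contents): with
 * one memory region [0x0, 0x100) and one reserved region [0x40, 0x80),
 * successive calls starting from *idx = 0 report the free intersections
 *
 *      [0x0, 0x40)   (idx_a stays 0, idx_b advances to 1)
 *      [0x80, 0x100) (idx_a advances to 1, idx_b stays 1)
 *
 * and the third call sets *idx = ULLONG_MAX to end the iteration.
 */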

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
                                          struct memblock_type *type_a,
                                          struct memblock_type *type_b,
                                          phys_addr_t *out_start,
                                          phys_addr_t *out_end, int *out_nid)
{
        int idx_a = *idx & 0xffffffff;
        int idx_b = *idx >> 32;

        if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
                nid = NUMA_NO_NODE;

        if (*idx == (u64)ULLONG_MAX) {
                idx_a = type_a->cnt - 1;
                if (type_b != NULL)
                        idx_b = type_b->cnt;
                else
                        idx_b = 0;
        }

        for (; idx_a >= 0; idx_a--) {
                struct memblock_region *m = &type_a->regions[idx_a];

                phys_addr_t m_start = m->base;
                phys_addr_t m_end = m->base + m->size;
                int m_nid = memblock_get_region_node(m);

                /* only memory regions are associated with nodes, check it */
                if (nid != NUMA_NO_NODE && nid != m_nid)
                        continue;

                /* skip hotpluggable memory regions if needed */
                if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
                        continue;

                /* if we want mirror memory skip non-mirror memory regions */
                if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
                        continue;

                /* skip nomap memory unless we were asked for it explicitly */
                if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
                        continue;

                if (!type_b) {
                        if (out_start)
                                *out_start = m_start;
                        if (out_end)
                                *out_end = m_end;
                        if (out_nid)
                                *out_nid = m_nid;
                        idx_a--;
                        *idx = (u32)idx_a | (u64)idx_b << 32;
                        return;
                }

                /* scan areas before each reservation */
                for (; idx_b >= 0; idx_b--) {
                        struct memblock_region *r;
                        phys_addr_t r_start;
                        phys_addr_t r_end;

                        r = &type_b->regions[idx_b];
                        r_start = idx_b ? r[-1].base + r[-1].size : 0;
                        r_end = idx_b < type_b->cnt ?
                                r->base : ULLONG_MAX;
                        /*
                         * if idx_b advanced past idx_a,
                         * break out to advance idx_a
                         */
                        if (r_end <= m_start)
                                break;
                        /* if the two regions intersect, we're done */
                        if (m_end > r_start) {
                                if (out_start)
                                        *out_start = max(m_start, r_start);
                                if (out_end)
                                        *out_end = min(m_end, r_end);
                                if (out_nid)
                                        *out_nid = m_nid;
                                if (m_start >= r_start)
                                        idx_a--;
                                else
                                        idx_b--;
                                *idx = (u32)idx_a | (u64)idx_b << 32;
                                return;
                        }
                }
        }
        /* signal end of iteration */
        *idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
                                unsigned long *out_start_pfn,
                                unsigned long *out_end_pfn, int *out_nid)
{
        struct memblock_type *type = &memblock.memory;
        struct memblock_region *r;

        while (++*idx < type->cnt) {
                r = &type->regions[*idx];

                if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
                        continue;
                if (nid == MAX_NUMNODES || nid == r->nid)
                        break;
        }
        if (*idx >= type->cnt) {
                *idx = -1;
                return;
        }

        if (out_start_pfn)
                *out_start_pfn = PFN_UP(r->base);
        if (out_end_pfn)
                *out_end_pfn = PFN_DOWN(r->base + r->size);
        if (out_nid)
                *out_nid = r->nid;
}

unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
                                                      unsigned long max_pfn)
{
        struct memblock_type *type = &memblock.memory;
        unsigned int right = type->cnt;
        unsigned int mid, left = 0;
        phys_addr_t addr = PFN_PHYS(pfn + 1);

        do {
                mid = (right + left) / 2;

                if (addr < type->regions[mid].base)
                        right = mid;
                else if (addr >= (type->regions[mid].base +
                                  type->regions[mid].size))
                        left = mid + 1;
                else {
                        /* addr is within the region, so pfn + 1 is valid */
                        return min(pfn + 1, max_pfn);
                }
        } while (left < right);

        if (right == type->cnt)
                return max_pfn;
        else
                return min(PHYS_PFN(type->regions[right].base), max_pfn);
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
                                      struct memblock_type *type, int nid)
{
        int start_rgn, end_rgn;
        int i, ret;

        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
        if (ret)
                return ret;

        for (i = start_rgn; i < end_rgn; i++)
                memblock_set_region_node(&type->regions[i], nid);

        memblock_merge_regions(type);
        return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
                                        phys_addr_t align, phys_addr_t start,
                                        phys_addr_t end, int nid, ulong flags)
{
        phys_addr_t found;

        if (!align)
                align = SMP_CACHE_BYTES;

        found = memblock_find_in_range_node(size, align, start, end, nid,
                                            flags);
        if (found && !memblock_reserve(found, size)) {
                /*
                 * The min_count is set to 0 so that memblock allocations are
                 * never reported as leaks.
                 */
                kmemleak_alloc_phys(found, size, 0, 0);
                return found;
        }
        return 0;
}

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
                                        phys_addr_t start, phys_addr_t end,
                                        ulong flags)
{
        return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
                                        flags);
}

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
                                        phys_addr_t align, phys_addr_t max_addr,
                                        int nid, ulong flags)
{
        return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
        ulong flags = choose_memblock_flags();
        phys_addr_t ret;

again:
        ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
                                      nid, flags);

        if (!ret && (flags & MEMBLOCK_MIRROR)) {
                flags &= ~MEMBLOCK_MIRROR;
                goto again;
        }
        return ret;
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
        return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
                                       MEMBLOCK_NONE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
        phys_addr_t alloc;

        alloc = __memblock_alloc_base(size, align, max_addr);

        if (alloc == 0)
                panic("ERROR: Failed to allocate %pa bytes below %pa.\n",
                      &size, &max_addr);

        return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
        return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
        phys_addr_t res = memblock_alloc_nid(size, align, nid);

        if (res)
                return res;
        return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
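
/*
 * Usage sketch (illustrative): the physical allocators above differ
 * mainly in failure policy. A caller that prefers node-local memory but
 * can tolerate any node, and must not fail, would use
 *
 *      pa = memblock_alloc_try_nid(SZ_64K, SZ_4K, nid);
 *
 * which falls back through memblock_alloc_nid() to memblock_alloc_base(),
 * and the latter panics if nothing fits.
 */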

/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it cannot be satisfied and the allocation
 * will fall back to memory below @min_addr. Also, allocation may fall back
 * to any node in the system if the specified node cannot
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
 *
 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
 *
 * The phys address of the allocated boot memory block is converted to virtual
 * and the allocated memory is reset to 0.
 *
 * In addition, the function sets the min_count to 0 using kmemleak_alloc for
 * the allocated boot memory block, so that it is never reported as a leak.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
                                phys_addr_t size, phys_addr_t align,
                                phys_addr_t min_addr, phys_addr_t max_addr,
                                int nid)
{
        phys_addr_t alloc;
        void *ptr;
        ulong flags = choose_memblock_flags();

        if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
                nid = NUMA_NO_NODE;

        /*
         * Detect any accidental use of these APIs after slab is ready, as at
         * this moment memblock may be deinitialized already and its
         * internal data may be destroyed (after execution of free_all_bootmem)
         */
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, nid);

        if (!align)
                align = SMP_CACHE_BYTES;

        if (max_addr > memblock.current_limit)
                max_addr = memblock.current_limit;
again:
        alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
                                            nid, flags);
        if (alloc && !memblock_reserve(alloc, size))
                goto done;

        if (nid != NUMA_NO_NODE) {
                alloc = memblock_find_in_range_node(size, align, min_addr,
                                                    max_addr, NUMA_NO_NODE,
                                                    flags);
                if (alloc && !memblock_reserve(alloc, size))
                        goto done;
        }

        if (min_addr) {
                min_addr = 0;
                goto again;
        }

        if (flags & MEMBLOCK_MIRROR) {
                flags &= ~MEMBLOCK_MIRROR;
                pr_warn("Could not allocate %pap bytes of mirrored memory\n",
                        &size);
                goto again;
        }

        return NULL;
done:
        ptr = phys_to_virt(alloc);
        memset(ptr, 0, size);

        /*
         * The min_count is set to 0 so that bootmem allocated blocks
         * are never reported as leaks. This is because many of these blocks
         * are only referred via the physical address which is not
         * looked up by kmemleak.
         */
        kmemleak_alloc(ptr, size, 0, 0);

        return ptr;
}

/**
 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *        is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *            is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *            allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public version of memblock_virt_alloc_internal() which provides
 * additional debug information (including caller info), if enabled.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_nopanic(
                                phys_addr_t size, phys_addr_t align,
                                phys_addr_t min_addr, phys_addr_t max_addr,
                                int nid)
{
        memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
                     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
                     (u64)max_addr, (void *)_RET_IP_);
        return memblock_virt_alloc_internal(size, align, min_addr,
                                             max_addr, nid);
}
1371
1372/**
1373 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
1374 * @size: size of memory block to be allocated in bytes
1375 * @align: alignment of the region and block's size
1376 * @min_addr: the lower bound of the memory region from where the allocation
1377 *        is preferred (phys address)
1378 * @max_addr: the upper bound of the memory region from where the allocation
1379 *            is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
1380 *            allocate only from memory limited by memblock.current_limit value
1381 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1382 *
1383 * Public panicking version of memblock_virt_alloc_try_nid_nopanic()
1384 * which provides debug information (including caller info), if enabled,
1385 * and panics if the request cannot be satisfied.
1386 *
1387 * RETURNS:
1388 * Virtual address of allocated memory block on success; panics on failure.
1389 */
1390void * __init memblock_virt_alloc_try_nid(
1391                        phys_addr_t size, phys_addr_t align,
1392                        phys_addr_t min_addr, phys_addr_t max_addr,
1393                        int nid)
1394{
1395        void *ptr;
1396
1397        memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
1398                     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
1399                     (u64)max_addr, (void *)_RET_IP_);
1400        ptr = memblock_virt_alloc_internal(size, align,
1401                                           min_addr, max_addr, nid);
1402        if (ptr)
1403                return ptr;
1404
1405        panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
1406              __func__, (u64)size, (u64)align, nid, (u64)min_addr,
1407              (u64)max_addr);
1408        return NULL;
1409}
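
/*
 * Illustrative sketch (invented call site, not from the original source):
 * because this variant panics rather than returning NULL, callers can use
 * the result unconditionally.
 *
 *	static void __init alloc_node_data(int nid)
 *	{
 *		pg_data_t *pgdat;
 *
 *		pgdat = memblock_virt_alloc_try_nid(sizeof(*pgdat),
 *				SMP_CACHE_BYTES, 0,
 *				BOOTMEM_ALLOC_ACCESSIBLE, nid);
 *		pgdat->node_id = nid;    (no NULL check: failure panics)
 *	}
 */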
1410
1411/**
1412 * __memblock_free_early - free boot memory block
1413 * @base: phys starting address of the boot memory block
1414 * @size: size of the boot memory block in bytes
1415 *
1416 * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
1417 * The freed memory will not be released to the buddy allocator.
1418 */
1419void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
1420{
1421        memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
1422                     __func__, (u64)base, (u64)base + size - 1,
1423                     (void *)_RET_IP_);
1424        kmemleak_free_part_phys(base, size);
1425        memblock_remove_range(&memblock.reserved, base, size);
1426}
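
/*
 * Illustrative sketch (scratch and scratch_size are invented names),
 * assuming the memblock_virt_alloc()/memblock_free_early() wrappers from
 * include/linux/memblock.h: an init-time buffer allocated and returned to
 * memblock before the buddy allocator takes over.
 *
 *	void *scratch = memblock_virt_alloc(scratch_size, 0);
 *	...use scratch during early init...
 *	memblock_free_early(virt_to_phys(scratch), scratch_size);
 */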
1427
1428/**
1429 * __memblock_free_late - free bootmem block pages directly to buddy allocator
1430 * @base: phys starting address of the boot memory block
1431 * @size: size of the boot memory block in bytes
1432 *
1433 * This is only useful when the bootmem allocator has already been torn
1434 * down, but we are still initializing the system.  Pages are released directly
1435 * to the buddy allocator; no bootmem metadata is updated because it is gone.
1436 */
1437void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
1438{
1439        u64 cursor, end;
1440
1441        memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
1442                     __func__, (u64)base, (u64)base + size - 1,
1443                     (void *)_RET_IP_);
1444        kmemleak_free_part_phys(base, size);
1445        cursor = PFN_UP(base);
1446        end = PFN_DOWN(base + size);
1447
1448        for (; cursor < end; cursor++) {
1449                __free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
1450                totalram_pages++;
1451        }
1452}
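
/*
 * Worked example: PFN_UP()/PFN_DOWN() round inward, so only pages lying
 * entirely inside [base, base + size) reach the buddy allocator.  With 4K
 * pages:
 *
 *	base = 0x1800, size = 0x2800      ->  range [0x1800, 0x4000)
 *	cursor = PFN_UP(0x1800)   = 2         (first whole page at 0x2000)
 *	end    = PFN_DOWN(0x4000) = 4         (pages 2 and 3 are freed)
 *
 * Partial head and tail pages are not freed.
 */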
1453
1454/*
1455 * Remaining API functions
1456 */
1457
1458phys_addr_t __init_memblock memblock_phys_mem_size(void)
1459{
1460        return memblock.memory.total_size;
1461}
1462
1463phys_addr_t __init_memblock memblock_reserved_size(void)
1464{
1465        return memblock.reserved.total_size;
1466}
1467
1468phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
1469{
1470        unsigned long pages = 0;
1471        struct memblock_region *r;
1472        unsigned long start_pfn, end_pfn;
1473
1474        for_each_memblock(memory, r) {
1475                start_pfn = memblock_region_memory_base_pfn(r);
1476                end_pfn = memblock_region_memory_end_pfn(r);
1477                start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
1478                end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
1479                pages += end_pfn - start_pfn;
1480        }
1481
1482        return PFN_PHYS(pages);
1483}
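
/*
 * Worked example: with memory regions [0x0, 0x40000000) and
 * [0x100000000, 0x140000000) and limit_pfn = 0x100000 (4 GiB with 4K pages),
 * the first region contributes 0x40000 pages, the second is clamped to
 * nothing, and the function returns PFN_PHYS(0x40000) = 1 GiB.
 */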
1484
1485/* lowest address */
1486phys_addr_t __init_memblock memblock_start_of_DRAM(void)
1487{
1488        return memblock.memory.regions[0].base;
1489}
1490
1491phys_addr_t __init_memblock memblock_end_of_DRAM(void)
1492{
1493        int idx = memblock.memory.cnt - 1;
1494
1495        return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
1496}
1497
1498static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
1499{
1500        phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
1501        struct memblock_region *r;
1502
1503        /*
1504         * Translate the memory @limit size into the max address within one
1505         * of the memory memblock regions.  If @limit exceeds the total size
1506         * of those regions, max_addr keeps its original value, ULLONG_MAX.
1507         */
1508        for_each_memblock(memory, r) {
1509                if (limit <= r->size) {
1510                        max_addr = r->base + limit;
1511                        break;
1512                }
1513                limit -= r->size;
1514        }
1515
1516        return max_addr;
1517}
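
/*
 * Worked example: for regions [0x0, 0x80000000) and
 * [0x100000000, 0x180000000), a @limit of 3 GiB (0xc0000000) walks past the
 * first region (limit -= 2 GiB) and resolves inside the second:
 * max_addr = 0x100000000 + 0x40000000 = 0x140000000.
 */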
1518
1519void __init memblock_enforce_memory_limit(phys_addr_t limit)
1520{
1521        phys_addr_t max_addr;
1522
1523        if (!limit)
1524                return;
1525
1526        max_addr = __find_max_addr(limit);
1527
1528        /* @limit exceeds the total size of the memory; do nothing */
1529        if (max_addr == (phys_addr_t)ULLONG_MAX)
1530                return;
1531
1532        /* truncate both memory and reserved regions */
1533        memblock_remove_range(&memblock.memory, max_addr,
1534                              (phys_addr_t)ULLONG_MAX);
1535        memblock_remove_range(&memblock.reserved, max_addr,
1536                              (phys_addr_t)ULLONG_MAX);
1537}
1538
1539void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
1540{
1541        int start_rgn, end_rgn;
1542        int i, ret;
1543
1544        if (!size)
1545                return;
1546
1547        ret = memblock_isolate_range(&memblock.memory, base, size,
1548                                                &start_rgn, &end_rgn);
1549        if (ret)
1550                return;
1551
1552        /* remove all the MAP regions outside the capped range (keep NOMAP) */
1553        for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
1554                if (!memblock_is_nomap(&memblock.memory.regions[i]))
1555                        memblock_remove_region(&memblock.memory, i);
1556
1557        for (i = start_rgn - 1; i >= 0; i--)
1558                if (!memblock_is_nomap(&memblock.memory.regions[i]))
1559                        memblock_remove_region(&memblock.memory, i);
1560
1561        /* truncate the reserved regions */
1562        memblock_remove_range(&memblock.reserved, 0, base);
1563        memblock_remove_range(&memblock.reserved,
1564                        base + size, (phys_addr_t)ULLONG_MAX);
1565}
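
/*
 * Worked example (sketch, assuming SZ_1G from linux/sizes.h):
 * memblock_cap_memory_range(SZ_1G, SZ_1G) isolates [0x40000000, 0x80000000),
 * removes every mapped region outside that window (NOMAP regions survive),
 * and finally clips memblock.reserved to the same window.
 */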
1566
1567void __init memblock_mem_limit_remove_map(phys_addr_t limit)
1568{
1569        phys_addr_t max_addr;
1570
1571        if (!limit)
1572                return;
1573
1574        max_addr = __find_max_addr(limit);
1575
1576        /* @limit exceeds the total size of the memory; do nothing */
1577        if (max_addr == (phys_addr_t)ULLONG_MAX)
1578                return;
1579
1580        memblock_cap_memory_range(0, max_addr);
1581}
1582
1583static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
1584{
1585        unsigned int left = 0, right = type->cnt;
1586
1587        do {
1588                unsigned int mid = (right + left) / 2;
1589
1590                if (addr < type->regions[mid].base)
1591                        right = mid;
1592                else if (addr >= (type->regions[mid].base +
1593                                  type->regions[mid].size))
1594                        left = mid + 1;
1595                else
1596                        return mid;
1597        } while (left < right);
1598        return -1;
1599}
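
/*
 * Worked example: the regions of a memblock_type are kept sorted and
 * non-overlapping, which is what makes this bisection valid.  With regions
 * [0x1000, 0x2000) and [0x8000, 0x9000):
 *
 *	memblock_search(type, 0x1800)  ->  0	(inside the first region)
 *	memblock_search(type, 0x3000)  -> -1	(falls in the gap)
 */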
1600
1601bool __init memblock_is_reserved(phys_addr_t addr)
1602{
1603        return memblock_search(&memblock.reserved, addr) != -1;
1604}
1605
1606bool __init_memblock memblock_is_memory(phys_addr_t addr)
1607{
1608        return memblock_search(&memblock.memory, addr) != -1;
1609}
1610
1611int __init_memblock memblock_is_map_memory(phys_addr_t addr)
1612{
1613        int i = memblock_search(&memblock.memory, addr);
1614
1615        if (i == -1)
1616                return false;
1617        return !memblock_is_nomap(&memblock.memory.regions[i]);
1618}
1619
1620#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1621int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
1622                         unsigned long *start_pfn, unsigned long *end_pfn)
1623{
1624        struct memblock_type *type = &memblock.memory;
1625        int mid = memblock_search(type, PFN_PHYS(pfn));
1626
1627        if (mid == -1)
1628                return -1;
1629
1630        *start_pfn = PFN_DOWN(type->regions[mid].base);
1631        *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);
1632
1633        return type->regions[mid].nid;
1634}
1635#endif
1636
1637/**
1638 * memblock_is_region_memory - check if a region is a subset of memory
1639 * @base: base of region to check
1640 * @size: size of region to check
1641 *
1642 * Check if the region [@base, @base+@size) is a subset of a memory block.
1643 *
1644 * RETURNS:
1645 * 0 if false, non-zero if true
1646 */
1647int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
1648{
1649        int idx = memblock_search(&memblock.memory, base);
1650        phys_addr_t end = base + memblock_cap_size(base, &size);
1651
1652        if (idx == -1)
1653                return 0;
1654        return (memblock.memory.regions[idx].base +
1655                 memblock.memory.regions[idx].size) >= end;
1656}
1657
1658/**
1659 * memblock_is_region_reserved - check if a region intersects reserved memory
1660 * @base: base of region to check
1661 * @size: size of region to check
1662 *
1663 * Check if the region [@base, @base+@size) intersects a reserved memory block.
1664 *
1665 * RETURNS:
1666 * True if they intersect, false if not.
1667 */
1668bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
1669{
1670        memblock_cap_size(base, &size);
1671        return memblock_overlaps_region(&memblock.reserved, base, size);
1672}
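
/*
 * Worked example contrasting the two predicates above:
 * memblock_is_region_memory() requires full containment in one memory
 * region, while memblock_is_region_reserved() is satisfied by any overlap.
 * With memory [0x0, 0x1000) and reserved [0x800, 0x1000):
 *
 *	memblock_is_region_memory(0x800, 0x1000)    ->  0    (spills past end)
 *	memblock_is_region_reserved(0x800, 0x1000)  ->  true (overlaps)
 */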
1673
1674void __init_memblock memblock_trim_memory(phys_addr_t align)
1675{
1676        phys_addr_t start, end, orig_start, orig_end;
1677        struct memblock_region *r;
1678
1679        for_each_memblock(memory, r) {
1680                orig_start = r->base;
1681                orig_end = r->base + r->size;
1682                start = round_up(orig_start, align);
1683                end = round_down(orig_end, align);
1684
1685                if (start == orig_start && end == orig_end)
1686                        continue;
1687
1688                if (start < end) {
1689                        r->base = start;
1690                        r->size = end - start;
1691                } else {
1692                        memblock_remove_region(&memblock.memory,
1693                                               r - memblock.memory.regions);
1694                        r--;
1695                }
1696        }
1697}
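
/*
 * Worked example: trimming to align = 2 MiB turns a region
 * [0x1ff000, 0x5ff000) into [0x200000, 0x400000); a region smaller than one
 * aligned unit, e.g. [0x1ff000, 0x201000), rounds to an empty range
 * (start >= end) and is removed outright.
 */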
1698
1699void __init_memblock memblock_set_current_limit(phys_addr_t limit)
1700{
1701        memblock.current_limit = limit;
1702}
1703
1704phys_addr_t __init_memblock memblock_get_current_limit(void)
1705{
1706        return memblock.current_limit;
1707}
1708
1709static void __init_memblock memblock_dump(struct memblock_type *type)
1710{
1711        phys_addr_t base, end, size;
1712        unsigned long flags;
1713        int idx;
1714        struct memblock_region *rgn;
1715
1716        pr_info(" %s.cnt  = 0x%lx\n", type->name, type->cnt);
1717
1718        for_each_memblock_type(type, rgn) {
1719                char nid_buf[32] = "";
1720
1721                base = rgn->base;
1722                size = rgn->size;
1723                end = base + size - 1;
1724                flags = rgn->flags;
1725#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1726                if (memblock_get_region_node(rgn) != MAX_NUMNODES)
1727                        snprintf(nid_buf, sizeof(nid_buf), " on node %d",
1728                                 memblock_get_region_node(rgn));
1729#endif
1730                pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#lx\n",
1731                        type->name, idx, &base, &end, &size, nid_buf, flags);
1732        }
1733}
1734
1735unsigned long __init_memblock
1736memblock_reserved_memory_within(phys_addr_t start_addr, phys_addr_t end_addr)
1737{
1738        struct memblock_region *rgn;
1739        unsigned long size = 0;
1740        int idx;
1741
1742        for_each_memblock_type((&memblock.reserved), rgn) {
1743                phys_addr_t start, end;
1744
1745                if (rgn->base + rgn->size < start_addr)
1746                        continue;
1747                if (rgn->base > end_addr)
1748                        continue;
1749
1750                start = max(rgn->base, start_addr);
1751                end = min(rgn->base + rgn->size, end_addr);
1752                size += end - start;
1753        }
1754
1755        return size;
1756}
1757
1758void __init_memblock __memblock_dump_all(void)
1759{
1760        pr_info("MEMBLOCK configuration:\n");
1761        pr_info(" memory size = %pa reserved size = %pa\n",
1762                &memblock.memory.total_size,
1763                &memblock.reserved.total_size);
1764
1765        memblock_dump(&memblock.memory);
1766        memblock_dump(&memblock.reserved);
1767#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
1768        memblock_dump(&memblock.physmem);
1769#endif
1770}
1771
1772void __init memblock_allow_resize(void)
1773{
1774        memblock_can_resize = 1;
1775}
1776
1777static int __init early_memblock(char *p)
1778{
1779        if (p && strstr(p, "debug"))
1780                memblock_debug = 1;
1781        return 0;
1782}
1783early_param("memblock", early_memblock);
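
/*
 * Usage example: this registers "memblock" as an early parameter, so booting
 * with
 *
 *	memblock=debug
 *
 * on the kernel command line sets memblock_debug and enables the
 * memblock_dbg() traces seen throughout this file.
 */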
1784
1785#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)
1786
1787static int memblock_debug_show(struct seq_file *m, void *private)
1788{
1789        struct memblock_type *type = m->private;
1790        struct memblock_region *reg;
1791        int i;
1792        phys_addr_t end;
1793
1794        for (i = 0; i < type->cnt; i++) {
1795                reg = &type->regions[i];
1796                end = reg->base + reg->size - 1;
1797
1798                seq_printf(m, "%4d: ", i);
1799                seq_printf(m, "%pa..%pa\n", &reg->base, &end);
1800        }
1801        return 0;
1802}
1803
1804static int memblock_debug_open(struct inode *inode, struct file *file)
1805{
1806        return single_open(file, memblock_debug_show, inode->i_private);
1807}
1808
1809static const struct file_operations memblock_debug_fops = {
1810        .open = memblock_debug_open,
1811        .read = seq_read,
1812        .llseek = seq_lseek,
1813        .release = single_release,
1814};
1815
1816static int __init memblock_init_debugfs(void)
1817{
1818        struct dentry *root = debugfs_create_dir("memblock", NULL);
1819        if (!root)
1820                return -ENXIO;
1821        debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
1822        debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
1823#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
1824        debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops);
1825#endif
1826
1827        return 0;
1828}
1829__initcall(memblock_init_debugfs);
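
/*
 * Usage example (hypothetical session; addresses invented, format follows
 * memblock_debug_show() on a 64-bit machine): once debugfs is mounted, the
 * files created above can be read directly.
 *
 *	# mount -t debugfs none /sys/kernel/debug
 *	# cat /sys/kernel/debug/memblock/memory
 *	   0: 0x0000000000001000..0x000000003fffffff
 *	   1: 0x0000000100000000..0x000000013fffffff
 */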
1830
1831#endif /* CONFIG_DEBUG_FS */
1832