linux/mm/memblock.c
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.     June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
        .memory.regions         = memblock_memory_init_regions,
        .memory.cnt             = 1,    /* empty dummy entry */
        .memory.max             = INIT_MEMBLOCK_REGIONS,

        .reserved.regions       = memblock_reserved_init_regions,
        .reserved.cnt           = 1,    /* empty dummy entry */
        .reserved.max           = INIT_MEMBLOCK_REGIONS,

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
        .physmem.regions        = memblock_physmem_init_regions,
        .physmem.cnt            = 1,    /* empty dummy entry */
        .physmem.max            = INIT_PHYSMEM_REGIONS,
#endif

        .bottom_up              = false,
        .current_limit          = MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
#ifdef CONFIG_MOVABLE_NODE
bool movable_node_enabled __initdata_memblock = false;
#endif
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

ulong __init_memblock choose_memblock_flags(void)
{
        return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* inline so we don't get a warning when pr_debug is compiled out */
static __init_memblock const char *
memblock_type_name(struct memblock_type *type)
{
        if (type == &memblock.memory)
                return "memory";
        else if (type == &memblock.reserved)
                return "reserved";
        else
                return "unknown";
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
        return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}
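
/*
 * Illustrative example (not part of the original source): with a base near
 * the top of the physical address space, memblock_cap_size() trims the size
 * so that base + size cannot wrap around:
 *
 *	phys_addr_t base = ULLONG_MAX - 0x1000;
 *	phys_addr_t size = 0x10000;
 *
 *	memblock_cap_size(base, &size);
 *	// size is now 0x1000, i.e. min(0x10000, ULLONG_MAX - base)
 */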

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
                                       phys_addr_t base2, phys_addr_t size2)
{
        return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
                                        phys_addr_t base, phys_addr_t size)
{
        unsigned long i;

        for (i = 0; i < type->cnt; i++)
                if (memblock_addrs_overlap(base, size, type->regions[i].base,
                                           type->regions[i].size))
                        break;
        return i < type->cnt;
}
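
/*
 * Illustrative example (not part of the original source): the overlap test
 * treats regions as half-open intervals [base, base + size), so adjacent
 * regions do not count as overlapping:
 *
 *	memblock_addrs_overlap(0x1000, 0x1000, 0x2000, 0x1000); // 0: adjacent
 *	memblock_addrs_overlap(0x1000, 0x1000, 0x1fff, 0x1000); // 1: overlap
 */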

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
                                phys_addr_t size, phys_addr_t align, int nid,
                                ulong flags)
{
        phys_addr_t this_start, this_end, cand;
        u64 i;

        for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
                this_start = clamp(this_start, start, end);
                this_end = clamp(this_end, start, end);

                cand = round_up(this_start, align);
                if (cand < this_end && this_end - cand >= size)
                        return cand;
        }

        return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
                               phys_addr_t size, phys_addr_t align, int nid,
                               ulong flags)
{
        phys_addr_t this_start, this_end, cand;
        u64 i;

        for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
                                        NULL) {
                this_start = clamp(this_start, start, end);
                this_end = clamp(this_end, start, end);

                if (this_end < size)
                        continue;

                cand = round_down(this_end - size, align);
                if (cand >= this_start)
                        return cand;
        }

        return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When the allocation direction is bottom-up, @start should be at or above
 * the end of the kernel image; otherwise it is trimmed up to that point.
 * The reason is that we want the bottom-up allocation to land just above
 * the kernel image, so that the allocated memory and the kernel are likely
 * to reside on the same node.
 *
 * If the bottom-up allocation fails, we fall back to top-down allocation.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
                                        phys_addr_t align, phys_addr_t start,
                                        phys_addr_t end, int nid, ulong flags)
{
        phys_addr_t kernel_end, ret;

        /* pump up @end */
        if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
                end = memblock.current_limit;

        /* avoid allocating the first page */
        start = max_t(phys_addr_t, start, PAGE_SIZE);
        end = max(start, end);
        kernel_end = __pa_symbol(_end);

        /*
         * try bottom-up allocation only when bottom-up mode
         * is set and @end is above the kernel image.
         */
        if (memblock_bottom_up() && end > kernel_end) {
                phys_addr_t bottom_up_start;

                /* make sure we will allocate above the kernel */
                bottom_up_start = max(start, kernel_end);

                /* ok, try bottom-up allocation first */
                ret = __memblock_find_range_bottom_up(bottom_up_start, end,
                                                      size, align, nid, flags);
                if (ret)
                        return ret;

                /*
                 * we always limit bottom-up allocation above the kernel,
                 * but top-down allocation doesn't have the limit, so
                 * retrying top-down allocation may succeed when bottom-up
                 * allocation failed.
                 *
                 * bottom-up allocation is expected to fail very rarely,
                 * so we use WARN_ONCE() here to see the stack trace if
                 * it ever happens.
                 */
                WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
        }

        return __memblock_find_range_top_down(start, end, size, align, nid,
                                              flags);
}
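
/*
 * Illustrative example (not part of the original source): a hypothetical
 * early-boot caller looking for a 1MB, 2MB-aligned block on node 0 below
 * the current limit (SZ_1M/SZ_2M are the <linux/sizes.h> macros):
 *
 *	phys_addr_t addr;
 *
 *	addr = memblock_find_in_range_node(SZ_1M, SZ_2M, 0,
 *					   MEMBLOCK_ALLOC_ACCESSIBLE, 0,
 *					   choose_memblock_flags());
 *	if (!addr)
 *		pr_warn("no suitable range found\n");
 *
 * Note that this only finds a candidate range; the caller still has to
 * memblock_reserve() it before using it.
 */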

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
                                        phys_addr_t end, phys_addr_t size,
                                        phys_addr_t align)
{
        phys_addr_t ret;
        ulong flags = choose_memblock_flags();

again:
        ret = memblock_find_in_range_node(size, align, start, end,
                                            NUMA_NO_NODE, flags);

        if (!ret && (flags & MEMBLOCK_MIRROR)) {
                pr_warn("Could not allocate %pap bytes of mirrored memory\n",
                        &size);
                flags &= ~MEMBLOCK_MIRROR;
                goto again;
        }

        return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
        type->total_size -= type->regions[r].size;
        memmove(&type->regions[r], &type->regions[r + 1],
                (type->cnt - (r + 1)) * sizeof(type->regions[r]));
        type->cnt--;

        /* Special case for empty arrays */
        if (type->cnt == 0) {
                WARN_ON(type->total_size != 0);
                type->cnt = 1;
                type->regions[0].base = 0;
                type->regions[0].size = 0;
                type->regions[0].flags = 0;
                memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
        }
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK

phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
                                        phys_addr_t *addr)
{
        if (memblock.reserved.regions == memblock_reserved_init_regions)
                return 0;

        *addr = __pa(memblock.reserved.regions);

        return PAGE_ALIGN(sizeof(struct memblock_region) *
                          memblock.reserved.max);
}

phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
                                        phys_addr_t *addr)
{
        if (memblock.memory.regions == memblock_memory_init_regions)
                return 0;

        *addr = __pa(memblock.memory.regions);

        return PAGE_ALIGN(sizeof(struct memblock_region) *
                          memblock.memory.max);
}

#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size)
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
                                                phys_addr_t new_area_start,
                                                phys_addr_t new_area_size)
{
        struct memblock_region *new_array, *old_array;
        phys_addr_t old_alloc_size, new_alloc_size;
        phys_addr_t old_size, new_size, addr;
        int use_slab = slab_is_available();
        int *in_slab;

        /* We don't allow resizing until we know about the reserved regions
         * of memory that aren't suitable for allocation
         */
        if (!memblock_can_resize)
                return -1;

        /* Calculate new doubled size */
        old_size = type->max * sizeof(struct memblock_region);
        new_size = old_size << 1;
        /*
         * We need to allocate the new array aligned to PAGE_SIZE,
         * so we can free it completely later.
         */
        old_alloc_size = PAGE_ALIGN(old_size);
        new_alloc_size = PAGE_ALIGN(new_size);

        /* Retrieve the slab flag */
        if (type == &memblock.memory)
                in_slab = &memblock_memory_in_slab;
        else
                in_slab = &memblock_reserved_in_slab;

        /* Try to find some space for it.
         *
         * WARNING: We assume that either slab_is_available() is true and we
         * use kmalloc(), or we use MEMBLOCK for the allocation. That means
         * this is unsafe to use when bootmem is currently active (unless
         * bootmem itself is implemented on top of MEMBLOCK, which isn't the
         * case yet)
         *
         * This should however not be an issue for now, as we currently only
         * call into MEMBLOCK while it's still active, or much later when slab
         * is active for memory hotplug operations
         */
        if (use_slab) {
                new_array = kmalloc(new_size, GFP_KERNEL);
                addr = new_array ? __pa(new_array) : 0;
        } else {
                /* only exclude range when trying to double reserved.regions */
                if (type != &memblock.reserved)
                        new_area_start = new_area_size = 0;

                addr = memblock_find_in_range(new_area_start + new_area_size,
                                                memblock.current_limit,
                                                new_alloc_size, PAGE_SIZE);
                if (!addr && new_area_size)
                        addr = memblock_find_in_range(0,
                                min(new_area_start, memblock.current_limit),
                                new_alloc_size, PAGE_SIZE);

                new_array = addr ? __va(addr) : NULL;
        }
        if (!addr) {
                pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
                       memblock_type_name(type), type->max, type->max * 2);
                return -1;
        }

        memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]\n",
                        memblock_type_name(type), type->max * 2, (u64)addr,
                        (u64)addr + new_size - 1);

        /*
         * Found space, we now need to move the array over before we add the
         * reserved region since it may be our reserved array itself that is
         * full.
         */
        memcpy(new_array, type->regions, old_size);
        memset(new_array + type->max, 0, old_size);
        old_array = type->regions;
        type->regions = new_array;
        type->max <<= 1;

        /* Free old array. We needn't free it if the array is the static one */
        if (*in_slab)
                kfree(old_array);
        else if (old_array != memblock_memory_init_regions &&
                 old_array != memblock_reserved_init_regions)
                memblock_free(__pa(old_array), old_alloc_size);

        /*
         * Reserve the new array if that comes from the memblock.  Otherwise, we
         * needn't do it
         */
        if (!use_slab)
                BUG_ON(memblock_reserve(addr, new_alloc_size));

        /* Update slab flag */
        *in_slab = use_slab;

        return 0;
}
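
/*
 * Illustrative sizing example (not part of the original source), assuming a
 * 32-byte struct memblock_region, INIT_MEMBLOCK_REGIONS == 128 and 4K pages:
 *
 *	old_size       = 128 * 32 = 4096 bytes
 *	new_size       = 8192 bytes
 *	new_alloc_size = PAGE_ALIGN(8192) = 8192
 *
 * i.e. the first doubling grows the array from one page to two, and each
 * further doubling again doubles the page count, which is why the array is
 * kept PAGE_SIZE-aligned and page-granular.
 */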

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
        int i = 0;

        /* cnt never goes below 1 */
        while (i < type->cnt - 1) {
                struct memblock_region *this = &type->regions[i];
                struct memblock_region *next = &type->regions[i + 1];

                if (this->base + this->size != next->base ||
                    memblock_get_region_node(this) !=
                    memblock_get_region_node(next) ||
                    this->flags != next->flags) {
                        BUG_ON(this->base + this->size > next->base);
                        i++;
                        continue;
                }

                this->size += next->size;
                /* move forward from next + 1, index of which is i + 2 */
                memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
                type->cnt--;
        }
}

/**
 * memblock_insert_region - insert new memblock region
 * @type:       memblock type to insert into
 * @idx:        index for the insertion point
 * @base:       base address of the new region
 * @size:       size of the new region
 * @nid:        node id of the new region
 * @flags:      flags of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
                                                   int idx, phys_addr_t base,
                                                   phys_addr_t size,
                                                   int nid, unsigned long flags)
{
        struct memblock_region *rgn = &type->regions[idx];

        BUG_ON(type->cnt >= type->max);
        memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
        rgn->base = base;
        rgn->size = size;
        rgn->flags = flags;
        memblock_set_region_node(rgn, nid);
        type->cnt++;
        type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
                                phys_addr_t base, phys_addr_t size,
                                int nid, unsigned long flags)
{
        bool insert = false;
        phys_addr_t obase = base;
        phys_addr_t end = base + memblock_cap_size(base, &size);
        int idx, nr_new;
        struct memblock_region *rgn;

        if (!size)
                return 0;

        /* special case for empty array */
        if (type->regions[0].size == 0) {
                WARN_ON(type->cnt != 1 || type->total_size);
                type->regions[0].base = base;
                type->regions[0].size = size;
                type->regions[0].flags = flags;
                memblock_set_region_node(&type->regions[0], nid);
                type->total_size = size;
                return 0;
        }
repeat:
        /*
         * The following is executed twice.  Once with %false @insert and
         * then with %true.  The first counts the number of regions needed
         * to accommodate the new area.  The second actually inserts them.
         */
        base = obase;
        nr_new = 0;

        for_each_memblock_type(type, rgn) {
                phys_addr_t rbase = rgn->base;
                phys_addr_t rend = rbase + rgn->size;

                if (rbase >= end)
                        break;
                if (rend <= base)
                        continue;
                /*
                 * @rgn overlaps.  If it separates the lower part of new
                 * area, insert that portion.
                 */
                if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
                        WARN_ON(nid != memblock_get_region_node(rgn));
#endif
                        WARN_ON(flags != rgn->flags);
                        nr_new++;
                        if (insert)
                                memblock_insert_region(type, idx++, base,
                                                       rbase - base, nid,
                                                       flags);
                }
                /* area below @rend is dealt with, forget about it */
                base = min(rend, end);
        }

        /* insert the remaining portion */
        if (base < end) {
                nr_new++;
                if (insert)
                        memblock_insert_region(type, idx, base, end - base,
                                               nid, flags);
        }

        if (!nr_new)
                return 0;

        /*
         * If this was the first round, resize array and repeat for actual
         * insertions; otherwise, merge and return.
         */
        if (!insert) {
                while (type->cnt + nr_new > type->max)
                        if (memblock_double_array(type, obase, size) < 0)
                                return -ENOMEM;
                insert = true;
                goto repeat;
        } else {
                memblock_merge_regions(type);
                return 0;
        }
}
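
/*
 * Illustrative example (not part of the original source): adding a region
 * that overlaps an existing one only inserts the non-overlapping pieces and
 * then merges neighbours. With memory = { [0x1000, 0x2000) }:
 *
 *	memblock_add_range(&memblock.memory, 0x1800, 0x1000, MAX_NUMNODES, 0);
 *
 * pass 1 (insert == false) counts one new region, [0x2000, 0x2800);
 * pass 2 inserts it, and memblock_merge_regions() leaves a single
 * region [0x1000, 0x2800).
 */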

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
                                       int nid)
{
        return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
        memblock_dbg("memblock_add: [%#016llx-%#016llx] flags %#02lx %pF\n",
                     (unsigned long long)base,
                     (unsigned long long)base + size - 1,
                     0UL, (void *)_RET_IP_);

        return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
                                        phys_addr_t base, phys_addr_t size,
                                        int *start_rgn, int *end_rgn)
{
        phys_addr_t end = base + memblock_cap_size(base, &size);
        int idx;
        struct memblock_region *rgn;

        *start_rgn = *end_rgn = 0;

        if (!size)
                return 0;

        /* we'll create at most two more regions */
        while (type->cnt + 2 > type->max)
                if (memblock_double_array(type, base, size) < 0)
                        return -ENOMEM;

        for_each_memblock_type(type, rgn) {
                phys_addr_t rbase = rgn->base;
                phys_addr_t rend = rbase + rgn->size;

                if (rbase >= end)
                        break;
                if (rend <= base)
                        continue;

                if (rbase < base) {
                        /*
                         * @rgn intersects from below.  Split and continue
                         * to process the next region - the new top half.
                         */
                        rgn->base = base;
                        rgn->size -= base - rbase;
                        type->total_size -= base - rbase;
                        memblock_insert_region(type, idx, rbase, base - rbase,
                                               memblock_get_region_node(rgn),
                                               rgn->flags);
                } else if (rend > end) {
                        /*
                         * @rgn intersects from above.  Split and redo the
                         * current region - the new bottom half.
                         */
                        rgn->base = end;
                        rgn->size -= end - rbase;
                        type->total_size -= end - rbase;
                        memblock_insert_region(type, idx--, rbase, end - rbase,
                                               memblock_get_region_node(rgn),
                                               rgn->flags);
                } else {
                        /* @rgn is fully contained, record it */
                        if (!*end_rgn)
                                *start_rgn = idx;
                        *end_rgn = idx + 1;
                }
        }

        return 0;
}
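
/*
 * Illustrative example (not part of the original source): isolating the
 * middle of a region splits it in place. With memory = { [0x0, 0x3000) }:
 *
 *	int start_rgn, end_rgn;
 *
 *	memblock_isolate_range(&memblock.memory, 0x1000, 0x1000,
 *			       &start_rgn, &end_rgn);
 *
 * afterwards memory = { [0x0, 0x1000), [0x1000, 0x2000), [0x2000, 0x3000) }
 * with start_rgn == 1 and end_rgn == 2, so regions[1] is exactly the
 * isolated [0x1000, 0x2000) range.
 */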

static int __init_memblock memblock_remove_range(struct memblock_type *type,
                                          phys_addr_t base, phys_addr_t size)
{
        int start_rgn, end_rgn;
        int i, ret;

        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
        if (ret)
                return ret;

        for (i = end_rgn - 1; i >= start_rgn; i--)
                memblock_remove_region(type, i);
        return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
        return memblock_remove_range(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
        memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
                     (unsigned long long)base,
                     (unsigned long long)base + size - 1,
                     (void *)_RET_IP_);

        kmemleak_free_part_phys(base, size);
        return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
        memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n",
                     (unsigned long long)base,
                     (unsigned long long)base + size - 1,
                     0UL, (void *)_RET_IP_);

        return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_setclr_flag - set or clear a flag on memory regions in a range
 * @base: base address of the region
 * @size: size of the region
 * @set: non-zero to set @flag, zero to clear it
 * @flag: the MEMBLOCK_* flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears
 * @flag on it.
 *
 * Return 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
                                phys_addr_t size, int set, int flag)
{
        struct memblock_type *type = &memblock.memory;
        int i, ret, start_rgn, end_rgn;

        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
        if (ret)
                return ret;

        for (i = start_rgn; i < end_rgn; i++)
                if (set)
                        memblock_set_region_flags(&type->regions[i], flag);
                else
                        memblock_clear_region_flags(&type->regions[i], flag);

        memblock_merge_regions(type);
        return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
        return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
        return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
        system_has_some_mirror = true;

        return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
        return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}
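
/*
 * Illustrative example (not part of the original source): platform code that
 * has learned from firmware that [0x80000000, 0xa0000000) is mirrored could
 * mark it, which also makes choose_memblock_flags() return MEMBLOCK_MIRROR
 * so that later allocations prefer mirrored ranges:
 *
 *	memblock_mark_mirror(0x80000000ULL, 0x20000000ULL);
 */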

/**
 * __next_reserved_mem_region - next function for for_each_reserved_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterate over all reserved memory regions.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
                                           phys_addr_t *out_start,
                                           phys_addr_t *out_end)
{
        struct memblock_type *type = &memblock.reserved;

        if (*idx < type->cnt) {
                struct memblock_region *r = &type->regions[*idx];
                phys_addr_t base = r->base;
                phys_addr_t size = r->size;

                if (out_start)
                        *out_start = base;
                if (out_end)
                        *out_end = base + size - 1;

                *idx += 1;
                return;
        }

        /* signal end of iteration */
        *idx = ULLONG_MAX;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *      0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *      0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
                                      struct memblock_type *type_a,
                                      struct memblock_type *type_b,
                                      phys_addr_t *out_start,
                                      phys_addr_t *out_end, int *out_nid)
{
        int idx_a = *idx & 0xffffffff;
        int idx_b = *idx >> 32;

        if (WARN_ONCE(nid == MAX_NUMNODES,
        "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
                nid = NUMA_NO_NODE;

        for (; idx_a < type_a->cnt; idx_a++) {
                struct memblock_region *m = &type_a->regions[idx_a];

                phys_addr_t m_start = m->base;
                phys_addr_t m_end = m->base + m->size;
                int         m_nid = memblock_get_region_node(m);

                /* only memory regions are associated with nodes, check it */
                if (nid != NUMA_NO_NODE && nid != m_nid)
                        continue;

                /* skip hotpluggable memory regions if needed */
                if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
                        continue;

                /* if we want mirror memory skip non-mirror memory regions */
                if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
                        continue;

                /* skip nomap memory unless we were asked for it explicitly */
                if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
                        continue;

                if (!type_b) {
                        if (out_start)
                                *out_start = m_start;
                        if (out_end)
                                *out_end = m_end;
                        if (out_nid)
                                *out_nid = m_nid;
                        idx_a++;
                        *idx = (u32)idx_a | (u64)idx_b << 32;
                        return;
                }

                /* scan areas before each reservation */
                for (; idx_b < type_b->cnt + 1; idx_b++) {
                        struct memblock_region *r;
                        phys_addr_t r_start;
                        phys_addr_t r_end;

                        r = &type_b->regions[idx_b];
                        r_start = idx_b ? r[-1].base + r[-1].size : 0;
                        r_end = idx_b < type_b->cnt ?
                                r->base : ULLONG_MAX;

                        /*
                         * if idx_b advanced past idx_a,
                         * break out to advance idx_a
                         */
                        if (r_start >= m_end)
                                break;
                        /* if the two regions intersect, we're done */
                        if (m_start < r_end) {
                                if (out_start)
                                        *out_start =
                                                max(m_start, r_start);
                                if (out_end)
                                        *out_end = min(m_end, r_end);
                                if (out_nid)
                                        *out_nid = m_nid;
                                /*
                                 * The region which ends first is
                                 * advanced for the next iteration.
                                 */
                                if (m_end <= r_end)
                                        idx_a++;
                                else
                                        idx_b++;
                                *idx = (u32)idx_a | (u64)idx_b << 32;
                                return;
                        }
                }
        }

        /* signal end of iteration */
        *idx = ULLONG_MAX;
}
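
/*
 * Illustrative example of the index encoding (not part of the original
 * source): with reserved regions 0:[0-16) and 1:[32-48), an iteration that
 * has consumed free memory up to 32 might hold idx_a == 1 and idx_b == 2,
 * packed as:
 *
 *	*idx = (u32)idx_a | (u64)idx_b << 32;	// 0x0000000200000001
 *
 * and the next call unpacks it the same way:
 *
 *	idx_a = *idx & 0xffffffff;	// 1
 *	idx_b = *idx >> 32;		// 2
 */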

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
                                          struct memblock_type *type_a,
                                          struct memblock_type *type_b,
                                          phys_addr_t *out_start,
                                          phys_addr_t *out_end, int *out_nid)
{
        int idx_a = *idx & 0xffffffff;
        int idx_b = *idx >> 32;

        if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
                nid = NUMA_NO_NODE;

        if (*idx == (u64)ULLONG_MAX) {
                idx_a = type_a->cnt - 1;
                if (type_b != NULL)
                        idx_b = type_b->cnt;
                else
                        idx_b = 0;
        }

        for (; idx_a >= 0; idx_a--) {
                struct memblock_region *m = &type_a->regions[idx_a];

                phys_addr_t m_start = m->base;
                phys_addr_t m_end = m->base + m->size;
                int m_nid = memblock_get_region_node(m);

                /* only memory regions are associated with nodes, check it */
                if (nid != NUMA_NO_NODE && nid != m_nid)
                        continue;

                /* skip hotpluggable memory regions if needed */
                if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
                        continue;

                /* if we want mirror memory skip non-mirror memory regions */
                if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
                        continue;

                /* skip nomap memory unless we were asked for it explicitly */
                if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
                        continue;

                if (!type_b) {
                        if (out_start)
                                *out_start = m_start;
                        if (out_end)
                                *out_end = m_end;
                        if (out_nid)
                                *out_nid = m_nid;
                        idx_a--;
                        *idx = (u32)idx_a | (u64)idx_b << 32;
                        return;
                }

                /* scan areas before each reservation */
                for (; idx_b >= 0; idx_b--) {
                        struct memblock_region *r;
                        phys_addr_t r_start;
                        phys_addr_t r_end;

                        r = &type_b->regions[idx_b];
                        r_start = idx_b ? r[-1].base + r[-1].size : 0;
                        r_end = idx_b < type_b->cnt ?
                                r->base : ULLONG_MAX;

                        /*
                         * if idx_b advanced past idx_a,
                         * break out to advance idx_a
                         */
                        if (r_end <= m_start)
                                break;
                        /* if the two regions intersect, we're done */
                        if (m_end > r_start) {
                                if (out_start)
                                        *out_start = max(m_start, r_start);
                                if (out_end)
                                        *out_end = min(m_end, r_end);
                                if (out_nid)
                                        *out_nid = m_nid;
                                if (m_start >= r_start)
                                        idx_a--;
                                else
                                        idx_b--;
                                *idx = (u32)idx_a | (u64)idx_b << 32;
                                return;
                        }
                }
        }
        /* signal end of iteration */
        *idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
                                unsigned long *out_start_pfn,
                                unsigned long *out_end_pfn, int *out_nid)
{
        struct memblock_type *type = &memblock.memory;
        struct memblock_region *r;

        while (++*idx < type->cnt) {
                r = &type->regions[*idx];

                if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
                        continue;
                if (nid == MAX_NUMNODES || nid == r->nid)
                        break;
        }
        if (*idx >= type->cnt) {
                *idx = -1;
                return;
        }

        if (out_start_pfn)
                *out_start_pfn = PFN_UP(r->base);
        if (out_end_pfn)
                *out_end_pfn = PFN_DOWN(r->base + r->size);
        if (out_nid)
                *out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
                                      struct memblock_type *type, int nid)
{
        int start_rgn, end_rgn;
        int i, ret;

        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
        if (ret)
                return ret;

        for (i = start_rgn; i < end_rgn; i++)
                memblock_set_region_node(&type->regions[i], nid);

        memblock_merge_regions(type);
        return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
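
/*
 * Illustrative example (not part of the original source): arch NUMA setup
 * typically walks its affinity information and tags each memory range with
 * its node before the node/zone data is built, e.g.:
 *
 *	memblock_set_node(start, end - start, &memblock.memory, nid);
 *
 * where start, end and nid come from a hypothetical firmware table entry.
 */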

static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
                                        phys_addr_t align, phys_addr_t start,
                                        phys_addr_t end, int nid, ulong flags)
{
        phys_addr_t found;

        if (!align)
                align = SMP_CACHE_BYTES;

        found = memblock_find_in_range_node(size, align, start, end, nid,
                                            flags);
        if (found && !memblock_reserve(found, size)) {
                /*
                 * The min_count is set to 0 so that memblock allocations are
                 * never reported as leaks.
                 */
                kmemleak_alloc_phys(found, size, 0, 0);
                return found;
        }
        return 0;
}

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
                                        phys_addr_t start, phys_addr_t end,
                                        ulong flags)
{
        return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
                                        flags);
}

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
                                        phys_addr_t align, phys_addr_t max_addr,
                                        int nid, ulong flags)
{
        return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
        ulong flags = choose_memblock_flags();
        phys_addr_t ret;

again:
        ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
                                      nid, flags);

        if (!ret && (flags & MEMBLOCK_MIRROR)) {
                flags &= ~MEMBLOCK_MIRROR;
                goto again;
        }
        return ret;
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
        return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
                                       MEMBLOCK_NONE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
        phys_addr_t alloc;

        alloc = __memblock_alloc_base(size, align, max_addr);

        if (alloc == 0)
                panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
                      (unsigned long long) size, (unsigned long long) max_addr);

        return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
        return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
        phys_addr_t res = memblock_alloc_nid(size, align, nid);

        if (res)
                return res;
        return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it cannot be satisfied and the allocation
 * will fall back to memory below @min_addr. Also, allocation may fall back
 * to any node in the system if the specified node cannot
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
 *
 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
 *
 * The phys address of the allocated boot memory block is converted to
 * virtual and the allocated memory is reset to 0.
 *
 * In addition, the function sets the min_count to 0 using kmemleak_alloc for
 * the allocated boot memory block, so that it is never reported as a leak.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
                                phys_addr_t size, phys_addr_t align,
                                phys_addr_t min_addr, phys_addr_t max_addr,
                                int nid)
{
        phys_addr_t alloc;
        void *ptr;
        ulong flags = choose_memblock_flags();

        if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
                nid = NUMA_NO_NODE;

        /*
         * Detect any accidental use of these APIs after slab is ready, as at
         * this moment memblock may be deinitialized already and its
         * internal data may be destroyed (after execution of free_all_bootmem)
         */
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, nid);

        if (!align)
                align = SMP_CACHE_BYTES;

        if (max_addr > memblock.current_limit)
                max_addr = memblock.current_limit;

again:
        alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
                                            nid, flags);
        if (alloc)
                goto done;

        if (nid != NUMA_NO_NODE) {
                alloc = memblock_find_in_range_node(size, align, min_addr,
                                                    max_addr, NUMA_NO_NODE,
                                                    flags);
                if (alloc)
                        goto done;
        }

        if (min_addr) {
                min_addr = 0;
                goto again;
        }

        if (flags & MEMBLOCK_MIRROR) {
                flags &= ~MEMBLOCK_MIRROR;
                pr_warn("Could not allocate %pap bytes of mirrored memory\n",
                        &size);
                goto again;
        }

        return NULL;
done:
        memblock_reserve(alloc, size);
        ptr = phys_to_virt(alloc);
        memset(ptr, 0, size);

        /*
         * The min_count is set to 0 so that bootmem allocated blocks
         * are never reported as leaks. This is because many of these blocks
         * are only referred via the physical address which is not
         * looked up by kmemleak.
         */
        kmemleak_alloc(ptr, size, 0, 0);

        return ptr;
}
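
/*
 * Illustrative summary of the fallback order above (not part of the
 * original source): for a hypothetical request
 *
 *	ptr = memblock_virt_alloc_internal(SZ_4K, 0, SZ_16M,
 *					   BOOTMEM_ALLOC_ACCESSIBLE, 1);
 *
 * the function tries, in order: node 1 above 16MB, any node above 16MB,
 * then the same with @min_addr dropped, and finally the whole sequence
 * again without MEMBLOCK_MIRROR if mirrored memory was requested but
 * could not satisfy the allocation.
 */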
1320
1321/**
1322 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
1323 * @size: size of memory block to be allocated in bytes
1324 * @align: alignment of the region and block's size
1325 * @min_addr: the lower bound of the memory region from where the allocation
1326 *        is preferred (phys address)
1327 * @max_addr: the upper bound of the memory region from where the allocation
1328 *            is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
1329 *            allocate only from memory limited by memblock.current_limit value
1330 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1331 *
1332 * Public version of _memblock_virt_alloc_try_nid_nopanic() which provides
1333 * additional debug information (including caller info), if enabled.
1334 *
1335 * RETURNS:
1336 * Virtual address of allocated memory block on success, NULL on failure.
1337 */
1338void * __init memblock_virt_alloc_try_nid_nopanic(
1339                                phys_addr_t size, phys_addr_t align,
1340                                phys_addr_t min_addr, phys_addr_t max_addr,
1341                                int nid)
1342{
1343        memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
1344                     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
1345                     (u64)max_addr, (void *)_RET_IP_);
1346        return memblock_virt_alloc_internal(size, align, min_addr,
1347                                             max_addr, nid);
1348}
1349
1350/**
1351 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
1352 * @size: size of memory block to be allocated in bytes
1353 * @align: alignment of the region and block's size
1354 * @min_addr: the lower bound of the memory region from where the allocation
1355 *        is preferred (phys address)
1356 * @max_addr: the upper bound of the memory region from where the allocation
1357 *            is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
1358 *            allocate only from memory limited by memblock.current_limit value
1359 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1360 *
1361 * Public panicking version of _memblock_virt_alloc_try_nid_nopanic()
1362 * which provides debug information (including caller info), if enabled,
1363 * and panics if the request can not be satisfied.
1364 *
1365 * RETURNS:
1366 * Virtual address of allocated memory block on success, NULL on failure.
1367 */
1368void * __init memblock_virt_alloc_try_nid(
1369                        phys_addr_t size, phys_addr_t align,
1370                        phys_addr_t min_addr, phys_addr_t max_addr,
1371                        int nid)
1372{
1373        void *ptr;
1374
1375        memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
1376                     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
1377                     (u64)max_addr, (void *)_RET_IP_);
1378        ptr = memblock_virt_alloc_internal(size, align,
1379                                           min_addr, max_addr, nid);
1380        if (ptr)
1381                return ptr;
1382
1383        panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
1384              __func__, (u64)size, (u64)align, nid, (u64)min_addr,
1385              (u64)max_addr);
1386        return NULL;
1387}
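
/*
 * Illustrative sketch (editor's addition): callers that cannot make progress
 * without the memory use this panicking variant, usually through a
 * convenience wrapper such as memblock_virt_alloc() from
 * include/linux/bootmem.h ("buf" is a hypothetical name):
 *
 *	buf = memblock_virt_alloc(size, SMP_CACHE_BYTES);
 *
 * No NULL check is needed afterwards; an allocation failure would already
 * have panicked.
 */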
1388
1389/**
1390 * __memblock_free_early - free boot memory block
1391 * @base: phys starting address of the boot memory block
1392 * @size: size of the boot memory block in bytes
1393 *
1394 * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
1395 * The freed memory will not be released to the buddy allocator.
1396 */
1397void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
1398{
1399        memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
1400                     __func__, (u64)base, (u64)base + size - 1,
1401                     (void *)_RET_IP_);
1402        kmemleak_free_part_phys(base, size);
1403        memblock_remove_range(&memblock.reserved, base, size);
1404}
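
/*
 * Illustrative sketch (editor's addition): paired with the allocators above,
 * an early user can hand back memory it no longer needs; the
 * memblock_free_early() inline in include/linux/bootmem.h forwards here
 * ("buf" and "size" are hypothetical):
 *
 *	buf = memblock_virt_alloc(size, PAGE_SIZE);
 *	...
 *	memblock_free_early(__pa(buf), size);
 */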
1405
1406/**
1407 * __memblock_free_late - free bootmem block pages directly to the buddy allocator
1408 * @base: phys starting address of the boot memory block
1409 * @size: size of the boot memory block in bytes
1410 *
1411 * This is only useful when the bootmem allocator has already been torn
1412 * down, but we are still initializing the system.  Pages are released directly
1413 * to the buddy allocator; no bootmem metadata is updated because it is gone.
1414 */
1415void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
1416{
1417        u64 cursor, end;
1418
1419        memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
1420                     __func__, (u64)base, (u64)base + size - 1,
1421                     (void *)_RET_IP_);
1422        kmemleak_free_part_phys(base, size);
1423        cursor = PFN_UP(base);
1424        end = PFN_DOWN(base + size);
1425
1426        for (; cursor < end; cursor++) {
1427                __free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
1428                totalram_pages++;
1429        }
1430}
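
/*
 * Worked example (editor's addition): the loop rounds the start up (PFN_UP)
 * and the end down (PFN_DOWN), so only pages lying wholly inside the range
 * are freed. With 4 KiB pages, base = 0x10000800 and size = 0x2000 give
 * cursor = PFN_UP(0x10000800) = 0x10001 and end = PFN_DOWN(0x10002800) =
 * 0x10002, so exactly one page (0x10001000-0x10001fff) reaches the buddy
 * allocator; the partial pages at either edge are never handed over.
 */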
1431
1432/*
1433 * Remaining API functions
1434 */
1435
1436phys_addr_t __init_memblock memblock_phys_mem_size(void)
1437{
1438        return memblock.memory.total_size;
1439}
1440
1441phys_addr_t __init_memblock memblock_reserved_size(void)
1442{
1443        return memblock.reserved.total_size;
1444}
1445
1446phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
1447{
1448        unsigned long pages = 0;
1449        struct memblock_region *r;
1450        unsigned long start_pfn, end_pfn;
1451
1452        for_each_memblock(memory, r) {
1453                start_pfn = memblock_region_memory_base_pfn(r);
1454                end_pfn = memblock_region_memory_end_pfn(r);
1455                start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
1456                end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
1457                pages += end_pfn - start_pfn;
1458        }
1459
1460        return PFN_PHYS(pages);
1461}
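
/*
 * Worked example (editor's addition): each region is clamped to @limit_pfn
 * before its pages are counted. With memory at PFNs [0x000, 0x100) and
 * [0x200, 0x400) and limit_pfn = 0x300, the first region contributes 0x100
 * pages and the second is clamped to [0x200, 0x300) for another 0x100, so
 * the result is PFN_PHYS(0x200).
 */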
1462
1463/* lowest address */
1464phys_addr_t __init_memblock memblock_start_of_DRAM(void)
1465{
1466        return memblock.memory.regions[0].base;
1467}
1468
1469phys_addr_t __init_memblock memblock_end_of_DRAM(void)
1470{
1471        int idx = memblock.memory.cnt - 1;
1472
1473        return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
1474}
1475
1476static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
1477{
1478        phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
1479        struct memblock_region *r;
1480
1481        /*
1482         * Translate the memory @limit size into the max address within one of
1483         * the memory memblock regions. If @limit exceeds the total size of
1484         * those regions, max_addr keeps its original value of ULLONG_MAX.
1485         */
1486        for_each_memblock(memory, r) {
1487                if (limit <= r->size) {
1488                        max_addr = r->base + limit;
1489                        break;
1490                }
1491                limit -= r->size;
1492        }
1493
1494        return max_addr;
1495}
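
/*
 * Worked example (editor's addition): the walk spends @limit across regions
 * in address order. With memory at [0x80000000, +0x40000000) and
 * [0x100000000, +0x40000000) and limit = 0x60000000, the first region
 * consumes 0x40000000 of the limit, leaving 0x20000000 for the second, so
 * max_addr = 0x100000000 + 0x20000000 = 0x120000000.
 */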
1496
1497void __init memblock_enforce_memory_limit(phys_addr_t limit)
1498{
1499        phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
1500
1501        if (!limit)
1502                return;
1503
1504        max_addr = __find_max_addr(limit);
1505
1506        /* @limit exceeds the total size of the memory; do nothing */
1507        if (max_addr == (phys_addr_t)ULLONG_MAX)
1508                return;
1509
1510        /* truncate both memory and reserved regions */
1511        memblock_remove_range(&memblock.memory, max_addr,
1512                              (phys_addr_t)ULLONG_MAX);
1513        memblock_remove_range(&memblock.reserved, max_addr,
1514                              (phys_addr_t)ULLONG_MAX);
1515}
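
/*
 * Illustrative sketch (editor's addition): architectures typically invoke
 * this from their "mem=" command-line handling; a minimal handler could look
 * like the following (a sketch, not a verbatim arch implementation):
 *
 *	static int __init early_mem(char *p)
 *	{
 *		memblock_enforce_memory_limit(memparse(p, &p));
 *		return 0;
 *	}
 *	early_param("mem", early_mem);
 */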
1516
1517void __init memblock_mem_limit_remove_map(phys_addr_t limit)
1518{
1519        struct memblock_type *type = &memblock.memory;
1520        phys_addr_t max_addr;
1521        int i, ret, start_rgn, end_rgn;
1522
1523        if (!limit)
1524                return;
1525
1526        max_addr = __find_max_addr(limit);
1527
1528        /* @limit exceeds the total size of the memory; do nothing */
1529        if (max_addr == (phys_addr_t)ULLONG_MAX)
1530                return;
1531
1532        ret = memblock_isolate_range(type, max_addr, (phys_addr_t)ULLONG_MAX,
1533                                &start_rgn, &end_rgn);
1534        if (ret)
1535                return;
1536
1537        /* remove all the MAP regions above the limit */
1538        for (i = end_rgn - 1; i >= start_rgn; i--) {
1539                if (!memblock_is_nomap(&type->regions[i]))
1540                        memblock_remove_region(type, i);
1541        }
1542        /* truncate the reserved regions */
1543        memblock_remove_range(&memblock.reserved, max_addr,
1544                              (phys_addr_t)ULLONG_MAX);
1545}
1546
1547static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
1548{
1549        unsigned int left = 0, right = type->cnt;
1550
1551        do {
1552                unsigned int mid = left + (right - left) / 2;	/* overflow-safe midpoint */
1553
1554                if (addr < type->regions[mid].base)
1555                        right = mid;
1556                else if (addr >= (type->regions[mid].base +
1557                                  type->regions[mid].size))
1558                        left = mid + 1;
1559                else
1560                        return mid;
1561        } while (left < right);
1562        return -1;
1563}
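
/*
 * Illustrative note (editor's addition): regions within a type are kept
 * sorted by base and non-overlapping, which is what makes this binary
 * search valid. Given regions [0x1000, +0x1000) and [0x4000, +0x2000),
 * memblock_search(type, 0x4800) returns index 1, while 0x3000 falls in the
 * gap between regions and returns -1.
 */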
1564
1565bool __init memblock_is_reserved(phys_addr_t addr)
1566{
1567        return memblock_search(&memblock.reserved, addr) != -1;
1568}
1569
1570bool __init_memblock memblock_is_memory(phys_addr_t addr)
1571{
1572        return memblock_search(&memblock.memory, addr) != -1;
1573}
1574
1575int __init_memblock memblock_is_map_memory(phys_addr_t addr)
1576{
1577        int i = memblock_search(&memblock.memory, addr);
1578
1579        if (i == -1)
1580                return false;
1581        return !memblock_is_nomap(&memblock.memory.regions[i]);
1582}
1583
1584#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1585int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
1586                         unsigned long *start_pfn, unsigned long *end_pfn)
1587{
1588        struct memblock_type *type = &memblock.memory;
1589        int mid = memblock_search(type, PFN_PHYS(pfn));
1590
1591        if (mid == -1)
1592                return -1;
1593
1594        *start_pfn = PFN_DOWN(type->regions[mid].base);
1595        *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);
1596
1597        return type->regions[mid].nid;
1598}
1599#endif
1600
1601/**
1602 * memblock_is_region_memory - check if a region is a subset of memory
1603 * @base: base of region to check
1604 * @size: size of region to check
1605 *
1606 * Check if the region [@base, @base+@size) is a subset of a memory block.
1607 *
1608 * RETURNS:
1609 * 0 if false, non-zero if true
1610 */
1611int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
1612{
1613        int idx = memblock_search(&memblock.memory, base);
1614        phys_addr_t end = base + memblock_cap_size(base, &size);
1615
1616        if (idx == -1)
1617                return 0;
1618        return memblock.memory.regions[idx].base <= base &&
1619                (memblock.memory.regions[idx].base +
1620                 memblock.memory.regions[idx].size) >= end;
1621}
1622
1623/**
1624 * memblock_is_region_reserved - check if a region intersects reserved memory
1625 * @base: base of region to check
1626 * @size: size of region to check
1627 *
1628 * Check if the region [@base, @base+@size) intersects a reserved memory block.
1629 *
1630 * RETURNS:
1631 * True if they intersect, false if not.
1632 */
1633bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
1634{
1635        memblock_cap_size(base, &size);
1636        return memblock_overlaps_region(&memblock.reserved, base, size);
1637}
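
/*
 * Illustrative sketch (editor's addition): the two region checks above are
 * commonly paired when validating a candidate range early in boot ("base"
 * and "size" are hypothetical):
 *
 *	if (!memblock_is_region_memory(base, size) ||
 *	    memblock_is_region_reserved(base, size))
 *		return -EINVAL;
 *
 * i.e. reject a range that is not entirely RAM or that already intersects
 * a reservation.
 */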
1638
1639void __init_memblock memblock_trim_memory(phys_addr_t align)
1640{
1641        phys_addr_t start, end, orig_start, orig_end;
1642        struct memblock_region *r;
1643
1644        for_each_memblock(memory, r) {
1645                orig_start = r->base;
1646                orig_end = r->base + r->size;
1647                start = round_up(orig_start, align);
1648                end = round_down(orig_end, align);
1649
1650                if (start == orig_start && end == orig_end)
1651                        continue;
1652
1653                if (start < end) {
1654                        r->base = start;
1655                        r->size = end - start;
1656                } else {
1657                        memblock_remove_region(&memblock.memory,
1658                                               r - memblock.memory.regions);
1659                        r--;
1660                }
1661        }
1662}
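
/*
 * Worked example (editor's addition): with align = 0x10000 (64 KiB), a
 * region spanning [0x80001000, 0x80040000) is trimmed to
 * [round_up(0x80001000, 0x10000), round_down(0x80040000, 0x10000)) =
 * [0x80010000, 0x80040000). A region smaller than one aligned chunk ends
 * up with start >= end and is removed outright.
 */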
1663
1664void __init_memblock memblock_set_current_limit(phys_addr_t limit)
1665{
1666        memblock.current_limit = limit;
1667}
1668
1669phys_addr_t __init_memblock memblock_get_current_limit(void)
1670{
1671        return memblock.current_limit;
1672}
1673
1674static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
1675{
1676        unsigned long long base, size;
1677        unsigned long flags;
1678        int idx;
1679        struct memblock_region *rgn;
1680
1681        pr_info(" %s.cnt  = 0x%lx\n", name, type->cnt);
1682
1683        for_each_memblock_type(type, rgn) {
1684                char nid_buf[32] = "";
1685
1686                base = rgn->base;
1687                size = rgn->size;
1688                flags = rgn->flags;
1689#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1690                if (memblock_get_region_node(rgn) != MAX_NUMNODES)
1691                        snprintf(nid_buf, sizeof(nid_buf), " on node %d",
1692                                 memblock_get_region_node(rgn));
1693#endif
1694                pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n",
1695                        name, idx, base, base + size - 1, size, nid_buf, flags);
1696        }
1697}
1698
1699void __init_memblock __memblock_dump_all(void)
1700{
1701        pr_info("MEMBLOCK configuration:\n");
1702        pr_info(" memory size = %#llx reserved size = %#llx\n",
1703                (unsigned long long)memblock.memory.total_size,
1704                (unsigned long long)memblock.reserved.total_size);
1705
1706        memblock_dump(&memblock.memory, "memory");
1707        memblock_dump(&memblock.reserved, "reserved");
1708}
1709
1710void __init memblock_allow_resize(void)
1711{
1712        memblock_can_resize = 1;
1713}
1714
1715static int __init early_memblock(char *p)
1716{
1717        if (p && strstr(p, "debug"))
1718                memblock_debug = 1;
1719        return 0;
1720}
1721early_param("memblock", early_memblock);
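
/*
 * Usage note (editor's addition): booting with "memblock=debug" on the
 * kernel command line sets memblock_debug, turning every memblock_dbg()
 * in this file into a printed trace of add/remove/reserve/alloc calls.
 */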
1722
1723#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)
1724
1725static int memblock_debug_show(struct seq_file *m, void *private)
1726{
1727        struct memblock_type *type = m->private;
1728        struct memblock_region *reg;
1729        int i;
1730
1731        for (i = 0; i < type->cnt; i++) {
1732                reg = &type->regions[i];
1733                seq_printf(m, "%4d: ", i);
1734                if (sizeof(phys_addr_t) == 4)
1735                        seq_printf(m, "0x%08lx..0x%08lx\n",
1736                                   (unsigned long)reg->base,
1737                                   (unsigned long)(reg->base + reg->size - 1));
1738                else
1739                        seq_printf(m, "0x%016llx..0x%016llx\n",
1740                                   (unsigned long long)reg->base,
1741                                   (unsigned long long)(reg->base + reg->size - 1));
1742
1743        }
1744        return 0;
1745}
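
/*
 * Illustrative note (editor's addition): through the debugfs files created
 * below, each region prints as an index plus an inclusive hex range; a line
 * from /sys/kernel/debug/memblock/memory on a 64-bit system looks like:
 *
 *	   0: 0x0000000000001000..0x000000000009efff
 */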
1746
1747static int memblock_debug_open(struct inode *inode, struct file *file)
1748{
1749        return single_open(file, memblock_debug_show, inode->i_private);
1750}
1751
1752static const struct file_operations memblock_debug_fops = {
1753        .open = memblock_debug_open,
1754        .read = seq_read,
1755        .llseek = seq_lseek,
1756        .release = single_release,
1757};
1758
1759static int __init memblock_init_debugfs(void)
1760{
1761        struct dentry *root = debugfs_create_dir("memblock", NULL);
1762        if (!root)
1763                return -ENXIO;
1764        debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
1765        debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
1766#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
1767        debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops);
1768#endif
1769
1770        return 0;
1771}
1772__initcall(memblock_init_debugfs);
1773
1774#endif /* CONFIG_DEBUG_FS */
1775