linux/mm/memblock.c
   1/*
   2 * Procedures for maintaining information about logical memory blocks.
   3 *
   4 * Peter Bergner, IBM Corp.     June 2001.
   5 * Copyright (C) 2001 Peter Bergner.
   6 *
   7 *      This program is free software; you can redistribute it and/or
   8 *      modify it under the terms of the GNU General Public License
   9 *      as published by the Free Software Foundation; either version
  10 *      2 of the License, or (at your option) any later version.
  11 */
  12
  13#include <linux/kernel.h>
  14#include <linux/slab.h>
  15#include <linux/init.h>
  16#include <linux/bitops.h>
  17#include <linux/poison.h>
  18#include <linux/pfn.h>
  19#include <linux/debugfs.h>
  20#include <linux/seq_file.h>
  21#include <linux/memblock.h>
  22#include <linux/kmemleak.h>
  23
  24#include <asm-generic/sections.h>
  25#include <linux/io.h>
  26
  27#include "internal.h"
  28
  29static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
  30static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
  31
  32struct memblock memblock __initdata_memblock = {
  33        .memory.regions         = memblock_memory_init_regions,
  34        .memory.cnt             = 1,    /* empty dummy entry */
  35        .memory.max             = INIT_MEMBLOCK_REGIONS,
  36
  37        .reserved.regions       = memblock_reserved_init_regions,
  38        .reserved.cnt           = 1,    /* empty dummy entry */
  39        .reserved.max           = INIT_MEMBLOCK_REGIONS,
  40
  41        .bottom_up              = false,
  42        .current_limit          = MEMBLOCK_ALLOC_ANYWHERE,
  43};
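
/*
 * Note on the initial state above: each region array starts with a single
 * all-zero "dummy" entry (cnt == 1, base == size == 0), which is what the
 * rest of this file treats as "empty".  The static __initdata arrays only
 * hold INIT_MEMBLOCK_REGIONS entries; once either array fills up,
 * memblock_double_array() below swaps in a larger, dynamically found buffer.
 */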
  44
  45int memblock_debug __initdata_memblock;
  46#ifdef CONFIG_MOVABLE_NODE
  47bool movable_node_enabled __initdata_memblock = false;
  48#endif
  49static bool system_has_some_mirror __initdata_memblock = false;
  50static int memblock_can_resize __initdata_memblock;
  51static int memblock_memory_in_slab __initdata_memblock = 0;
  52static int memblock_reserved_in_slab __initdata_memblock = 0;
  53
  54ulong __init_memblock choose_memblock_flags(void)
  55{
  56        return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
  57}
  58
  59/* inline so we don't get a warning when pr_debug is compiled out */
  60static __init_memblock const char *
  61memblock_type_name(struct memblock_type *type)
  62{
  63        if (type == &memblock.memory)
  64                return "memory";
  65        else if (type == &memblock.reserved)
  66                return "reserved";
  67        else
  68                return "unknown";
  69}
  70
  71/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
  72static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
  73{
  74        return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
  75}
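
/*
 * Worked example for memblock_cap_size() (illustrative values): with
 * base == 0xfffffffffffff000 and *size == 0x2000, ULLONG_MAX - base is
 * 0xfff, so *size is clamped to 0xfff and base + *size no longer wraps
 * around the end of the physical address space.
 */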
  76
  77/*
  78 * Address comparison utilities
  79 */
  80static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
  81                                       phys_addr_t base2, phys_addr_t size2)
  82{
  83        return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
  84}
  85
  86static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
  87                                        phys_addr_t base, phys_addr_t size)
  88{
  89        unsigned long i;
  90
  91        for (i = 0; i < type->cnt; i++) {
  92                phys_addr_t rgnbase = type->regions[i].base;
  93                phys_addr_t rgnsize = type->regions[i].size;
  94                if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
  95                        break;
  96        }
  97
  98        return (i < type->cnt) ? i : -1;
  99}
 100
/**
 102 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 103 * @start: start of candidate range
 104 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 105 * @size: size of free area to find
 106 * @align: alignment of free area to find
 107 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 108 * @flags: pick from blocks based on memory attributes
 109 *
 110 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 111 *
 112 * RETURNS:
 113 * Found address on success, 0 on failure.
 114 */
 115static phys_addr_t __init_memblock
 116__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
 117                                phys_addr_t size, phys_addr_t align, int nid,
 118                                ulong flags)
 119{
 120        phys_addr_t this_start, this_end, cand;
 121        u64 i;
 122
 123        for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
 124                this_start = clamp(this_start, start, end);
 125                this_end = clamp(this_end, start, end);
 126
 127                cand = round_up(this_start, align);
 128                if (cand < this_end && this_end - cand >= size)
 129                        return cand;
 130        }
 131
 132        return 0;
 133}
 134
 135/**
 136 * __memblock_find_range_top_down - find free area utility, in top-down
 137 * @start: start of candidate range
 138 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 139 * @size: size of free area to find
 140 * @align: alignment of free area to find
 141 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 142 * @flags: pick from blocks based on memory attributes
 143 *
 144 * Utility called from memblock_find_in_range_node(), find free area top-down.
 145 *
 146 * RETURNS:
 147 * Found address on success, 0 on failure.
 148 */
 149static phys_addr_t __init_memblock
 150__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
 151                               phys_addr_t size, phys_addr_t align, int nid,
 152                               ulong flags)
 153{
 154        phys_addr_t this_start, this_end, cand;
 155        u64 i;
 156
 157        for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
 158                                        NULL) {
 159                this_start = clamp(this_start, start, end);
 160                this_end = clamp(this_end, start, end);
 161
 162                if (this_end < size)
 163                        continue;
 164
 165                cand = round_down(this_end - size, align);
 166                if (cand >= this_start)
 167                        return cand;
 168        }
 169
 170        return 0;
 171}
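
/*
 * Worked example for the top-down scan (illustrative values): for a free
 * range clamped to [0x1000, 0x9000), size == 0x2000 and align == 0x1000,
 * cand = round_down(0x9000 - 0x2000, 0x1000) = 0x7000, which is above the
 * range start and is therefore returned.
 */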
 172
 173/**
 174 * memblock_find_in_range_node - find free area in given range and node
 175 * @size: size of free area to find
 176 * @align: alignment of free area to find
 177 * @start: start of candidate range
 178 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 179 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 180 * @flags: pick from blocks based on memory attributes
 181 *
 182 * Find @size free area aligned to @align in the specified range and node.
 183 *
 * When allocation direction is bottom-up, @start should be greater
 * than the end of the kernel image. Otherwise, it will be trimmed. The
 * reason is that we want the bottom-up allocation to land just above the
 * kernel image, so that the allocated memory and the kernel are likely
 * to reside on the same node.
 *
 * If bottom-up allocation fails, the allocation is retried top-down.
 191 *
 192 * RETURNS:
 193 * Found address on success, 0 on failure.
 194 */
 195phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
 196                                        phys_addr_t align, phys_addr_t start,
 197                                        phys_addr_t end, int nid, ulong flags)
 198{
 199        phys_addr_t kernel_end, ret;
 200
 201        /* pump up @end */
 202        if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
 203                end = memblock.current_limit;
 204
 205        /* avoid allocating the first page */
 206        start = max_t(phys_addr_t, start, PAGE_SIZE);
 207        end = max(start, end);
 208        kernel_end = __pa_symbol(_end);
 209
 210        /*
 211         * try bottom-up allocation only when bottom-up mode
 212         * is set and @end is above the kernel image.
 213         */
 214        if (memblock_bottom_up() && end > kernel_end) {
 215                phys_addr_t bottom_up_start;
 216
 217                /* make sure we will allocate above the kernel */
 218                bottom_up_start = max(start, kernel_end);
 219
 220                /* ok, try bottom-up allocation first */
 221                ret = __memblock_find_range_bottom_up(bottom_up_start, end,
 222                                                      size, align, nid, flags);
 223                if (ret)
 224                        return ret;
 225
 226                /*
 227                 * we always limit bottom-up allocation above the kernel,
 228                 * but top-down allocation doesn't have the limit, so
 229                 * retrying top-down allocation may succeed when bottom-up
 230                 * allocation failed.
 231                 *
                 * bottom-up allocation is expected to fail very rarely,
                 * so we use WARN_ONCE() here to get a stack trace if
                 * it does happen.
                 */
                WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
 238        }
 239
 240        return __memblock_find_range_top_down(start, end, size, align, nid,
 241                                              flags);
 242}
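
/*
 * Illustrative sketch of a caller (the values and the SZ_* constants from
 * <linux/sizes.h> are assumptions, not taken from this file): finding a
 * range does not reserve it, so a typical user pairs the search with
 * memblock_reserve():
 *
 *	phys_addr_t addr;
 *
 *	addr = memblock_find_in_range_node(SZ_1M, SZ_2M, 0,
 *					   MEMBLOCK_ALLOC_ACCESSIBLE,
 *					   NUMA_NO_NODE, MEMBLOCK_NONE);
 *	if (addr)
 *		memblock_reserve(addr, SZ_1M);
 */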
 243
 244/**
 245 * memblock_find_in_range - find free area in given range
 246 * @start: start of candidate range
 247 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 248 * @size: size of free area to find
 249 * @align: alignment of free area to find
 250 *
 251 * Find @size free area aligned to @align in the specified range.
 252 *
 253 * RETURNS:
 254 * Found address on success, 0 on failure.
 255 */
 256phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
 257                                        phys_addr_t end, phys_addr_t size,
 258                                        phys_addr_t align)
 259{
 260        phys_addr_t ret;
 261        ulong flags = choose_memblock_flags();
 262
 263again:
 264        ret = memblock_find_in_range_node(size, align, start, end,
 265                                            NUMA_NO_NODE, flags);
 266
 267        if (!ret && (flags & MEMBLOCK_MIRROR)) {
 268                pr_warn("Could not allocate %pap bytes of mirrored memory\n",
 269                        &size);
 270                flags &= ~MEMBLOCK_MIRROR;
 271                goto again;
 272        }
 273
 274        return ret;
 275}
 276
 277static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
 278{
 279        type->total_size -= type->regions[r].size;
 280        memmove(&type->regions[r], &type->regions[r + 1],
 281                (type->cnt - (r + 1)) * sizeof(type->regions[r]));
 282        type->cnt--;
 283
 284        /* Special case for empty arrays */
 285        if (type->cnt == 0) {
 286                WARN_ON(type->total_size != 0);
 287                type->cnt = 1;
 288                type->regions[0].base = 0;
 289                type->regions[0].size = 0;
 290                type->regions[0].flags = 0;
 291                memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
 292        }
 293}
 294
 295phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
 296                                        phys_addr_t *addr)
 297{
 298        if (memblock.reserved.regions == memblock_reserved_init_regions)
 299                return 0;
 300
 301        *addr = __pa(memblock.reserved.regions);
 302
 303        return PAGE_ALIGN(sizeof(struct memblock_region) *
 304                          memblock.reserved.max);
 305}
 306
 307/**
 308 * memblock_double_array - double the size of the memblock regions array
 309 * @type: memblock type of the regions array being doubled
 310 * @new_area_start: starting address of memory range to avoid overlap with
 311 * @new_area_size: size of memory range to avoid overlap with
 312 *
 313 * Double the size of the @type regions array. If memblock is being used to
 314 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size)
 316 * waiting to be reserved, ensure the memory used by the new array does
 317 * not overlap.
 318 *
 319 * RETURNS:
 320 * 0 on success, -1 on failure.
 321 */
 322static int __init_memblock memblock_double_array(struct memblock_type *type,
 323                                                phys_addr_t new_area_start,
 324                                                phys_addr_t new_area_size)
 325{
 326        struct memblock_region *new_array, *old_array;
 327        phys_addr_t old_alloc_size, new_alloc_size;
 328        phys_addr_t old_size, new_size, addr;
 329        int use_slab = slab_is_available();
 330        int *in_slab;
 331
 332        /* We don't allow resizing until we know about the reserved regions
 333         * of memory that aren't suitable for allocation
 334         */
 335        if (!memblock_can_resize)
 336                return -1;
 337
 338        /* Calculate new doubled size */
 339        old_size = type->max * sizeof(struct memblock_region);
 340        new_size = old_size << 1;
 341        /*
         * We need to allocate the new array aligned to PAGE_SIZE,
         * so that we can free it completely later.
 344         */
 345        old_alloc_size = PAGE_ALIGN(old_size);
 346        new_alloc_size = PAGE_ALIGN(new_size);
 347
 348        /* Retrieve the slab flag */
 349        if (type == &memblock.memory)
 350                in_slab = &memblock_memory_in_slab;
 351        else
 352                in_slab = &memblock_reserved_in_slab;
 353
 354        /* Try to find some space for it.
 355         *
         * WARNING: We assume that either slab is available and we use it,
         * or we fall back to MEMBLOCK for allocations. That means this is
         * unsafe to use while bootmem is still active (unless bootmem itself
         * is implemented on top of MEMBLOCK, which isn't the case yet).
         *
         * This should not be an issue for now though, as we currently only
         * call into MEMBLOCK while it's still active, or much later, when
         * slab is available, for memory hotplug operations.
 364         */
 365        if (use_slab) {
 366                new_array = kmalloc(new_size, GFP_KERNEL);
 367                addr = new_array ? __pa(new_array) : 0;
 368        } else {
 369                /* only exclude range when trying to double reserved.regions */
 370                if (type != &memblock.reserved)
 371                        new_area_start = new_area_size = 0;
 372
 373                addr = memblock_find_in_range(new_area_start + new_area_size,
 374                                                memblock.current_limit,
 375                                                new_alloc_size, PAGE_SIZE);
 376                if (!addr && new_area_size)
 377                        addr = memblock_find_in_range(0,
 378                                min(new_area_start, memblock.current_limit),
 379                                new_alloc_size, PAGE_SIZE);
 380
 381                new_array = addr ? __va(addr) : NULL;
 382        }
 383        if (!addr) {
 384                pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
 385                       memblock_type_name(type), type->max, type->max * 2);
 386                return -1;
 387        }
 388
        memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]\n",
 390                        memblock_type_name(type), type->max * 2, (u64)addr,
 391                        (u64)addr + new_size - 1);
 392
 393        /*
 394         * Found space, we now need to move the array over before we add the
 395         * reserved region since it may be our reserved array itself that is
 396         * full.
 397         */
 398        memcpy(new_array, type->regions, old_size);
 399        memset(new_array + type->max, 0, old_size);
 400        old_array = type->regions;
 401        type->regions = new_array;
 402        type->max <<= 1;
 403
 404        /* Free old array. We needn't free it if the array is the static one */
 405        if (*in_slab)
 406                kfree(old_array);
 407        else if (old_array != memblock_memory_init_regions &&
 408                 old_array != memblock_reserved_init_regions)
 409                memblock_free(__pa(old_array), old_alloc_size);
 410
        /*
         * Reserve the new array if it came from memblock. Otherwise, we
         * needn't do it.
         */
 415        if (!use_slab)
 416                BUG_ON(memblock_reserve(addr, new_alloc_size));
 417
 418        /* Update slab flag */
 419        *in_slab = use_slab;
 420
 421        return 0;
 422}
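
/*
 * Growth sketch for memblock_double_array() (sizes are illustrative): each
 * call doubles type->max, e.g. from INIT_MEMBLOCK_REGIONS to twice that,
 * and both the old and new allocation sizes are rounded up to PAGE_SIZE so
 * that a memblock-allocated old array can eventually be handed back to the
 * page allocator whole.
 */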
 423
 424/**
 425 * memblock_merge_regions - merge neighboring compatible regions
 426 * @type: memblock type to scan
 427 *
 428 * Scan @type and merge neighboring compatible regions.
 429 */
 430static void __init_memblock memblock_merge_regions(struct memblock_type *type)
 431{
 432        int i = 0;
 433
 434        /* cnt never goes below 1 */
 435        while (i < type->cnt - 1) {
 436                struct memblock_region *this = &type->regions[i];
 437                struct memblock_region *next = &type->regions[i + 1];
 438
 439                if (this->base + this->size != next->base ||
 440                    memblock_get_region_node(this) !=
 441                    memblock_get_region_node(next) ||
 442                    this->flags != next->flags) {
 443                        BUG_ON(this->base + this->size > next->base);
 444                        i++;
 445                        continue;
 446                }
 447
 448                this->size += next->size;
 449                /* move forward from next + 1, index of which is i + 2 */
 450                memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
 451                type->cnt--;
 452        }
 453}
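
/*
 * Worked example (illustrative values): two regions [0x1000, 0x2000) and
 * [0x2000, 0x3000) with the same node id and flags are collapsed into a
 * single [0x1000, 0x3000) region and type->cnt drops by one; regions that
 * merely touch but differ in nid or flags are left alone.
 */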
 454
 455/**
 456 * memblock_insert_region - insert new memblock region
 457 * @type:       memblock type to insert into
 458 * @idx:        index for the insertion point
 459 * @base:       base address of the new region
 460 * @size:       size of the new region
 461 * @nid:        node id of the new region
 462 * @flags:      flags of the new region
 463 *
 464 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 466 */
 467static void __init_memblock memblock_insert_region(struct memblock_type *type,
 468                                                   int idx, phys_addr_t base,
 469                                                   phys_addr_t size,
 470                                                   int nid, unsigned long flags)
 471{
 472        struct memblock_region *rgn = &type->regions[idx];
 473
 474        BUG_ON(type->cnt >= type->max);
 475        memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
 476        rgn->base = base;
 477        rgn->size = size;
 478        rgn->flags = flags;
 479        memblock_set_region_node(rgn, nid);
 480        type->cnt++;
 481        type->total_size += size;
 482}
 483
 484/**
 485 * memblock_add_range - add new memblock region
 486 * @type: memblock type to add new region into
 487 * @base: base address of the new region
 488 * @size: size of the new region
 489 * @nid: nid of the new region
 490 * @flags: flags of the new region
 491 *
 492 * Add new memblock region [@base,@base+@size) into @type.  The new region
 493 * is allowed to overlap with existing ones - overlaps don't affect already
 494 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 495 * compatible regions are merged) after the addition.
 496 *
 497 * RETURNS:
 498 * 0 on success, -errno on failure.
 499 */
 500int __init_memblock memblock_add_range(struct memblock_type *type,
 501                                phys_addr_t base, phys_addr_t size,
 502                                int nid, unsigned long flags)
 503{
 504        bool insert = false;
 505        phys_addr_t obase = base;
 506        phys_addr_t end = base + memblock_cap_size(base, &size);
 507        int i, nr_new;
 508
 509        if (!size)
 510                return 0;
 511
 512        /* special case for empty array */
 513        if (type->regions[0].size == 0) {
 514                WARN_ON(type->cnt != 1 || type->total_size);
 515                type->regions[0].base = base;
 516                type->regions[0].size = size;
 517                type->regions[0].flags = flags;
 518                memblock_set_region_node(&type->regions[0], nid);
 519                type->total_size = size;
 520                return 0;
 521        }
 522repeat:
 523        /*
 524         * The following is executed twice.  Once with %false @insert and
 525         * then with %true.  The first counts the number of regions needed
         * to accommodate the new area.  The second actually inserts them.
 527         */
 528        base = obase;
 529        nr_new = 0;
 530
 531        for (i = 0; i < type->cnt; i++) {
 532                struct memblock_region *rgn = &type->regions[i];
 533                phys_addr_t rbase = rgn->base;
 534                phys_addr_t rend = rbase + rgn->size;
 535
 536                if (rbase >= end)
 537                        break;
 538                if (rend <= base)
 539                        continue;
 540                /*
 541                 * @rgn overlaps.  If it separates the lower part of new
 542                 * area, insert that portion.
 543                 */
 544                if (rbase > base) {
 545                        nr_new++;
 546                        if (insert)
 547                                memblock_insert_region(type, i++, base,
 548                                                       rbase - base, nid,
 549                                                       flags);
 550                }
 551                /* area below @rend is dealt with, forget about it */
 552                base = min(rend, end);
 553        }
 554
 555        /* insert the remaining portion */
 556        if (base < end) {
 557                nr_new++;
 558                if (insert)
 559                        memblock_insert_region(type, i, base, end - base,
 560                                               nid, flags);
 561        }
 562
 563        /*
 564         * If this was the first round, resize array and repeat for actual
 565         * insertions; otherwise, merge and return.
 566         */
 567        if (!insert) {
 568                while (type->cnt + nr_new > type->max)
 569                        if (memblock_double_array(type, obase, size) < 0)
 570                                return -ENOMEM;
 571                insert = true;
 572                goto repeat;
 573        } else {
 574                memblock_merge_regions(type);
 575                return 0;
 576        }
 577}
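
/*
 * Worked example for the two-pass scheme above (illustrative values): if
 * @type already holds [0x2000, 0x3000) and [0x1000, 0x5000) is added with
 * the same nid and flags, the first pass counts two new pieces,
 * [0x1000, 0x2000) and [0x3000, 0x5000); the second pass inserts them and
 * memblock_merge_regions() then collapses all three into [0x1000, 0x5000).
 */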
 578
 579int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
 580                                       int nid)
 581{
 582        return memblock_add_range(&memblock.memory, base, size, nid, 0);
 583}
 584
 585int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
 586{
 587        return memblock_add_range(&memblock.memory, base, size,
 588                                   MAX_NUMNODES, 0);
 589}
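
/*
 * Illustrative early-boot usage (the symbols below are hypothetical, not
 * defined in this file): an architecture's setup code typically registers
 * its RAM banks and then carves out firmware-owned ranges before any
 * memblock allocation takes place:
 *
 *	memblock_add(bank_base, bank_size);
 *	memblock_reserve(initrd_phys, initrd_size);
 */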
 590
 591/**
 592 * memblock_isolate_range - isolate given range into disjoint memblocks
 593 * @type: memblock type to isolate range for
 594 * @base: base of range to isolate
 595 * @size: size of range to isolate
 596 * @start_rgn: out parameter for the start of isolated region
 597 * @end_rgn: out parameter for the end of isolated region
 598 *
 599 * Walk @type and ensure that regions don't cross the boundaries defined by
 600 * [@base,@base+@size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and the index one
 * past the last in *@end_rgn.
 603 *
 604 * RETURNS:
 605 * 0 on success, -errno on failure.
 606 */
 607static int __init_memblock memblock_isolate_range(struct memblock_type *type,
 608                                        phys_addr_t base, phys_addr_t size,
 609                                        int *start_rgn, int *end_rgn)
 610{
 611        phys_addr_t end = base + memblock_cap_size(base, &size);
 612        int i;
 613
 614        *start_rgn = *end_rgn = 0;
 615
 616        if (!size)
 617                return 0;
 618
 619        /* we'll create at most two more regions */
 620        while (type->cnt + 2 > type->max)
 621                if (memblock_double_array(type, base, size) < 0)
 622                        return -ENOMEM;
 623
 624        for (i = 0; i < type->cnt; i++) {
 625                struct memblock_region *rgn = &type->regions[i];
 626                phys_addr_t rbase = rgn->base;
 627                phys_addr_t rend = rbase + rgn->size;
 628
 629                if (rbase >= end)
 630                        break;
 631                if (rend <= base)
 632                        continue;
 633
 634                if (rbase < base) {
 635                        /*
 636                         * @rgn intersects from below.  Split and continue
 637                         * to process the next region - the new top half.
 638                         */
 639                        rgn->base = base;
 640                        rgn->size -= base - rbase;
 641                        type->total_size -= base - rbase;
 642                        memblock_insert_region(type, i, rbase, base - rbase,
 643                                               memblock_get_region_node(rgn),
 644                                               rgn->flags);
 645                } else if (rend > end) {
 646                        /*
 647                         * @rgn intersects from above.  Split and redo the
 648                         * current region - the new bottom half.
 649                         */
 650                        rgn->base = end;
 651                        rgn->size -= end - rbase;
 652                        type->total_size -= end - rbase;
 653                        memblock_insert_region(type, i--, rbase, end - rbase,
 654                                               memblock_get_region_node(rgn),
 655                                               rgn->flags);
 656                } else {
 657                        /* @rgn is fully contained, record it */
 658                        if (!*end_rgn)
 659                                *start_rgn = i;
 660                        *end_rgn = i + 1;
 661                }
 662        }
 663
 664        return 0;
 665}
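
/*
 * Worked example (illustrative values): isolating [0x2000, 0x3000) from a
 * type holding the single region [0x1000, 0x4000) splits it into
 * [0x1000, 0x2000), [0x2000, 0x3000) and [0x3000, 0x4000), and returns
 * *start_rgn == 1, *end_rgn == 2, i.e. only the middle region lies inside
 * the requested range.
 */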
 666
 667int __init_memblock memblock_remove_range(struct memblock_type *type,
 668                                          phys_addr_t base, phys_addr_t size)
 669{
 670        int start_rgn, end_rgn;
 671        int i, ret;
 672
 673        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
 674        if (ret)
 675                return ret;
 676
 677        for (i = end_rgn - 1; i >= start_rgn; i--)
 678                memblock_remove_region(type, i);
 679        return 0;
 680}
 681
 682int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
 683{
 684        return memblock_remove_range(&memblock.memory, base, size);
 685}
 686
 687
 688int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
 689{
 690        memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
 691                     (unsigned long long)base,
 692                     (unsigned long long)base + size - 1,
 693                     (void *)_RET_IP_);
 694
 695        return memblock_remove_range(&memblock.reserved, base, size);
 696}
 697
 698static int __init_memblock memblock_reserve_region(phys_addr_t base,
 699                                                   phys_addr_t size,
 700                                                   int nid,
 701                                                   unsigned long flags)
 702{
 703        struct memblock_type *_rgn = &memblock.reserved;
 704
 705        memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n",
 706                     (unsigned long long)base,
 707                     (unsigned long long)base + size - 1,
 708                     flags, (void *)_RET_IP_);
 709
 710        return memblock_add_range(_rgn, base, size, nid, flags);
 711}
 712
 713int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
 714{
 715        return memblock_reserve_region(base, size, MAX_NUMNODES, 0);
 716}
 717
/**
 * memblock_setclr_flag - set or clear a flag on memory regions
 * @base: base address of the region
 * @size: size of the region
 * @set: set (1) or clear (0) @flag
 * @flag: the flag to update
 *
 * This function isolates the region [@base, @base + @size) and sets/clears @flag.
 *
 * Return 0 on success, -errno on failure.
 */
 724static int __init_memblock memblock_setclr_flag(phys_addr_t base,
 725                                phys_addr_t size, int set, int flag)
 726{
 727        struct memblock_type *type = &memblock.memory;
 728        int i, ret, start_rgn, end_rgn;
 729
 730        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
 731        if (ret)
 732                return ret;
 733
 734        for (i = start_rgn; i < end_rgn; i++)
 735                if (set)
 736                        memblock_set_region_flags(&type->regions[i], flag);
 737                else
 738                        memblock_clear_region_flags(&type->regions[i], flag);
 739
 740        memblock_merge_regions(type);
 741        return 0;
 742}
 743
 744/**
 745 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 746 * @base: the base phys addr of the region
 747 * @size: the size of the region
 748 *
 * Return 0 on success, -errno on failure.
 750 */
 751int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
 752{
 753        return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
 754}
 755
 756/**
 757 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 758 * @base: the base phys addr of the region
 759 * @size: the size of the region
 760 *
 * Return 0 on success, -errno on failure.
 762 */
 763int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
 764{
 765        return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
 766}
 767
 768/**
 769 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 770 * @base: the base phys addr of the region
 771 * @size: the size of the region
 772 *
 * Return 0 on success, -errno on failure.
 774 */
 775int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
 776{
 777        system_has_some_mirror = true;
 778
 779        return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
 780}
 781
 782
 783/**
 * __next_reserved_mem_region - next function for for_each_reserved_mem_region()
 785 * @idx: pointer to u64 loop variable
 786 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 787 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 788 *
 789 * Iterate over all reserved memory regions.
 790 */
 791void __init_memblock __next_reserved_mem_region(u64 *idx,
 792                                           phys_addr_t *out_start,
 793                                           phys_addr_t *out_end)
 794{
 795        struct memblock_type *rsv = &memblock.reserved;
 796
 797        if (*idx >= 0 && *idx < rsv->cnt) {
 798                struct memblock_region *r = &rsv->regions[*idx];
 799                phys_addr_t base = r->base;
 800                phys_addr_t size = r->size;
 801
 802                if (out_start)
 803                        *out_start = base;
 804                if (out_end)
 805                        *out_end = base + size - 1;
 806
 807                *idx += 1;
 808                return;
 809        }
 810
 811        /* signal end of iteration */
 812        *idx = ULLONG_MAX;
 813}
 814
 815/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 817 * @idx: pointer to u64 loop variable
 818 * @nid: node selector, %NUMA_NO_NODE for all nodes
 819 * @flags: pick from blocks based on memory attributes
 820 * @type_a: pointer to memblock_type from where the range is taken
 821 * @type_b: pointer to memblock_type which excludes memory from being taken
 822 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 823 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 824 * @out_nid: ptr to int for nid of the range, can be %NULL
 825 *
 826 * Find the first area from *@idx which matches @nid, fill the out
 827 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 828 * *@idx contains index into type_a and the upper 32bit indexes the
 829 * areas before each region in type_b.  For example, if type_b regions
 830 * look like the following,
 831 *
 832 *      0:[0-16), 1:[32-48), 2:[128-130)
 833 *
 834 * The upper 32bit indexes the following regions.
 835 *
 836 *      0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 837 *
 838 * As both region arrays are sorted, the function advances the two indices
 839 * in lockstep and returns each intersection.
 840 */
 841void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
 842                                      struct memblock_type *type_a,
 843                                      struct memblock_type *type_b,
 844                                      phys_addr_t *out_start,
 845                                      phys_addr_t *out_end, int *out_nid)
 846{
 847        int idx_a = *idx & 0xffffffff;
 848        int idx_b = *idx >> 32;
 849
 850        if (WARN_ONCE(nid == MAX_NUMNODES,
 851        "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
 852                nid = NUMA_NO_NODE;
 853
 854        for (; idx_a < type_a->cnt; idx_a++) {
 855                struct memblock_region *m = &type_a->regions[idx_a];
 856
 857                phys_addr_t m_start = m->base;
 858                phys_addr_t m_end = m->base + m->size;
 859                int         m_nid = memblock_get_region_node(m);
 860
 861                /* only memory regions are associated with nodes, check it */
 862                if (nid != NUMA_NO_NODE && nid != m_nid)
 863                        continue;
 864
 865                /* skip hotpluggable memory regions if needed */
 866                if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
 867                        continue;
 868
 869                /* if we want mirror memory skip non-mirror memory regions */
 870                if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
 871                        continue;
 872
 873                if (!type_b) {
 874                        if (out_start)
 875                                *out_start = m_start;
 876                        if (out_end)
 877                                *out_end = m_end;
 878                        if (out_nid)
 879                                *out_nid = m_nid;
 880                        idx_a++;
 881                        *idx = (u32)idx_a | (u64)idx_b << 32;
 882                        return;
 883                }
 884
 885                /* scan areas before each reservation */
 886                for (; idx_b < type_b->cnt + 1; idx_b++) {
 887                        struct memblock_region *r;
 888                        phys_addr_t r_start;
 889                        phys_addr_t r_end;
 890
 891                        r = &type_b->regions[idx_b];
 892                        r_start = idx_b ? r[-1].base + r[-1].size : 0;
 893                        r_end = idx_b < type_b->cnt ?
 894                                r->base : ULLONG_MAX;
 895
 896                        /*
 897                         * if idx_b advanced past idx_a,
 898                         * break out to advance idx_a
 899                         */
 900                        if (r_start >= m_end)
 901                                break;
 902                        /* if the two regions intersect, we're done */
 903                        if (m_start < r_end) {
 904                                if (out_start)
 905                                        *out_start =
 906                                                max(m_start, r_start);
 907                                if (out_end)
 908                                        *out_end = min(m_end, r_end);
 909                                if (out_nid)
 910                                        *out_nid = m_nid;
 911                                /*
 912                                 * The region which ends first is
 913                                 * advanced for the next iteration.
 914                                 */
 915                                if (m_end <= r_end)
 916                                        idx_a++;
 917                                else
 918                                        idx_b++;
 919                                *idx = (u32)idx_a | (u64)idx_b << 32;
 920                                return;
 921                        }
 922                }
 923        }
 924
 925        /* signal end of iteration */
 926        *idx = ULLONG_MAX;
 927}
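
/*
 * Illustrative sketch of the usual consumer of __next_mem_range(): the
 * for_each_free_mem_range() macro from <linux/memblock.h> drives it with
 * type_a == memory and type_b == reserved, e.g.:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_debug("free: [%pa-%pa)\n", &start, &end);
 */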
 928
 929/**
 930 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 931 *
 932 * Finds the next range from type_a which is not marked as unsuitable
 933 * in type_b.
 934 *
 935 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 937 * @flags: pick from blocks based on memory attributes
 938 * @type_a: pointer to memblock_type from where the range is taken
 939 * @type_b: pointer to memblock_type which excludes memory from being taken
 940 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 941 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 942 * @out_nid: ptr to int for nid of the range, can be %NULL
 943 *
 944 * Reverse of __next_mem_range().
 945 */
 946void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
 947                                          struct memblock_type *type_a,
 948                                          struct memblock_type *type_b,
 949                                          phys_addr_t *out_start,
 950                                          phys_addr_t *out_end, int *out_nid)
 951{
 952        int idx_a = *idx & 0xffffffff;
 953        int idx_b = *idx >> 32;
 954
 955        if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
 956                nid = NUMA_NO_NODE;
 957
        if (*idx == (u64)ULLONG_MAX) {
                idx_a = type_a->cnt - 1;
                /* @type_b may be NULL, in which case there is nothing to exclude */
                idx_b = type_b ? type_b->cnt : 0;
        }
 962
 963        for (; idx_a >= 0; idx_a--) {
 964                struct memblock_region *m = &type_a->regions[idx_a];
 965
 966                phys_addr_t m_start = m->base;
 967                phys_addr_t m_end = m->base + m->size;
 968                int m_nid = memblock_get_region_node(m);
 969
 970                /* only memory regions are associated with nodes, check it */
 971                if (nid != NUMA_NO_NODE && nid != m_nid)
 972                        continue;
 973
 974                /* skip hotpluggable memory regions if needed */
 975                if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
 976                        continue;
 977
 978                /* if we want mirror memory skip non-mirror memory regions */
 979                if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
 980                        continue;
 981
 982                if (!type_b) {
 983                        if (out_start)
 984                                *out_start = m_start;
 985                        if (out_end)
 986                                *out_end = m_end;
 987                        if (out_nid)
 988                                *out_nid = m_nid;
                        idx_a--;
 990                        *idx = (u32)idx_a | (u64)idx_b << 32;
 991                        return;
 992                }
 993
 994                /* scan areas before each reservation */
 995                for (; idx_b >= 0; idx_b--) {
 996                        struct memblock_region *r;
 997                        phys_addr_t r_start;
 998                        phys_addr_t r_end;
 999
1000                        r = &type_b->regions[idx_b];
1001                        r_start = idx_b ? r[-1].base + r[-1].size : 0;
1002                        r_end = idx_b < type_b->cnt ?
1003                                r->base : ULLONG_MAX;
1004                        /*
1005                         * if idx_b advanced past idx_a,
1006                         * break out to advance idx_a
1007                         */
1008
1009                        if (r_end <= m_start)
1010                                break;
1011                        /* if the two regions intersect, we're done */
1012                        if (m_end > r_start) {
1013                                if (out_start)
1014                                        *out_start = max(m_start, r_start);
1015                                if (out_end)
1016                                        *out_end = min(m_end, r_end);
1017                                if (out_nid)
1018                                        *out_nid = m_nid;
1019                                if (m_start >= r_start)
1020                                        idx_a--;
1021                                else
1022                                        idx_b--;
1023                                *idx = (u32)idx_a | (u64)idx_b << 32;
1024                                return;
1025                        }
1026                }
1027        }
1028        /* signal end of iteration */
1029        *idx = ULLONG_MAX;
1030}
1031
1032#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
1036void __init_memblock __next_mem_pfn_range(int *idx, int nid,
1037                                unsigned long *out_start_pfn,
1038                                unsigned long *out_end_pfn, int *out_nid)
1039{
1040        struct memblock_type *type = &memblock.memory;
1041        struct memblock_region *r;
1042
1043        while (++*idx < type->cnt) {
1044                r = &type->regions[*idx];
1045
1046                if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
1047                        continue;
1048                if (nid == MAX_NUMNODES || nid == r->nid)
1049                        break;
1050        }
1051        if (*idx >= type->cnt) {
1052                *idx = -1;
1053                return;
1054        }
1055
1056        if (out_start_pfn)
1057                *out_start_pfn = PFN_UP(r->base);
1058        if (out_end_pfn)
1059                *out_end_pfn = PFN_DOWN(r->base + r->size);
1060        if (out_nid)
1061                *out_nid = r->nid;
1062}
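
/*
 * Illustrative sketch: __next_mem_pfn_range() backs the
 * for_each_mem_pfn_range() iterator from <linux/memblock.h>, e.g.:
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_debug("node %d: pfns [%lx-%lx)\n", nid, start_pfn, end_pfn);
 */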
1063
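/*
 * memblock_next_valid_pfn - look up pfn + 1 in memblock.memory; if it falls
 * inside a memory region it is returned as-is, otherwise the search skips
 * ahead to the first pfn of the next memory region.  The result is always
 * clamped to @max_pfn.
 */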
1064unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
1065                                                      unsigned long max_pfn)
1066{
1067        struct memblock_type *type = &memblock.memory;
1068        unsigned int right = type->cnt;
1069        unsigned int mid, left = 0;
1070        phys_addr_t addr = PFN_PHYS(pfn + 1);
1071
1072        do {
1073                mid = (right + left) / 2;
1074
1075                if (addr < type->regions[mid].base)
1076                        right = mid;
1077                else if (addr >= (type->regions[mid].base +
1078                                  type->regions[mid].size))
1079                        left = mid + 1;
1080                else {
1081                        /* addr is within the region, so pfn + 1 is valid */
1082                        return min(pfn + 1, max_pfn);
1083                }
1084        } while (left < right);
1085
1086        if (right == type->cnt)
1087                return max_pfn;
1088        else
1089                return min(PHYS_PFN(type->regions[right].base), max_pfn);
1090}
1091
1092/**
1093 * memblock_set_node - set node ID on memblock regions
1094 * @base: base of area to set node ID for
1095 * @size: size of area to set node ID for
1096 * @type: memblock type to set node ID for
1097 * @nid: node ID to set
1098 *
1099 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
1100 * Regions which cross the area boundaries are split as necessary.
1101 *
1102 * RETURNS:
1103 * 0 on success, -errno on failure.
1104 */
1105int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
1106                                      struct memblock_type *type, int nid)
1107{
1108        int start_rgn, end_rgn;
1109        int i, ret;
1110
1111        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
1112        if (ret)
1113                return ret;
1114
1115        for (i = start_rgn; i < end_rgn; i++)
1116                memblock_set_region_node(&type->regions[i], nid);
1117
1118        memblock_merge_regions(type);
1119        return 0;
1120}
1121#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
1122
1123static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
1124                                        phys_addr_t align, phys_addr_t max_addr,
1125                                        int nid, ulong flags)
1126{
1127        phys_addr_t found;
1128
1129        if (WARN_ON(!align))
1130                align = __alignof__(long long);
1131
1132        /* align @size to avoid excessive fragmentation on reserved array */
1133        size = round_up(size, align);
1134
1135        found = memblock_find_in_range_node(size, align, 0, max_addr, nid,
1136                                            flags);
1137        if (found && !memblock_reserve(found, size))
1138                return found;
1139
1140        return 0;
1141}
1142
1143phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
1144{
1145        ulong flags = choose_memblock_flags();
1146        phys_addr_t ret;
1147
1148again:
1149        ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
1150                                      nid, flags);
1151
1152        if (!ret && (flags & MEMBLOCK_MIRROR)) {
1153                flags &= ~MEMBLOCK_MIRROR;
1154                goto again;
1155        }
1156        return ret;
1157}
1158
1159phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
1160{
1161        return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
1162                                       MEMBLOCK_NONE);
1163}
1164
1165phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
1166{
1167        phys_addr_t alloc;
1168
1169        alloc = __memblock_alloc_base(size, align, max_addr);
1170
1171        if (alloc == 0)
1172                panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
1173                      (unsigned long long) size, (unsigned long long) max_addr);
1174
1175        return alloc;
1176}
1177
1178phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
1179{
1180        return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
1181}
1182
1183phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
1184{
1185        phys_addr_t res = memblock_alloc_nid(size, align, nid);
1186
1187        if (res)
1188                return res;
1189        return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
1190}
1191
1192/**
1193 * memblock_virt_alloc_internal - allocate boot memory block
1194 * @size: size of memory block to be allocated in bytes
1195 * @align: alignment of the region and block's size
1196 * @min_addr: the lower bound of the memory region to allocate (phys address)
1197 * @max_addr: the upper bound of the memory region to allocate (phys address)
1198 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1199 *
 * The @min_addr limit is dropped if it cannot be satisfied and the allocation
 * will fall back to memory below @min_addr. Also, allocation may fall back
 * to any node in the system if the specified node cannot
 * hold the requested memory.
1204 *
1205 * The allocation is performed from memory region limited by
1206 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
1207 *
1208 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
1209 *
1210 * The phys address of allocated boot memory block is converted to virtual and
1211 * allocated memory is reset to 0.
1212 *
 * In addition, the function sets min_count to 0 using kmemleak_alloc() for the
 * allocated boot memory block, so that it is never reported as a leak.
1215 *
1216 * RETURNS:
1217 * Virtual address of allocated memory block on success, NULL on failure.
1218 */
1219static void * __init memblock_virt_alloc_internal(
1220                                phys_addr_t size, phys_addr_t align,
1221                                phys_addr_t min_addr, phys_addr_t max_addr,
1222                                int nid)
1223{
1224        phys_addr_t alloc;
1225        void *ptr;
1226        ulong flags = choose_memblock_flags();
1227
1228        if (nid == MAX_NUMNODES)
1229                pr_warn("%s: usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE\n",
1230                        __func__);
1231
1232        /*
1233         * Detect any accidental use of these APIs after slab is ready, as at
1234         * this moment memblock may be deinitialized already and its
1235         * internal data may be destroyed (after execution of free_all_bootmem)
1236         */
1237        if (WARN_ON_ONCE(slab_is_available()))
1238                return kzalloc_node(size, GFP_NOWAIT, nid);
1239
1240        if (!align)
1241                align = SMP_CACHE_BYTES;
1242
1243        /* align @size to avoid excessive fragmentation on reserved array */
1244        size = round_up(size, align);
1245
1246again:
1247        alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
1248                                            nid, flags);
1249        if (alloc)
1250                goto done;
1251
1252        if (nid != NUMA_NO_NODE) {
1253                alloc = memblock_find_in_range_node(size, align, min_addr,
1254                                                    max_addr,  NUMA_NO_NODE,
1255                                                    flags);
1256                if (alloc)
1257                        goto done;
1258        }
1259
1260        if (min_addr) {
1261                min_addr = 0;
1262                goto again;
1263        }
1264
1265        if (flags & MEMBLOCK_MIRROR) {
1266                flags &= ~MEMBLOCK_MIRROR;
1267                pr_warn("Could not allocate %pap bytes of mirrored memory\n",
1268                        &size);
1269                goto again;
1270        }
1271
1272        return NULL;
1273done:
1274        memblock_reserve(alloc, size);
1275        ptr = phys_to_virt(alloc);
1276        memset(ptr, 0, size);
1277
1278        /*
1279         * The min_count is set to 0 so that bootmem allocated blocks
1280         * are never reported as leaks. This is because many of these blocks
1281         * are only referred via the physical address which is not
1282         * looked up by kmemleak.
1283         */
1284        kmemleak_alloc(ptr, size, 0, 0);
1285
1286        return ptr;
1287}
1288
1289/**
1290 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
1291 * @size: size of memory block to be allocated in bytes
1292 * @align: alignment of the region and block's size
1293 * @min_addr: the lower bound of the memory region from where the allocation
1294 *        is preferred (phys address)
1295 * @max_addr: the upper bound of the memory region from where the allocation
1296 *            is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
1297 *            allocate only from memory limited by memblock.current_limit value
1298 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1299 *
 * Public wrapper around memblock_virt_alloc_internal() which provides
 * additional debug information (including caller info), if enabled, and
 * returns NULL instead of panicking on failure.
1302 *
1303 * RETURNS:
1304 * Virtual address of allocated memory block on success, NULL on failure.
1305 */
1306void * __init memblock_virt_alloc_try_nid_nopanic(
1307                                phys_addr_t size, phys_addr_t align,
1308                                phys_addr_t min_addr, phys_addr_t max_addr,
1309                                int nid)
1310{
1311        memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
1312                     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
1313                     (u64)max_addr, (void *)_RET_IP_);
1314        return memblock_virt_alloc_internal(size, align, min_addr,
1315                                             max_addr, nid);
1316}
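
/*
 * Illustrative sketch (the table name and size below are hypothetical):
 * a typical early caller that can tolerate failure allocates a zeroed,
 * cache-aligned block and checks the result itself:
 *
 *	tbl = memblock_virt_alloc_try_nid_nopanic(nr * sizeof(*tbl),
 *						  SMP_CACHE_BYTES, 0,
 *						  BOOTMEM_ALLOC_ACCESSIBLE,
 *						  NUMA_NO_NODE);
 *	if (!tbl)
 *		return -ENOMEM;
 */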
1317
1318/**
1319 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
1320 * @size: size of memory block to be allocated in bytes
1321 * @align: alignment of the region and block's size
1322 * @min_addr: the lower bound of the memory region from where the allocation
1323 *        is preferred (phys address)
1324 * @max_addr: the upper bound of the memory region from where the allocation
1325 *            is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
1326 *            allocate only from memory limited by memblock.current_limit value
1327 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1328 *
 * Public panicking wrapper around memblock_virt_alloc_internal() which
 * provides debug information (including caller info), if enabled, and
 * panics if the request cannot be satisfied.
 *
 * RETURNS:
 * Virtual address of the allocated memory block on success; panics on failure.
1335 */
1336void * __init memblock_virt_alloc_try_nid(
1337                        phys_addr_t size, phys_addr_t align,
1338                        phys_addr_t min_addr, phys_addr_t max_addr,
1339                        int nid)
1340{
1341        void *ptr;
1342
1343        memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
1344                     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
1345                     (u64)max_addr, (void *)_RET_IP_);
1346        ptr = memblock_virt_alloc_internal(size, align,
1347                                           min_addr, max_addr, nid);
1348        if (ptr)
1349                return ptr;
1350
1351        panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
1352              __func__, (u64)size, (u64)align, nid, (u64)min_addr,
1353              (u64)max_addr);
1354        return NULL;
1355}
1356
1357/**
1358 * __memblock_free_early - free boot memory block
 * @base: phys starting address of the boot memory block
1360 * @size: size of the boot memory block in bytes
1361 *
 * Free a boot memory block previously allocated by the memblock_virt_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
1364 */
1365void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
1366{
1367        memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
1368                     __func__, (u64)base, (u64)base + size - 1,
1369                     (void *)_RET_IP_);
1370        kmemleak_free_part(__va(base), size);
1371        memblock_remove_range(&memblock.reserved, base, size);
1372}
1373
/**
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
1377 * @size: size of the boot memory block in bytes
1378 *
1379 * This is only useful when the bootmem allocator has already been torn
1380 * down, but we are still initializing the system.  Pages are released directly
1381 * to the buddy allocator, no bootmem metadata is updated because it is gone.
1382 */
1383void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
1384{
1385        u64 cursor, end;
1386
1387        memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
1388                     __func__, (u64)base, (u64)base + size - 1,
1389                     (void *)_RET_IP_);
1390        kmemleak_free_part(__va(base), size);
1391        cursor = PFN_UP(base);
1392        end = PFN_DOWN(base + size);
1393
1394        for (; cursor < end; cursor++) {
1395                __free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
1396                totalram_pages++;
1397        }
1398}
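
/*
 * Illustrative sketch (editor's addition, not part of memblock.c):
 * releasing a no-longer-needed early reservation once the buddy allocator
 * is up, so the pages become regular free memory.
 * example_release_early_buffer() is a made-up name.
 */
static void __init example_release_early_buffer(phys_addr_t base,
                                                phys_addr_t size)
{
        /*
         * Only whole pages inside [base, base + size) are freed, because
         * the loop above walks PFN_UP(base) .. PFN_DOWN(base + size).
         */
        __memblock_free_late(base, size);
}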
1399
1400/*
1401 * Remaining API functions
1402 */
1403
1404phys_addr_t __init_memblock memblock_phys_mem_size(void)
1405{
1406        return memblock.memory.total_size;
1407}
1408
1409phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
1410{
1411        unsigned long pages = 0;
1412        struct memblock_region *r;
1413        unsigned long start_pfn, end_pfn;
1414
1415        for_each_memblock(memory, r) {
1416                start_pfn = memblock_region_memory_base_pfn(r);
1417                end_pfn = memblock_region_memory_end_pfn(r);
1418                start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
1419                end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
1420                pages += end_pfn - start_pfn;
1421        }
1422
1423        return (phys_addr_t)pages << PAGE_SHIFT;
1424}
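
/*
 * Illustrative sketch (editor's addition, not part of memblock.c):
 * comparing total RAM with the portion below 4 GiB, e.g. to decide how
 * large a bounce-buffer pool should be.  example_report_lowmem() is a
 * made-up name.
 */
static void __init example_report_lowmem(void)
{
        phys_addr_t total = memblock_phys_mem_size();
        phys_addr_t below_4g = memblock_mem_size(PFN_DOWN(1ULL << 32));

        pr_info("example: %pa bytes of RAM, %pa bytes below 4 GiB\n",
                &total, &below_4g);
}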
1425
1426/* lowest address */
1427phys_addr_t __init_memblock memblock_start_of_DRAM(void)
1428{
1429        return memblock.memory.regions[0].base;
1430}
1431
1432phys_addr_t __init_memblock memblock_end_of_DRAM(void)
1433{
1434        int idx = memblock.memory.cnt - 1;
1435
1436        return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
1437}
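
/*
 * Illustrative sketch (editor's addition, not part of memblock.c): the
 * physical window spanned by RAM, which an architecture might use to size
 * its linear mapping.  The window can contain holes, so its size is not
 * necessarily equal to memblock_phys_mem_size().
 */
static void __init example_report_dram_window(void)
{
        phys_addr_t start = memblock_start_of_DRAM();
        phys_addr_t end = memblock_end_of_DRAM();       /* exclusive */

        pr_info("example: DRAM window %pa-%pa (%llu bytes incl. holes)\n",
                &start, &end, (unsigned long long)(end - start));
}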
1438
1439void __init memblock_enforce_memory_limit(phys_addr_t limit)
1440{
1441        phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
1442        struct memblock_region *r;
1443
1444        if (!limit)
1445                return;
1446
1447        /* find out max address */
1448        for_each_memblock(memory, r) {
1449                if (limit <= r->size) {
1450                        max_addr = r->base + limit;
1451                        break;
1452                }
1453                limit -= r->size;
1454        }
1455
1456        /* truncate both memory and reserved regions */
1457        memblock_remove_range(&memblock.memory, max_addr,
1458                              (phys_addr_t)ULLONG_MAX);
1459        memblock_remove_range(&memblock.reserved, max_addr,
1460                              (phys_addr_t)ULLONG_MAX);
1461}
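
/*
 * Illustrative sketch (editor's addition, not part of memblock.c): how
 * architecture setup code might apply a "mem=" style limit parsed from
 * the command line.  example_mem_limit is a made-up variable; note the
 * limit is an amount of usable memory, not an address cap.
 */
static phys_addr_t example_mem_limit __initdata;        /* 0 means no limit */

static void __init example_apply_mem_limit(void)
{
        /* Drops all memory and reservations beyond the first limit bytes. */
        memblock_enforce_memory_limit(example_mem_limit);
}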
1462
1463static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
1464{
1465        unsigned int left = 0, right = type->cnt;
1466
1467        do {
1468                unsigned int mid = (right + left) / 2;
1469
1470                if (addr < type->regions[mid].base)
1471                        right = mid;
1472                else if (addr >= (type->regions[mid].base +
1473                                  type->regions[mid].size))
1474                        left = mid + 1;
1475                else
1476                        return mid;
1477        } while (left < right);
1478        return -1;
1479}
1480
1481int __init memblock_is_reserved(phys_addr_t addr)
1482{
1483        return memblock_search(&memblock.reserved, addr) != -1;
1484}
1485
1486int __init_memblock memblock_is_memory(phys_addr_t addr)
1487{
1488        return memblock_search(&memblock.memory, addr) != -1;
1489}
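
/*
 * Illustrative sketch (editor's addition, not part of memblock.c):
 * classifying a single physical address with the two lookups above.
 * example_classify_addr() is a made-up name.
 */
static void __init example_classify_addr(phys_addr_t addr)
{
        if (!memblock_is_memory(addr))
                pr_info("example: %pa is not RAM known to memblock\n", &addr);
        else if (memblock_is_reserved(addr))
                pr_info("example: %pa is RAM, currently reserved\n", &addr);
        else
                pr_info("example: %pa is free (unreserved) RAM\n", &addr);
}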
1490
1491#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1492int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
1493                         unsigned long *start_pfn, unsigned long *end_pfn)
1494{
1495        struct memblock_type *type = &memblock.memory;
1496        int mid = memblock_search(type, (phys_addr_t)pfn << PAGE_SHIFT);
1497
1498        if (mid == -1)
1499                return -1;
1500
1501        *start_pfn = type->regions[mid].base >> PAGE_SHIFT;
1502        *end_pfn = (type->regions[mid].base + type->regions[mid].size)
1503                        >> PAGE_SHIFT;
1504
1505        return type->regions[mid].nid;
1506}
1507#endif
1508
1509/**
1510 * memblock_is_region_memory - check if a region is a subset of memory
1511 * @base: base of region to check
1512 * @size: size of region to check
1513 *
1514 * Check if the region [@base, @base+@size) is a subset of a memory block.
1515 *
1516 * RETURNS:
1517 * 0 if false, non-zero if true
1518 */
1519int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
1520{
1521        int idx = memblock_search(&memblock.memory, base);
1522        phys_addr_t end = base + memblock_cap_size(base, &size);
1523
1524        if (idx == -1)
1525                return 0;
1526        return memblock.memory.regions[idx].base <= base &&
1527                (memblock.memory.regions[idx].base +
1528                 memblock.memory.regions[idx].size) >= end;
1529}
1530
1531/**
1532 * memblock_is_region_reserved - check if a region intersects reserved memory
1533 * @base: base of region to check
1534 * @size: size of region to check
1535 *
1536 * Check if the region [@base, @base+@size) intersects a reserved memory block.
1537 *
1538 * RETURNS:
1539 * 0 if false, non-zero if true
1540 */
1541int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
1542{
1543        memblock_cap_size(base, &size);
1544        return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
1545}
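
/*
 * Illustrative sketch (editor's addition, not part of memblock.c): the
 * usual pairing of the two predicates above when validating and claiming
 * a firmware-described region.  example_claim_region() is a made-up name.
 */
static int __init example_claim_region(phys_addr_t base, phys_addr_t size)
{
        /* Must lie entirely inside RAM ... */
        if (!memblock_is_region_memory(base, size))
                return -EINVAL;
        /* ... and must not overlap anything already reserved. */
        if (memblock_is_region_reserved(base, size))
                return -EBUSY;

        return memblock_reserve(base, size);
}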
1546
1547void __init_memblock memblock_trim_memory(phys_addr_t align)
1548{
1549        phys_addr_t start, end, orig_start, orig_end;
1550        struct memblock_region *r;
1551
1552        for_each_memblock(memory, r) {
1553                orig_start = r->base;
1554                orig_end = r->base + r->size;
1555                start = round_up(orig_start, align);
1556                end = round_down(orig_end, align);
1557
1558                if (start == orig_start && end == orig_end)
1559                        continue;
1560
1561                if (start < end) {
1562                        r->base = start;
1563                        r->size = end - start;
1564                } else {
1565                        memblock_remove_region(&memblock.memory,
1566                                               r - memblock.memory.regions);
1567                        r--;
1568                }
1569        }
1570}
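
/*
 * Illustrative sketch (editor's addition, not part of memblock.c): an
 * architecture discarding partial pages at the edges of RAM regions so
 * that every remaining region is page aligned.
 */
static void __init example_trim_to_pages(void)
{
        /* Regions left smaller than one aligned page are removed entirely. */
        memblock_trim_memory(PAGE_SIZE);
}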
1571
1572void __init_memblock memblock_set_current_limit(phys_addr_t limit)
1573{
1574        memblock.current_limit = limit;
1575}
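
/*
 * Illustrative sketch (editor's addition, not part of memblock.c): an
 * architecture capping early allocations to memory it has already mapped,
 * then lifting the cap once the full linear map exists.
 * example_mapped_end is a made-up variable.
 */
static phys_addr_t example_mapped_end __initdata;

static void __init example_limit_early_allocs(void)
{
        /* Only hand out memory below what is currently mapped ... */
        memblock_set_current_limit(example_mapped_end);

        /* ... later, once everything is mapped, remove the restriction. */
        memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}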
1576
1577static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
1578{
1579        unsigned long long base, size;
1580        unsigned long flags;
1581        int i;
1582
1583        pr_info(" %s.cnt  = 0x%lx\n", name, type->cnt);
1584
1585        for (i = 0; i < type->cnt; i++) {
1586                struct memblock_region *rgn = &type->regions[i];
1587                char nid_buf[32] = "";
1588
1589                base = rgn->base;
1590                size = rgn->size;
1591                flags = rgn->flags;
1592#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1593                if (memblock_get_region_node(rgn) != MAX_NUMNODES)
1594                        snprintf(nid_buf, sizeof(nid_buf), " on node %d",
1595                                 memblock_get_region_node(rgn));
1596#endif
1597                pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n",
1598                        name, i, base, base + size - 1, size, nid_buf, flags);
1599        }
1600}
1601
1602void __init_memblock __memblock_dump_all(void)
1603{
1604        pr_info("MEMBLOCK configuration:\n");
1605        pr_info(" memory size = %#llx reserved size = %#llx\n",
1606                (unsigned long long)memblock.memory.total_size,
1607                (unsigned long long)memblock.reserved.total_size);
1608
1609        memblock_dump(&memblock.memory, "memory");
1610        memblock_dump(&memblock.reserved, "reserved");
1611}
1612
1613void __init memblock_allow_resize(void)
1614{
1615        memblock_can_resize = 1;
1616}
1617
1618static int __init early_memblock(char *p)
1619{
1620        if (p && strstr(p, "debug"))
1621                memblock_debug = 1;
1622        return 0;
1623}
1624early_param("memblock", early_memblock);
1625
1626#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)
1627
1628static int memblock_debug_show(struct seq_file *m, void *private)
1629{
1630        struct memblock_type *type = m->private;
1631        struct memblock_region *reg;
1632        int i;
1633
1634        for (i = 0; i < type->cnt; i++) {
1635                reg = &type->regions[i];
1636                seq_printf(m, "%4d: ", i);
1637                if (sizeof(phys_addr_t) == 4)
1638                        seq_printf(m, "0x%08lx..0x%08lx\n",
1639                                   (unsigned long)reg->base,
1640                                   (unsigned long)(reg->base + reg->size - 1));
1641                else
1642                        seq_printf(m, "0x%016llx..0x%016llx\n",
1643                                   (unsigned long long)reg->base,
1644                                   (unsigned long long)(reg->base + reg->size - 1));
1645
1646        }
1647        return 0;
1648}
1649
1650static int memblock_debug_open(struct inode *inode, struct file *file)
1651{
1652        return single_open(file, memblock_debug_show, inode->i_private);
1653}
1654
1655static const struct file_operations memblock_debug_fops = {
1656        .open = memblock_debug_open,
1657        .read = seq_read,
1658        .llseek = seq_lseek,
1659        .release = single_release,
1660};
1661
1662static int __init memblock_init_debugfs(void)
1663{
1664        struct dentry *root = debugfs_create_dir("memblock", NULL);
1665        if (!root)
1666                return -ENXIO;
1667        debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
1668        debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
1669
1670        return 0;
1671}
1672__initcall(memblock_init_debugfs);
1673
1674#endif /* CONFIG_DEBUG_FS */
1675