linux/mm/memblock.c
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.     June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

struct memblock memblock __initdata_memblock;

int memblock_debug __initdata_memblock;
int memblock_can_resize __initdata_memblock;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;

/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
        if (type == &memblock.memory)
                return "memory";
        else if (type == &memblock.reserved)
                return "reserved";
        else
                return "unknown";
}

/*
 * Address comparison utilities
 */

static phys_addr_t __init_memblock memblock_align_down(phys_addr_t addr, phys_addr_t size)
{
        return addr & ~(size - 1);
}

static phys_addr_t __init_memblock memblock_align_up(phys_addr_t addr, phys_addr_t size)
{
        return (addr + (size - 1)) & ~(size - 1);
}

static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
                                       phys_addr_t base2, phys_addr_t size2)
{
        return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

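/*
 * Returns 1 if region 2 immediately follows region 1, -1 if region 1
 * immediately follows region 2, and 0 if the regions are not adjacent.
 */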
static long __init_memblock memblock_addrs_adjacent(phys_addr_t base1, phys_addr_t size1,
                               phys_addr_t base2, phys_addr_t size2)
{
        if (base2 == base1 + size1)
                return 1;
        else if (base1 == base2 + size2)
                return -1;

        return 0;
}

static long __init_memblock memblock_regions_adjacent(struct memblock_type *type,
                                 unsigned long r1, unsigned long r2)
{
        phys_addr_t base1 = type->regions[r1].base;
        phys_addr_t size1 = type->regions[r1].size;
        phys_addr_t base2 = type->regions[r2].base;
        phys_addr_t size2 = type->regions[r2].size;

        return memblock_addrs_adjacent(base1, size1, base2, size2);
}

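/*
 * Returns the index of the first region in @type that overlaps the range
 * [@base, @base + @size), or -1 if there is no overlap.
 */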
long __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
        unsigned long i;

        for (i = 0; i < type->cnt; i++) {
                phys_addr_t rgnbase = type->regions[i].base;
                phys_addr_t rgnsize = type->regions[i].size;
                if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
                        break;
        }

        return (i < type->cnt) ? i : -1;
}

/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */

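/*
 * Search downward from @end for a free (unreserved) block of @size bytes
 * aligned to @align that lies entirely within [@start, @end).  Returns the
 * base of the block, or MEMBLOCK_ERROR if none is found.
 */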
static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end,
                                          phys_addr_t size, phys_addr_t align)
{
        phys_addr_t base, res_base;
        long j;

        /* In case a huge size is requested */
        if (end < size)
                return MEMBLOCK_ERROR;

        base = memblock_align_down((end - size), align);

        /* Prevent allocations returning 0 as it's also used to
         * indicate an allocation failure
         */
        if (start == 0)
                start = PAGE_SIZE;

        while (start <= base) {
                j = memblock_overlaps_region(&memblock.reserved, base, size);
                if (j < 0)
                        return base;
                res_base = memblock.reserved.regions[j].base;
                if (res_base < size)
                        break;
                base = memblock_align_down(res_base - size, align);
        }

        return MEMBLOCK_ERROR;
}

static phys_addr_t __init_memblock memblock_find_base(phys_addr_t size,
                        phys_addr_t align, phys_addr_t start, phys_addr_t end)
{
        long i;

        BUG_ON(0 == size);

        /* Pump up max_addr */
        if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
                end = memblock.current_limit;

        /* We do a top-down search; this tends to limit memory
         * fragmentation by keeping early boot allocations near the
         * top of memory.
         */
        for (i = memblock.memory.cnt - 1; i >= 0; i--) {
                phys_addr_t memblockbase = memblock.memory.regions[i].base;
                phys_addr_t memblocksize = memblock.memory.regions[i].size;
                phys_addr_t bottom, top, found;

                if (memblocksize < size)
                        continue;
                if ((memblockbase + memblocksize) <= start)
                        break;
                bottom = max(memblockbase, start);
                top = min(memblockbase + memblocksize, end);
                if (bottom >= top)
                        continue;
                found = memblock_find_region(bottom, top, size, align);
                if (found != MEMBLOCK_ERROR)
                        return found;
        }
        return MEMBLOCK_ERROR;
}

/*
 * Find a free area with specified alignment in a specific range.
 */
u64 __init_memblock memblock_find_in_range(u64 start, u64 end, u64 size, u64 align)
{
        return memblock_find_base(size, align, start, end);
}

/*
 * Free memblock.reserved.regions
 */
int __init_memblock memblock_free_reserved_regions(void)
{
        if (memblock.reserved.regions == memblock_reserved_init_regions)
                return 0;

        return memblock_free(__pa(memblock.reserved.regions),
                 sizeof(struct memblock_region) * memblock.reserved.max);
}

/*
 * Reserve memblock.reserved.regions
 */
int __init_memblock memblock_reserve_reserved_regions(void)
{
        if (memblock.reserved.regions == memblock_reserved_init_regions)
                return 0;

        return memblock_reserve(__pa(memblock.reserved.regions),
                 sizeof(struct memblock_region) * memblock.reserved.max);
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
        unsigned long i;

        for (i = r; i < type->cnt - 1; i++) {
                type->regions[i].base = type->regions[i + 1].base;
                type->regions[i].size = type->regions[i + 1].size;
        }
        type->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void __init_memblock memblock_coalesce_regions(struct memblock_type *type,
                unsigned long r1, unsigned long r2)
{
        type->regions[r1].size += type->regions[r2].size;
        memblock_remove_region(type, r2);
}

/* Defined below but needed now */
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);

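/*
 * Double the capacity of the region array of @type.  Uses kmalloc() once the
 * slab allocator is up, and memblock itself before that; when memblock
 * provides the space, that space is immediately added to the reserved list.
 */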
static int __init_memblock memblock_double_array(struct memblock_type *type)
{
        struct memblock_region *new_array, *old_array;
        phys_addr_t old_size, new_size, addr;
        int use_slab = slab_is_available();

        /* We don't allow resizing until we know about the reserved regions
         * of memory that aren't suitable for allocation
         */
        if (!memblock_can_resize)
                return -1;

        /* Calculate new doubled size */
        old_size = type->max * sizeof(struct memblock_region);
        new_size = old_size << 1;

        /* Try to find some space for it.
         *
         * WARNING: We assume that either slab_is_available() is true and we
         * use it, or we use MEMBLOCK for allocations. That means that this is
         * unsafe to use when bootmem is currently active (unless bootmem
         * itself is implemented on top of MEMBLOCK which isn't the case yet)
         *
         * This should however not be an issue for now, as we currently only
         * call into MEMBLOCK while it's still active, or much later when slab
         * is active for memory hotplug operations
         */
        if (use_slab) {
                new_array = kmalloc(new_size, GFP_KERNEL);
                addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
        } else
                addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE);
        if (addr == MEMBLOCK_ERROR) {
                pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
                       memblock_type_name(type), type->max, type->max * 2);
                return -1;
        }
        new_array = __va(addr);

        memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
                 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);

        /* Found space, we now need to move the array over before
         * we add the reserved region since it may be our reserved
         * array itself that is full.
         */
        memcpy(new_array, type->regions, old_size);
        memset(new_array + type->max, 0, old_size);
        old_array = type->regions;
        type->regions = new_array;
        type->max <<= 1;

        /* If we use SLAB that's it, we are done */
        if (use_slab)
                return 0;

        /* Add the new reserved region now. Should not fail ! */
        BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size) < 0);

        /* If the array wasn't our static init one, then free it. We only do
         * that before SLAB is available as later on, we don't know whether
         * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
         * anyway.
         */
        if (old_array != memblock_memory_init_regions &&
            old_array != memblock_reserved_init_regions)
                memblock_free(__pa(old_array), old_size);

        return 0;
}

extern int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
                                          phys_addr_t addr2, phys_addr_t size2)
{
        return 1;
}

static long __init_memblock memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
        unsigned long coalesced = 0;
        long adjacent, i;

        if ((type->cnt == 1) && (type->regions[0].size == 0)) {
                type->regions[0].base = base;
                type->regions[0].size = size;
                return 0;
        }

        /* First try and coalesce this MEMBLOCK with another. */
        for (i = 0; i < type->cnt; i++) {
                phys_addr_t rgnbase = type->regions[i].base;
                phys_addr_t rgnsize = type->regions[i].size;

                if ((rgnbase == base) && (rgnsize == size))
                        /* Already have this region, so we're done */
                        return 0;

                adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize);
                /* Check if arch allows coalescing */
                if (adjacent != 0 && type == &memblock.memory &&
                    !memblock_memory_can_coalesce(base, size, rgnbase, rgnsize))
                        break;
                if (adjacent > 0) {
                        type->regions[i].base -= size;
                        type->regions[i].size += size;
                        coalesced++;
                        break;
                } else if (adjacent < 0) {
                        type->regions[i].size += size;
                        coalesced++;
                        break;
                }
        }

        /* If we plugged a hole, we may want to also coalesce with the
         * next region
         */
        if ((i < type->cnt - 1) && memblock_regions_adjacent(type, i, i+1) &&
            ((type != &memblock.memory || memblock_memory_can_coalesce(type->regions[i].base,
                                                             type->regions[i].size,
                                                             type->regions[i+1].base,
                                                             type->regions[i+1].size)))) {
                memblock_coalesce_regions(type, i, i+1);
                coalesced++;
        }

        if (coalesced)
                return coalesced;

        /* If we are out of space, we fail. It's too late to resize the array
         * but then this shouldn't have happened in the first place.
         */
        if (WARN_ON(type->cnt >= type->max))
                return -1;

        /* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
        for (i = type->cnt - 1; i >= 0; i--) {
                if (base < type->regions[i].base) {
                        type->regions[i+1].base = type->regions[i].base;
                        type->regions[i+1].size = type->regions[i].size;
                } else {
                        type->regions[i+1].base = base;
                        type->regions[i+1].size = size;
                        break;
                }
        }

        if (base < type->regions[0].base) {
                type->regions[0].base = base;
                type->regions[0].size = size;
        }
        type->cnt++;

        /* The array is full? Try to resize it. If that fails, we undo
         * our allocation and return an error.
         */
        if (type->cnt == type->max && memblock_double_array(type)) {
                type->cnt--;
                return -1;
        }

        return 0;
}

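/*
 * Add a range of physical memory to the available-memory list
 * (memblock.memory).
 */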
long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
        return memblock_add_region(&memblock.memory, base, size);
}

static long __init_memblock __memblock_remove(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
        phys_addr_t rgnbegin, rgnend;
        phys_addr_t end = base + size;
        int i;

        rgnbegin = rgnend = 0; /* suppress gcc warnings */

        /* Find the region where (base, size) belongs to */
        for (i=0; i < type->cnt; i++) {
                rgnbegin = type->regions[i].base;
                rgnend = rgnbegin + type->regions[i].size;

                if ((rgnbegin <= base) && (end <= rgnend))
                        break;
        }

        /* Didn't find the region */
        if (i == type->cnt)
                return -1;

        /* Check to see if we are removing entire region */
        if ((rgnbegin == base) && (rgnend == end)) {
                memblock_remove_region(type, i);
                return 0;
        }

        /* Check to see if region is matching at the front */
        if (rgnbegin == base) {
                type->regions[i].base = end;
                type->regions[i].size -= size;
                return 0;
        }

        /* Check to see if the region is matching at the end */
        if (rgnend == end) {
                type->regions[i].size -= size;
                return 0;
        }

        /*
         * We need to split the entry - adjust the current one to the
         * beginning of the hole and add the region after the hole.
         */
        type->regions[i].size = base - type->regions[i].base;
        return memblock_add_region(type, end, rgnend - end);
}

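/*
 * Remove a range from the available-memory list (memblock.memory),
 * splitting an existing region if necessary.
 */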
long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
        return __memblock_remove(&memblock.memory, base, size);
}

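/*
 * Free a previously reserved range, i.e. drop it from the reserved list
 * (memblock.reserved).  The underlying memory itself is not touched.
 */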
long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
        return __memblock_remove(&memblock.reserved, base, size);
}

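/*
 * Mark a range as reserved so that later allocations will not hand it out.
 */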
long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
        struct memblock_type *_rgn = &memblock.reserved;

        BUG_ON(0 == size);

        return memblock_add_region(_rgn, base, size);
}

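/*
 * Allocate (find and reserve) @size bytes below @max_addr with the given
 * alignment.  Returns the physical base address, or 0 on failure;
 * memblock_alloc_base() below panics instead of returning 0.
 */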
phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
        phys_addr_t found;

        /* We align the size to limit fragmentation. Without this, a lot of
         * small allocs quickly eat up the whole reserve array on sparc
         */
        size = memblock_align_up(size, align);

        found = memblock_find_base(size, align, 0, max_addr);
        if (found != MEMBLOCK_ERROR &&
            memblock_add_region(&memblock.reserved, found, size) >= 0)
                return found;

        return 0;
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
        phys_addr_t alloc;

        alloc = __memblock_alloc_base(size, align, max_addr);

        if (alloc == 0)
                panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
                      (unsigned long long) size, (unsigned long long) max_addr);

        return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
        return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}


/*
 * Additional node-local allocators. The search for node memory is bottom-up
 * and walks memblock regions within that node bottom-up as well, but
 * allocation within a memblock region is top-down. XXX I plan to fix that
 * at some stage
 *
 * WARNING: Only available after early_node_map[] has been populated,
 * on some architectures, that is after all the calls to add_active_range()
 * have been done to populate it.
 */

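/*
 * Given [start, end), return the end of the largest prefix of that range
 * that lies on a single node, and store that node's id in *nid.  This weak
 * default uses early_node_map[] when CONFIG_ARCH_POPULATES_NODE_MAP is set
 * and otherwise reports everything as node 0.
 */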
phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
{
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
        /*
         * This code originates from sparc, which really wants us to walk by
         * addresses and return the nid. This is not very convenient for
         * early_pfn_map[] users as the map isn't sorted yet, and it really
         * wants to be walked by nid.
         *
         * For now, I implement the inefficient method below which walks the
         * early map multiple times. Eventually we may want to use an ARCH
         * config option to implement a completely different method for both
         * cases.
         */
        unsigned long start_pfn, end_pfn;
        int i;

        for (i = 0; i < MAX_NUMNODES; i++) {
                get_pfn_range_for_nid(i, &start_pfn, &end_pfn);
                if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn))
                        continue;
                *nid = i;
                return min(end, PFN_PHYS(end_pfn));
        }
#endif
        *nid = 0;

        return end;
}

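/*
 * Try to allocate @size bytes from the single memory region @mp, restricting
 * the search to the portions of that region that memblock_nid_range() says
 * belong to @nid.  Returns the reserved base or MEMBLOCK_ERROR.
 */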
static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
                                               phys_addr_t size,
                                               phys_addr_t align, int nid)
{
        phys_addr_t start, end;

        start = mp->base;
        end = start + mp->size;

        start = memblock_align_up(start, align);
        while (start < end) {
                phys_addr_t this_end;
                int this_nid;

                this_end = memblock_nid_range(start, end, &this_nid);
                if (this_nid == nid) {
                        phys_addr_t ret = memblock_find_region(start, this_end, size, align);
                        if (ret != MEMBLOCK_ERROR &&
                            memblock_add_region(&memblock.reserved, ret, size) >= 0)
                                return ret;
                }
                start = this_end;
        }

        return MEMBLOCK_ERROR;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
        struct memblock_type *mem = &memblock.memory;
        int i;

        BUG_ON(0 == size);

        /* We align the size to limit fragmentation. Without this, a lot of
         * small allocs quickly eat up the whole reserve array on sparc
         */
        size = memblock_align_up(size, align);

        /* We do a bottom-up search for a region with the right
         * nid since that's easier considering how memblock_nid_range()
         * works
         */
        for (i = 0; i < mem->cnt; i++) {
                phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
                                               size, align, nid);
                if (ret != MEMBLOCK_ERROR)
                        return ret;
        }

        return 0;
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
        phys_addr_t res = memblock_alloc_nid(size, align, nid);

        if (res)
                return res;
        return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
}


/*
 * Remaining API functions
 */

/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
        return memblock.memory_size;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
        int idx = memblock.memory.cnt - 1;

        return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
        unsigned long i;
        phys_addr_t limit;
        struct memblock_region *p;

        if (!memory_limit)
                return;

        /* Truncate the memblock regions to satisfy the memory limit. */
        limit = memory_limit;
        for (i = 0; i < memblock.memory.cnt; i++) {
                if (limit > memblock.memory.regions[i].size) {
                        limit -= memblock.memory.regions[i].size;
                        continue;
                }

                memblock.memory.regions[i].size = limit;
                memblock.memory.cnt = i + 1;
                break;
        }

        memory_limit = memblock_end_of_DRAM();

        /* And truncate any reserves above the limit also. */
        for (i = 0; i < memblock.reserved.cnt; i++) {
                p = &memblock.reserved.regions[i];

                if (p->base > memory_limit)
                        p->size = 0;
                else if ((p->base + p->size) > memory_limit)
                        p->size = memory_limit - p->base;

                if (p->size == 0) {
                        memblock_remove_region(&memblock.reserved, i);
                        i--;
                }
        }
}

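/*
 * Binary search for the region of @type that contains @addr.  Relies on the
 * regions being kept sorted by base address; returns the region index or -1.
 */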
static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
        unsigned int left = 0, right = type->cnt;

        do {
                unsigned int mid = (right + left) / 2;

                if (addr < type->regions[mid].base)
                        right = mid;
                else if (addr >= (type->regions[mid].base +
                                  type->regions[mid].size))
                        left = mid + 1;
                else
                        return mid;
        } while (left < right);
        return -1;
}

int __init memblock_is_reserved(phys_addr_t addr)
{
        return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
        return memblock_search(&memblock.memory, addr) != -1;
}

int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
        int idx = memblock_search(&memblock.memory, base);

        if (idx == -1)
                return 0;
        return memblock.memory.regions[idx].base <= base &&
                (memblock.memory.regions[idx].base +
                 memblock.memory.regions[idx].size) >= (base + size);
}

int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
        return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}


void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
        memblock.current_limit = limit;
}

static void __init_memblock memblock_dump(struct memblock_type *region, char *name)
{
        unsigned long long base, size;
        int i;

        pr_info(" %s.cnt  = 0x%lx\n", name, region->cnt);

        for (i = 0; i < region->cnt; i++) {
                base = region->regions[i].base;
                size = region->regions[i].size;

                pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n",
                    name, i, base, base + size - 1, size);
        }
}

void __init_memblock memblock_dump_all(void)
{
        if (!memblock_debug)
                return;

        pr_info("MEMBLOCK configuration:\n");
        pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

        memblock_dump(&memblock.memory, "memory");
        memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_analyze(void)
{
        int i;

        /* Check marker in the unused last array entry */
        WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
                != (phys_addr_t)RED_INACTIVE);
        WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
                != (phys_addr_t)RED_INACTIVE);

        memblock.memory_size = 0;

        for (i = 0; i < memblock.memory.cnt; i++)
                memblock.memory_size += memblock.memory.regions[i].size;

        /* We allow resizing from there */
        memblock_can_resize = 1;
}

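/*
 * memblock_init() must run before any other memblock call: it hooks up the
 * static region arrays and seeds each list with a dummy zero-size region
 * that later coalesces away.  An illustrative early-boot sequence (the
 * base/size names below are made up for the example) might be:
 *
 *      memblock_init();
 *      memblock_add(bank_base, bank_size);       (once per RAM bank)
 *      memblock_reserve(kernel_base, kernel_size);
 *      memblock_analyze();                       (sums memory_size and
 *                                                 allows array resizing)
 *      addr = memblock_alloc(size, align);
 */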
void __init memblock_init(void)
{
        static int init_done __initdata = 0;

        if (init_done)
                return;
        init_done = 1;

        /* Hookup the initial arrays */
        memblock.memory.regions = memblock_memory_init_regions;
        memblock.memory.max             = INIT_MEMBLOCK_REGIONS;
        memblock.reserved.regions       = memblock_reserved_init_regions;
        memblock.reserved.max   = INIT_MEMBLOCK_REGIONS;

        /* Write a marker in the unused last array entry */
        memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
        memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;

        /* Create a dummy zero size MEMBLOCK which will get coalesced away later.
         * This simplifies the memblock_add() code.
         */
        memblock.memory.regions[0].base = 0;
        memblock.memory.regions[0].size = 0;
        memblock.memory.cnt = 1;

        /* Ditto. */
        memblock.reserved.regions[0].base = 0;
        memblock.reserved.regions[0].size = 0;
        memblock.reserved.cnt = 1;

        memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}

static int __init early_memblock(char *p)
{
        if (p && strstr(p, "debug"))
                memblock_debug = 1;
        return 0;
}
early_param("memblock", early_memblock);

#if defined(CONFIG_DEBUG_FS) && !defined(ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
        struct memblock_type *type = m->private;
        struct memblock_region *reg;
        int i;

        for (i = 0; i < type->cnt; i++) {
                reg = &type->regions[i];
                seq_printf(m, "%4d: ", i);
                if (sizeof(phys_addr_t) == 4)
                        seq_printf(m, "0x%08lx..0x%08lx\n",
                                   (unsigned long)reg->base,
                                   (unsigned long)(reg->base + reg->size - 1));
                else
                        seq_printf(m, "0x%016llx..0x%016llx\n",
                                   (unsigned long long)reg->base,
                                   (unsigned long long)(reg->base + reg->size - 1));

        }
        return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
        .open = memblock_debug_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init memblock_init_debugfs(void)
{
        struct dentry *root = debugfs_create_dir("memblock", NULL);
        if (!root)
                return -ENXIO;
        debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
        debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

        return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */