linux/mm/nobootmem.c
/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

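/*
 * Boot-time page frame number limits: min_low_pfn/max_low_pfn bound the
 * directly mapped "low" memory, max_pfn is the highest page frame number
 * actually present, and max_possible_pfn also accounts for memory that
 * may be hot-added later.
 */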
unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
                                        u64 goal, u64 limit)
{
        void *ptr;
        u64 addr;
        ulong flags = choose_memblock_flags();

        if (limit > memblock.current_limit)
                limit = memblock.current_limit;

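        /*
         * Try with the preferred memblock flags first (these may include
         * MEMBLOCK_MIRROR); if no mirrored memory is left, drop the
         * mirroring requirement and retry from any suitable region.
         */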
again:
        addr = memblock_find_in_range_node(size, align, goal, limit, nid,
                                           flags);
        if (!addr && (flags & MEMBLOCK_MIRROR)) {
                flags &= ~MEMBLOCK_MIRROR;
                pr_warn("Could not allocate %pap bytes of mirrored memory\n",
                        &size);
                goto again;
        }
        if (!addr)
                return NULL;

        if (memblock_reserve(addr, size))
                return NULL;

        ptr = phys_to_virt(addr);
        memset(ptr, 0, size);
        /*
         * The min_count is set to 0 so that bootmem allocated blocks
         * are never reported as leaks.
         */
        kmemleak_alloc(ptr, size, 0, 0);
        return ptr;
}

/**
 * free_bootmem_late - free bootmem pages directly to the page allocator
 * @addr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator; no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
        unsigned long cursor, end;

        kmemleak_free_part(__va(addr), size);

        cursor = PFN_UP(addr);
        end = PFN_DOWN(addr + size);

        for (; cursor < end; cursor++) {
                __free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
                totalram_pages++;
        }
}
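
/*
 * Example (illustrative): an architecture that is done with a boot-time
 * buffer only after the page allocator is up could return it with:
 *
 *      free_bootmem_late(__pa(boot_buf), boot_buf_size);
 *
 * where boot_buf and boot_buf_size are hypothetical names.
 */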

static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
        int order;

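        /*
         * Free [start, end) in the largest possible buddy-sized chunks:
         * __ffs(start) gives the largest power-of-two alignment of the
         * current PFN (capped at MAX_ORDER - 1), and the inner loop
         * shrinks the order until the chunk fits within the range.
         */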
        while (start < end) {
                order = min(MAX_ORDER - 1UL, __ffs(start));

                while (start + (1UL << order) > end)
                        order--;

                __free_pages_bootmem(pfn_to_page(start), start, order);

                start += (1UL << order);
        }
}

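/*
 * Release one physical range to the page allocator.  The range is rounded
 * inward (PFN_UP/PFN_DOWN), so partial pages at either end stay reserved,
 * and it is clamped to low memory via max_low_pfn.  Returns the number of
 * pages released.
 */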
static unsigned long __init __free_memory_core(phys_addr_t start,
                                 phys_addr_t end)
{
        unsigned long start_pfn = PFN_UP(start);
        unsigned long end_pfn = min_t(unsigned long,
                                      PFN_DOWN(end), max_low_pfn);

        if (start_pfn > end_pfn)
                return 0;

        __free_pages_memory(start_pfn, end_pfn);

        return end_pfn - start_pfn;
}

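/*
 * Hand all free low memory over to the page allocator: clear any leftover
 * hotplug flags, initialize the struct pages backing reserved regions,
 * then release every free memblock range.
 */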
static unsigned long __init free_low_memory_core_early(void)
{
        unsigned long count = 0;
        phys_addr_t start, end;
        u64 i;

        memblock_clear_hotplug(0, -1);

        for_each_reserved_mem_region(i, &start, &end)
                reserve_bootmem_region(start, end);

        for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
                                NULL)
                count += __free_memory_core(start, end);

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
        {
                phys_addr_t size;

                /* Free memblock.reserved array if it was allocated */
                size = get_allocated_memblock_reserved_regions_info(&start);
                if (size)
                        count += __free_memory_core(start, start + size);

                /* Free memblock.memory array if it was allocated */
                size = get_allocated_memblock_memory_regions_info(&start);
                if (size)
                        count += __free_memory_core(start, start + size);
        }
#endif

        return count;
}

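/*
 * zone->managed_pages is re-accounted as pages are handed to the buddy
 * allocator, so it is zeroed once before the first release of boot memory.
 */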
static int reset_managed_pages_done __initdata;

void reset_node_managed_pages(pg_data_t *pgdat)
{
        struct zone *z;

        for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
                z->managed_pages = 0;
}

void __init reset_all_zones_managed_pages(void)
{
        struct pglist_data *pgdat;

        if (reset_managed_pages_done)
                return;

        for_each_online_pgdat(pgdat)
                reset_node_managed_pages(pgdat);

        reset_managed_pages_done = 1;
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
        unsigned long pages;

        reset_all_zones_managed_pages();

        /*
         * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
         * because node 0 may have no RAM installed, in which case the
         * low memory will be on node 1 instead.
         */
        pages = free_low_memory_core_early();
        totalram_pages += pages;

        return pages;
}

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                              unsigned long size)
{
        memblock_free(physaddr, size);
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
        memblock_free(addr, size);
}

static void * __init ___alloc_bootmem_nopanic(unsigned long size,
                                        unsigned long align,
                                        unsigned long goal,
                                        unsigned long limit)
{
        void *ptr;

        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc(size, GFP_NOWAIT);

restart:

        ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align, goal, limit);

        if (ptr)
                return ptr;

        if (goal != 0) {
                goal = 0;
                goto restart;
        }

        return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
                                        unsigned long goal)
{
        unsigned long limit = -1UL;

        return ___alloc_bootmem_nopanic(size, align, goal, limit);
}
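
/*
 * Example (illustrative): request a small scratch buffer, preferring
 * memory above the DMA region but tolerating failure:
 *
 *      void *buf = __alloc_bootmem_nopanic(SZ_4K, SMP_CACHE_BYTES,
 *                                          __pa(MAX_DMA_ADDRESS));
 *
 * A NULL return here simply means the caller must cope without the buffer.
 */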

static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
                                        unsigned long goal, unsigned long limit)
{
        void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

        if (mem)
                return mem;
        /*
         * Whoops, we cannot satisfy the allocation request.
         */
        pr_alert("bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of memory");
        return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
                              unsigned long goal)
{
        unsigned long limit = -1UL;

        return ___alloc_bootmem(size, align, goal, limit);
}
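
/*
 * Example (illustrative): early setup code typically relies on the
 * panic-on-failure semantics and does not check the result:
 *
 *      table = __alloc_bootmem(nr_entries * sizeof(*table),
 *                              SMP_CACHE_BYTES, 0);
 *
 * where table and nr_entries are hypothetical.
 */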

void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                                                   unsigned long size,
                                                   unsigned long align,
                                                   unsigned long goal,
                                                   unsigned long limit)
{
        void *ptr;

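        /*
         * Fall back in stages: try the requested node first, then any
         * node, and finally retry the whole sequence with the goal
         * address dropped.
         */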
again:
        ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
                                        goal, limit);
        if (ptr)
                return ptr;

        ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align,
                                        goal, limit);
        if (ptr)
                return ptr;

        if (goal) {
                goal = 0;
                goto again;
        }

        return NULL;
}

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}

static void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                    unsigned long align, unsigned long goal,
                                    unsigned long limit)
{
        void *ptr;

        ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
        if (ptr)
                return ptr;

        pr_alert("bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of memory");
        return NULL;
}

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * cannot hold the requested memory.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
}
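
/*
 * Example (illustrative): per-node structures are commonly set up this
 * way from early NUMA initialization code:
 *
 *      map = __alloc_bootmem_node(NODE_DATA(nid), map_size, PAGE_SIZE,
 *                                 __pa(MAX_DMA_ADDRESS));
 *
 * where map, map_size and nid are hypothetical locals.
 */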

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        return __alloc_bootmem_node(pgdat, size, align, goal);
}
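
/*
 * Note: in this memblock-backed implementation there is no separate
 * highmem path, so __alloc_bootmem_node_high() above is simply an alias
 * for __alloc_bootmem_node(), kept for compatibility with bootmem.c.
 */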

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif
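
/*
 * Unless the architecture overrides ARCH_LOW_ADDRESS_LIMIT, "low" boot
 * memory is anything physically addressable with 32 bits, i.e. below 4 GiB.
 */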

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
                                  unsigned long goal)
{
        return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}
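
/*
 * Example (illustrative): a buffer that must stay reachable by 32-bit
 * DMA engines is taken from low memory:
 *
 *      dma_area = __alloc_bootmem_low(SZ_64K, PAGE_SIZE, 0);
 *
 * where dma_area is a hypothetical pointer.
 */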

void * __init __alloc_bootmem_low_nopanic(unsigned long size,
                                          unsigned long align,
                                          unsigned long goal)
{
        return ___alloc_bootmem_nopanic(size, align, goal,
                                        ARCH_LOW_ADDRESS_LIMIT);
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * cannot hold the requested memory.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
                                       unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node(pgdat, size, align, goal,
                                     ARCH_LOW_ADDRESS_LIMIT);
}