linux/include/linux/memblock.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: hotpluggable region
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as
 * reserved in the memory map; refer to memblock_mark_nomap() description
 * for further details
 */
enum memblock_flags {
        MEMBLOCK_NONE           = 0x0,  /* No special request */
        MEMBLOCK_HOTPLUG        = 0x1,  /* hotpluggable region */
        MEMBLOCK_MIRROR         = 0x2,  /* mirrored region */
        MEMBLOCK_NOMAP          = 0x4,  /* don't add to kernel direct mapping */
};

/**
 * struct memblock_region - represents a memory region
 * @base: base address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
        phys_addr_t base;
        phys_addr_t size;
        enum memblock_flags flags;
#ifdef CONFIG_NUMA
        int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
        unsigned long cnt;
        unsigned long max;
        phys_addr_t total_size;
        struct memblock_region *regions;
        char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 */
struct memblock {
        bool bottom_up;  /* is bottom up direction? */
        phys_addr_t current_limit;
        struct memblock_type memory;
        struct memblock_type reserved;
};

extern struct memblock memblock;

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
static inline void memblock_discard(void) {}
#endif

phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
                                   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
                              phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
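
/*
 * Example (illustrative sketch): a hypothetical arch_register_memory()
 * early-boot hook that registers RAM reported by firmware, reserves the
 * firmware tables so the allocator never hands them out, and keeps a
 * device-owned range out of the direct map. The function name, addresses
 * and sizes below are made up for illustration.
 *
 *      void __init arch_register_memory(void)
 *      {
 *              memblock_add(0x80000000, SZ_512M);      // usable RAM
 *              memblock_reserve(0x80000000, SZ_1M);    // firmware tables
 *              memblock_mark_nomap(0x9ff00000, SZ_1M); // device-owned range
 *      }
 */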

void memblock_free_all(void);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);

/* Low level functions */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
                      struct memblock_type *type_a,
                      struct memblock_type *type_b, phys_addr_t *out_start,
                      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
                          struct memblock_type *type_a,
                          struct memblock_type *type_b, phys_addr_t *out_start,
                          phys_addr_t *out_end, int *out_nid);

void __memblock_free_late(phys_addr_t base, phys_addr_t size);

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
                                        phys_addr_t *out_start,
                                        phys_addr_t *out_end)
{
        extern struct memblock_type physmem;

        __next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type,
                         out_start, out_end, NULL);
}

/**
 * for_each_physmem_range - iterate through physmem areas not included in type.
 * @i: u64 used as loop variable
 * @type: ptr to memblock_type which excludes from the iteration, can be %NULL
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_physmem_range(i, type, p_start, p_end)                 \
        for (i = 0, __next_physmem_range(&i, type, p_start, p_end);     \
             i != (u64)ULLONG_MAX;                                      \
             __next_physmem_range(&i, type, p_start, p_end))
#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */

/**
 * __for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define __for_each_mem_range(i, type_a, type_b, nid, flags,             \
                           p_start, p_end, p_nid)                       \
        for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,    \
                                     p_start, p_end, p_nid);            \
             i != (u64)ULLONG_MAX;                                      \
             __next_mem_range(&i, nid, flags, type_a, type_b,           \
                              p_start, p_end, p_nid))

/**
 * __for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define __for_each_mem_range_rev(i, type_a, type_b, nid, flags,         \
                                 p_start, p_end, p_nid)                 \
        for (i = (u64)ULLONG_MAX,                                       \
                     __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
                                          p_start, p_end, p_nid);       \
             i != (u64)ULLONG_MAX;                                      \
             __next_mem_range_rev(&i, nid, flags, type_a, type_b,       \
                                  p_start, p_end, p_nid))

/**
 * for_each_mem_range - iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_mem_range(i, p_start, p_end) \
        __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,   \
                             MEMBLOCK_HOTPLUG, p_start, p_end, NULL)
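
/*
 * Example (illustrative sketch): printing every usable memory range during
 * early boot. @i is an opaque u64 cookie managed by the iterator; the end
 * address returned is exclusive, and %pa prints a phys_addr_t via pointer.
 *
 *      phys_addr_t start, end;
 *      u64 i;
 *
 *      for_each_mem_range(i, &start, &end)
 *              pr_info("memory range: [%pa-%pa]\n", &start, &end);
 */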

/**
 * for_each_mem_range_rev - reverse iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, p_start, p_end)                       \
        __for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
                                 MEMBLOCK_HOTPLUG, p_start, p_end, NULL)

/**
 * for_each_reserved_mem_range - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_range(i, p_start, p_end)                  \
        __for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE, \
                             MEMBLOCK_NONE, p_start, p_end, NULL)

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_NOMAP;
}

int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
                            unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
                          unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)           \
        for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
             i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
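
/*
 * Example (illustrative sketch): counting the pages memblock reports for a
 * single NUMA node, similar to what early page initialization does. 'nid'
 * is assumed to be a valid node id.
 *
 *      unsigned long start_pfn, end_pfn, nr_pages = 0;
 *      int i;
 *
 *      for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
 *              nr_pages += end_pfn - start_pfn;
 */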

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
                                  unsigned long *out_spfn,
                                  unsigned long *out_epfn);
/**
 * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
 * memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and an empty zone are initialized. The main
 * assumption is that the zone start, end, and pgdat have been associated.
 * This way we can use the zone to determine the NUMA node, and whether a
 * given part of the memblock is valid for the zone.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)    \
        for (i = 0,                                                     \
             __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);    \
             i != U64_MAX;                                              \
             __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

/**
 * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from current position. Available as soon as memblock is
 * initialized.
 */
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
        for (; i != U64_MAX;                                            \
             __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock.  Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)   \
        __for_each_mem_range(i, &memblock.memory, &memblock.reserved,   \
                             nid, flags, p_start, p_end, p_nid)

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order.  Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,  \
                                        p_nid)                          \
        __for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
                                 nid, flags, p_start, p_end, p_nid)

int memblock_set_node(phys_addr_t base, phys_addr_t size,
                      struct memblock_type *type, int nid);

#ifdef CONFIG_NUMA
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
        r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
        return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
        return 0;
}
#endif /* CONFIG_NUMA */

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE       0
#define MEMBLOCK_ALLOC_KASAN            1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
                                      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
                                      phys_addr_t align, phys_addr_t start,
                                      phys_addr_t end, int nid, bool exact_nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
                                              phys_addr_t align)
{
        return memblock_phys_alloc_range(size, align, 0,
                                         MEMBLOCK_ALLOC_ACCESSIBLE);
}
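
/*
 * Example (illustrative sketch): grabbing a page-aligned physical buffer,
 * e.g. for an early page table. The caller gets a physical address (0 on
 * failure) and must map it, or go through __va(), before touching it.
 *
 *      phys_addr_t pt_phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 *
 *      if (!pt_phys)
 *              panic("cannot allocate early page table");
 */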

void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
                                 phys_addr_t min_addr, phys_addr_t max_addr,
                                 int nid);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
                                 phys_addr_t min_addr, phys_addr_t max_addr,
                                 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
                             phys_addr_t min_addr, phys_addr_t max_addr,
                             int nid);

static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
{
        return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                                      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
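
/*
 * Example (illustrative sketch): allocating a zeroed, cache-line aligned
 * structure before the buddy allocator is up. memblock_alloc() returns a
 * kernel virtual address, or NULL on failure; the _raw variants skip the
 * zeroing. 'struct foo' is a made-up type.
 *
 *      struct foo *foo;
 *
 *      foo = memblock_alloc(sizeof(*foo), SMP_CACHE_BYTES);
 *      if (!foo)
 *              panic("%s: failed to allocate %zu bytes\n",
 *                    __func__, sizeof(*foo));
 */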

static inline void *memblock_alloc_raw(phys_addr_t size,
                                               phys_addr_t align)
{
        return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
                                          MEMBLOCK_ALLOC_ACCESSIBLE,
                                          NUMA_NO_NODE);
}

static inline void *memblock_alloc_from(phys_addr_t size,
                                                phys_addr_t align,
                                                phys_addr_t min_addr)
{
        return memblock_alloc_try_nid(size, align, min_addr,
                                      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void *memblock_alloc_low(phys_addr_t size,
                                               phys_addr_t align)
{
        return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                                      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void *memblock_alloc_node(phys_addr_t size,
                                                phys_addr_t align, int nid)
{
        return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                                      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

static inline void memblock_free_early(phys_addr_t base,
                                              phys_addr_t size)
{
        memblock_free(base, size);
}

static inline void memblock_free_early_nid(phys_addr_t base,
                                                  phys_addr_t size, int nid)
{
        memblock_free(base, size);
}

static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
{
        __memblock_free_late(base, size);
}

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline __init_memblock void memblock_set_bottom_up(bool enable)
{
        memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up or not.
 * If this returns true, memblock will allocate memory in the
 * bottom-up direction.
 */
static inline __init_memblock bool memblock_bottom_up(void)
{
        return memblock.bottom_up;
}

phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

void memblock_dump_all(void);

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
        return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
        return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
        return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
        return PFN_UP(reg->base + reg->size);
}

/**
 * for_each_mem_region - iterate over memory regions
 * @region: loop variable
 */
#define for_each_mem_region(region)                                     \
        for (region = memblock.memory.regions;                          \
             region < (memblock.memory.regions + memblock.memory.cnt);  \
             region++)
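
/*
 * Example (illustrative sketch): walking the registered memory regions and
 * reporting the page frame span of every hotpluggable one, combining this
 * iterator with the pfn accessors and flag helpers above.
 *
 *      struct memblock_region *r;
 *
 *      for_each_mem_region(r) {
 *              if (!memblock_is_hotpluggable(r))
 *                      continue;
 *              pr_info("hotplug: pfn 0x%lx-0x%lx node %d\n",
 *                      memblock_region_memory_base_pfn(r),
 *                      memblock_region_memory_end_pfn(r),
 *                      memblock_get_region_node(r));
 *      }
 */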

/**
 * for_each_reserved_mem_region - iterate over reserved memory regions
 * @region: loop variable
 */
#define for_each_reserved_mem_region(region)                            \
        for (region = memblock.reserved.regions;                        \
             region < (memblock.reserved.regions + memblock.reserved.cnt); \
             region++)

extern void *alloc_large_system_hash(const char *tablename,
                                     unsigned long bucketsize,
                                     unsigned long numentries,
                                     int scale,
                                     int flags,
                                     unsigned int *_hash_shift,
                                     unsigned int *_hash_mask,
                                     unsigned long low_limit,
                                     unsigned long high_limit);
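
/*
 * Example (illustrative sketch, loosely modelled on how the dentry and
 * inode caches size their tables): allocating a large boot-time hash table
 * whose size scales with available memory. The table name and variables
 * are placeholders.
 *
 *      static struct hlist_head *example_hashtable __ro_after_init;
 *      static unsigned int example_hash_shift __ro_after_init;
 *
 *      example_hashtable =
 *              alloc_large_system_hash("Example cache",
 *                                      sizeof(struct hlist_head),
 *                                      0,         // size from memory
 *                                      14,        // 1 bucket per 16KB
 *                                      HASH_ZERO, // zero the table
 *                                      &example_hash_shift,
 *                                      NULL,
 *                                      0, 0);
 */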

#define HASH_EARLY      0x00000001      /* Allocating during early boot? */
#define HASH_SMALL      0x00000002      /* sub-page allocation allowed, min
                                         * shift passed via *_hash_shift */
#define HASH_ZERO       0x00000004      /* Zero allocated hash table */

/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;            /* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */