linux/include/linux/memblock.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: hotpluggable region
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as
 * reserved in the memory map; refer to memblock_mark_nomap() description
 * for further details
 */
enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};

/**
 * struct memblock_region - represents a memory region
 * @base: base address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_NUMA
	int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
	unsigned long cnt;
	unsigned long max;
	phys_addr_t total_size;
	struct memblock_region *regions;
	char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 */
struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
};

extern struct memblock memblock;

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
static inline void memblock_discard(void) {}
#endif

void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);

void memblock_free_all(void);
void memblock_free_ptr(void *ptr, size_t size);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);

/* Low level functions */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __memblock_free_late(phys_addr_t base, phys_addr_t size);

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
					phys_addr_t *out_start,
					phys_addr_t *out_end)
{
	extern struct memblock_type physmem;

	__next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type,
			 out_start, out_end, NULL);
}

/**
 * for_each_physmem_range - iterate through physmem areas not included in type.
 * @i: u64 used as loop variable
 * @type: ptr to memblock_type which excludes from the iteration, can be %NULL
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_physmem_range(i, type, p_start, p_end)			\
	for (i = 0, __next_physmem_range(&i, type, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_physmem_range(&i, type, p_start, p_end))
#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */

/**
 * __for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define __for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))

/**
 * __for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define __for_each_mem_range_rev(i, type_a, type_b, nid, flags,	\
				 p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		     __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
					  p_start, p_end, p_nid);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_mem_range - iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_mem_range(i, p_start, p_end) \
	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,	\
			     MEMBLOCK_HOTPLUG, p_start, p_end, NULL)
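
/*
 * Usage sketch (illustrative, not part of the original header): walking
 * every registered memory range during early boot, e.g. from an __init
 * function. The end address yielded by the iterator is exclusive.
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_mem_range(i, &start, &end)
 *		pr_info("memory range: [%pa-%pa]\n", &start, &end);
 */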

/**
 * for_each_mem_range_rev - reverse iterate over all memory ranges
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, p_start, p_end)			\
	__for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
				 MEMBLOCK_HOTPLUG, p_start, p_end, NULL)

/**
 * for_each_reserved_mem_range - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_range(i, p_start, p_end)			\
	__for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE, \
			     MEMBLOCK_NONE, p_start, p_end, NULL)

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
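
/*
 * Usage sketch (illustrative, not part of the original header): reporting
 * the pfn span of every early memory range on every node. Note that the
 * loop variable is a plain int here, not a u64.
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("node %d: pfns %lx-%lx\n", nid, start_pfn, end_pfn);
 */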

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
				  unsigned long *out_spfn,
				  unsigned long *out_epfn);
/**
 * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
 * memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and an empty zone are initialized. The main
 * assumption is that the zone start, end, and pgdat have been associated.
 * This way we can use the zone to determine NUMA node, and if a given part
 * of the memblock is valid for the zone.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)	\
	for (i = 0,							\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);	\
	     i != U64_MAX;						\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

/**
 * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from current position. Available as soon as memblock is
 * initialized.
 */
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
	for (; i != U64_MAX;						\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock.  Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	__for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			     nid, flags, p_start, p_end, p_nid)
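
/*
 * Usage sketch (illustrative, not part of the original header): summing the
 * free memory still tracked by memblock, on any node, with no flag
 * restrictions. The accumulator name is hypothetical.
 *
 *	phys_addr_t start, end, free_bytes = 0;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		free_bytes += end - start;
 */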

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order.  Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	__for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
				 nid, flags, p_start, p_end, p_nid)

int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

#ifdef CONFIG_NUMA
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_NUMA */

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
#define MEMBLOCK_ALLOC_KASAN		1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
				      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
				      phys_addr_t align, phys_addr_t start,
				      phys_addr_t end, int nid, bool exact_nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
					      phys_addr_t align)
{
	return memblock_phys_alloc_range(size, align, 0,
					 MEMBLOCK_ALLOC_ACCESSIBLE);
}
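
/*
 * Usage sketch (illustrative, not part of the original header): reserving a
 * page-aligned physical buffer before the page allocator is up, e.g. for a
 * firmware handover area. The size used here is hypothetical; 0 is returned
 * on failure.
 *
 *	phys_addr_t pa = memblock_phys_alloc(SZ_64K, PAGE_SIZE);
 *
 *	if (!pa)
 *		pr_warn("failed to reserve handover area\n");
 */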

void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
			     phys_addr_t min_addr, phys_addr_t max_addr,
			     int nid);

static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
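
/*
 * Usage sketch (illustrative, not part of the original header): a typical
 * early-boot allocation of a zeroed, directly-mapped table. The table name
 * and element count are hypothetical.
 *
 *	int nr_entries = 128;
 *	struct foo *table;
 *
 *	table = memblock_alloc(array_size(nr_entries, sizeof(*table)),
 *			       SMP_CACHE_BYTES);
 *	if (!table)
 *		panic("%s: failed to allocate %d entries\n", __func__,
 *		      nr_entries);
 */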

static inline void *memblock_alloc_raw(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
					  MEMBLOCK_ALLOC_ACCESSIBLE,
					  NUMA_NO_NODE);
}

static inline void *memblock_alloc_from(phys_addr_t size,
						phys_addr_t align,
						phys_addr_t min_addr)
{
	return memblock_alloc_try_nid(size, align, min_addr,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void *memblock_alloc_low(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void *memblock_alloc_node(phys_addr_t size,
						phys_addr_t align, int nid)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

static inline void memblock_free_early(phys_addr_t base,
					      phys_addr_t size)
{
	memblock_free(base, size);
}

static inline void memblock_free_early_nid(phys_addr_t base,
						  phys_addr_t size, int nid)
{
	memblock_free(base, size);
}

static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	__memblock_free_late(base, size);
}

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline __init_memblock void memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up or not.
 * If this returns true, memblock will allocate memory in the
 * bottom-up direction.
 */
static inline __init_memblock bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}

phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

void memblock_dump_all(void);

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);


phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}

/**
 * for_each_mem_region - iterate over memory regions
 * @region: loop variable
 */
#define for_each_mem_region(region)					\
	for (region = memblock.memory.regions;				\
	     region < (memblock.memory.regions + memblock.memory.cnt);	\
	     region++)
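
/*
 * Usage sketch (illustrative, not part of the original header): walking the
 * raw region array, e.g. to report which regions were marked hotpluggable
 * by firmware.
 *
 *	struct memblock_region *region;
 *
 *	for_each_mem_region(region)
 *		if (memblock_is_hotpluggable(region))
 *			pr_info("hotpluggable region at %pa, size %pa\n",
 *				&region->base, &region->size);
 */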

/**
 * for_each_reserved_mem_region - iterate over reserved memory regions
 * @region: loop variable
 */
#define for_each_reserved_mem_region(region)				\
	for (region = memblock.reserved.regions;			\
	     region < (memblock.reserved.regions + memblock.reserved.cnt); \
	     region++)

extern void *alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit);

#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
#define HASH_SMALL	0x00000002	/* sub-page allocation allowed, min
					 * shift passed via *_hash_shift */
#define HASH_ZERO	0x00000004	/* Zero allocated hash table */
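
/*
 * Usage sketch (illustrative, not part of the original header): how a
 * subsystem might size a boot-time hash table; the table and variable
 * names are hypothetical. Passing 0 entries lets the helper size the
 * table from the amount of system memory and the scale factor.
 *
 *	static struct hlist_head *foo_hashtable;
 *	static unsigned int foo_hash_shift, foo_hash_mask;
 *
 *	foo_hashtable = alloc_large_system_hash("foo-cache",
 *						sizeof(struct hlist_head),
 *						0, 14, HASH_ZERO,
 *						&foo_hash_shift,
 *						&foo_hash_mask,
 *						0, 0);
 */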

/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;		/* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */