linux/include/linux/memory_hotplug.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
struct resource;
struct vmem_altmap;

#ifdef CONFIG_MEMORY_HOTPLUG
struct page *pfn_to_online_page(unsigned long pfn);

/*
 * Types for free bootmem stored in page->lru.next. These have to be in
 * some random range in unsigned long space for debugging purposes.
 */
enum {
        MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
        SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
        MIX_SECTION_INFO,
        NODE_INFO,
        MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
};

/* Types for controlling the zone type of onlined and offlined memory */
enum {
        /* Offline the memory. */
        MMOP_OFFLINE = 0,
        /* Online the memory. Zone depends, see default_zone_for_pfn(). */
        MMOP_ONLINE,
        /* Online the memory to ZONE_NORMAL. */
        MMOP_ONLINE_KERNEL,
        /* Online the memory to ZONE_MOVABLE. */
        MMOP_ONLINE_MOVABLE,
};
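
/*
 * Illustrative sketch (editor's addition, not kernel code): the memory-block
 * onlining path consumes an online_type roughly as follows, using
 * zone_for_pfn_range() and online_pages() declared later in this header.
 * nid, start_pfn and nr_pages are assumed to describe a hot-added,
 * section-aligned range:
 *
 *        struct zone *zone;
 *        int ret;
 *
 *        zone = zone_for_pfn_range(MMOP_ONLINE, nid, start_pfn, nr_pages);
 *        ret = online_pages(start_pfn, nr_pages, zone);
 */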

/* Flags for add_memory() and friends to specify memory hotplug details. */
typedef int __bitwise mhp_t;

/* No special request */
#define MHP_NONE                ((__force mhp_t)0)
/*
 * Allow merging of the added System RAM resource with adjacent,
 * mergeable resources. After a successful call to add_memory_resource()
 * with this flag set, the resource pointer must no longer be used as it
 * might be stale, or the resource might have changed.
 */
#define MHP_MERGE_RESOURCE      ((__force mhp_t)BIT(0))
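
/*
 * Illustrative sketch (editor's addition): after a successful call with
 * this flag, the caller must stop using its resource pointer, e.g.:
 *
 *        ret = add_memory_resource(nid, res, MHP_MERGE_RESOURCE);
 *        if (!ret)
 *                res = NULL;
 *
 * because the resource may have been merged into a neighbour and freed.
 * Here res is assumed to be a "System RAM" resource obtained by the
 * caller; add_memory_resource() is declared later in this header.
 */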

/*
 * We want the memmap (struct page array) to be self-contained.
 * To do so, we will use the beginning of the hot-added range to build
 * the page tables for the memmap array that describes the entire range.
 * Only selected architectures support it with SPARSE_VMEMMAP.
 */
#define MHP_MEMMAP_ON_MEMORY   ((__force mhp_t)BIT(1))
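
/*
 * Illustrative sketch (editor's addition): callers should only request
 * this flag when the kernel supports it for the given size, roughly as
 * virtio-mem does:
 *
 *        mhp_t mhp_flags = MHP_NONE;
 *
 *        if (mhp_supports_memmap_on_memory(size))
 *                mhp_flags |= MHP_MEMMAP_ON_MEMORY;
 *        ret = add_memory(nid, start, size, mhp_flags);
 *
 * mhp_supports_memmap_on_memory() and add_memory() are declared later in
 * this header.
 */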

/*
 * Extended parameters for memory hotplug:
 * altmap: alternative allocator for memmap array (optional)
 * pgprot: page protection flags to apply to newly created page tables
 *      (required)
 */
struct mhp_params {
        struct vmem_altmap *altmap;
        pgprot_t pgprot;
};
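
/*
 * Illustrative sketch (editor's addition): a typical non-device caller
 * supplies only the mandatory pgprot and leaves the altmap unset, so the
 * memmap is allocated normally:
 *
 *        struct mhp_params params = { .pgprot = PAGE_KERNEL };
 *
 *        ret = arch_add_memory(nid, start, size, &params);
 *
 * PAGE_KERNEL is the usual choice here, but whether an arch-specific
 * pgprot is needed depends on the platform.
 */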

bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
struct range mhp_get_pluggable_range(bool need_mapping);
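
/*
 * Illustrative sketch (editor's addition): callers can pre-validate a
 * candidate range before trying to hot-add it. need_mapping selects
 * whether the range must fit in the kernel's linear mapping:
 *
 *        if (!mhp_range_allowed(start, size, true))
 *                return -E2BIG;
 *
 * mhp_get_pluggable_range(true) returns the [start, end] range that such
 * memory may occupy on this architecture.
 */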

/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone should have pgdat_resize_lock() and
 * zone_span_writelock() both held. This ensures that the size of a zone
 * can't be changed while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
        return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
        return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
        write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
        write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
        seqlock_init(&zone->span_seqlock);
}
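
/*
 * Illustrative sketch (editor's addition): readers sample the zone span
 * under the seqlock and retry if a concurrent resize raced with them,
 * along the lines of the zone-boundary checks in mm/page_alloc.c:
 *
 *        unsigned int seq;
 *        unsigned long start_pfn, nr;
 *
 *        do {
 *                seq = zone_span_seqbegin(zone);
 *                start_pfn = zone->zone_start_pfn;
 *                nr = zone->spanned_pages;
 *        } while (zone_span_seqretry(zone, seq));
 */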
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
extern void adjust_present_page_count(struct zone *zone, long nr_pages);
/* VM interface that may be used by firmware interface */
extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
                                     struct zone *zone);
extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
                        struct zone *zone);
extern struct zone *test_pages_in_a_zone(unsigned long start_pfn,
                                         unsigned long end_pfn);
extern void __offline_isolated_pages(unsigned long start_pfn,
                                     unsigned long end_pfn);

typedef void (*online_page_callback_t)(struct page *page, unsigned int order);

extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);
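
/*
 * Illustrative sketch (editor's addition): a ballooning driver can divert
 * onlining of new pages through its own callback (the Hyper-V and Xen
 * balloon drivers do this), handing pages it wants exposed right away to
 * generic_online_page(). my_online_page() is a hypothetical name:
 *
 *        static void my_online_page(struct page *page, unsigned int order)
 *        {
 *                generic_online_page(page, order);
 *        }
 *
 *        ret = set_online_page_callback(&my_online_page);
 *        ...
 *        restore_online_page_callback(&my_online_page);
 */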

extern int try_online_node(int nid);

extern int arch_add_memory(int nid, u64 start, u64 size,
                           struct mhp_params *params);
extern u64 max_mem_size;

extern int mhp_online_type_from_str(const char *str);

/* Default online_type (MMOP_*) when new memory blocks are added. */
extern int mhp_default_online_type;
/* If movable_node boot option specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
        return movable_node_enabled;
}

extern void arch_remove_memory(int nid, u64 start, u64 size,
                               struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
                           struct vmem_altmap *altmap);

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
                       struct mhp_params *params);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
                unsigned long nr_pages, struct mhp_params *params)
{
        return __add_pages(nid, start_pfn, nr_pages, params);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
              struct mhp_params *params);
#endif /* ARCH_HAS_ADD_PAGES */

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * For supporting node hot-add, we have to allocate a new pgdat.
 *
 * If an arch has a generic style NODE_DATA(),
 * node_data[nid] = kzalloc() works well, but it depends on the architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * Currently, arch_free_nodedata() is only defined for the error path of
 * node hot-add.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)        generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)       generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * If ARCH_HAS_NODEDATA_EXTENSION=n, this function is used to allocate
 * the pgdat.
 * XXX: kmalloc_node() cannot be used here, because the pgdat for the new
 *      node is itself not allocated/initialized yet, so the new node's
 *      own memory cannot be used; supporting that will need more work.
 */
#define generic_alloc_nodedata(nid)                             \
({                                                              \
        kzalloc(sizeof(pg_data_t), GFP_KERNEL);                 \
})
/*
 * This definition is only for the error path of node hot-add.
 * For node hot-remove, we have to replace this.
 */
#define generic_free_nodedata(pgdat)    kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
        node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
        BUG();
        return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
#else
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
#endif
extern void put_page_bootmem(struct page *page);
extern void get_page_bootmem(unsigned long info, struct page *page,
                             unsigned long type);
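
/*
 * Illustrative sketch (editor's addition): bootmem info registration tags
 * each page backing a section's memmap, roughly as
 * register_page_bootmem_info_section() does:
 *
 *        get_page_bootmem(section_nr, page, SECTION_INFO);
 *
 * and hot-remove later drops that reference with put_page_bootmem(page).
 * section_nr is recorded as the info word; the type is one of the bootmem
 * types in the enum near the top of this header.
 */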

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);
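
/*
 * Illustrative sketch (editor's addition): code that needs a stable view
 * of online memory uses the reader pair:
 *
 *        get_online_mems();
 *        ... read access to online memory ...
 *        put_online_mems();
 *
 * whereas add_memory(), offline_pages() and friends internally bracket
 * the actual operation with mem_hotplug_begin()/mem_hotplug_done().
 */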

#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)                 \
({                                              \
        struct page *___page = NULL;            \
        if (pfn_valid(pfn))                     \
                ___page = pfn_to_page(pfn);     \
        ___page;                                \
})

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
        return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
        return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}

static inline int try_online_node(int nid)
{
        return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
        return false;
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

/*
 * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some
 * platforms might override and use arch_get_mappable_range()
 * for internal, non-memory-hotplug purposes.
 */
struct range arch_get_mappable_range(void);

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
        spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
        spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
        spin_lock_init(&pgdat->node_size_lock);
}
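
/*
 * Illustrative sketch (editor's addition): node_size_lock is taken
 * IRQ-safe through these wrappers, with the flags word passed by pointer:
 *
 *        unsigned long flags;
 *
 *        pgdat_resize_lock(pgdat, &flags);
 *        pgdat->node_start_pfn = start_pfn;
 *        pgdat_resize_unlock(pgdat, &flags);
 *
 * (node_start_pfn stands in for whatever node-size fields are updated.)
 */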
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern int remove_memory(int nid, u64 start, u64 size);
extern void __remove_memory(int nid, u64 start, u64 size);
extern int offline_and_remove_memory(int nid, u64 start, u64 size);
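
/*
 * Illustrative sketch (editor's addition): a driver tearing down memory it
 * hot-added earlier can combine both steps:
 *
 *        ret = offline_and_remove_memory(nid, start, size);
 *
 * which fails (e.g. -EBUSY) if the range cannot be offlined. By contrast,
 * remove_memory() expects the range to be offline already, and
 * __remove_memory() triggers a BUG() if it is not.
 */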

#else
static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
        return -EINVAL;
}

static inline int remove_memory(int nid, u64 start, u64 size)
{
        return -EBUSY;
}

static inline void __remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

#ifdef CONFIG_MEMORY_HOTPLUG
extern void __ref free_area_init_core_hotplug(int nid);
extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory_resource(int nid, struct resource *resource,
                               mhp_t mhp_flags);
extern int add_memory_driver_managed(int nid, u64 start, u64 size,
                                     const char *resource_name,
                                     mhp_t mhp_flags);
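
/*
 * Illustrative sketch (editor's addition): drivers exposing device memory
 * as system RAM pass a resource name of the form "System RAM (driver)",
 * as the dax/kmem driver does:
 *
 *        ret = add_memory_driver_managed(nid, start, size,
 *                                        "System RAM (kmem)", MHP_NONE);
 *
 * Such ranges are tagged driver-managed so that, for example, kexec does
 * not treat them like ordinary firmware-provided System RAM.
 */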
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
                                   unsigned long nr_pages,
                                   struct vmem_altmap *altmap, int migratetype);
extern void remove_pfn_range_from_zone(struct zone *zone,
                                       unsigned long start_pfn,
                                       unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_section(int nid, unsigned long pfn,
                unsigned long nr_pages, struct vmem_altmap *altmap);
extern void sparse_remove_section(struct mem_section *ms,
                unsigned long pfn, unsigned long nr_pages,
                unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
                                          unsigned long pnum);
extern struct zone *zone_for_pfn_range(int online_type, int nid,
                unsigned long start_pfn, unsigned long nr_pages);
extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
                                      struct mhp_params *params);
void arch_remove_linear_mapping(u64 start, u64 size);
extern bool mhp_supports_memmap_on_memory(unsigned long size);
#endif /* CONFIG_MEMORY_HOTPLUG */

#endif /* __LINUX_MEMORY_HOTPLUG_H */