1#ifndef __LINUX_MEMORY_HOTPLUG_H
2#define __LINUX_MEMORY_HOTPLUG_H
3
4#include <linux/mmzone.h>
5#include <linux/spinlock.h>
6#include <linux/notifier.h>
7#include <linux/bug.h>
8
9struct page;
10struct zone;
11struct pglist_data;
12struct mem_section;
13struct memory_block;
14struct vmem_altmap;
15
16#ifdef CONFIG_MEMORY_HOTPLUG
17
18
19
20
21
/*
 * Type tags for bootmem-allocated metadata tracked via
 * get_page_bootmem()/put_page_bootmem() below.
 * NOTE(review): the base value 12 is presumably chosen to avoid clashing
 * with other users of the same page field — confirm against mm/ callers.
 */
enum {
	MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
	SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
	MIX_SECTION_INFO,
	NODE_INFO,
	MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
};
29
30
/*
 * Online policies passed as the third argument of online_pages():
 * keep the current zone, or place the range in a kernel/movable zone.
 * NOTE(review): semantics inferred from the names — confirm against the
 * online_pages() implementation.
 */
enum {
	ONLINE_KEEP,
	ONLINE_KERNEL,
	ONLINE_MOVABLE,
};
36
37
38
39
/*
 * pgdat resizing functions: serialize changes to a node's size/span.
 * Takes pgdat->node_size_lock with local IRQs disabled; the saved IRQ
 * state is stored through *flags for pgdat_resize_unlock().
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
/* Release pgdat->node_size_lock and restore the IRQ state saved in *flags. */
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
/* Initialize the node-resize spinlock; called once during pgdat setup. */
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
55
56
57
58
59
60
61
/*
 * Zone span seqlock: lets lockless readers detect a concurrent change of
 * the zone's pfn span.  Start a read section; pair with zone_span_seqretry().
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
/* Non-zero if the zone span changed since the matching zone_span_seqbegin(). */
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
/* Enter the write side of the zone-span seqlock before resizing a zone. */
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
/* Leave the write side of the zone-span seqlock. */
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
/* Initialize the zone-span seqlock; called once during zone setup. */
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);

/*
 * Bring a pfn range online / tear down an already-isolated range.
 * (Parameter names added to the previously unnamed prototypes.)
 */
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
			int online_type);
extern void __offline_isolated_pages(unsigned long start_pfn,
				     unsigned long end_pfn);

/* Per-page hook invoked while onlining; replaceable by a driver. */
typedef void (*online_page_callback_t)(struct page *page);

/* Install a custom online-page callback / restore the default one. */
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);

/* Building blocks available to online-page callbacks. */
extern void __online_page_set_limits(struct page *page);
extern void __online_page_increment_counters(struct page *page);
extern void __online_page_free(struct page *page);

/*
 * Bring up the pgdat for a node if necessary.
 * NOTE(review): return semantics assumed 0 on success — confirm in mm/.
 */
extern int try_online_node(int nid);
99
#ifdef CONFIG_MEMORY_HOTREMOVE
extern bool is_pageblock_removable_nolock(struct page *page);
extern int arch_remove_memory(u64 start, u64 size,
		struct vmem_altmap *altmap);
extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages, struct vmem_altmap *altmap);
#endif /* CONFIG_MEMORY_HOTREMOVE */

/* reasonably generic interface to expand the physical pages in a zone */
extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages, struct vmem_altmap *altmap);
111
#ifdef CONFIG_NUMA
extern int memory_add_physaddr_to_nid(u64 start);
#else
/* Without NUMA every physical address belongs to the single node 0. */
static inline int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif /* CONFIG_NUMA */
120
#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * Node hot-add needs a freshly allocated pgdat.  Architectures that
 * cannot use the generic kzalloc()-based helpers below select
 * CONFIG_HAVE_ARCH_NODEDATA_EXTENSION and provide their own
 * allocate/free/publish hooks.
 * NOTE(review): arch_free_nodedata() appears to exist only for the
 * hot-add error path — confirm against callers in mm/.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* !CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

/* Generic implementations: a plain heap allocation is sufficient. */
#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat) generic_free_nodedata(pgdat)
140
#ifdef CONFIG_NUMA
/*
 * Allocate a new node's pgdat, zeroed, from the heap.
 * NOTE(review): the allocation cannot be node-local — the new node's own
 * memory is unusable until its pgdat exists — so plain GFP_KERNEL is used.
 */
#define generic_alloc_nodedata(nid) \
({ \
	kzalloc(sizeof(pg_data_t), GFP_KERNEL); \
})

/*
 * Counterpart used on the hot-add error path.
 * NOTE(review): presumably a macro so an arch (or future hot-remove
 * support) can replace it — confirm.
 */
#define generic_free_nodedata(pgdat) kfree(pgdat)

extern pg_data_t *node_data[];
/* Publish the new pgdat in the generic node_data[] table. */
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}
163
#else /* !CONFIG_NUMA */

/* Single-node build: a new pgdat can never be needed at runtime. */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
180
#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
#else
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
#endif
/* Reference counting for bootmem-allocated struct-page metadata. */
extern void put_page_bootmem(struct page *page);
/* was "unsigned long ingo" — typo for "info"; prototype-only, no ABI change */
extern void get_page_bootmem(unsigned long info, struct page *page,
			     unsigned long type);

/*
 * Pin/unpin the current hotplug state from the read side.
 * NOTE(review): assumed to block concurrent hotplug analogously to
 * get_online_cpus() — confirm in mm/memory_hotplug.c.
 */
void get_online_mems(void);
void put_online_mems(void);

/* Begin/end an exclusive memory-hotplug operation. */
void mem_hotplug_begin(void);
void mem_hotplug_done(void);
197
198#else
199
200
201
/*
 * CONFIG_MEMORY_HOTPLUG=n: node and zone spans never change at runtime,
 * so the resize lock and the zone-span seqlock degenerate to no-ops.
 * (Forward declarations repeated from the top of the file so this branch
 * stays self-contained.)
 */
struct pglist_data;
struct zone;

static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f)
{
}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f)
{
}
static inline void pgdat_resize_init(struct pglist_data *pgdat)
{
}

/* A constant sequence count: every read section trivially succeeds. */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone)
{
}
static inline void zone_span_writeunlock(struct zone *zone)
{
}
static inline void zone_seqlock_init(struct zone *zone)
{
}
217
/*
 * Warn (with a backtrace) that a hotplug entry point was reached even
 * though CONFIG_MEMORY_HOTPLUG is off, and return -ENOSYS so the caller
 * can fail gracefully.
 * NOTE(review): no caller is visible in this header — presumably used by
 * arch or driver code; confirm.
 */
static inline int mhp_notimplemented(const char *func)
{
	printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
	dump_stack();
	return -ENOSYS;
}
224
/* No bootmem info to register when hotplug support is compiled out. */
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) {}

/* Nodes cannot be hot-added; always reports 0. */
static inline int try_online_node(int nid)
{
	return 0;
}

/* Hotplug never happens, so the read-side pins are no-ops. */
static inline void get_online_mems(void)
{
}
static inline void put_online_mems(void)
{
}

/* Likewise for the exclusive begin/end pair. */
static inline void mem_hotplug_begin(void)
{
}
static inline void mem_hotplug_done(void)
{
}
239
240#endif
241
#ifdef CONFIG_MEMORY_HOTREMOVE

/* Can every page in [pfn, pfn + nr_pages) be taken offline? */
extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
/*
 * Tear down a node's pgdat once its memory is gone.
 * NOTE(review): precondition inferred from the name — confirm in mm/.
 */
extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern void remove_memory(int nid, u64 start, u64 size);
248
249#else
/* Without CONFIG_MEMORY_HOTREMOVE nothing is ever removable. */
static inline int is_mem_section_removable(unsigned long pfn,
					   unsigned long nr_pages)
{
	return 0;
}

static inline void try_offline_node(int nid)
{
}

/* Offlining is unsupported in this configuration. */
static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	return -EINVAL;
}
262
/* Nothing to remove when CONFIG_MEMORY_HOTREMOVE is off. */
static inline void remove_memory(int nid, u64 start, u64 size) {}
264#endif
265
266extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
267 void *arg, int (*func)(struct memory_block *, void *));
268extern int add_memory(int nid, u64 start, u64 size);
269extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default,
270 bool for_device);
271extern int arch_add_memory(int nid, u64 start, u64 size,
272 struct vmem_altmap *altmap, bool for_device);
273extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
274extern bool is_memblock_offlined(struct memory_block *mem);
275extern void remove_memory(int nid, u64 start, u64 size);
276extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
277 struct vmem_altmap *altmap);
278extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
279 unsigned long map_offset, struct vmem_altmap *altmap);
280extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
281 unsigned long pnum);
282int add_pages(int nid, unsigned long start,
283 unsigned long size, struct vmem_altmap *altmap, bool for_device);
284
285#endif
286