#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

#ifdef CONFIG_HAVE_MEMBLOCK
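/*
 * Logical memory blocks.
 *
 * Memblock is the boot-time physical memory manager: it tracks physical
 * memory as "memory" (usable) and "reserved" regions and serves early
 * allocations before the page allocator is initialized.
 */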
#include <linux/init.h>
#include <linux/mm.h>

#define INIT_MEMBLOCK_REGIONS	128
#define INIT_PHYSMEM_REGIONS	4

/* Definition of memblock flags. */
enum {
	MEMBLOCK_NONE		= 0x0,	/* no special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};
/* A single physically contiguous range of memory tracked by memblock. */
struct memblock_region {
	phys_addr_t base;		/* base physical address */
	phys_addr_t size;		/* size in bytes */
	unsigned long flags;		/* MEMBLOCK_* attributes */
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	int nid;			/* NUMA node id */
#endif
};

/* An array of regions of one kind: memory, reserved or physmem. */
struct memblock_type {
	unsigned long cnt;		/* number of regions */
	unsigned long max;		/* size of the allocated array */
	phys_addr_t total_size;		/* size of all regions */
	struct memblock_region *regions;
};

struct memblock {
	bool bottom_up;			/* allocate bottom-up when true */
	phys_addr_t current_limit;	/* upper limit for allocations */
	struct memblock_type memory;
	struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	struct memblock_type physmem;
#endif
};

extern struct memblock memblock;
extern int memblock_debug;
#ifdef CONFIG_MOVABLE_NODE
/* Set when the movable_node boot option is specified. */
extern bool movable_node_enabled;
#endif /* CONFIG_MOVABLE_NODE */

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
#else
#define __init_memblock
#define __initdata_memblock
#endif

#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					int nid, ulong flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
ulong choose_memblock_flags(void);
/* Low level functions */
int memblock_add_range(struct memblock_type *type,
		       phys_addr_t base, phys_addr_t size,
		       int nid, unsigned long flags);

void __next_mem_range(u64 *idx, int nid, ulong flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
				phys_addr_t *out_end);
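/**
 * for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */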
#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))
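/**
 * for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */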
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))
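/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */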
#define for_each_reserved_mem_region(i, p_start, p_end)			\
	for (i = 0UL,							\
	     __next_reserved_mem_region(&i, p_start, p_end);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_reserved_mem_region(&i, p_start, p_end))
#ifdef CONFIG_MOVABLE_NODE
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool __init_memblock movable_node_is_enabled(void)
{
	return movable_node_enabled;
}
#else
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return false;
}
static inline bool movable_node_is_enabled(void)
{
	return false;
}
#endif /* CONFIG_MOVABLE_NODE */

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);
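/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */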
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
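/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized. A minimal sketch of a hypothetical
 * caller:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_info("free range: %pa..%pa\n", &start, &end);
 */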
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, flags, p_start, p_end, p_nid)
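/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */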
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
			       nid, flags, p_start, p_end, p_nid)

static inline void memblock_set_region_flags(struct memblock_region *r,
					     unsigned long flags)
{
	r->flags |= flags;
}

static inline void memblock_clear_region_flags(struct memblock_region *r,
					       unsigned long flags)
{
	r->flags &= ~flags;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);

#ifdef CONFIG_MOVABLE_NODE
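/*
 * Set the allocation direction to bottom-up or top-down.
 */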
static inline void __init memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}
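/*
 * Check if the allocation direction is bottom-up or not.
 * If this returns true, memblock allocates memory in the bottom-up
 * direction.
 */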
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}
#else
static inline void __init memblock_set_bottom_up(bool enable) {}
static inline bool memblock_bottom_up(void) { return false; }
#endif /* CONFIG_MOVABLE_NODE */

/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					ulong flags);
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				  phys_addr_t max_addr);
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
bool memblock_is_memory(phys_addr_t addr);
int memblock_is_map_memory(phys_addr_t addr);
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}
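/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                              limiting allocations to what is currently
 *                              accessible during boot
 * @limit: New limit value (physical address)
 */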
void memblock_set_current_limit(phys_addr_t limit);

phys_addr_t memblock_get_current_limit(void);
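/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region
 * @reg: memblock_region structure
 */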
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}
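/**
 * memblock_region_memory_end_pfn - Return the pfn just past the end of the memory region
 * @reg: memblock_region structure
 */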
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}
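/**
 * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region
 * @reg: memblock_region structure
 */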
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}
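/**
 * memblock_region_reserved_end_pfn - Return the pfn just past the end of the reserved region
 * @reg: memblock_region structure
 */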
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}

/* Iterate over the regions of one memblock type (e.g. memory or reserved). */
#define for_each_memblock(memblock_type, region)					\
	for (region = memblock.memblock_type.regions;					\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);	\
	     region++)

/*
 * Iterate over the regions of a memblock_type pointer; the caller must
 * provide an "idx" variable in the surrounding scope.
 */
#define for_each_memblock_type(memblock_type, rgn)			\
	idx = 0;							\
	rgn = &memblock_type->regions[idx];				\
	for (idx = 0; idx < memblock_type->cnt;				\
	     idx++, rgn = &memblock_type->regions[idx])
#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif /* CONFIG_MEMTEST */

#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return 0;
}

#endif /* CONFIG_HAVE_MEMBLOCK */

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */