/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2003 Silicon Graphics, Inc.
 *
 * Routines used by ia64 machines with contiguous (or virtually contiguous)
 * memory.
 */
#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>

#include <asm/meminit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/mca.h>

#ifdef CONFIG_VIRTUAL_MEM_MAP
static unsigned long max_gap;
#endif

/**
 * show_mem - give short summary of memory stats
 *
 * Shows a simple page count of reserved and used pages in the system.
 * For discontig machines, it does this on a per-pgdat basis.
 */
void show_mem(unsigned int filter)
{
	int i, total_reserved = 0;
	int total_shared = 0, total_cached = 0;
	unsigned long total_present = 0;
	pg_data_t *pgdat;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas(filter);
	printk(KERN_INFO "Node memory in pages:\n");
	if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
		return;
	for_each_online_pgdat(pgdat) {
		unsigned long present;
		unsigned long flags;
		int shared = 0, cached = 0, reserved = 0;
		int nid = pgdat->node_id;

		if (skip_free_areas_node(filter, nid))
			continue;
		pgdat_resize_lock(pgdat, &flags);
		present = pgdat->node_present_pages;
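		/*
		 * Classify every page frame in this node; poke the NMI
		 * watchdog periodically since the scan can be lengthy.
		 */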
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page;
			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
				touch_nmi_watchdog();
			if (pfn_valid(pgdat->node_start_pfn + i))
				page = pfn_to_page(pgdat->node_start_pfn + i);
			else {
#ifdef CONFIG_VIRTUAL_MEM_MAP
				if (max_gap < LARGE_GAP)
					continue;
#endif
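				/* jump to the next pfn backed by the virtual mem_map */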
				i = vmemmap_find_next_valid_pfn(nid, i) - 1;
				continue;
			}
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
		total_present += present;
		total_reserved += reserved;
		total_cached += cached;
		total_shared += shared;
		printk(KERN_INFO "Node %4d:  RAM: %11ld, rsvd: %8d, "
		       "shrd: %10d, swpd: %10d\n", nid,
		       present, reserved, shared, cached);
	}
	printk(KERN_INFO "%ld pages of RAM\n", total_present);
	printk(KERN_INFO "%d reserved pages\n", total_reserved);
	printk(KERN_INFO "%d pages shared\n", total_shared);
	printk(KERN_INFO "%d pages swap cached\n", total_cached);
	printk(KERN_INFO "Total of %ld pages in page table cache\n",
	       quicklist_total_size());
	printk(KERN_INFO "%ld free buffer pages\n", nr_free_buffer_pages());
}


/* physical address where the bootmem map is located */
unsigned long bootmap_start;

/**
 * find_bootmap_location - callback to find a memory area for the bootmap
 * @start: start of region
 * @end: end of region
 * @arg: unused callback data
 *
 * Find a place to put the bootmap and return its starting address in
 * bootmap_start.  This address must be page-aligned.
 */
static int __init
find_bootmap_location (u64 start, u64 end, void *arg)
{
	u64 needed = *(unsigned long *)arg;
	u64 range_start, range_end, free_start;
	int i;

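	/* if pfn 0 must be ignored, start the search at the next page */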
#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif

	free_start = PAGE_OFFSET;

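	/*
	 * Scan the holes between reserved regions; free_start tracks the
	 * first address past the last reserved region examined so far.
	 */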
	for (i = 0; i < num_rsvd_regions; i++) {
		range_start = max(start, free_start);
		range_end   = min(end, rsvd_region[i].start & PAGE_MASK);

		free_start = PAGE_ALIGN(rsvd_region[i].end);

		if (range_end <= range_start)
			continue;	/* skip over empty range */

		if (range_end - range_start >= needed) {
			bootmap_start = __pa(range_start);
			return -1;	/* done */
		}

		/* nothing more available in this segment */
		if (range_end == end)
			return 0;
	}
	return 0;
}

#ifdef CONFIG_SMP
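/*
 * Bootmem buffer holding the per-cpu areas of all possible cpus,
 * carved into PERCPU_PAGE_SIZE slots by per_cpu_init().
 */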
static void *cpu_data;
/**
 * per_cpu_init - setup per-cpu variables
 *
 * Allocate and setup per-cpu data areas.
 */
void *per_cpu_init(void)
{
	static bool first_time = true;
	void *cpu0_data = __cpu0_per_cpu;
	unsigned int cpu;

	if (!first_time)
		goto skip;
	first_time = false;

	/*
	 * get_free_pages() cannot be used before cpu_init() is done.
	 * The BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
	 * so that the APs don't need to call get_zeroed_page().
	 */
	for_each_possible_cpu(cpu) {
		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;

		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
		__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];

		/*
		 * The percpu area for cpu0 is moved from the __init area
		 * which is set up by head.S and used till this point.
		 * Update ar.k3.  This move ensures that the percpu
		 * area for cpu0 is on the correct node and its
		 * virtual address isn't insanely far from other
		 * percpu areas, which is important for the congruent
		 * percpu allocator.
		 */
		if (cpu == 0)
			ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
				    (unsigned long)__per_cpu_start);

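		/* advance to the next cpu's PERCPU_PAGE_SIZE slot */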
		cpu_data += PERCPU_PAGE_SIZE;
	}
skip:
	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}

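/*
 * Carve one PERCPU_PAGE_SIZE slot per possible cpu out of bootmem,
 * preferring memory above MAX_DMA_ADDRESS.
 */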
static inline void
alloc_per_cpu_data(void)
{
	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}

/**
 * setup_per_cpu_areas - setup percpu areas
 *
 * Arch code has already allocated and initialized percpu areas.  All
 * this function has to do is to teach the determined layout to the
 * dynamic percpu allocator, which happens to be more complex than
 * creating whole new ones using helpers.
 */
void __init
setup_per_cpu_areas(void)
{
	struct pcpu_alloc_info *ai;
	struct pcpu_group_info *gi;
	unsigned int cpu;
	ssize_t static_size, reserved_size, dyn_size;
	int rc;

	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");
	gi = &ai->groups[0];

	/* units are assigned consecutively to possible cpus */
	for_each_possible_cpu(cpu)
		gi->cpu_map[gi->nr_units++] = cpu;

	/* set parameters */
	static_size = __per_cpu_end - __per_cpu_start;
	reserved_size = PERCPU_MODULE_RESERVE;
	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
	if (dyn_size < 0)
		panic("percpu area overflow static=%zd reserved=%zd\n",
		      static_size, reserved_size);

	ai->static_size		= static_size;
	ai->reserved_size	= reserved_size;
	ai->dyn_size		= dyn_size;
	ai->unit_size		= PERCPU_PAGE_SIZE;
	ai->atom_size		= PAGE_SIZE;
	ai->alloc_size		= PERCPU_PAGE_SIZE;

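	/* hand the static layout over to the dynamic percpu allocator */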
	rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
	if (rc)
		panic("failed to setup percpu area (err=%d)", rc);

	pcpu_free_alloc_info(ai);
}
#else
#define alloc_per_cpu_data() do { } while (0)
#endif

/**
 * find_memory - setup memory map
 *
 * Walk the EFI memory map and find usable memory for the system, taking
 * into account reserved areas.
 */
void __init
find_memory (void)
{
	unsigned long bootmap_size;

	reserve_memory();

	/* first find highest page frame number */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;
	efi_memmap_walk(find_max_min_low_pfn, NULL);
	max_pfn = max_low_pfn;
	/* how many bytes to cover all the pages */
	bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;

	/* look for a location to hold the bootmap */
	bootmap_start = ~0UL;
	efi_memmap_walk(find_bootmap_location, &bootmap_size);
	if (bootmap_start == ~0UL)
		panic("Cannot find %ld bytes for bootmap\n", bootmap_size);

	bootmap_size = init_bootmem_node(NODE_DATA(0),
			(bootmap_start >> PAGE_SHIFT), 0, max_pfn);

	/* Free all available memory, then mark bootmem-map as being in use */
	efi_memmap_walk(filter_rsvd_memory, free_bootmem);
	reserve_bootmem(bootmap_start, bootmap_size, BOOTMEM_DEFAULT);

	find_initrd();

	alloc_per_cpu_data();
}

/*
 * Set up the page tables.
 */

void __init
paging_init (void)
{
	unsigned long max_dma;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_DMA] = max_dma;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	efi_memmap_walk(filter_memory, register_active_ranges);
	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
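	/* a flat mem_map is cheaper when the largest hole is small */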
	if (max_gap < LARGE_GAP) {
		vmem_map = (struct page *) 0;
		free_area_init_nodes(max_zone_pfns);
	} else {
		unsigned long map_size;

		/* allocate virtual_mem_map */

		map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
			sizeof(struct page));
		VMALLOC_END -= map_size;
		vmem_map = (struct page *) VMALLOC_END;
		efi_memmap_walk(create_mem_map_page_table, NULL);

		/*
		 * alloc_node_mem_map makes an adjustment for mem_map
		 * which isn't compatible with vmem_map.
		 */
		NODE_DATA(0)->node_mem_map = vmem_map +
			find_min_pfn_with_active_regions();
		free_area_init_nodes(max_zone_pfns);

		printk(KERN_INFO "Virtual mem_map starts at 0x%p\n", mem_map);
	}
#else /* !CONFIG_VIRTUAL_MEM_MAP */
	memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
	free_area_init_nodes(max_zone_pfns);
#endif /* !CONFIG_VIRTUAL_MEM_MAP */
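	/* cache the struct page that backs empty_zero_page */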
	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}