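/*
 * Platform initialization for Discontig Memory (NUMA) configurations:
 * per-node setup of the boot allocator, the per-cpu areas, and the
 * per-node data structures.
 */
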
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <linux/slab.h>
#include <asm/efi.h>
#include <asm/tlb.h>
#include <asm/meminit.h>
#include <asm/numa.h>
#include <asm/sections.h>
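
/*
 * Track per-node information needed to set up the boot memory allocator,
 * the per-node areas, and the real VM.
 */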
struct early_node_data {
	struct ia64_node_data *node_data;
	unsigned long pernode_addr;
	unsigned long pernode_size;
	unsigned long min_pfn;
	unsigned long max_pfn;
};

static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
static nodemask_t memory_less_mask __initdata;

pg_data_t *pgdat_list[MAX_NUMNODES];
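
/*
 * To prevent cache aliasing effects, align per-node structures so that they
 * start at addresses that are strided by node number.
 */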
#define MAX_NODE_ALIGN_OFFSET	(32 * 1024 * 1024)
#define NODEDATA_ALIGN(addr, node)					\
	((((addr) + 1024*1024-1) & ~(1024*1024-1)) +			\
	     (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
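
/**
 * build_node_maps - callback to setup mem_data structs for each node
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Record the granule-rounded [min_pfn, max_pfn) extent of each node's
 * memory so that later passes know which physical ranges belong to
 * which node.
 */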
static int __init build_node_maps(unsigned long start, unsigned long len,
				  int node)
{
	unsigned long spfn, epfn, end = start + len;

	epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
	spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT;

	if (!mem_data[node].min_pfn) {
		mem_data[node].min_pfn = spfn;
		mem_data[node].max_pfn = epfn;
	} else {
		mem_data[node].min_pfn = min(spfn, mem_data[node].min_pfn);
		mem_data[node].max_pfn = max(epfn, mem_data[node].max_pfn);
	}

	return 0;
}
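
/**
 * early_nr_cpus_node - return number of cpus on a given node
 * @node: node to check
 *
 * Count the number of cpus on @node.  We can't use nr_cpus_node() yet
 * because the node_to_cpu_mask array has not been set up at this point,
 * so walk node_cpuid[] directly.
 */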
static int early_nr_cpus_node(int node)
{
	int cpu, n = 0;

	for_each_possible_early_cpu(cpu)
		if (node == node_cpuid[cpu].nid)
			n++;

	return n;
}
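
/**
 * compute_pernodesize - compute size of pernode data
 * @node: the node id.
 *
 * The accounting here mirrors the layout carved out by fill_pernode().
 */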
static unsigned long compute_pernodesize(int node)
{
	unsigned long pernodesize = 0, cpus;

	cpus = early_nr_cpus_node(node);
	pernodesize += PERCPU_PAGE_SIZE * cpus;
	pernodesize += node * L1_CACHE_BYTES;
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize = PAGE_ALIGN(pernodesize);
	return pernodesize;
}
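
/**
 * per_cpu_node_setup - setup per-cpu areas on each node
 * @cpu_data: per-cpu area on this node
 * @node: node to setup
 *
 * Copy the static per-cpu data into the region we just set aside and then
 * setup __per_cpu_offset for each CPU on this node.  Return a pointer to
 * the end of the area.
 */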
static void *per_cpu_node_setup(void *cpu_data, int node)
{
#ifdef CONFIG_SMP
	int cpu;

	for_each_possible_early_cpu(cpu) {
		void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;

		if (node != node_cpuid[cpu].nid)
			continue;

		memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
		__per_cpu_offset[cpu] = (char *)__va(cpu_data) -
			__per_cpu_start;

		/*
		 * cpu0's per-cpu area has been living in the __init region
		 * set up by head.S until now.  Point the per-cpu kernel
		 * register (IA64_KR_PER_CPU_DATA) at the new copy so cpu0's
		 * per-cpu data ends up on the correct node alongside the
		 * other cpus' areas.
		 */
		if (cpu == 0)
			ia64_set_kr(IA64_KR_PER_CPU_DATA,
				    (unsigned long)cpu_data -
				    (unsigned long)__per_cpu_start);

		cpu_data += PERCPU_PAGE_SIZE;
	}
#endif
	return cpu_data;
}

#ifdef CONFIG_SMP
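/**
 * setup_per_cpu_areas - setup percpu areas
 *
 * Arch code has already allocated and initialized the percpu areas.  All
 * this function has to do is teach the determined layout to the dynamic
 * percpu allocator, which happens to be more complex than creating whole
 * new ones using helpers.
 */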
void __init setup_per_cpu_areas(void)
{
	struct pcpu_alloc_info *ai;
	struct pcpu_group_info *gi;
	unsigned int *cpu_map;
	void *base;
	unsigned long base_offset;
	unsigned int cpu;
	ssize_t static_size, reserved_size, dyn_size;
	int node, prev_node, unit, nr_units;

	ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");
	cpu_map = ai->groups[0].cpu_map;

	/* determine base */
	base = (void *)ULONG_MAX;
	for_each_possible_cpu(cpu)
		base = min(base,
			   (void *)(__per_cpu_offset[cpu] + __per_cpu_start));
	base_offset = (void *)__per_cpu_start - base;

	/* build cpu_map, units are grouped by node */
	unit = 0;
	for_each_node(node)
		for_each_possible_cpu(cpu)
			if (node == node_cpuid[cpu].nid)
				cpu_map[unit++] = cpu;
	nr_units = unit;

	/* set basic parameters */
	static_size = __per_cpu_end - __per_cpu_start;
	reserved_size = PERCPU_MODULE_RESERVE;
	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
	if (dyn_size < 0)
		panic("percpu area overflow static=%zd reserved=%zd\n",
		      static_size, reserved_size);

	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	ai->unit_size = PERCPU_PAGE_SIZE;
	ai->atom_size = PAGE_SIZE;
	ai->alloc_size = PERCPU_PAGE_SIZE;

	/*
	 * CPUs are put into groups according to node.  Walk cpu_map
	 * and create new groups at node boundaries.
	 */
	prev_node = NUMA_NO_NODE;
	ai->nr_groups = 0;
	for (unit = 0; unit < nr_units; unit++) {
		cpu = cpu_map[unit];
		node = node_cpuid[cpu].nid;

		if (node == prev_node) {
			gi->nr_units++;
			continue;
		}
		prev_node = node;

		gi = &ai->groups[ai->nr_groups++];
		gi->nr_units = 1;
		gi->base_offset = __per_cpu_offset[cpu] + base_offset;
		gi->cpu_map = &cpu_map[unit];
	}

	pcpu_setup_first_chunk(ai, base);
	pcpu_free_alloc_info(ai);
}
#endif
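
/**
 * fill_pernode - initialize pernode data.
 * @node: the node id.
 * @pernode: physical address of pernode data
 * @pernodesize: size of the pernode data
 */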
static void __init fill_pernode(int node, unsigned long pernode,
				unsigned long pernodesize)
{
	void *cpu_data;
	int cpus = early_nr_cpus_node(node);

	mem_data[node].pernode_addr = pernode;
	mem_data[node].pernode_size = pernodesize;
	memset(__va(pernode), 0, pernodesize);

	cpu_data = (void *)pernode;
	pernode += PERCPU_PAGE_SIZE * cpus;
	pernode += node * L1_CACHE_BYTES;

	pgdat_list[node] = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	mem_data[node].node_data = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	cpu_data = per_cpu_node_setup(cpu_data, node);

	return;
}
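
/**
 * find_pernode_space - allocate memory for memory map and per-node structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Reserve space for the per-cpu areas, the list of pg_data_t pointers and
 * the per-node data struct.  Each node gets something like the following
 * in the first chunk of its address space that is large enough:
 *
 *	PERCPU_PAGE_SIZE * cpus_on_this_node	(per-cpu areas)
 *	node * L1_CACHE_BYTES			(anti-aliasing stride)
 *	L1_CACHE_ALIGN(sizeof(pg_data_t))	(local pg_data_t)
 *	L1_CACHE_ALIGN(sizeof(struct ia64_node_data))
 *	L1_CACHE_ALIGN(sizeof(pg_data_t))
 *
 * which mirrors the accounting in compute_pernodesize() and the layout
 * carved out by fill_pernode().
 */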
static int __init find_pernode_space(unsigned long start, unsigned long len,
				     int node)
{
	unsigned long spfn, epfn;
	unsigned long pernodesize = 0, pernode;

	spfn = start >> PAGE_SHIFT;
	epfn = (start + len) >> PAGE_SHIFT;

	/*
	 * Make sure this memory falls within this node's usable memory
	 * as recorded by build_node_maps().
	 */
	if (spfn < mem_data[node].min_pfn || epfn > mem_data[node].max_pfn)
		return 0;

	/* Don't setup this node's local space twice... */
	if (mem_data[node].pernode_addr)
		return 0;

	/*
	 * Calculate total size needed, incl. what's necessary
	 * for good alignment and alias prevention.
	 */
	pernodesize = compute_pernodesize(node);
	pernode = NODEDATA_ALIGN(start, node);

	/* Is this range big enough for what we want to store here? */
	if (start + len > (pernode + pernodesize))
		fill_pernode(node, pernode, pernodesize);

	return 0;
}
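
/**
 * reserve_pernode_space - reserve memory for per-node space
 *
 * Reserve the space used by the per-node areas in the memblock allocator
 * so that it isn't handed out again when the real mem maps are created.
 */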
static void __init reserve_pernode_space(void)
{
	unsigned long base, size;
	int node;

	for_each_online_node(node) {
		if (node_isset(node, memory_less_mask))
			continue;

		/* Now the per-node space */
		size = mem_data[node].pernode_size;
		base = __pa(mem_data[node].pernode_addr);
		memblock_reserve(base, size);
	}
}

static void scatter_node_data(void)
{
	pg_data_t **dst;
	int node;

	/*
	 * for_each_online_node() can't be used here: when this is called
	 * via arch_refresh_nodedata() for a hot-added node, that node is
	 * not yet set in node_online_map, but its pgdat_list entry still
	 * needs to be propagated to every node's local copy.
	 */
	for_each_node(node) {
		if (pgdat_list[node]) {
			dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
			memcpy(dst, pgdat_list, sizeof(pgdat_list));
		}
	}
}
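
/**
 * initialize_pernode_data - fixup per-cpu & per-node pointers
 *
 * Each node's per-node area has a copy of the global pg_data_t list, so
 * we copy that to each node here, as well as setting the per-cpu pointer
 * to the local node data structure.
 */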
static void __init initialize_pernode_data(void)
{
	int cpu, node;

	scatter_node_data();

#ifdef CONFIG_SMP
	/* Set the node_data pointer for each per-cpu struct */
	for_each_possible_early_cpu(cpu) {
		node = node_cpuid[cpu].nid;
		per_cpu(ia64_cpu_info, cpu).node_data =
			mem_data[node].node_data;
	}
#else
	{
		struct cpuinfo_ia64 *cpu0_cpu_info;
		cpu = 0;
		node = node_cpuid[cpu].nid;
		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
			((char *)&ia64_cpu_info - __per_cpu_start));
		cpu0_cpu_info->node_data = mem_data[node].node_data;
	}
#endif /* CONFIG_SMP */
}
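
/**
 * memory_less_node_alloc - allocate pernode space for a memoryless node
 * @nid: node we want the space close to
 * @pernodesize: size of this node's pernode data
 *
 * Allocate the pernode data from the node with memory that is closest
 * according to node_distance(), falling back to any node with memory if
 * no better candidate is found.
 */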
static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
{
	void *ptr = NULL;
	u8 best = 0xff;
	int bestnode = NUMA_NO_NODE, node, anynode = 0;

	for_each_online_node(node) {
		if (node_isset(node, memory_less_mask))
			continue;
		else if (node_distance(nid, node) < best) {
			best = node_distance(nid, node);
			bestnode = node;
		}
		anynode = node;
	}

	if (bestnode == NUMA_NO_NODE)
		bestnode = anynode;

	ptr = memblock_alloc_try_nid(pernodesize, PERCPU_PAGE_SIZE,
				     __pa(MAX_DMA_ADDRESS),
				     MEMBLOCK_ALLOC_ACCESSIBLE,
				     bestnode);
	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%lx\n",
		      __func__, pernodesize, PERCPU_PAGE_SIZE, bestnode,
		      __pa(MAX_DMA_ADDRESS));

	return ptr;
}
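
/**
 * memory_less_nodes - allocate and initialize CPU only nodes pernode
 *	information.
 */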
static void __init memory_less_nodes(void)
{
	unsigned long pernodesize;
	void *pernode;
	int node;

	for_each_node_mask(node, memory_less_mask) {
		pernodesize = compute_pernodesize(node);
		pernode = memory_less_node_alloc(node, pernodesize);
		fill_pernode(node, __pa(pernode), pernodesize);
	}

	return;
}
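
/**
 * find_memory - walk the EFI memory map and setup the bootmem allocator
 *
 * Called early in boot to setup the bootmem allocator, and to
 * allocate the per-cpu and per-node structures.
 */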
void __init find_memory(void)
{
	int node;

	reserve_memory();
	efi_memmap_walk(filter_memory, register_active_ranges);

	if (num_online_nodes() == 0) {
		printk(KERN_ERR "node info missing!\n");
		node_set_online(0);
	}

	nodes_or(memory_less_mask, memory_less_mask, node_online_map);
	min_low_pfn = -1;
	max_low_pfn = 0;

	/* These actually end up getting called by call_pernode_memory() */
	efi_memmap_walk(filter_rsvd_memory, build_node_maps);
	efi_memmap_walk(filter_rsvd_memory, find_pernode_space);
	efi_memmap_walk(find_max_min_low_pfn, NULL);

	for_each_online_node(node)
		if (mem_data[node].min_pfn)
			node_clear(node, memory_less_mask);

	reserve_pernode_space();
	memory_less_nodes();
	initialize_pernode_data();

	max_pfn = max_low_pfn;

	find_initrd();
}

#ifdef CONFIG_SMP
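/**
 * per_cpu_init - setup per-cpu variables
 *
 * The per-cpu areas were already laid out by per_cpu_node_setup(); all
 * that is left is to record local_per_cpu_offset for each cpu and return
 * this cpu's per-cpu base address.
 */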
void *per_cpu_init(void)
{
	int cpu;
	static int first_time = 1;

	if (first_time) {
		first_time = 0;
		for_each_possible_early_cpu(cpu)
			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
	}

	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
#endif /* CONFIG_SMP */
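
/**
 * call_pernode_memory - use SRAT to call callback functions with node info
 * @start: physical start of range
 * @len: length of range
 * @arg: function to call for each range
 *
 * efi_memmap_walk() knows nothing about the layout of memory across nodes.
 * Find out to which node a block of memory belongs, and split blocks that
 * run across multiple nodes.
 *
 * Take this opportunity to round the start address up and the end address
 * down to page boundaries.
 */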
void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
{
	unsigned long rs, re, end = start + len;
	void (*func)(unsigned long, unsigned long, int);
	int i;

	start = PAGE_ALIGN(start);
	end &= PAGE_MASK;
	if (start >= end)
		return;

	func = arg;

	if (!num_node_memblks) {
		/* No SRAT table, so assume one node (node 0) */
		if (start < end)
			(*func)(start, end - start, 0);
		return;
	}

	for (i = 0; i < num_node_memblks; i++) {
		rs = max(start, node_memblk[i].start_paddr);
		re = min(end, node_memblk[i].start_paddr +
			 node_memblk[i].size);

		if (rs < re)
			(*func)(rs, re - rs, node_memblk[i].nid);

		if (re == end)
			break;
	}
}
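
/**
 * paging_init - setup page tables
 *
 * paging_init() sets up the zone sizes and initializes the node memory
 * maps for each node of the system.
 */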
void __init paging_init(void)
{
	unsigned long max_dma;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	sparse_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA32] = max_dma;
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);

	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}

#ifdef CONFIG_MEMORY_HOTPLUG
pg_data_t *arch_alloc_nodedata(int nid)
{
	unsigned long size = compute_pernodesize(nid);

	return kzalloc(size, GFP_KERNEL);
}

void arch_free_nodedata(pg_data_t *pgdat)
{
	kfree(pgdat);
}

void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
{
	pgdat_list[update_node] = update_pgdat;
	scatter_node_data();
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node, NULL);
}

void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
}
#endif