/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/node.h>
#include <linux/cpu.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/pci.h>
#include <linux/swiotlb.h>
#include <linux/initrd.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/timex.h>
#include <linux/hugetlb.h>
#include <linux/start_kernel.h>
#include <linux/screen_info.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <hv/hypervisor.h>
#include <arch/interrupts.h>

/* <linux/smp.h> doesn't provide this definition. */
#ifndef CONFIG_SMP
#define setup_max_cpus 1
#endif

static inline int ABS(int x) { return x >= 0 ? x : -x; }

/* Chip information */
char chip_model[64] __write_once;

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

/* Information on the NUMA nodes that we compute early */
unsigned long node_start_pfn[MAX_NUMNODES];
unsigned long node_end_pfn[MAX_NUMNODES];
unsigned long __initdata node_memmap_pfn[MAX_NUMNODES];
unsigned long __initdata node_percpu_pfn[MAX_NUMNODES];
unsigned long __initdata node_free_pfn[MAX_NUMNODES];

static unsigned long __initdata node_percpu[MAX_NUMNODES];

/*
 * per-cpu stack and boot info.
 */
DEFINE_PER_CPU(unsigned long, boot_sp) =
        (unsigned long)init_stack + THREAD_SIZE;

#ifdef CONFIG_SMP
DEFINE_PER_CPU(unsigned long, boot_pc) = (unsigned long)start_kernel;
#else
/*
 * The variable must be __initdata since it references __init code.
 * With CONFIG_SMP it is per-cpu data, which is exempt from validation.
 */
unsigned long __initdata boot_pc = (unsigned long)start_kernel;
#endif

#ifdef CONFIG_HIGHMEM
/* Page frame index of end of lowmem on each controller. */
unsigned long node_lowmem_end_pfn[MAX_NUMNODES];

/* Number of pages that can be mapped into lowmem. */
static unsigned long __initdata mappable_physpages;
#endif

/* Data on which physical memory controller corresponds to each NUMA node. */
int node_controller[MAX_NUMNODES] = { [0 ... MAX_NUMNODES-1] = -1 };

#ifdef CONFIG_HIGHMEM
/* Map information from VAs to PAs */
unsigned long pbase_map[1 << (32 - HPAGE_SHIFT)]
        __write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(pbase_map);

/* Map information from PAs to VAs */
void *vbase_map[NR_PA_HIGHBIT_VALUES]
        __write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(vbase_map);
#endif /* CONFIG_HIGHMEM */

/* Node number as a function of the high PA bits */
int highbits_to_node[NR_PA_HIGHBIT_VALUES] __write_once;
EXPORT_SYMBOL(highbits_to_node);

static unsigned int __initdata maxmem_pfn = -1U;
static unsigned int __initdata maxnodemem_pfn[MAX_NUMNODES] = {
        [0 ... MAX_NUMNODES-1] = -1U
};
static nodemask_t __initdata isolnodes;

#if defined(CONFIG_PCI) && !defined(__tilegx__)
enum { DEFAULT_PCI_RESERVE_MB = 64 };
static unsigned int __initdata pci_reserve_mb = DEFAULT_PCI_RESERVE_MB;
unsigned long __initdata pci_reserve_start_pfn = -1U;
unsigned long __initdata pci_reserve_end_pfn = -1U;
#endif
static int __init setup_maxmem(char *str)
{
        unsigned long long maxmem;
        if (str == NULL || (maxmem = memparse(str, NULL)) == 0)
                return -EINVAL;

        maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT);
        pr_info("Forcing RAM used to no more than %dMB\n",
                maxmem_pfn >> (20 - PAGE_SHIFT));
        return 0;
}
early_param("maxmem", setup_maxmem);
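
/*
 * Example (hypothetical value): booting with "maxmem=768M" caps usable
 * RAM at 768 MB, rounded down to a whole number of huge pages;
 * memparse() accepts the usual K/M/G suffixes.
 */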

static int __init setup_maxnodemem(char *str)
{
        char *endp;
        unsigned long long maxnodemem;
        long node;

        node = str ? simple_strtoul(str, &endp, 0) : INT_MAX;
        if (node >= MAX_NUMNODES || *endp != ':')
                return -EINVAL;

        maxnodemem = memparse(endp+1, NULL);
        maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) <<
                (HPAGE_SHIFT - PAGE_SHIFT);
        pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
                node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
        return 0;
}
early_param("maxnodemem", setup_maxnodemem);
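
/*
 * Example (hypothetical values): "maxnodemem=1:2G" caps the RAM used on
 * controller 1 at 2 GB, again rounded down to whole huge pages.
 */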

struct memmap_entry {
        u64 addr;       /* start of memory segment */
        u64 size;       /* size of memory segment */
};
static struct memmap_entry memmap_map[64];
static int memmap_nr;

static void add_memmap_region(u64 addr, u64 size)
{
        if (memmap_nr >= ARRAY_SIZE(memmap_map)) {
                pr_err("Oops! Too many entries in the memory map!\n");
                return;
        }
        memmap_map[memmap_nr].addr = addr;
        memmap_map[memmap_nr].size = size;
        memmap_nr++;
}

static int __init setup_memmap(char *p)
{
        char *oldp;
        u64 start_at, mem_size;

        if (!p)
                return -EINVAL;

        if (!strncmp(p, "exactmap", 8)) {
                pr_err("\"memmap=exactmap\" not valid on tile\n");
                return 0;
        }

        oldp = p;
        mem_size = memparse(p, &p);
        if (p == oldp)
                return -EINVAL;

        if (*p == '@') {
                pr_err("\"memmap=nn@ss\" (force RAM) invalid on tile\n");
        } else if (*p == '#') {
                pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on tile\n");
        } else if (*p == '$') {
                start_at = memparse(p+1, &p);
                add_memmap_region(start_at, mem_size);
        } else {
                if (mem_size == 0)
                        return -EINVAL;
                maxmem_pfn = (mem_size >> HPAGE_SHIFT) <<
                        (HPAGE_SHIFT - PAGE_SHIFT);
        }
        return *p == '\0' ? 0 : -EINVAL;
}
early_param("memmap", setup_memmap);
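
/*
 * Only the "memmap=nn$ss" form (mark a region reserved) does anything
 * beyond acting like "maxmem".  Example (hypothetical values):
 * "memmap=4M$0x10000000" keeps the kernel from using the 4 MB of RAM
 * starting at physical address 0x10000000.
 */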

static int __init setup_mem(char *str)
{
        return setup_maxmem(str);
}
early_param("mem", setup_mem);  /* compatibility with x86 */
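
/*
 * So, for example (hypothetical value), "mem=1G" behaves exactly like
 * "maxmem=1G".
 */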

static int __init setup_isolnodes(char *str)
{
        if (str == NULL || nodelist_parse(str, isolnodes) != 0)
                return -EINVAL;

        pr_info("Set isolnodes value to '%*pbl'\n",
                nodemask_pr_args(&isolnodes));
        return 0;
}
early_param("isolnodes", setup_isolnodes);
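
/*
 * Example (hypothetical value): "isolnodes=1-2" parses a standard node
 * list and marks those controllers to be kept out of the default
 * allocation pool.
 */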

#if defined(CONFIG_PCI) && !defined(__tilegx__)
static int __init setup_pci_reserve(char *str)
{
        if (str == NULL || kstrtouint(str, 0, &pci_reserve_mb) != 0 ||
            pci_reserve_mb > 3 * 1024)
                return -EINVAL;

        pr_info("Reserving %dMB for PCIE root complex mappings\n",
                pci_reserve_mb);
        return 0;
}
early_param("pci_reserve", setup_pci_reserve);
#endif
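
/*
 * Example (hypothetical value): "pci_reserve=128" reserves 128 MB just
 * below 4GB for PCIe root complex mappings; requests above 3 GB are
 * rejected, and the default is DEFAULT_PCI_RESERVE_MB.
 */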

#ifndef __tilegx__
/*
 * vmalloc=size forces the vmalloc area to be exactly 'size' bytes.
 * This can be used to increase (or decrease) the vmalloc area.
 */
static int __init parse_vmalloc(char *arg)
{
        if (!arg)
                return -EINVAL;

        VMALLOC_RESERVE = (memparse(arg, &arg) + PGDIR_SIZE - 1) & PGDIR_MASK;

        /* See validate_va() for more on this test. */
        if ((long)_VMALLOC_START >= 0)
                early_panic("\"vmalloc=%#lx\" value too large: maximum %#lx\n",
                            VMALLOC_RESERVE, _VMALLOC_END - 0x80000000UL);

        return 0;
}
early_param("vmalloc", parse_vmalloc);
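
/*
 * Example (hypothetical value): "vmalloc=512M" rounds the reservation up
 * to a whole number of pgdir entries, and panics early if the resulting
 * _VMALLOC_START would fall below the 2GB line.
 */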
#endif

#ifdef CONFIG_HIGHMEM
/*
 * Determine for each controller where its lowmem is mapped and how much
 * of it is mapped there.
 *
 * One question is how, on systems with more lowmem-mappable memory than
 * will fit, and with controllers of different sizes, to split the lowmem
 * mapping across controllers.  (E.g. with three controllers of 256MB,
 * 1GB, and 256MB, do we map the same amount from each, or an amount
 * proportionate to each controller's size?)  For now we use a
 * proportionate solution, computed below from the ratio of each
 * controller's cumulative page count to the total number of mappable
 * pages.
 */
static void *__init setup_pa_va_mapping(void)
{
        unsigned long curr_pages = 0;
        unsigned long vaddr = PAGE_OFFSET;
        nodemask_t highonlynodes = isolnodes;
        int i, j;

        memset(pbase_map, -1, sizeof(pbase_map));
        memset(vbase_map, -1, sizeof(vbase_map));

        /* Node zero cannot be isolated for LOWMEM purposes. */
        node_clear(0, highonlynodes);

        /* Count up the number of pages on non-highonlynodes controllers. */
        mappable_physpages = 0;
        for_each_online_node(i) {
                if (!node_isset(i, highonlynodes))
                        mappable_physpages +=
                                node_end_pfn[i] - node_start_pfn[i];
        }

        for_each_online_node(i) {
                unsigned long start = node_start_pfn[i];
                unsigned long end = node_end_pfn[i];
                unsigned long size = end - start;
                unsigned long vaddr_end;

                if (node_isset(i, highonlynodes)) {
                        /* Mark this controller as having no lowmem. */
                        node_lowmem_end_pfn[i] = start;
                        continue;
                }

                curr_pages += size;
                if (mappable_physpages > MAXMEM_PFN) {
                        vaddr_end = PAGE_OFFSET +
                                (((u64)curr_pages * MAXMEM_PFN /
                                  mappable_physpages)
                                 << PAGE_SHIFT);
                } else {
                        vaddr_end = PAGE_OFFSET + (curr_pages << PAGE_SHIFT);
                }
                for (j = 0; vaddr < vaddr_end; vaddr += HPAGE_SIZE, ++j) {
                        unsigned long this_pfn =
                                start + (j << HUGETLB_PAGE_ORDER);
                        pbase_map[vaddr >> HPAGE_SHIFT] = this_pfn;
                        if (vbase_map[__pfn_to_highbits(this_pfn)] ==
                            (void *)-1)
                                vbase_map[__pfn_to_highbits(this_pfn)] =
                                        (void *)(vaddr & HPAGE_MASK);
                }
                node_lowmem_end_pfn[i] = start + (j << HUGETLB_PAGE_ORDER);
                BUG_ON(node_lowmem_end_pfn[i] > end);
        }

        /* Return highest address of any mapped memory. */
        return (void *)vaddr;
}
#endif /* CONFIG_HIGHMEM */

/*
 * Register our most important memory mappings with the debug stub:
 * one lowmem mapping per memory controller, plus one for our
 * text segment.
 */
static void store_permanent_mappings(void)
{
        int i;

        for_each_online_node(i) {
                HV_PhysAddr pa = ((HV_PhysAddr)node_start_pfn[i]) << PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
                HV_PhysAddr high_mapped_pa = node_lowmem_end_pfn[i];
#else
                HV_PhysAddr high_mapped_pa = node_end_pfn[i];
#endif

                unsigned long pages = high_mapped_pa - node_start_pfn[i];
                HV_VirtAddr addr = (HV_VirtAddr) __va(pa);
                hv_store_mapping(addr, pages << PAGE_SHIFT, pa);
        }

        hv_store_mapping((HV_VirtAddr)_text,
                         (uint32_t)(_einittext - _text), 0);
}

/*
 * Use hv_inquire_physical() to populate node_{start,end}_pfn[]
 * and node_online_map, doing some sanity checks on the ranges
 * as we go.
 */
static void __init setup_memory(void)
{
        int i, j;
        int highbits_seen[NR_PA_HIGHBIT_VALUES] = { 0 };
#ifdef CONFIG_HIGHMEM
        long highmem_pages;
#endif
#ifndef __tilegx__
        int cap;
#endif
#if defined(CONFIG_HIGHMEM) || defined(__tilegx__)
        long lowmem_pages;
#endif
        unsigned long physpages = 0;

        /* We are using a char to hold the cpu_2_node[] mapping */
        BUILD_BUG_ON(MAX_NUMNODES > 127);

        /* Discover the ranges of memory available to us */
        for (i = 0; ; ++i) {
                unsigned long start, size, end, highbits;
                HV_PhysAddrRange range = hv_inquire_physical(i);
                if (range.size == 0)
                        break;
#ifdef CONFIG_FLATMEM
                if (i > 0) {
                        pr_err("Can't use discontiguous PAs: %#llx..%#llx\n",
                               range.start, range.start + range.size);
                        continue;
                }
#endif
#ifndef __tilegx__
                if ((unsigned long)range.start) {
                        pr_err("Range not at 4GB multiple: %#llx..%#llx\n",
                               range.start, range.start + range.size);
                        continue;
                }
#endif
                if ((range.start & (HPAGE_SIZE-1)) != 0 ||
                    (range.size & (HPAGE_SIZE-1)) != 0) {
                        unsigned long long start_pa = range.start;
                        unsigned long long orig_size = range.size;
                        range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK;
                        range.size -= (range.start - start_pa);
                        range.size &= HPAGE_MASK;
                        pr_err("Range not hugepage-aligned: %#llx..%#llx: now %#llx-%#llx\n",
                               start_pa, start_pa + orig_size,
                               range.start, range.start + range.size);
                }
                highbits = __pa_to_highbits(range.start);
                if (highbits >= NR_PA_HIGHBIT_VALUES) {
                        pr_err("PA high bits too high: %#llx..%#llx\n",
                               range.start, range.start + range.size);
                        continue;
                }
                if (highbits_seen[highbits]) {
                        pr_err("Range overlaps in high bits: %#llx..%#llx\n",
                               range.start, range.start + range.size);
                        continue;
                }
                highbits_seen[highbits] = 1;
                if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) {
                        int max_size = maxnodemem_pfn[i];
                        if (max_size > 0) {
                                pr_err("Maxnodemem reduced node %d to %d pages\n",
                                       i, max_size);
                                range.size = PFN_PHYS(max_size);
                        } else {
                                pr_err("Maxnodemem disabled node %d\n", i);
                                continue;
                        }
                }
                if (physpages + PFN_DOWN(range.size) > maxmem_pfn) {
                        int max_size = maxmem_pfn - physpages;
                        if (max_size > 0) {
                                pr_err("Maxmem reduced node %d to %d pages\n",
                                       i, max_size);
                                range.size = PFN_PHYS(max_size);
                        } else {
                                pr_err("Maxmem disabled node %d\n", i);
                                continue;
                        }
                }
                if (i >= MAX_NUMNODES) {
                        pr_err("Too many PA nodes (#%d): %#llx..%#llx\n",
                               i, range.start, range.start + range.size);
                        continue;
                }

                start = range.start >> PAGE_SHIFT;
                size = range.size >> PAGE_SHIFT;
                end = start + size;

#ifndef __tilegx__
                if (((HV_PhysAddr)end << PAGE_SHIFT) !=
                    (range.start + range.size)) {
                        pr_err("PAs too high to represent: %#llx..%#llx\n",
                               range.start, range.start + range.size);
                        continue;
                }
#endif
#if defined(CONFIG_PCI) && !defined(__tilegx__)
                /*
                 * Blocks that overlap the pci reserved region must
                 * have enough space to hold the maximum percpu data
                 * region at the top of the range.  If there isn't
                 * enough space above the reserved region, just
                 * truncate the node.
                 */
                if (start <= pci_reserve_start_pfn &&
                    end > pci_reserve_start_pfn) {
                        unsigned int per_cpu_size =
                                __per_cpu_end - __per_cpu_start;
                        unsigned int percpu_pages =
                                NR_CPUS * PFN_UP(per_cpu_size);
                        if (end < pci_reserve_end_pfn + percpu_pages) {
                                end = pci_reserve_start_pfn;
                                pr_err("PCI mapping region reduced node %d to %ld pages\n",
                                       i, end - start);
                        }
                }
#endif

                for (j = __pfn_to_highbits(start);
                     j <= __pfn_to_highbits(end - 1); j++)
                        highbits_to_node[j] = i;

                node_start_pfn[i] = start;
                node_end_pfn[i] = end;
                node_controller[i] = range.controller;
                physpages += size;
                max_pfn = end;

                /* Mark node as online */
                node_set(i, node_online_map);
                node_set(i, node_possible_map);
        }

#ifndef __tilegx__
        /*
         * For 4KB pages, mem_map "struct page" data is 1% of the size
         * of the physical memory, so can be quite big (640 MB for
         * four 16G zones).  These structures must be mapped in
         * lowmem, and since we currently cap out at about 768 MB,
         * it's impractical to try to use this much address space.
         * For now, arbitrarily cap the amount of physical memory
         * we're willing to use at 8 million pages (32GB of 4KB pages).
         */
        cap = 8 * 1024 * 1024;  /* 8 million pages */
        if (physpages > cap) {
                int num_nodes = num_online_nodes();
                int cap_each = cap / num_nodes;
                unsigned long dropped_pages = 0;
                for (i = 0; i < num_nodes; ++i) {
                        int size = node_end_pfn[i] - node_start_pfn[i];
                        if (size > cap_each) {
                                dropped_pages += (size - cap_each);
                                node_end_pfn[i] = node_start_pfn[i] + cap_each;
                        }
                }
                physpages -= dropped_pages;
                pr_warn("Only using %ldMB memory - ignoring %ldMB\n",
                        physpages >> (20 - PAGE_SHIFT),
                        dropped_pages >> (20 - PAGE_SHIFT));
                pr_warn("Consider using a larger page size\n");
        }
#endif

        /* Heap starts just above the end of the program. */
        min_low_pfn = PFN_UP((unsigned long)_end - PAGE_OFFSET);

#ifdef CONFIG_HIGHMEM
        /* Find where we map lowmem from each controller. */
        high_memory = setup_pa_va_mapping();

        /* Set max_low_pfn based on what node 0 can directly address. */
        max_low_pfn = node_lowmem_end_pfn[0];

        lowmem_pages = (mappable_physpages > MAXMEM_PFN) ?
                MAXMEM_PFN : mappable_physpages;
        highmem_pages = (long) (physpages - lowmem_pages);

        pr_notice("%ldMB HIGHMEM available\n",
                  pages_to_mb(highmem_pages > 0 ? highmem_pages : 0));
        pr_notice("%ldMB LOWMEM available\n", pages_to_mb(lowmem_pages));
#else
        /* Set max_low_pfn based on what node 0 can directly address. */
        max_low_pfn = node_end_pfn[0];

#ifndef __tilegx__
        if (node_end_pfn[0] > MAXMEM_PFN) {
                pr_warn("Only using %ldMB LOWMEM\n", MAXMEM >> 20);
                pr_warn("Use a HIGHMEM enabled kernel\n");
                max_low_pfn = MAXMEM_PFN;
                max_pfn = MAXMEM_PFN;
                node_end_pfn[0] = MAXMEM_PFN;
        } else {
                pr_notice("%ldMB memory available\n",
                          pages_to_mb(node_end_pfn[0]));
        }
        for (i = 1; i < MAX_NUMNODES; ++i) {
                node_start_pfn[i] = 0;
                node_end_pfn[i] = 0;
        }
        high_memory = pfn_to_kaddr(node_end_pfn[0]);
#else
        lowmem_pages = 0;
        for (i = 0; i < MAX_NUMNODES; ++i) {
                int pages = node_end_pfn[i] - node_start_pfn[i];
                lowmem_pages += pages;
                if (pages)
                        high_memory = pfn_to_kaddr(node_end_pfn[i]);
        }
        pr_notice("%ldMB memory available\n", pages_to_mb(lowmem_pages));
#endif
#endif
}

/*
 * On 32-bit machines, we only put bootmem on the low controller,
 * since PAs > 4GB can't be used in bootmem.  In principle one could
 * imagine, e.g., multiple 1 GB controllers all of which could support
 * bootmem, but in practice using controllers this small isn't a
 * particularly interesting scenario, so we just keep it simple and
 * use only the first controller for bootmem on 32-bit machines.
 */
static inline int node_has_bootmem(int nid)
{
#ifdef CONFIG_64BIT
        return 1;
#else
        return nid == 0;
#endif
}

static inline unsigned long alloc_bootmem_pfn(int nid,
                                              unsigned long size,
                                              unsigned long goal)
{
        void *kva = __alloc_bootmem_node(NODE_DATA(nid), size,
                                         PAGE_SIZE, goal);
        unsigned long pfn = kaddr_to_pfn(kva);
        BUG_ON(goal && PFN_PHYS(pfn) != goal);
        return pfn;
}
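
/*
 * Note that __alloc_bootmem_node() treats "goal" as a preference rather
 * than a requirement; the BUG_ON() above turns any nonzero goal into a
 * hard requirement for our callers.
 */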

static void __init setup_bootmem_allocator_node(int i)
{
        unsigned long start, end, mapsize, mapstart;

        if (node_has_bootmem(i)) {
                NODE_DATA(i)->bdata = &bootmem_node_data[i];
        } else {
                /* Share controller zero's bdata for now. */
                NODE_DATA(i)->bdata = &bootmem_node_data[0];
                return;
        }

        /* Skip up to after the bss in node 0. */
        start = (i == 0) ? min_low_pfn : node_start_pfn[i];

        /* Only lowmem, if we're a HIGHMEM build. */
#ifdef CONFIG_HIGHMEM
        end = node_lowmem_end_pfn[i];
#else
        end = node_end_pfn[i];
#endif

        /* No memory here. */
        if (end == start)
                return;

        /* Figure out where the bootmem bitmap is located. */
        mapsize = bootmem_bootmap_pages(end - start);
        if (i == 0) {
                /* Use some space right before the heap on node 0. */
                mapstart = start;
                start += mapsize;
        } else {
                /* Allocate the bitmap from node 0, which is set up first. */
                mapstart = alloc_bootmem_pfn(0, PFN_PHYS(mapsize), 0);
        }

        /* Initialize a node. */
        init_bootmem_node(NODE_DATA(i), mapstart, start, end);

        /* Free all the space back into the allocator. */
        free_bootmem(PFN_PHYS(start), PFN_PHYS(end - start));

#if defined(CONFIG_PCI) && !defined(__tilegx__)
        /*
         * Reserve (i.e. don't hand to the page allocator) any memory
         * overlapped by the PCI reserved region.
         */
        if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start) {
                start = max(pci_reserve_start_pfn, start);
                end = min(pci_reserve_end_pfn, end);
                reserve_bootmem(PFN_PHYS(start), PFN_PHYS(end - start),
                                BOOTMEM_EXCLUSIVE);
        }
#endif
}

static void __init setup_bootmem_allocator(void)
{
        int i;
        for (i = 0; i < MAX_NUMNODES; ++i)
                setup_bootmem_allocator_node(i);

        /* Reserve any memory excluded by "memmap" arguments. */
        for (i = 0; i < memmap_nr; ++i) {
                struct memmap_entry *m = &memmap_map[i];
                reserve_bootmem(m->addr, m->size, BOOTMEM_DEFAULT);
        }

#ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_start) {
                /* Make sure the initrd memory region is not modified. */
                if (reserve_bootmem(initrd_start, initrd_end - initrd_start,
                                    BOOTMEM_EXCLUSIVE)) {
                        pr_crit("The initrd memory region has been polluted. Disabling it.\n");
                        initrd_start = 0;
                        initrd_end = 0;
                } else {
                        /*
                         * Translate initrd_start & initrd_end from PA
                         * to VA for us to access.
                         */
                        initrd_start += PAGE_OFFSET;
                        initrd_end += PAGE_OFFSET;
                }
        }
#endif

#ifdef CONFIG_KEXEC
        if (crashk_res.start != crashk_res.end)
                reserve_bootmem(crashk_res.start, resource_size(&crashk_res),
                                BOOTMEM_DEFAULT);
#endif
}

void *__init alloc_remap(int nid, unsigned long size)
{
        int pages = node_end_pfn[nid] - node_start_pfn[nid];
        void *map = pfn_to_kaddr(node_memmap_pfn[nid]);
        BUG_ON(size != pages * sizeof(struct page));
        memset(map, 0, size);
        return map;
}

static int __init percpu_size(void)
{
        int size = __per_cpu_end - __per_cpu_start;
        size += PERCPU_MODULE_RESERVE;
        size += PERCPU_DYNAMIC_EARLY_SIZE;
        if (size < PCPU_MIN_UNIT_SIZE)
                size = PCPU_MIN_UNIT_SIZE;
        size = roundup(size, PAGE_SIZE);

        /* In several places we assume the per-cpu data fits on a huge page. */
        BUG_ON(kdata_huge && size > HPAGE_SIZE);
        return size;
}
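
/*
 * The per-cpu unit is thus the static per-cpu data, plus the reserve for
 * per-cpu data in loadable modules, plus the early dynamic per-cpu
 * reserve, rounded up to whole pages (and up to PCPU_MIN_UNIT_SIZE).
 */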

static void __init zone_sizes_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES] = { 0 };
        int size = percpu_size();
        int num_cpus = smp_height * smp_width;
        const unsigned long dma_end = (1UL << (32 - PAGE_SHIFT));

        int i;

        for (i = 0; i < num_cpus; ++i)
                node_percpu[cpu_to_node(i)] += size;

        for_each_online_node(i) {
                unsigned long start = node_start_pfn[i];
                unsigned long end = node_end_pfn[i];
#ifdef CONFIG_HIGHMEM
                unsigned long lowmem_end = node_lowmem_end_pfn[i];
#else
                unsigned long lowmem_end = end;
#endif
                int memmap_size = (end - start) * sizeof(struct page);
                node_free_pfn[i] = start;

                /*
                 * Set aside pages for per-cpu data and the mem_map array.
                 *
                 * Since the per-cpu data requires special homecaching,
                 * if we are in kdata_huge mode, we put it at the end of
                 * the lowmem region.  If we're not in kdata_huge mode,
                 * we take the per-cpu pages from the bottom of the
                 * controller, since that avoids fragmenting a huge page
                 * that users might want.  We always take the memmap
                 * from the bottom of the controller.  For isolated
                 * nodes (other than node zero) the memmap is allocated
                 * from node zero instead, and no per-cpu pages are
                 * expected there.
                 */
                if (i != 0 && node_isset(i, isolnodes)) {
                        node_memmap_pfn[i] =
                                alloc_bootmem_pfn(0, memmap_size, 0);
                        BUG_ON(node_percpu[i] != 0);
                } else if (node_has_bootmem(i)) {
                        unsigned long goal = 0;
                        node_memmap_pfn[i] =
                                alloc_bootmem_pfn(i, memmap_size, 0);
                        if (kdata_huge)
                                goal = PFN_PHYS(lowmem_end) - node_percpu[i];
                        if (node_percpu[i])
                                node_percpu_pfn[i] =
                                        alloc_bootmem_pfn(i, node_percpu[i],
                                                          goal);
                } else {
                        /* In non-bootmem zones, just reserve some pages. */
                        node_memmap_pfn[i] = node_free_pfn[i];
                        node_free_pfn[i] += PFN_UP(memmap_size);
                        if (!kdata_huge) {
                                node_percpu_pfn[i] = node_free_pfn[i];
                                node_free_pfn[i] += PFN_UP(node_percpu[i]);
                        } else {
                                node_percpu_pfn[i] =
                                        lowmem_end - PFN_UP(node_percpu[i]);
                        }
                }

#ifdef CONFIG_HIGHMEM
                if (start > lowmem_end) {
                        zones_size[ZONE_NORMAL] = 0;
                        zones_size[ZONE_HIGHMEM] = end - start;
                } else {
                        zones_size[ZONE_NORMAL] = lowmem_end - start;
                        zones_size[ZONE_HIGHMEM] = end - lowmem_end;
                }
#else
                zones_size[ZONE_NORMAL] = end - start;
#endif

                if (start < dma_end) {
                        zones_size[ZONE_DMA] = min(zones_size[ZONE_NORMAL],
                                                   dma_end - start);
                        zones_size[ZONE_NORMAL] -= zones_size[ZONE_DMA];
                } else {
                        zones_size[ZONE_DMA] = 0;
                }

                /* Take zone metadata from controller 0 if we're isolnode. */
                if (node_isset(i, isolnodes))
                        NODE_DATA(i)->bdata = &bootmem_node_data[0];

                free_area_init_node(i, zones_size, start, NULL);
                printk(KERN_DEBUG "  Normal zone: %ld per-cpu pages\n",
                       PFN_UP(node_percpu[i]));

                /* Track the type of memory on each node */
                if (zones_size[ZONE_NORMAL] || zones_size[ZONE_DMA])
                        node_set_state(i, N_NORMAL_MEMORY);
#ifdef CONFIG_HIGHMEM
                if (end != start)
                        node_set_state(i, N_HIGH_MEMORY);
#endif

                node_set_online(i);
        }
}

#ifdef CONFIG_NUMA

/* which logical CPUs are on which nodes */
struct cpumask node_2_cpu_mask[MAX_NUMNODES] __write_once;
EXPORT_SYMBOL(node_2_cpu_mask);

/* which node each logical CPU is on */
char cpu_2_node[NR_CPUS] __write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(cpu_2_node);

/* Return cpu_to_node() except for cpus not yet assigned, which return -1 */
static int __init cpu_to_bound_node(int cpu, struct cpumask *unbound_cpus)
{
        if (!cpu_possible(cpu) || cpumask_test_cpu(cpu, unbound_cpus))
                return -1;
        else
                return cpu_to_node(cpu);
}

/* Return number of immediately-adjacent tiles sharing the given node. */
static int __init node_neighbors(int node, int cpu,
                                 struct cpumask *unbound_cpus)
{
        int neighbors = 0;
        int w = smp_width;
        int h = smp_height;
        int x = cpu % w;
        int y = cpu / w;
        if (x > 0 && cpu_to_bound_node(cpu-1, unbound_cpus) == node)
                ++neighbors;
        if (x < w-1 && cpu_to_bound_node(cpu+1, unbound_cpus) == node)
                ++neighbors;
        if (y > 0 && cpu_to_bound_node(cpu-w, unbound_cpus) == node)
                ++neighbors;
        if (y < h-1 && cpu_to_bound_node(cpu+w, unbound_cpus) == node)
                ++neighbors;
        return neighbors;
}

static void __init setup_numa_mapping(void)
{
        int distance[MAX_NUMNODES][NR_CPUS];
        HV_Coord coord;
        int cpu, node, cpus, i, x, y;
        int num_nodes = num_online_nodes();
        struct cpumask unbound_cpus;
        nodemask_t default_nodes;

        cpumask_clear(&unbound_cpus);

        /* Get set of nodes we will use for defaults */
        nodes_andnot(default_nodes, node_online_map, isolnodes);
        if (nodes_empty(default_nodes)) {
                BUG_ON(!node_isset(0, node_online_map));
                pr_err("Forcing NUMA node zero available as a default node\n");
                node_set(0, default_nodes);
        }

        /* Populate the distance[] array */
        memset(distance, -1, sizeof(distance));
        cpu = 0;
        for (coord.y = 0; coord.y < smp_height; ++coord.y) {
                for (coord.x = 0; coord.x < smp_width;
                     ++coord.x, ++cpu) {
                        BUG_ON(cpu >= nr_cpu_ids);
                        if (!cpu_possible(cpu)) {
                                cpu_2_node[cpu] = -1;
                                continue;
                        }
                        for_each_node_mask(node, default_nodes) {
                                HV_MemoryControllerInfo info =
                                        hv_inquire_memory_controller(
                                                coord, node_controller[node]);
                                distance[node][cpu] =
                                        ABS(info.coord.x) + ABS(info.coord.y);
                        }
                        cpumask_set_cpu(cpu, &unbound_cpus);
                }
        }
        cpus = cpu;

        /*
         * Round-robin through the NUMA nodes until all the cpus are
         * assigned.  We could be more clever here (e.g. create sorted
         * linked lists on the same set of cpus, and pull off them in
         * round-robin sequence, removing from all the lists each time)
         * but given the relatively small numbers involved, O(n^2)
         * seems OK for a one-time cost.
         */
        node = first_node(default_nodes);
        while (!cpumask_empty(&unbound_cpus)) {
                int best_cpu = -1;
                int best_distance = INT_MAX;
                for (cpu = 0; cpu < cpus; ++cpu) {
                        if (cpumask_test_cpu(cpu, &unbound_cpus)) {
                                /*
                                 * Compute metric, which is how much
                                 * closer the cpu is to this memory
                                 * controller than the others, scaled
                                 * up, minus the number of neighbors
                                 * already bound to the node, to favor
                                 * compact assignments.
                                 */
                                int d = distance[node][cpu] * num_nodes;
                                for_each_node_mask(i, default_nodes) {
                                        if (i != node)
                                                d -= distance[i][cpu];
                                }
                                d *= 8; /* neighbor count only breaks ties */
                                d -= node_neighbors(node, cpu, &unbound_cpus);
                                if (d < best_distance) {
                                        best_cpu = cpu;
                                        best_distance = d;
                                }
                        }
                }
                BUG_ON(best_cpu < 0);
                cpumask_set_cpu(best_cpu, &node_2_cpu_mask[node]);
                cpu_2_node[best_cpu] = node;
                cpumask_clear_cpu(best_cpu, &unbound_cpus);
                node = next_node(node, default_nodes);
                if (node == MAX_NUMNODES)
                        node = first_node(default_nodes);
        }

        /* Print out node assignments and set defaults for disabled cpus */
        cpu = 0;
        for (y = 0; y < smp_height; ++y) {
                printk(KERN_DEBUG "NUMA cpu-to-node row %d:", y);
                for (x = 0; x < smp_width; ++x, ++cpu) {
                        if (cpu_to_node(cpu) < 0) {
                                pr_cont(" -");
                                cpu_2_node[cpu] = first_node(default_nodes);
                        } else {
                                pr_cont(" %d", cpu_to_node(cpu));
                        }
                }
                pr_cont("\n");
        }
}

static struct cpu cpu_devices[NR_CPUS];

static int __init topology_init(void)
{
        int i;

        for_each_online_node(i)
                register_one_node(i);

        for (i = 0; i < smp_height * smp_width; ++i)
                register_cpu(&cpu_devices[i], i);

        return 0;
}

subsys_initcall(topology_init);

#else /* !CONFIG_NUMA */

#define setup_numa_mapping() do { } while (0)

#endif /* CONFIG_NUMA */

/*
 * Initialize hugepage support on this cpu.  We do this on all cores
 * early in boot: before argument parsing for the boot cpu, and after
 * argument parsing but before the init functions run on the secondaries.
 * So the values we set up here in the hypervisor may be overridden on
 * the boot cpu as arguments are parsed.
 */
static void init_super_pages(void)
{
#ifdef CONFIG_HUGETLB_SUPER_PAGES
        int i;
        for (i = 0; i < HUGE_SHIFT_ENTRIES; ++i)
                hv_set_pte_super_shift(i, huge_shift[i]);
#endif
}

/**
 * setup_cpu() - Do all necessary per-cpu, tile-specific initialization.
 * @boot: Is this the boot cpu?
 *
 * Called from setup_arch() on the boot cpu, or online_secondary().
 */
void setup_cpu(int boot)
{
        /* The boot cpu sets up its permanent mappings much earlier. */
        if (!boot)
                store_permanent_mappings();

        /* Allow asynchronous TLB interrupts. */
#if CHIP_HAS_TILE_DMA()
        arch_local_irq_unmask(INT_DMATLB_MISS);
        arch_local_irq_unmask(INT_DMATLB_ACCESS);
#endif
#ifdef __tilegx__
        arch_local_irq_unmask(INT_SINGLE_STEP_K);
#endif

        /*
         * Allow user access to many generic SPRs, like the cycle
         * counter, PASS/FAIL/DONE, INTERRUPT_CRITICAL_SECTION, etc.
         */
        __insn_mtspr(SPR_MPL_WORLD_ACCESS_SET_0, 1);

#if CHIP_HAS_SN()
        /* Static network is not restricted. */
        __insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1);
#endif

        /*
         * Set the MPL for interrupt control 0 & 1 to the corresponding
         * values.  This includes access to the SYSTEM_SAVE and EX_CONTEXT
         * SPRs, as well as the interrupt mask.
         */
        __insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
        __insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1);

        /* Initialize IRQ support for this cpu. */
        setup_irq_regs();

#ifdef CONFIG_HARDWALL
        /* Reset the network state on this cpu. */
        reset_network_state();
#endif

        init_super_pages();
}

#ifdef CONFIG_BLK_DEV_INITRD

static int __initdata set_initramfs_file;
static char __initdata initramfs_file[128] = "initramfs";

static int __init setup_initramfs_file(char *str)
{
        if (str == NULL)
                return -EINVAL;
        strncpy(initramfs_file, str, sizeof(initramfs_file) - 1);
        set_initramfs_file = 1;

        return 0;
}
early_param("initramfs_file", setup_initramfs_file);
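
/*
 * Example (hypothetical name): "initramfs_file=initramfs-3.14.cpio.gz"
 * selects an alternate image in the hypervisor file system; names longer
 * than 127 characters are truncated.
 */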

/*
 * We look for a file called "initramfs" in the hvfs.  If there is one,
 * we allocate some memory for it and it will be unpacked to the
 * initramfs.
 */
static void __init load_hv_initrd(void)
{
        HV_FS_StatInfo stat;
        int fd, rc;
        void *initrd;

        /* If initrd has already been set, skip initramfs file in hvfs. */
        if (initrd_start)
                return;

        fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
        if (fd == HV_ENOENT) {
                if (set_initramfs_file) {
                        pr_warn("No such hvfs initramfs file '%s'\n",
                                initramfs_file);
                        return;
                } else {
                        /* Try old backwards-compatible name. */
                        fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz");
                        if (fd == HV_ENOENT)
                                return;
                }
        }
        BUG_ON(fd < 0);
        stat = hv_fs_fstat(fd);
        BUG_ON(stat.size < 0);
        if (stat.flags & HV_FS_ISDIR) {
                pr_warn("Ignoring hvfs file '%s': it's a directory\n",
                        initramfs_file);
                return;
        }
        initrd = alloc_bootmem_pages(stat.size);
        rc = hv_fs_pread(fd, (HV_VirtAddr) initrd, stat.size, 0);
        if (rc != stat.size) {
                pr_err("Error reading %d bytes from hvfs file '%s': %d\n",
                       stat.size, initramfs_file, rc);
                free_initrd_mem((unsigned long) initrd, stat.size);
                return;
        }
        initrd_start = (unsigned long) initrd;
        initrd_end = initrd_start + stat.size;
}

void __init free_initrd_mem(unsigned long begin, unsigned long end)
{
        free_bootmem(__pa(begin), end - begin);
}

static int __init setup_initrd(char *str)
{
        char *endp;
        unsigned long initrd_size;

        initrd_size = str ? simple_strtoul(str, &endp, 0) : 0;
        if (initrd_size == 0 || *endp != '@')
                return -EINVAL;

        initrd_start = simple_strtoul(endp+1, &endp, 0);
        if (initrd_start == 0)
                return -EINVAL;

        initrd_end = initrd_start + initrd_size;

        return 0;
}
early_param("initrd", setup_initrd);
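
/*
 * Example (hypothetical values): "initrd=0x1000000@0x1f000000" describes
 * an image the boot loader already placed in memory, as a size, '@', and
 * the physical load address; both numbers are parsed by simple_strtoul()
 * and so do not take K/M/G suffixes.
 */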

#else
static inline void load_hv_initrd(void) {}
#endif /* CONFIG_BLK_DEV_INITRD */

static void __init validate_hv(void)
{
        /*
         * It may already be too late, but let's check our built-in
         * configuration against what the hypervisor is providing.
         */
        unsigned long glue_size = hv_sysconf(HV_SYSCONF_GLUE_SIZE);
        int hv_page_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL);
        int hv_hpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE);
        HV_ASIDRange asid_range;

#ifndef CONFIG_SMP
        HV_Topology topology = hv_inquire_topology();
        BUG_ON(topology.coord.x != 0 || topology.coord.y != 0);
        if (topology.width != 1 || topology.height != 1) {
                pr_warn("Warning: booting UP kernel on %dx%d grid; will ignore all but first tile\n",
                        topology.width, topology.height);
        }
#endif

        if (PAGE_OFFSET + HV_GLUE_START_CPA + glue_size > (unsigned long)_text)
                early_panic("Hypervisor glue size %ld is too big!\n",
                            glue_size);
        if (hv_page_size != PAGE_SIZE)
                early_panic("Hypervisor page size %#x != our %#lx\n",
                            hv_page_size, PAGE_SIZE);
        if (hv_hpage_size != HPAGE_SIZE)
                early_panic("Hypervisor huge page size %#x != our %#lx\n",
                            hv_hpage_size, HPAGE_SIZE);

#ifdef CONFIG_SMP
        /*
         * Some hypervisor APIs take a pointer to a bitmap array
         * whose size is at least the number of cpus on the chip.
         * We use a struct cpumask for this, so it must be big enough.
         */
        if ((smp_height * smp_width) > nr_cpu_ids)
                early_panic("Hypervisor %d x %d grid too big for Linux NR_CPUS %d\n",
                            smp_height, smp_width, nr_cpu_ids);
#endif

        /*
         * Check that we're using allowed ASIDs, and initialize the
         * various asid variables to their appropriate initial states.
         */
        asid_range = hv_inquire_asid(0);
        min_asid = asid_range.start;
        __this_cpu_write(current_asid, min_asid);
        max_asid = asid_range.start + asid_range.size - 1;

        if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model,
                       sizeof(chip_model)) < 0) {
                pr_err("Warning: HV_CONFSTR_CHIP_MODEL not available\n");
                strlcpy(chip_model, "unknown", sizeof(chip_model));
        }
}

static void __init validate_va(void)
{
#ifndef __tilegx__
        /*
         * Similarly, make sure we're only using allowed VAs.
         * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_START,
         * and 0 .. KERNEL_HIGH_VADDR.
         * In addition, make sure we CAN'T use the end of memory, since
         * we use the last chunk of each pgd for the pgd_list.
         */
        int i, user_kernel_ok = 0;
        unsigned long max_va = 0;
        unsigned long list_va =
                ((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT);

        for (i = 0; ; ++i) {
                HV_VirtAddrRange range = hv_inquire_virtual(i);
                if (range.size == 0)
                        break;
                if (range.start <= MEM_USER_INTRPT &&
                    range.start + range.size >= MEM_HV_START)
                        user_kernel_ok = 1;
                if (range.start == 0)
                        max_va = range.size;
                BUG_ON(range.start + range.size > list_va);
        }
        if (!user_kernel_ok)
                early_panic("Hypervisor not configured for user/kernel VAs\n");
        if (max_va == 0)
                early_panic("Hypervisor not configured for low VAs\n");
        if (max_va < KERNEL_HIGH_VADDR)
                early_panic("Hypervisor max VA %#lx smaller than %#lx\n",
                            max_va, KERNEL_HIGH_VADDR);

        /* Kernel PCs must have their high bit set; see intvec.S. */
        if ((long)VMALLOC_START >= 0)
                early_panic("Linux VMALLOC region below the 2GB line (%#lx)!\n"
                            "Reconfigure the kernel with smaller VMALLOC_RESERVE\n",
                            VMALLOC_START);
#endif
}

/*
 * cpu_lotar_map lists all the cpus that are valid for the supervisor
 * to cache data on at a page level, i.e. what cpus can be placed in
 * the LOTAR field of a PTE.  It is equivalent to the set of possible
 * cpus plus any other cpus that are willing to share their cache.
 * It is set by hv_inquire_tiles(HV_INQ_TILES_LOTAR).
 */
struct cpumask __write_once cpu_lotar_map;
EXPORT_SYMBOL(cpu_lotar_map);

/*
 * hash_for_home_map lists all the tiles that hash-for-home data
 * will be cached on.  Note that this may include tiles that are not
 * valid for this supervisor to use otherwise (e.g. if a hypervisor
 * device is being shared between multiple supervisors).
 * It is set by hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE).
 */
struct cpumask hash_for_home_map;
EXPORT_SYMBOL(hash_for_home_map);

/*
 * cpu_cacheable_map lists all the cpus whose caches the hypervisor can
 * flush on our behalf.  It is set to cpu_possible_mask OR'ed with
 * hash_for_home_map, and it is what should be passed to
 * hv_flush_remote() when flushing all caches.
 */
struct cpumask __write_once cpu_cacheable_map;
EXPORT_SYMBOL(cpu_cacheable_map);

static __initdata struct cpumask disabled_map;

static int __init disabled_cpus(char *str)
{
        int boot_cpu = smp_processor_id();

        if (str == NULL || cpulist_parse_crop(str, &disabled_map) != 0)
                return -EINVAL;
        if (cpumask_test_cpu(boot_cpu, &disabled_map)) {
                pr_err("disabled_cpus: can't disable boot cpu %d\n", boot_cpu);
                cpumask_clear_cpu(boot_cpu, &disabled_map);
        }
        return 0;
}

early_param("disabled_cpus", disabled_cpus);
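
/*
 * Example (hypothetical value): "disabled_cpus=2,4-7" parses a standard
 * cpu list; any attempt to disable the boot cpu itself is ignored.
 */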

void __init print_disabled_cpus(void)
{
        if (!cpumask_empty(&disabled_map))
                pr_info("CPUs not available for Linux: %*pbl\n",
                        cpumask_pr_args(&disabled_map));
}

static void __init setup_cpu_maps(void)
{
        struct cpumask hv_disabled_map, cpu_possible_init;
        int boot_cpu = smp_processor_id();
        int cpus, i, rc;

        /* Learn which cpus are allowed by the hypervisor. */
        rc = hv_inquire_tiles(HV_INQ_TILES_AVAIL,
                              (HV_VirtAddr) cpumask_bits(&cpu_possible_init),
                              sizeof(cpu_possible_init));
        if (rc < 0)
                early_panic("hv_inquire_tiles(AVAIL) failed: rc %d\n", rc);
        if (!cpumask_test_cpu(boot_cpu, &cpu_possible_init))
                early_panic("Boot CPU %d disabled by hypervisor!\n", boot_cpu);

        /* Compute the cpus disabled by the hvconfig file. */
        cpumask_complement(&hv_disabled_map, &cpu_possible_init);

        /* Include them with the cpus disabled by "disabled_cpus". */
        cpumask_or(&disabled_map, &disabled_map, &hv_disabled_map);

        /*
         * Disable every cpu after "setup_max_cpus".  But don't mark
         * as disabled the cpus that are outside of our initial rectangle,
         * since that turns out to be confusing.
         */
        cpus = 1;                       /* this cpu must always be counted */
        cpumask_set_cpu(boot_cpu, &disabled_map);
        for (i = 0; cpus < setup_max_cpus; ++i)
                if (!cpumask_test_cpu(i, &disabled_map))
                        ++cpus;
        for (; i < smp_height * smp_width; ++i)
                cpumask_set_cpu(i, &disabled_map);
        cpumask_clear_cpu(boot_cpu, &disabled_map);
        for (i = smp_height * smp_width; i < NR_CPUS; ++i)
                cpumask_clear_cpu(i, &disabled_map);

        /*
         * Setup cpu_possible map as every cpu allocated to us, minus
         * the results of any "disabled_cpus" settings.
         */
        cpumask_andnot(&cpu_possible_init, &cpu_possible_init, &disabled_map);
        init_cpu_possible(&cpu_possible_init);

        /* Learn which cpus are valid for LOTAR caching. */
        rc = hv_inquire_tiles(HV_INQ_TILES_LOTAR,
                              (HV_VirtAddr) cpumask_bits(&cpu_lotar_map),
                              sizeof(cpu_lotar_map));
        if (rc < 0) {
                pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n");
                cpu_lotar_map = *cpu_possible_mask;
        }

        /* Retrieve set of CPUs used for hash-for-home caching */
        rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE,
                              (HV_VirtAddr) hash_for_home_map.bits,
                              sizeof(hash_for_home_map));
        if (rc < 0)
                early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
        cpumask_or(&cpu_cacheable_map, cpu_possible_mask, &hash_for_home_map);
}

/* "dataplane" support is not compiled in; warn if the argument is used. */
static int __init dataplane(char *str)
{
        pr_warn("WARNING: dataplane support disabled in this kernel\n");
        return 0;
}

early_param("dataplane", dataplane);

#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

void __init setup_arch(char **cmdline_p)
{
        int len;

#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
        len = hv_get_command_line((HV_VirtAddr) boot_command_line,
                                  COMMAND_LINE_SIZE);
        if (boot_command_line[0])
                pr_warn("WARNING: ignoring dynamic command line \"%s\"\n",
                        boot_command_line);
        strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
        char *hv_cmdline;
#if defined(CONFIG_CMDLINE_BOOL)
        if (builtin_cmdline[0]) {
                int builtin_len = strlcpy(boot_command_line, builtin_cmdline,
                                          COMMAND_LINE_SIZE);
                if (builtin_len < COMMAND_LINE_SIZE-1)
                        boot_command_line[builtin_len++] = ' ';
                hv_cmdline = &boot_command_line[builtin_len];
                len = COMMAND_LINE_SIZE - builtin_len;
        } else
#endif
        {
                hv_cmdline = boot_command_line;
                len = COMMAND_LINE_SIZE;
        }
        len = hv_get_command_line((HV_VirtAddr) hv_cmdline, len);
        if (len < 0 || len > COMMAND_LINE_SIZE)
                early_panic("hv_get_command_line failed: %d\n", len);
#endif

        *cmdline_p = boot_command_line;

        /* Set disabled_map and setup_max_cpus very early */
        parse_early_param();

        /* Make sure the kernel is compatible with the hypervisor. */
        validate_hv();
        validate_va();

        setup_cpu_maps();

#if defined(CONFIG_PCI) && !defined(__tilegx__)
        /*
         * Initialize the PCI structures.  This is done before memory
         * setup so that we know whether or not a pci_reserve region
         * is necessary.
         */
        if (tile_pci_init() == 0)
                pci_reserve_mb = 0;

        /* PCI systems reserve a region just below 4GB for mapping iomem. */
        pci_reserve_end_pfn = (1 << (32 - PAGE_SHIFT));
        pci_reserve_start_pfn = pci_reserve_end_pfn -
                (pci_reserve_mb << (20 - PAGE_SHIFT));
#endif

        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code = (unsigned long) _etext;
        init_mm.end_data = (unsigned long) _edata;
        init_mm.brk = (unsigned long) _end;

        setup_memory();
        store_permanent_mappings();
        setup_bootmem_allocator();

        /*
         * NOTE: before this point _nobody_ is allowed to allocate
         * any memory using the bootmem allocator.
         */

#ifdef CONFIG_SWIOTLB
        swiotlb_init(0);
#endif

        paging_init();
        setup_numa_mapping();
        zone_sizes_init();
        set_page_homes();
        setup_cpu(1);
        setup_clock();
        load_hv_initrd();
}

/*
 * Set up per-cpu memory.
 */

unsigned long __per_cpu_offset[NR_CPUS] __write_once;
EXPORT_SYMBOL(__per_cpu_offset);

static size_t __initdata pfn_offset[MAX_NUMNODES] = { 0 };
static unsigned long __initdata percpu_pfn[NR_CPUS] = { 0 };

/*
 * As the percpu code allocates pages, we return the pages from the
 * end of the node for the specified cpu.
 */
static void *__init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
        int nid = cpu_to_node(cpu);
        unsigned long pfn = node_percpu_pfn[nid] + pfn_offset[nid];

        BUG_ON(size % PAGE_SIZE != 0);
        pfn_offset[nid] += size / PAGE_SIZE;
        BUG_ON(node_percpu[nid] < size);
        node_percpu[nid] -= size;
        if (percpu_pfn[cpu] == 0)
                percpu_pfn[cpu] = pfn;
        return pfn_to_kaddr(pfn);
}

/*
 * Pages reserved for percpu memory are not freeable, and in any case we are
 * on a short path to panic() in setup_per_cpu_areas() at this point anyway.
 */
static void __init pcpu_fc_free(void *ptr, size_t size)
{
}

/*
 * Set up vmalloc page tables using bootmem for the percpu code.
 */
static void __init pcpu_fc_populate_pte(unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        BUG_ON(pgd_addr_invalid(addr));
        if (addr < VMALLOC_START || addr >= VMALLOC_END)
                panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx; try increasing CONFIG_VMALLOC_RESERVE\n",
                      addr, VMALLOC_START, VMALLOC_END);

        pgd = swapper_pg_dir + pgd_index(addr);
        pud = pud_offset(pgd, addr);
        BUG_ON(!pud_present(*pud));
        pmd = pmd_offset(pud, addr);
        if (pmd_present(*pmd)) {
                BUG_ON(pmd_huge_page(*pmd));
        } else {
                pte = __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE,
                                      HV_PAGE_TABLE_ALIGN, 0);
                pmd_populate_kernel(&init_mm, pmd, pte);
        }
}

void __init setup_per_cpu_areas(void)
{
        struct page *pg;
        unsigned long delta, pfn, lowmem_va;
        unsigned long size = percpu_size();
        char *ptr;
        int rc, cpu, i;

        rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_fc_alloc,
                                   pcpu_fc_free, pcpu_fc_populate_pte);
        if (rc < 0)
                panic("Cannot initialize percpu area (err=%d)", rc);

        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
        for_each_possible_cpu(cpu) {
                __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];

                /* finv the copy out of cache so we can change homecache */
                ptr = pcpu_base_addr + pcpu_unit_offsets[cpu];
                __finv_buffer(ptr, size);
                pfn = percpu_pfn[cpu];

                /* Rewrite the page tables to cache on that cpu */
                pg = pfn_to_page(pfn);
                for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) {

                        /* Update the vmalloc mapping and page home. */
                        unsigned long addr = (unsigned long)ptr + i;
                        pte_t *ptep = virt_to_kpte(addr);
                        pte_t pte = *ptep;
                        BUG_ON(pfn != pte_pfn(pte));
                        pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
                        pte = set_remote_cache_cpu(pte, cpu);
                        set_pte_at(&init_mm, addr, ptep, pte);

                        /* Update the lowmem mapping for consistency. */
                        lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
                        ptep = virt_to_kpte(lowmem_va);
                        if (pte_huge(*ptep)) {
                                printk(KERN_DEBUG "early shatter of huge page at %#lx\n",
                                       lowmem_va);
                                shatter_pmd((pmd_t *)ptep);
                                ptep = virt_to_kpte(lowmem_va);
                                BUG_ON(pte_huge(*ptep));
                        }
                        BUG_ON(pfn != pte_pfn(*ptep));
                        set_pte_at(&init_mm, lowmem_va, ptep, pte);
                }
        }

        /* Set our thread pointer appropriately. */
        set_my_cpu_offset(__per_cpu_offset[smp_processor_id()]);

        /* Make sure the finv's have completed. */
        mb_incoherent();

        /* Flush the TLB so we reference it properly from here on out. */
        local_flush_tlb_all();
}

static struct resource data_resource = {
        .name   = "Kernel data",
        .start  = 0,
        .end    = 0,
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
        .name   = "Kernel code",
        .start  = 0,
        .end    = 0,
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
};

/*
 * On Pro, we reserve all resources above 4GB so that PCI won't try to put
 * mappings above 4GB.
 */
#if defined(CONFIG_PCI) && !defined(__tilegx__)
static struct resource *__init
insert_non_bus_resource(void)
{
        struct resource *res =
                kzalloc(sizeof(struct resource), GFP_ATOMIC);
        if (!res)
                return NULL;
        res->name = "Non-Bus Physical Address Space";
        res->start = (1ULL << 32);
        res->end = -1LL;
        res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
        if (insert_resource(&iomem_resource, res)) {
                kfree(res);
                return NULL;
        }
        return res;
}
#endif

static struct resource *__init
insert_ram_resource(u64 start_pfn, u64 end_pfn, bool reserved)
{
        struct resource *res =
                kzalloc(sizeof(struct resource), GFP_ATOMIC);
        if (!res)
                return NULL;
        res->name = reserved ? "Reserved" : "System RAM";
        res->start = start_pfn << PAGE_SHIFT;
        res->end = (end_pfn << PAGE_SHIFT) - 1;
        res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
        if (insert_resource(&iomem_resource, res)) {
                kfree(res);
                return NULL;
        }
        return res;
}

/*
 * Request address space for all standard resources
 *
 * If the system includes PCI root complex drivers, we need to create
 * a window just below 4GB where PCI BARs can be mapped.
 */
static int __init request_standard_resources(void)
{
        int i;
        enum { CODE_DELTA = MEM_SV_START - PAGE_OFFSET };

#if defined(CONFIG_PCI) && !defined(__tilegx__)
        insert_non_bus_resource();
#endif

        for_each_online_node(i) {
                u64 start_pfn = node_start_pfn[i];
                u64 end_pfn = node_end_pfn[i];

#if defined(CONFIG_PCI) && !defined(__tilegx__)
                if (start_pfn <= pci_reserve_start_pfn &&
                    end_pfn > pci_reserve_start_pfn) {
                        if (end_pfn > pci_reserve_end_pfn)
                                insert_ram_resource(pci_reserve_end_pfn,
                                                    end_pfn, 0);
                        end_pfn = pci_reserve_start_pfn;
                }
#endif
                insert_ram_resource(start_pfn, end_pfn, 0);
        }

        code_resource.start = __pa(_text - CODE_DELTA);
        code_resource.end = __pa(_etext - CODE_DELTA)-1;
        data_resource.start = __pa(_sdata);
        data_resource.end = __pa(_end)-1;

        insert_resource(&iomem_resource, &code_resource);
        insert_resource(&iomem_resource, &data_resource);

        /* Mark any "memmap" regions busy for the resource manager. */
        for (i = 0; i < memmap_nr; ++i) {
                struct memmap_entry *m = &memmap_map[i];
                insert_ram_resource(PFN_DOWN(m->addr),
                                    PFN_UP(m->addr + m->size - 1), 1);
        }

#ifdef CONFIG_KEXEC
        insert_resource(&iomem_resource, &crashk_res);
#endif

        return 0;
}

subsys_initcall(request_standard_resources);