/*
 * Early machine setup for the TILE architecture: physical memory
 * probing and layout, bootmem and per-cpu area setup, NUMA mapping,
 * and sanity checks against the hypervisor configuration.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/node.h>
#include <linux/cpu.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/pci.h>
#include <linux/swiotlb.h>
#include <linux/initrd.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/timex.h>
#include <linux/hugetlb.h>
#include <linux/start_kernel.h>
#include <linux/screen_info.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <hv/hypervisor.h>
#include <arch/interrupts.h>

/* <linux/smp.h> defines setup_max_cpus only for SMP builds. */
#ifndef CONFIG_SMP
#define setup_max_cpus 1
#endif

/* Local absolute-value helper for small ints. */
static inline int ABS(int x) { return x >= 0 ? x : -x; }

/* Chip model string; filled in from the hypervisor in validate_hv(). */
char chip_model[64] __write_once;

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

/* Per-node memory ranges and early allocation bookkeeping. */
unsigned long node_start_pfn[MAX_NUMNODES];
unsigned long node_end_pfn[MAX_NUMNODES];
unsigned long __initdata node_memmap_pfn[MAX_NUMNODES];
unsigned long __initdata node_percpu_pfn[MAX_NUMNODES];
unsigned long __initdata node_free_pfn[MAX_NUMNODES];

static unsigned long __initdata node_percpu[MAX_NUMNODES];

/*
 * Initial stack pointer and program counter for a cpu entering the
 * kernel.  On UP there are no secondary cpus to start, so a single
 * __initdata boot_pc value suffices.
 */
DEFINE_PER_CPU(unsigned long, boot_sp) =
	(unsigned long)init_stack + THREAD_SIZE;

#ifdef CONFIG_SMP
DEFINE_PER_CPU(unsigned long, boot_pc) = (unsigned long)start_kernel;
#else
unsigned long __initdata boot_pc = (unsigned long)start_kernel;
#endif

#ifdef CONFIG_HIGHMEM
/* First pfn on each node that is not mapped in lowmem. */
unsigned long node_lowmem_end_pfn[MAX_NUMNODES];

/* Pages that can be mapped into lowmem (i.e. not highmem-only). */
static unsigned long __initdata mappable_physpages;
#endif

/* Which hypervisor memory controller backs each node (-1 if none). */
int node_controller[MAX_NUMNODES] = { [0 ... MAX_NUMNODES-1] = -1 };

#ifdef CONFIG_HIGHMEM
/* Map from lowmem VA (in huge-page units) to the pfn mapped there. */
unsigned long pbase_map[1 << (32 - HPAGE_SHIFT)]
	__write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(pbase_map);

/* Map from the high bits of a PA to the lowmem VA where it is mapped. */
void *vbase_map[NR_PA_HIGHBIT_VALUES]
	__write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(vbase_map);
#endif

/* Map from the high bits of a PA to the node that owns that range. */
int highbits_to_node[NR_PA_HIGHBIT_VALUES] __write_once;
EXPORT_SYMBOL(highbits_to_node);

static unsigned int __initdata maxmem_pfn = -1U;
static unsigned int __initdata maxnodemem_pfn[MAX_NUMNODES] = {
	[0 ... MAX_NUMNODES-1] = -1U
};
static nodemask_t __initdata isolnodes;

#if defined(CONFIG_PCI) && !defined(__tilegx__)
enum { DEFAULT_PCI_RESERVE_MB = 64 };
static unsigned int __initdata pci_reserve_mb = DEFAULT_PCI_RESERVE_MB;
unsigned long __initdata pci_reserve_start_pfn = -1U;
unsigned long __initdata pci_reserve_end_pfn = -1U;
#endif

static int __init setup_maxmem(char *str)
{
	unsigned long long maxmem;
	if (str == NULL || (maxmem = memparse(str, NULL)) == 0)
		return -EINVAL;

	maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT);
	pr_info("Forcing RAM used to no more than %dMB\n",
		maxmem_pfn >> (20 - PAGE_SHIFT));
	return 0;
}
early_param("maxmem", setup_maxmem);
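/*
 * For example, booting with "maxmem=768M" would cap usable RAM at
 * 768MB, rounded down to a whole number of huge pages.
 */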

static int __init setup_maxnodemem(char *str)
{
	char *endp;
	unsigned long long maxnodemem;
	long node;

	node = str ? simple_strtoul(str, &endp, 0) : INT_MAX;
	if (node >= MAX_NUMNODES || *endp != ':')
		return -EINVAL;

	maxnodemem = memparse(endp+1, NULL);
	maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) <<
		(HPAGE_SHIFT - PAGE_SHIFT);
	pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
		node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
	return 0;
}
early_param("maxnodemem", setup_maxnodemem);
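/*
 * For example, "maxnodemem=1:4G" would cap the memory used on
 * controller 1 at 4GB, and "maxnodemem=1:0" would disable it.
 */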

static int __init setup_isolnodes(char *str)
{
	char buf[MAX_NUMNODES * 5];
	if (str == NULL || nodelist_parse(str, isolnodes) != 0)
		return -EINVAL;

	nodelist_scnprintf(buf, sizeof(buf), isolnodes);
	pr_info("Set isolnodes value to '%s'\n", buf);
	return 0;
}
early_param("isolnodes", setup_isolnodes);
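/*
 * For example, "isolnodes=1-3" would exclude nodes 1, 2 and 3 from
 * the default allocation set (see setup_numa_mapping() below).
 */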

#if defined(CONFIG_PCI) && !defined(__tilegx__)
static int __init setup_pci_reserve(char *str)
{
	unsigned long mb;

	if (str == NULL || strict_strtoul(str, 0, &mb) != 0 ||
	    mb > 3 * 1024)
		return -EINVAL;

	pci_reserve_mb = mb;
	pr_info("Reserving %dMB for PCIE root complex mappings\n",
		pci_reserve_mb);
	return 0;
}
early_param("pci_reserve", setup_pci_reserve);
#endif
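/*
 * For example, "pci_reserve=128" would set aside 128MB of address
 * space just below 4GB for PCIe root complex mappings.
 */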

#ifndef __tilegx__
/*
 * "vmalloc=SIZE" resizes the vmalloc region, rounded up to a whole
 * number of pgdir entries.
 */
static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;

	VMALLOC_RESERVE = (memparse(arg, &arg) + PGDIR_SIZE - 1) & PGDIR_MASK;

	/* See validate_va() for more on this sign test. */
	if ((long)_VMALLOC_START >= 0)
		early_panic("\"vmalloc=%#lx\" value too large: maximum %#lx\n",
			    VMALLOC_RESERVE, _VMALLOC_END - 0x80000000UL);

	return 0;
}
early_param("vmalloc", parse_vmalloc);
#endif
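/*
 * For example, "vmalloc=512M" would reserve 512MB of address space
 * for vmalloc; a value too large to fit triggers an early panic.
 */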

#ifdef CONFIG_HIGHMEM
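/*
 * Build the maps between lowmem VAs and physical pages.  Each online
 * node gets a share of the lowmem VA space proportional to its size
 * (scaled down when the total mappable memory exceeds MAXMEM_PFN),
 * assigned in whole huge pages.  Isolated nodes other than node zero
 * are mapped only as highmem.  Returns the first VA above the mapped
 * lowmem, which becomes high_memory.
 */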
static void *__init setup_pa_va_mapping(void)
{
	unsigned long curr_pages = 0;
	unsigned long vaddr = PAGE_OFFSET;
	nodemask_t highonlynodes = isolnodes;
	int i, j;

	memset(pbase_map, -1, sizeof(pbase_map));
	memset(vbase_map, -1, sizeof(vbase_map));

	/* Node zero cannot be isolated for lowmem purposes. */
	node_clear(0, highonlynodes);

	/* Count up the number of pages on non-highonly nodes. */
	mappable_physpages = 0;
	for_each_online_node(i) {
		if (!node_isset(i, highonlynodes))
			mappable_physpages +=
				node_end_pfn[i] - node_start_pfn[i];
	}

	for_each_online_node(i) {
		unsigned long start = node_start_pfn[i];
		unsigned long end = node_end_pfn[i];
		unsigned long size = end - start;
		unsigned long vaddr_end;

		if (node_isset(i, highonlynodes)) {
			/* Mark this node as having no lowmem at all. */
			node_lowmem_end_pfn[i] = start;
			continue;
		}

		curr_pages += size;
		if (mappable_physpages > MAXMEM_PFN) {
			vaddr_end = PAGE_OFFSET +
				(((u64)curr_pages * MAXMEM_PFN /
				  mappable_physpages)
				 << PAGE_SHIFT);
		} else {
			vaddr_end = PAGE_OFFSET + (curr_pages << PAGE_SHIFT);
		}
		for (j = 0; vaddr < vaddr_end; vaddr += HPAGE_SIZE, ++j) {
			unsigned long this_pfn =
				start + (j << HUGETLB_PAGE_ORDER);
			pbase_map[vaddr >> HPAGE_SHIFT] = this_pfn;
			if (vbase_map[__pfn_to_highbits(this_pfn)] ==
			    (void *)-1)
				vbase_map[__pfn_to_highbits(this_pfn)] =
					(void *)(vaddr & HPAGE_MASK);
		}
		node_lowmem_end_pfn[i] = start + (j << HUGETLB_PAGE_ORDER);
		BUG_ON(node_lowmem_end_pfn[i] > end);
	}

	/* Return highest address of any mapped memory. */
	return (void *)vaddr;
}
#endif /* CONFIG_HIGHMEM */

/*
 * Describe each node's permanently-mapped lowmem range, and the
 * kernel text, to the hypervisor via hv_store_mapping().
 */
static void store_permanent_mappings(void)
{
	int i;

	for_each_online_node(i) {
		HV_PhysAddr pa = ((HV_PhysAddr)node_start_pfn[i]) << PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
		HV_PhysAddr high_mapped_pa = node_lowmem_end_pfn[i];
#else
		HV_PhysAddr high_mapped_pa = node_end_pfn[i];
#endif
		unsigned long pages = high_mapped_pa - node_start_pfn[i];
		HV_VirtAddr addr = (HV_VirtAddr) __va(pa);
		hv_store_mapping(addr, pages << PAGE_SHIFT, pa);
	}

	hv_store_mapping((HV_VirtAddr)_text,
			 (uint32_t)(_einittext - _text), 0);
}

/*
 * Use hypervisor inquiries to locate the ranges of physical memory,
 * apply the maxmem/maxnodemem limits, and compute each node's pfn
 * range along with the lowmem/highmem split.
 */
static void __init setup_memory(void)
{
	int i, j;
	int highbits_seen[NR_PA_HIGHBIT_VALUES] = { 0 };
#ifdef CONFIG_HIGHMEM
	long highmem_pages;
#endif
#ifndef __tilegx__
	int cap;
#endif
#if defined(CONFIG_HIGHMEM) || defined(__tilegx__)
	long lowmem_pages;
#endif
	unsigned long physpages = 0;

	/* cpu_2_node[] is a char array, so node ids must fit in a char. */
	BUILD_BUG_ON(MAX_NUMNODES > 127);

	/* Discover the ranges of memory available to us. */
	for (i = 0; ; ++i) {
		unsigned long start, size, end, highbits;
		HV_PhysAddrRange range = hv_inquire_physical(i);
		if (range.size == 0)
			break;
#ifdef CONFIG_FLATMEM
		if (i > 0) {
			pr_err("Can't use discontiguous PAs: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
#endif
#ifndef __tilegx__
		if ((unsigned long)range.start) {
			pr_err("Range not at 4GB multiple: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
#endif
		if ((range.start & (HPAGE_SIZE-1)) != 0 ||
		    (range.size & (HPAGE_SIZE-1)) != 0) {
			unsigned long long start_pa = range.start;
			unsigned long long orig_size = range.size;
			range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK;
			range.size -= (range.start - start_pa);
			range.size &= HPAGE_MASK;
			pr_err("Range not hugepage-aligned: %#llx..%#llx: now %#llx-%#llx\n",
			       start_pa, start_pa + orig_size,
			       range.start, range.start + range.size);
		}
		highbits = __pa_to_highbits(range.start);
		if (highbits >= NR_PA_HIGHBIT_VALUES) {
			pr_err("PA high bits too high: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
		if (highbits_seen[highbits]) {
			pr_err("Range overlaps in high bits: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
		highbits_seen[highbits] = 1;
		if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) {
			int max_size = maxnodemem_pfn[i];
			if (max_size > 0) {
				pr_err("Maxnodemem reduced node %d to %d pages\n",
				       i, max_size);
				range.size = PFN_PHYS(max_size);
			} else {
				pr_err("Maxnodemem disabled node %d\n", i);
				continue;
			}
		}
		if (physpages + PFN_DOWN(range.size) > maxmem_pfn) {
			int max_size = maxmem_pfn - physpages;
			if (max_size > 0) {
				pr_err("Maxmem reduced node %d to %d pages\n",
				       i, max_size);
				range.size = PFN_PHYS(max_size);
			} else {
				pr_err("Maxmem disabled node %d\n", i);
				continue;
			}
		}
		if (i >= MAX_NUMNODES) {
			pr_err("Too many PA nodes (#%d): %#llx...%#llx\n",
			       i, range.start, range.start + range.size);
			continue;
		}

		start = range.start >> PAGE_SHIFT;
		size = range.size >> PAGE_SHIFT;
		end = start + size;

#ifndef __tilegx__
		if (((HV_PhysAddr)end << PAGE_SHIFT) !=
		    (range.start + range.size)) {
			pr_err("PAs too high to represent: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
#endif
#if defined(CONFIG_PCI) && !defined(__tilegx__)
		/*
		 * Ranges that overlap the PCI reserved region must
		 * avoid it: if too little memory would remain above
		 * the region to be worth keeping (not even room for
		 * the per-cpu data), truncate the node at the start
		 * of the region instead.
		 */
		if (start <= pci_reserve_start_pfn &&
		    end > pci_reserve_start_pfn) {
			unsigned int per_cpu_size =
				__per_cpu_end - __per_cpu_start;
			unsigned int percpu_pages =
				NR_CPUS * (PFN_UP(per_cpu_size) >> PAGE_SHIFT);
			if (end < pci_reserve_end_pfn + percpu_pages) {
				end = pci_reserve_start_pfn;
				pr_err("PCI mapping region reduced node %d to %ld pages\n",
				       i, end - start);
			}
		}
#endif

		for (j = __pfn_to_highbits(start);
		     j <= __pfn_to_highbits(end - 1); j++)
			highbits_to_node[j] = i;

		node_start_pfn[i] = start;
		node_end_pfn[i] = end;
		node_controller[i] = range.controller;
		physpages += size;
		max_pfn = end;

		/* Mark node as online. */
		node_set(i, node_online_map);
		node_set(i, node_possible_map);
	}

#ifndef __tilegx__
	/*
	 * A 32-bit kernel can handle at most 8M pages of memory
	 * (32GB with 4KB pages); a larger page size raises the cap.
	 * Apportion the limit evenly across the online nodes.
	 */
	cap = 8 * 1024 * 1024;
	if (physpages > cap) {
		int num_nodes = num_online_nodes();
		int cap_each = cap / num_nodes;
		unsigned long dropped_pages = 0;
		for (i = 0; i < num_nodes; ++i) {
			int size = node_end_pfn[i] - node_start_pfn[i];
			if (size > cap_each) {
				dropped_pages += (size - cap_each);
				node_end_pfn[i] = node_start_pfn[i] + cap_each;
			}
		}
		physpages -= dropped_pages;
		pr_warning("Only using %ldMB memory; ignoring %ldMB.\n",
			   physpages >> (20 - PAGE_SHIFT),
			   dropped_pages >> (20 - PAGE_SHIFT));
		pr_warning("Consider using a larger page size.\n");
	}
#endif

	/* The kernel image itself occupies memory up to _end. */
	min_low_pfn = PFN_UP((unsigned long)_end - PAGE_OFFSET);

#ifdef CONFIG_HIGHMEM
	/* Set up the lowmem VA mappings and find where lowmem ends. */
	high_memory = setup_pa_va_mapping();

	/* Set max_low_pfn based on what node 0 can directly address. */
	max_low_pfn = node_lowmem_end_pfn[0];

	lowmem_pages = (mappable_physpages > MAXMEM_PFN) ?
		MAXMEM_PFN : mappable_physpages;
	highmem_pages = (long) (physpages - lowmem_pages);

	pr_notice("%ldMB HIGHMEM available.\n",
		  pages_to_mb(highmem_pages > 0 ? highmem_pages : 0));
	pr_notice("%ldMB LOWMEM available.\n",
		  pages_to_mb(lowmem_pages));
#else
	/* Set max_low_pfn based on what node 0 can directly address. */
	max_low_pfn = node_end_pfn[0];

#ifndef __tilegx__
	if (node_end_pfn[0] > MAXMEM_PFN) {
		pr_warning("Only using %ldMB LOWMEM.\n", MAXMEM>>20);
		pr_warning("Use a HIGHMEM enabled kernel.\n");
		max_low_pfn = MAXMEM_PFN;
		max_pfn = MAXMEM_PFN;
		node_end_pfn[0] = MAXMEM_PFN;
	} else {
		pr_notice("%ldMB memory available.\n",
			  pages_to_mb(node_end_pfn[0]));
	}
	for (i = 1; i < MAX_NUMNODES; ++i) {
		node_start_pfn[i] = 0;
		node_end_pfn[i] = 0;
	}
	high_memory = pfn_to_kaddr(node_end_pfn[0]);
#else
	lowmem_pages = 0;
	for (i = 0; i < MAX_NUMNODES; ++i) {
		int pages = node_end_pfn[i] - node_start_pfn[i];
		lowmem_pages += pages;
		if (pages)
			high_memory = pfn_to_kaddr(node_end_pfn[i]);
	}
	pr_notice("%ldMB memory available.\n",
		  pages_to_mb(lowmem_pages));
#endif
#endif
}
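/*
 * On a 32-bit kernel only controller 0 can host a bootmem allocator,
 * since memory on other controllers is not directly addressable in
 * lowmem; on 64-bit every node gets its own.
 */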
static inline int node_has_bootmem(int nid)
{
#ifdef CONFIG_64BIT
	return 1;
#else
	return nid == 0;
#endif
}

/* Allocate bootmem pages on the given node; returns the start pfn. */
static inline unsigned long alloc_bootmem_pfn(int nid,
					      unsigned long size,
					      unsigned long goal)
{
	void *kva = __alloc_bootmem_node(NODE_DATA(nid), size,
					 PAGE_SIZE, goal);
	unsigned long pfn = kaddr_to_pfn(kva);
	BUG_ON(goal && PFN_PHYS(pfn) != goal);
	return pfn;
}

static void __init setup_bootmem_allocator_node(int i)
{
	unsigned long start, end, mapsize, mapstart;

	if (node_has_bootmem(i)) {
		NODE_DATA(i)->bdata = &bootmem_node_data[i];
	} else {
		/* Share the bootmem data with node 0. */
		NODE_DATA(i)->bdata = &bootmem_node_data[0];
		return;
	}

	/* Skip up to the end of the kernel image on node 0. */
	start = (i == 0) ? min_low_pfn : node_start_pfn[i];

	/* Only track lowmem with the bootmem allocator. */
#ifdef CONFIG_HIGHMEM
	end = node_lowmem_end_pfn[i];
#else
	end = node_end_pfn[i];
#endif

	/* No memory here (e.g. a highmem-only node). */
	if (end == start)
		return;

	mapsize = bootmem_bootmap_pages(end - start);
	if (i == 0) {
		/* Use some space right after the kernel image. */
		mapstart = start;
		start += mapsize;
	} else {
		/* Allocate the bitmap on node 0, which is set up first. */
		mapstart = alloc_bootmem_pfn(0, PFN_PHYS(mapsize), 0);
	}

	init_bootmem_node(NODE_DATA(i), mapstart, start, end);

	/* Free all the space back into the allocator. */
	free_bootmem(PFN_PHYS(start), PFN_PHYS(end - start));

#if defined(CONFIG_PCI) && !defined(__tilegx__)
	/* Keep the PCI reserve region out of the allocator. */
	if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start)
		reserve_bootmem(PFN_PHYS(pci_reserve_start_pfn),
				PFN_PHYS(pci_reserve_end_pfn -
					 pci_reserve_start_pfn),
				BOOTMEM_EXCLUSIVE);
#endif
}

static void __init setup_bootmem_allocator(void)
{
	int i;
	for (i = 0; i < MAX_NUMNODES; ++i)
		setup_bootmem_allocator_node(i);

	/* Reserve any memory excluded by "crashkernel=". */
#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end)
		reserve_bootmem(crashk_res.start, resource_size(&crashk_res), 0);
#endif
}

/*
 * Hand the generic mm code the per-node memmap space that was
 * reserved in zone_sizes_init().
 */
void *__init alloc_remap(int nid, unsigned long size)
{
	int pages = node_end_pfn[nid] - node_start_pfn[nid];
	void *map = pfn_to_kaddr(node_memmap_pfn[nid]);
	BUG_ON(size != pages * sizeof(struct page));
	memset(map, 0, size);
	return map;
}

/* Compute the per-cpu data size, padded to a full page. */
static int __init percpu_size(void)
{
	int size = __per_cpu_end - __per_cpu_start;
	size += PERCPU_MODULE_RESERVE;
	size += PERCPU_DYNAMIC_EARLY_SIZE;
	if (size < PCPU_MIN_UNIT_SIZE)
		size = PCPU_MIN_UNIT_SIZE;
	size = roundup(size, PAGE_SIZE);

	/* With kdata_huge, each cpu's area must fit in one huge page. */
	BUG_ON(kdata_huge && size > HPAGE_SIZE);
	return size;
}
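/*
 * Compute each node's zone sizes and decide where its memmap and
 * per-cpu data will live: isolated nodes put their memmap on node 0;
 * nodes with bootmem allocate from it (per-cpu data at the top of
 * lowmem when kdata_huge); other nodes carve both directly out of
 * the start of their free memory.
 */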
static void __init zone_sizes_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0 };
	int size = percpu_size();
	int num_cpus = smp_height * smp_width;
	const unsigned long dma_end = (1UL << (32 - PAGE_SHIFT));
	int i;

	for (i = 0; i < num_cpus; ++i)
		node_percpu[cpu_to_node(i)] += size;

	for_each_online_node(i) {
		unsigned long start = node_start_pfn[i];
		unsigned long end = node_end_pfn[i];
#ifdef CONFIG_HIGHMEM
		unsigned long lowmem_end = node_lowmem_end_pfn[i];
#else
		unsigned long lowmem_end = end;
#endif
		int memmap_size = (end - start) * sizeof(struct page);
		node_free_pfn[i] = start;

		/*
		 * Isolated nodes keep their memmap (and no per-cpu
		 * data) on node 0; everyone else allocates from their
		 * own node, via bootmem when available, or by carving
		 * pages off the start of free memory otherwise.
		 */
		if (i != 0 && node_isset(i, isolnodes)) {
			node_memmap_pfn[i] =
				alloc_bootmem_pfn(0, memmap_size, 0);
			BUG_ON(node_percpu[i] != 0);
		} else if (node_has_bootmem(i)) {
			unsigned long goal = 0;
			node_memmap_pfn[i] =
				alloc_bootmem_pfn(i, memmap_size, 0);
			if (kdata_huge)
				goal = PFN_PHYS(lowmem_end) - node_percpu[i];
			if (node_percpu[i])
				node_percpu_pfn[i] =
					alloc_bootmem_pfn(i, node_percpu[i],
							  goal);
		} else {
			/* No bootmem on this node: reserve pages by hand. */
			node_memmap_pfn[i] = node_free_pfn[i];
			node_free_pfn[i] += PFN_UP(memmap_size);
			if (!kdata_huge) {
				node_percpu_pfn[i] = node_free_pfn[i];
				node_free_pfn[i] += PFN_UP(node_percpu[i]);
			} else {
				node_percpu_pfn[i] =
					lowmem_end - PFN_UP(node_percpu[i]);
			}
		}

#ifdef CONFIG_HIGHMEM
		if (start > lowmem_end) {
			zones_size[ZONE_NORMAL] = 0;
			zones_size[ZONE_HIGHMEM] = end - start;
		} else {
			zones_size[ZONE_NORMAL] = lowmem_end - start;
			zones_size[ZONE_HIGHMEM] = end - lowmem_end;
		}
#else
		zones_size[ZONE_NORMAL] = end - start;
#endif

		/* Everything below 4GB is eligible for ZONE_DMA. */
		if (start < dma_end) {
			zones_size[ZONE_DMA] = min(zones_size[ZONE_NORMAL],
						   dma_end - start);
			zones_size[ZONE_NORMAL] -= zones_size[ZONE_DMA];
		} else {
			zones_size[ZONE_DMA] = 0;
		}

		/* Isolated nodes share node 0's bootmem metadata. */
		if (node_isset(i, isolnodes))
			NODE_DATA(i)->bdata = &bootmem_node_data[0];

		free_area_init_node(i, zones_size, start, NULL);
		printk(KERN_DEBUG " Normal zone: %ld per-cpu pages\n",
		       PFN_UP(node_percpu[i]));

		/* Track the kinds of memory present on each node. */
		if (zones_size[ZONE_NORMAL] || zones_size[ZONE_DMA])
			node_set_state(i, N_NORMAL_MEMORY);
#ifdef CONFIG_HIGHMEM
		if (end != start)
			node_set_state(i, N_HIGH_MEMORY);
#endif

		node_set_online(i);
	}
}

#ifdef CONFIG_NUMA

/* Which logical cpus are on which nodes. */
struct cpumask node_2_cpu_mask[MAX_NUMNODES] __write_once;
EXPORT_SYMBOL(node_2_cpu_mask);

/* Which node each logical cpu is on. */
char cpu_2_node[NR_CPUS] __write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(cpu_2_node);

/* Return cpu_to_node(), except for cpus not yet assigned, which return -1. */
static int __init cpu_to_bound_node(int cpu, struct cpumask *unbound_cpus)
{
	if (!cpu_possible(cpu) || cpumask_test_cpu(cpu, unbound_cpus))
		return -1;
	else
		return cpu_to_node(cpu);
}

/* Return number of adjacent tiles already bound to the given node. */
static int __init node_neighbors(int node, int cpu,
				 struct cpumask *unbound_cpus)
{
	int neighbors = 0;
	int w = smp_width;
	int h = smp_height;
	int x = cpu % w;
	int y = cpu / w;
	if (x > 0 && cpu_to_bound_node(cpu-1, unbound_cpus) == node)
		++neighbors;
	if (x < w-1 && cpu_to_bound_node(cpu+1, unbound_cpus) == node)
		++neighbors;
	if (y > 0 && cpu_to_bound_node(cpu-w, unbound_cpus) == node)
		++neighbors;
	if (y < h-1 && cpu_to_bound_node(cpu+w, unbound_cpus) == node)
		++neighbors;
	return neighbors;
}

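/*
 * Assign cpus to nodes round-robin, preferring for each node the
 * unbound cpu that (a) is close to that node's memory controller,
 * (b) is far from the other controllers, and (c) adjoins tiles
 * already assigned to the node, so assignments stay contiguous.
 * For example, with two controllers a cpu equidistant from both is
 * a worse pick for node 0 than a cpu sitting next to controller 0.
 */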
static void __init setup_numa_mapping(void)
{
	int distance[MAX_NUMNODES][NR_CPUS];
	HV_Coord coord;
	int cpu, node, cpus, i, x, y;
	int num_nodes = num_online_nodes();
	struct cpumask unbound_cpus;
	nodemask_t default_nodes;

	cpumask_clear(&unbound_cpus);

	/* Get the set of nodes we will use for defaults. */
	nodes_andnot(default_nodes, node_online_map, isolnodes);
	if (nodes_empty(default_nodes)) {
		BUG_ON(!node_isset(0, node_online_map));
		pr_err("Forcing NUMA node zero available as a default node\n");
		node_set(0, default_nodes);
	}

	/* Populate the distance[] array. */
	memset(distance, -1, sizeof(distance));
	cpu = 0;
	for (coord.y = 0; coord.y < smp_height; ++coord.y) {
		for (coord.x = 0; coord.x < smp_width;
		     ++coord.x, ++cpu) {
			BUG_ON(cpu >= nr_cpu_ids);
			if (!cpu_possible(cpu)) {
				cpu_2_node[cpu] = -1;
				continue;
			}
			for_each_node_mask(node, default_nodes) {
				HV_MemoryControllerInfo info =
					hv_inquire_memory_controller(
						coord, node_controller[node]);
				distance[node][cpu] =
					ABS(info.coord.x) + ABS(info.coord.y);
			}
			cpumask_set_cpu(cpu, &unbound_cpus);
		}
	}
	cpus = cpu;

	/*
	 * Round-robin through the nodes, binding to each in turn the
	 * still-unbound cpu with the best (lowest) weighted score.
	 */
	node = first_node(default_nodes);
	while (!cpumask_empty(&unbound_cpus)) {
		int best_cpu = -1;
		int best_distance = INT_MAX;
		for (cpu = 0; cpu < cpus; ++cpu) {
			if (cpumask_test_cpu(cpu, &unbound_cpus)) {
				/*
				 * Score the cpu by its distance to
				 * this node's controller relative to
				 * the other controllers, with a small
				 * bonus for each neighboring tile
				 * already bound to this node.
				 */
				int d = distance[node][cpu] * num_nodes;
				for_each_node_mask(i, default_nodes) {
					if (i != node)
						d -= distance[i][cpu];
				}
				d *= 8;
				d -= node_neighbors(node, cpu, &unbound_cpus);
				if (d < best_distance) {
					best_cpu = cpu;
					best_distance = d;
				}
			}
		}
		BUG_ON(best_cpu < 0);
		cpumask_set_cpu(best_cpu, &node_2_cpu_mask[node]);
		cpu_2_node[best_cpu] = node;
		cpumask_clear_cpu(best_cpu, &unbound_cpus);
		node = next_node(node, default_nodes);
		if (node == MAX_NUMNODES)
			node = first_node(default_nodes);
	}

	/* Print out node assignments and set defaults for disabled cpus. */
	cpu = 0;
	for (y = 0; y < smp_height; ++y) {
		printk(KERN_DEBUG "NUMA cpu-to-node row %d:", y);
		for (x = 0; x < smp_width; ++x, ++cpu) {
			if (cpu_to_node(cpu) < 0) {
				pr_cont(" -");
				cpu_2_node[cpu] = first_node(default_nodes);
			} else {
				pr_cont(" %d", cpu_to_node(cpu));
			}
		}
		pr_cont("\n");
	}
}

static struct cpu cpu_devices[NR_CPUS];

static int __init topology_init(void)
{
	int i;

	for_each_online_node(i)
		register_one_node(i);

	for (i = 0; i < smp_height * smp_width; ++i)
		register_cpu(&cpu_devices[i], i);

	return 0;
}

subsys_initcall(topology_init);

#else /* !CONFIG_NUMA */

#define setup_numa_mapping() do { } while (0)

#endif /* CONFIG_NUMA */

/*
 * Tell the hypervisor our chosen "super" page shifts, if the kernel
 * is built with super-page support.
 */
static void init_super_pages(void)
{
#ifdef CONFIG_HUGETLB_SUPER_PAGES
	int i;
	for (i = 0; i < HUGE_SHIFT_ENTRIES; ++i)
		hv_set_pte_super_shift(i, huge_shift[i]);
#endif
}
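/*
 * Per-cpu setup run on each cpu as it starts: register the permanent
 * mappings (the boot cpu does this earlier, in setup_arch()), unmask
 * the interrupts the kernel handles, grant user-level access to the
 * relevant SPRs, and initialize IRQ and network state.
 */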
void setup_cpu(int boot)
{
	/* The boot cpu registers its mappings in setup_arch(). */
	if (!boot)
		store_permanent_mappings();

	/* Allow asynchronous TLB interrupts. */
#if CHIP_HAS_TILE_DMA()
	arch_local_irq_unmask(INT_DMATLB_MISS);
	arch_local_irq_unmask(INT_DMATLB_ACCESS);
#endif
#if CHIP_HAS_SN_PROC()
	arch_local_irq_unmask(INT_SNITLB_MISS);
#endif
#ifdef __tilegx__
	arch_local_irq_unmask(INT_SINGLE_STEP_K);
#endif

	/* Give user space access to the generic "world access" SPRs. */
	__insn_mtspr(SPR_MPL_WORLD_ACCESS_SET_0, 1);

#if CHIP_HAS_SN()
	/* Allow user-level access to the static network. */
	__insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1);
#endif
#if CHIP_HAS_SN_PROC()
	__insn_mtspr(SPR_MPL_SN_NOTIFY_SET_0, 1);
	__insn_mtspr(SPR_MPL_SN_CPL_SET_0, 1);
#endif

	/*
	 * Set the interrupt-control MPLs: INTCTRL_0 is taken at
	 * protection level 0 and INTCTRL_1 at protection level 1.
	 */
	__insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
	__insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1);

	/* Initialize IRQ support for this cpu. */
	setup_irq_regs();

#ifdef CONFIG_HARDWALL
	/* Reset the network state on this cpu. */
	reset_network_state();
#endif

	init_super_pages();
}

#ifdef CONFIG_BLK_DEV_INITRD

static int __initdata set_initramfs_file;
static char __initdata initramfs_file[128] = "initramfs";

static int __init setup_initramfs_file(char *str)
{
	if (str == NULL)
		return -EINVAL;
	strncpy(initramfs_file, str, sizeof(initramfs_file) - 1);
	set_initramfs_file = 1;
	return 0;
}
early_param("initramfs_file", setup_initramfs_file);
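/*
 * Look for an initramfs image in the hypervisor filesystem: first
 * under the name given by "initramfs_file=" (default "initramfs"),
 * then under the fallback name "initramfs.cpio.gz".  If found, read
 * it into bootmem and point initrd_start/initrd_end at it.
 */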
static void __init load_hv_initrd(void)
{
	HV_FS_StatInfo stat;
	int fd, rc;
	void *initrd;

	fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
	if (fd == HV_ENOENT) {
		if (set_initramfs_file) {
			pr_warning("No such hvfs initramfs file '%s'\n",
				   initramfs_file);
			return;
		} else {
			/* Fall back to the default name. */
			fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz");
			if (fd == HV_ENOENT)
				return;
		}
	}
	BUG_ON(fd < 0);
	stat = hv_fs_fstat(fd);
	BUG_ON(stat.size < 0);
	if (stat.flags & HV_FS_ISDIR) {
		pr_warning("Ignoring hvfs file '%s': it's a directory.\n",
			   initramfs_file);
		return;
	}
	initrd = alloc_bootmem_pages(stat.size);
	rc = hv_fs_pread(fd, (HV_VirtAddr) initrd, stat.size, 0);
	if (rc != stat.size) {
		pr_err("Error reading %d bytes from hvfs file '%s': %d\n",
		       stat.size, initramfs_file, rc);
		free_initrd_mem((unsigned long) initrd, stat.size);
		return;
	}
	initrd_start = (unsigned long) initrd;
	initrd_end = initrd_start + stat.size;
}

void __init free_initrd_mem(unsigned long begin, unsigned long end)
{
	free_bootmem(__pa(begin), end - begin);
}

#else
static inline void load_hv_initrd(void) {}
#endif /* CONFIG_BLK_DEV_INITRD */

static void __init validate_hv(void)
{
	/*
	 * It may already be too late, but let's check our built-in
	 * configuration against what the hypervisor is providing.
	 */
	unsigned long glue_size = hv_sysconf(HV_SYSCONF_GLUE_SIZE);
	int hv_page_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL);
	int hv_hpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE);
	HV_ASIDRange asid_range;

#ifndef CONFIG_SMP
	HV_Topology topology = hv_inquire_topology();
	BUG_ON(topology.coord.x != 0 || topology.coord.y != 0);
	if (topology.width != 1 || topology.height != 1) {
		pr_warning("Warning: booting UP kernel on %dx%d grid; will ignore all but first tile.\n",
			   topology.width, topology.height);
	}
#endif

	if (PAGE_OFFSET + HV_GLUE_START_CPA + glue_size > (unsigned long)_text)
		early_panic("Hypervisor glue size %ld is too big!\n",
			    glue_size);
	if (hv_page_size != PAGE_SIZE)
		early_panic("Hypervisor page size %#x != our %#lx\n",
			    hv_page_size, PAGE_SIZE);
	if (hv_hpage_size != HPAGE_SIZE)
		early_panic("Hypervisor huge page size %#x != our %#lx\n",
			    hv_hpage_size, HPAGE_SIZE);

#ifdef CONFIG_SMP
	/*
	 * Some hypervisor APIs take a pointer to a bitmap array whose
	 * size is at least the number of cpus on the chip, so the
	 * whole grid must fit within NR_CPUS.
	 */
	if ((smp_height * smp_width) > nr_cpu_ids)
		early_panic("Hypervisor %d x %d grid too big for Linux NR_CPUS %d\n",
			    smp_height, smp_width, nr_cpu_ids);
#endif

	/*
	 * Check that we're using allowed ASIDs, and initialize the
	 * various asid variables to their appropriate initial states.
	 */
	asid_range = hv_inquire_asid(0);
	__get_cpu_var(current_asid) = min_asid = asid_range.start;
	max_asid = asid_range.start + asid_range.size - 1;

	if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model,
		       sizeof(chip_model)) < 0) {
		pr_err("Warning: HV_CONFSTR_CHIP_MODEL not available\n");
		strlcpy(chip_model, "unknown", sizeof(chip_model));
	}
}

static void __init validate_va(void)
{
#ifndef __tilegx__
	/*
	 * Similarly, make sure we're only using allowed VAs.
	 * We assume we can contiguously use MEM_USER_INTRPT ..
	 * MEM_HV_INTRPT, and 0 .. KERNEL_HIGH_VADDR.  In addition,
	 * make sure we CAN'T use the end of memory, since we use the
	 * last chunk of each pgd for the pgd_list.
	 */
	int i, user_kernel_ok = 0;
	unsigned long max_va = 0;
	unsigned long list_va =
		((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT);

	for (i = 0; ; ++i) {
		HV_VirtAddrRange range = hv_inquire_virtual(i);
		if (range.size == 0)
			break;
		if (range.start <= MEM_USER_INTRPT &&
		    range.start + range.size >= MEM_HV_INTRPT)
			user_kernel_ok = 1;
		if (range.start == 0)
			max_va = range.size;
		BUG_ON(range.start + range.size > list_va);
	}
	if (!user_kernel_ok)
		early_panic("Hypervisor not configured for user/kernel VAs\n");
	if (max_va == 0)
		early_panic("Hypervisor not configured for low VAs\n");
	if (max_va < KERNEL_HIGH_VADDR)
		early_panic("Hypervisor max VA %#lx smaller than %#lx\n",
			    max_va, KERNEL_HIGH_VADDR);

	/* Kernel PCs must have their high bit set. */
	if ((long)VMALLOC_START >= 0)
		early_panic(
			"Linux VMALLOC region below the 2GB line (%#lx)!\n"
			"Reconfigure the kernel with fewer NR_HUGE_VMAPS\n"
			"or smaller VMALLOC_RESERVE.\n",
			VMALLOC_START);
#endif
}

/*
 * cpu_lotar_map lists all the cpus that are valid for the supervisor
 * to cache data on at a page level, i.e. what cpus can be placed in
 * the LOTAR field of a PTE.  It is set from HV_INQ_TILES_LOTAR, or
 * falls back to the possible cpus if that inquiry is unsupported.
 */
struct cpumask __write_once cpu_lotar_map;
EXPORT_SYMBOL(cpu_lotar_map);

#if CHIP_HAS_CBOX_HOME_MAP()
/*
 * hash_for_home_map lists all the tiles on which hash-for-home data
 * is cached.  It is set from HV_INQ_TILES_HFH_CACHE.
 */
struct cpumask hash_for_home_map;
EXPORT_SYMBOL(hash_for_home_map);
#endif

/*
 * cpu_cacheable_map lists all the cpus whose caches we can use: the
 * possible cpus plus, where supported, the hash-for-home tiles.
 */
struct cpumask __write_once cpu_cacheable_map;
EXPORT_SYMBOL(cpu_cacheable_map);

static __initdata struct cpumask disabled_map;

static int __init disabled_cpus(char *str)
{
	int boot_cpu = smp_processor_id();

	if (str == NULL || cpulist_parse_crop(str, &disabled_map) != 0)
		return -EINVAL;
	if (cpumask_test_cpu(boot_cpu, &disabled_map)) {
		pr_err("disabled_cpus: can't disable boot cpu %d\n", boot_cpu);
		cpumask_clear_cpu(boot_cpu, &disabled_map);
	}
	return 0;
}

early_param("disabled_cpus", disabled_cpus);

void __init print_disabled_cpus(void)
{
	if (!cpumask_empty(&disabled_map)) {
		char buf[100];
		cpulist_scnprintf(buf, sizeof(buf), &disabled_map);
		pr_info("CPUs not available for Linux: %s\n", buf);
	}
}
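/*
 * Ask the hypervisor which tiles we may use, prune that set by any
 * "disabled_cpus" argument and the setup_max_cpus limit, and derive
 * the cpu_possible, cpu_lotar and cpu_cacheable maps from the result.
 */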
static void __init setup_cpu_maps(void)
{
	struct cpumask hv_disabled_map, cpu_possible_init;
	int boot_cpu = smp_processor_id();
	int cpus, i, rc;

	/* Learn which cpus are allowed by the hypervisor. */
	rc = hv_inquire_tiles(HV_INQ_TILES_AVAIL,
			      (HV_VirtAddr) cpumask_bits(&cpu_possible_init),
			      sizeof(cpu_cacheable_map));
	if (rc < 0)
		early_panic("hv_inquire_tiles(AVAIL) failed: rc %d\n", rc);
	if (!cpumask_test_cpu(boot_cpu, &cpu_possible_init))
		early_panic("Boot CPU %d disabled by hypervisor!\n", boot_cpu);

	/* Compute the cpus disabled by the hypervisor. */
	cpumask_complement(&hv_disabled_map, &cpu_possible_init);

	/* Include them with the cpus disabled by "disabled_cpus". */
	cpumask_or(&disabled_map, &disabled_map, &hv_disabled_map);

	/*
	 * Disable every cpu after "setup_max_cpus".  The boot cpu is
	 * temporarily marked disabled only so it isn't counted twice;
	 * we never actually disable it, since we rely on it being the
	 * current cpu.
	 */
	cpus = 1;			/* this cpu */
	cpumask_set_cpu(boot_cpu, &disabled_map);
	for (i = 0; cpus < setup_max_cpus; ++i)
		if (!cpumask_test_cpu(i, &disabled_map))
			++cpus;
	for (; i < smp_height * smp_width; ++i)
		cpumask_set_cpu(i, &disabled_map);
	cpumask_clear_cpu(boot_cpu, &disabled_map);
	for (i = smp_height * smp_width; i < NR_CPUS; ++i)
		cpumask_clear_cpu(i, &disabled_map);

	/*
	 * Set up the cpu_possible map as every cpu allocated to us,
	 * minus the results of any "disabled_cpus" settings.
	 */
	cpumask_andnot(&cpu_possible_init, &cpu_possible_init, &disabled_map);
	init_cpu_possible(&cpu_possible_init);

	/* Learn which cpus are valid for LOTAR caching. */
	rc = hv_inquire_tiles(HV_INQ_TILES_LOTAR,
			      (HV_VirtAddr) cpumask_bits(&cpu_lotar_map),
			      sizeof(cpu_lotar_map));
	if (rc < 0) {
		pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n");
		cpu_lotar_map = *cpu_possible_mask;
	}

#if CHIP_HAS_CBOX_HOME_MAP()
	/* Retrieve the set of cpus used for hash-for-home caching. */
	rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE,
			      (HV_VirtAddr) hash_for_home_map.bits,
			      sizeof(hash_for_home_map));
	if (rc < 0)
		early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
	cpumask_or(&cpu_cacheable_map, cpu_possible_mask, &hash_for_home_map);
#else
	cpu_cacheable_map = *cpu_possible_mask;
#endif
}

/* "dataplane" support is not compiled into this kernel; just warn. */
static int __init dataplane(char *str)
{
	pr_warning("WARNING: dataplane support disabled in this kernel\n");
	return 0;
}

early_param("dataplane", dataplane);

#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

void __init setup_arch(char **cmdline_p)
{
	int len;

#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
	len = hv_get_command_line((HV_VirtAddr) boot_command_line,
				  COMMAND_LINE_SIZE);
	if (boot_command_line[0])
		pr_warning("WARNING: ignoring dynamic command line \"%s\"\n",
			   boot_command_line);
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	char *hv_cmdline;
#if defined(CONFIG_CMDLINE_BOOL)
	if (builtin_cmdline[0]) {
		int builtin_len = strlcpy(boot_command_line, builtin_cmdline,
					  COMMAND_LINE_SIZE);
		if (builtin_len < COMMAND_LINE_SIZE-1)
			boot_command_line[builtin_len++] = ' ';
		hv_cmdline = &boot_command_line[builtin_len];
		len = COMMAND_LINE_SIZE - builtin_len;
	} else
#endif
	{
		hv_cmdline = boot_command_line;
		len = COMMAND_LINE_SIZE;
	}
	len = hv_get_command_line((HV_VirtAddr) hv_cmdline, len);
	if (len < 0 || len > COMMAND_LINE_SIZE)
		early_panic("hv_get_command_line failed: %d\n", len);
#endif

	*cmdline_p = boot_command_line;

	/* Set disabled_map and setup_max_cpus very early. */
	parse_early_param();

	/* Make sure the kernel is compatible with the hypervisor. */
	validate_hv();
	validate_va();

	setup_cpu_maps();

#if defined(CONFIG_PCI) && !defined(__tilegx__)
	/*
	 * Initialize the PCI structures.  This is done before memory
	 * setup so that we know whether or not a pci_reserve region
	 * is necessary.
	 */
	if (tile_pci_init() == 0)
		pci_reserve_mb = 0;

	/* PCI systems reserve a region just below 4GB for mapping iomem. */
	pci_reserve_end_pfn = (1 << (32 - PAGE_SHIFT));
	pci_reserve_start_pfn = pci_reserve_end_pfn -
		(pci_reserve_mb << (20 - PAGE_SHIFT));
#endif

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	setup_memory();
	store_permanent_mappings();
	setup_bootmem_allocator();

	/*
	 * NOTE: before this point _nobody_ is allowed to allocate
	 * any memory using the bootmem allocator.
	 */
#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	paging_init();
	setup_numa_mapping();
	zone_sizes_init();
	set_page_homes();
	setup_cpu(1);
	setup_clock();
	load_hv_initrd();
}

/*
 * Set up per-cpu memory.
 */

unsigned long __per_cpu_offset[NR_CPUS] __write_once;
EXPORT_SYMBOL(__per_cpu_offset);

static size_t __initdata pfn_offset[MAX_NUMNODES] = { 0 };
static unsigned long __initdata percpu_pfn[NR_CPUS] = { 0 };

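/*
 * As the percpu code allocates pages, we return the pages reserved
 * for this cpu's node in zone_sizes_init(), in order, and remember
 * each cpu's first pfn so its mapping can be rewritten later in
 * setup_per_cpu_areas().
 */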
static void *__init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	int nid = cpu_to_node(cpu);
	unsigned long pfn = node_percpu_pfn[nid] + pfn_offset[nid];

	BUG_ON(size % PAGE_SIZE != 0);
	pfn_offset[nid] += size / PAGE_SIZE;
	BUG_ON(node_percpu[nid] < size);
	node_percpu[nid] -= size;
	if (percpu_pfn[cpu] == 0)
		percpu_pfn[cpu] = pfn;
	return pfn_to_kaddr(pfn);
}

/*
 * Nothing to free: the pages were carved out of bootmem up front
 * and are never returned.
 */
static void __init pcpu_fc_free(void *ptr, size_t size)
{
}

/* Ensure an L2 page table exists to map the given vmalloc address. */
static void __init pcpu_fc_populate_pte(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	BUG_ON(pgd_addr_invalid(addr));
	if (addr < VMALLOC_START || addr >= VMALLOC_END)
		panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx; try increasing CONFIG_VMALLOC_RESERVE\n",
		      addr, VMALLOC_START, VMALLOC_END);

	pgd = swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	BUG_ON(!pud_present(*pud));
	pmd = pmd_offset(pud, addr);
	if (pmd_present(*pmd)) {
		BUG_ON(pmd_huge_page(*pmd));
	} else {
		pte = __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE,
				      HV_PAGE_TABLE_ALIGN, 0);
		pmd_populate_kernel(&init_mm, pmd, pte);
	}
}
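/*
 * Initialize the percpu areas: hand out the pages reserved earlier,
 * then re-point each cpu's pages (both the vmalloc mapping and the
 * lowmem alias, shattering huge pages as needed) so they are cached
 * on that cpu's own tile, and finally flush stale cache and TLB state.
 */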
void __init setup_per_cpu_areas(void)
{
	struct page *pg;
	unsigned long delta, pfn, lowmem_va;
	unsigned long size = percpu_size();
	char *ptr;
	int rc, cpu, i;

	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_fc_alloc,
				   pcpu_fc_free, pcpu_fc_populate_pte);
	if (rc < 0)
		panic("Cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];

		/* finv the copy out of cache so we can change homecache */
		ptr = pcpu_base_addr + pcpu_unit_offsets[cpu];
		__finv_buffer(ptr, size);
		pfn = percpu_pfn[cpu];

		/* Rewrite the page tables to cache on that cpu. */
		pg = pfn_to_page(pfn);
		for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) {

			/* Update the vmalloc mapping and page home. */
			unsigned long addr = (unsigned long)ptr + i;
			pte_t *ptep = virt_to_pte(NULL, addr);
			pte_t pte = *ptep;
			BUG_ON(pfn != pte_pfn(pte));
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
			pte = set_remote_cache_cpu(pte, cpu);
			set_pte_at(&init_mm, addr, ptep, pte);

			/* Update the lowmem mapping for consistency. */
			lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
			ptep = virt_to_pte(NULL, lowmem_va);
			if (pte_huge(*ptep)) {
				printk(KERN_DEBUG "early shatter of huge page at %#lx\n",
				       lowmem_va);
				shatter_pmd((pmd_t *)ptep);
				ptep = virt_to_pte(NULL, lowmem_va);
				BUG_ON(pte_huge(*ptep));
			}
			BUG_ON(pfn != pte_pfn(*ptep));
			set_pte_at(&init_mm, lowmem_va, ptep, pte);
		}
	}

	/* Set our thread pointer appropriately. */
	set_my_cpu_offset(__per_cpu_offset[smp_processor_id()]);

	/* Make sure the finv's have completed. */
	mb_incoherent();

	/* Flush the TLB so we reference it properly from here on out. */
	local_flush_tlb_all();
}

static struct resource data_resource = {
	.name	= "Kernel data",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

/*
 * On tilepro the bus cannot address physical memory above 4GB, so
 * mark that region busy in iomem_resource to keep PCI resource
 * assignment below the 4GB line.
 */
#if defined(CONFIG_PCI) && !defined(__tilegx__)
static struct resource *__init
insert_non_bus_resource(void)
{
	struct resource *res =
		kzalloc(sizeof(struct resource), GFP_ATOMIC);
	if (!res)
		return NULL;
	res->name = "Non-Bus Physical Address Space";
	res->start = (1ULL << 32);
	res->end = -1LL;
	res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	if (insert_resource(&iomem_resource, res)) {
		kfree(res);
		return NULL;
	}
	return res;
}
#endif

static struct resource *__init
insert_ram_resource(u64 start_pfn, u64 end_pfn)
{
	struct resource *res =
		kzalloc(sizeof(struct resource), GFP_ATOMIC);
	if (!res)
		return NULL;
	res->name = "System RAM";
	res->start = start_pfn << PAGE_SHIFT;
	res->end = (end_pfn << PAGE_SHIFT) - 1;
	res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	if (insert_resource(&iomem_resource, res)) {
		kfree(res);
		return NULL;
	}
	return res;
}
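/*
 * Request address space for all standard resources: one "System RAM"
 * range per node (split around the PCI reserve window on tilepro),
 * plus the kernel code and data and, with kexec, the crash kernel.
 */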
static int __init request_standard_resources(void)
{
	int i;
	enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET };

#if defined(CONFIG_PCI) && !defined(__tilegx__)
	insert_non_bus_resource();
#endif

	for_each_online_node(i) {
		u64 start_pfn = node_start_pfn[i];
		u64 end_pfn = node_end_pfn[i];

#if defined(CONFIG_PCI) && !defined(__tilegx__)
		if (start_pfn <= pci_reserve_start_pfn &&
		    end_pfn > pci_reserve_start_pfn) {
			if (end_pfn > pci_reserve_end_pfn)
				insert_ram_resource(pci_reserve_end_pfn,
						    end_pfn);
			end_pfn = pci_reserve_start_pfn;
		}
#endif
		insert_ram_resource(start_pfn, end_pfn);
	}

	code_resource.start = __pa(_text - CODE_DELTA);
	code_resource.end = __pa(_etext - CODE_DELTA)-1;
	data_resource.start = __pa(_sdata);
	data_resource.end = __pa(_end)-1;

	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &data_resource);

#ifdef CONFIG_KEXEC
	insert_resource(&iomem_resource, &crashk_res);
#endif

	return 0;
}

subsys_initcall(request_standard_resources);