/*
 * Architecture-specific setup.
 */
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/screen_info.h>
#include <linux/dmi.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>

#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/hpsim.h>

#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif

DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
EXPORT_SYMBOL(ia64_cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(local_per_cpu_offset);
#endif
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

unsigned long ia64_max_cacheline_size;

unsigned long ia64_iobase;
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;

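/*
 * "flush_icache_range()" needs to know what processor dependent stride size to use
 * when it makes i-cache(s) coherent with d-caches.
 */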
#define I_CACHE_STRIDE_SHIFT	5	/* safest default: 32 bytes by 32 bytes */
unsigned long ia64_i_cache_stride_shift = ~0;

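/*
 * "clflush_cache_range()" needs to know what processor dependent stride size to
 * use when it flushes cache lines including both d-cache and i-cache.
 */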
#define CACHE_STRIDE_SHIFT	5	/* safest default: 32 bytes by 32 bytes */
unsigned long ia64_cache_stride_shift = ~0;

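/*
 * The end-of-memory marker uses the extra (+1) slot in this table.
 */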
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;
int num_rsvd_regions __initdata;

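/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters.  Segments contained in the map are removed from the memory ranges.  A
 * caller-specified function is called with the memory ranges that remain after
 * filtering.
 */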
int __init
filter_rsvd_memory (u64 start, u64 end, void *arg)
{
	u64 range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end) return 0;
	}
#endif
	/*
	 * lowest possible address (walker uses virtual)
	 */
	prev_start = PAGE_OFFSET;
	func = arg;

	for (i = 0; i < num_rsvd_regions; ++i) {
		range_start = max(start, prev_start);
		range_end = min(end, rsvd_region[i].start);

		if (range_start < range_end)
			call_pernode_memory(__pa(range_start), range_end - range_start, func);

		/* nothing more available in this segment */
		if (range_end == end) return 0;

		prev_start = rsvd_region[i].end;
	}

	return 0;
}
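/*
 * Similar to "filter_rsvd_memory()", but the reserved memory ranges are not
 * filtered out.
 */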
int __init
filter_memory(u64 start, u64 end, void *arg)
{
	void (*func)(unsigned long, unsigned long, int);

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif
	func = arg;
	if (start < end)
		call_pernode_memory(__pa(start), end - start, func);
	return 0;
}

static void __init
sort_regions (struct rsvd_region *rsvd_region, int max)
{
	int j;

	/* simple bubble sorting */
	while (max--) {
		for (j = 0; j < max; ++j) {
			if (rsvd_region[j].start > rsvd_region[j+1].start) {
				struct rsvd_region tmp;
				tmp = rsvd_region[j];
				rsvd_region[j] = rsvd_region[j + 1];
				rsvd_region[j + 1] = tmp;
			}
		}
	}
}

/* merge overlaps */
static int __init
merge_regions (struct rsvd_region *rsvd_region, int max)
{
	int i;
	for (i = 1; i < max; ++i) {
		if (rsvd_region[i].start >= rsvd_region[i-1].end)
			continue;
		if (rsvd_region[i].end > rsvd_region[i-1].end)
			rsvd_region[i-1].end = rsvd_region[i].end;
		--max;
		memmove(&rsvd_region[i], &rsvd_region[i+1],
			(max - i) * sizeof(struct rsvd_region));
	}
	return max;
}
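/*
 * Request address space for all standard resources
 */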
static int __init register_memory(void)
{
	code_resource.start = ia64_tpa(_text);
	code_resource.end   = ia64_tpa(_etext) - 1;
	data_resource.start = ia64_tpa(_etext);
	data_resource.end   = ia64_tpa(_edata) - 1;
	bss_resource.start  = ia64_tpa(__bss_start);
	bss_resource.end    = ia64_tpa(_end) - 1;
	efi_initialize_iomem_resources(&code_resource, &data_resource,
				       &bss_resource);

	return 0;
}

__initcall(register_memory);

#ifdef CONFIG_KEXEC

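/*
 * Check whether the reserved crashkernel region is usable on this machine.
 * Most flavours need the region below 4 GB because swiotlb requires 32-bit
 * addressable memory; "sn2" and "uv" have no such restriction.
 */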
static int __init check_crashkernel_memory(unsigned long pbase, size_t size)
{
	if (ia64_platform_is("sn2") || ia64_platform_is("uv"))
		return 1;
	else
		return pbase < (1UL << 32);
}

static void __init setup_crashkernel(unsigned long total, int *n)
{
	unsigned long long base = 0, size = 0;
	int ret;

	ret = parse_crashkernel(boot_command_line, total,
			&size, &base);
	if (ret == 0 && size > 0) {
		if (!base) {
			sort_regions(rsvd_region, *n);
			*n = merge_regions(rsvd_region, *n);
			base = kdump_find_rsvd_region(size,
					rsvd_region, *n);
		}

		if (!check_crashkernel_memory(base, size)) {
			pr_warning("crashkernel: There would be kdump memory "
				"at %ld GB but this is unusable because it "
				"must\nbe below 4 GB. Change the memory "
				"configuration of the machine.\n",
				(unsigned long)(base >> 30));
			return;
		}

		if (base != ~0UL) {
			printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
					"for crashkernel (System RAM: %ldMB)\n",
					(unsigned long)(size >> 20),
					(unsigned long)(base >> 20),
					(unsigned long)(total >> 20));
			rsvd_region[*n].start =
				(unsigned long)__va(base);
			rsvd_region[*n].end =
				(unsigned long)__va(base + size);
			(*n)++;
			crashk_res.start = base;
			crashk_res.end = base + size - 1;
		}
	}
	efi_memmap_res.start = ia64_boot_param->efi_memmap;
	efi_memmap_res.end = efi_memmap_res.start +
		ia64_boot_param->efi_memmap_size;
	boot_param_res.start = __pa(ia64_boot_param);
	boot_param_res.end = boot_param_res.start +
		sizeof(*ia64_boot_param);
}
#else
static inline void __init setup_crashkernel(unsigned long total, int *n)
{}
#endif
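/**
 * reserve_memory - setup reserved memory areas
 *
 * Setup the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see arch/ia64/include/asm/meminit.h if you need to define more.
 */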
void __init
reserve_memory (void)
{
	int n = 0;
	unsigned long total_memory;

	/*
	 * none of the entries in this table overlap
	 */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end   = (rsvd_region[n].start
				+ strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;

	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
	n++;

#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
		n++;
	}
#endif

#ifdef CONFIG_CRASH_DUMP
	if (reserve_elfcorehdr(&rsvd_region[n].start,
			       &rsvd_region[n].end) == 0)
		n++;
#endif

	total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
	n++;

	setup_crashkernel(total_memory, &n);

	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end   = ~0UL;
	n++;

	num_rsvd_regions = n;
	BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);

	sort_regions(rsvd_region, num_rsvd_regions);
	num_rsvd_regions = merge_regions(rsvd_region, num_rsvd_regions);
}
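/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */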
void __init
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
		initrd_end   = initrd_start+ia64_boot_param->initrd_size;

		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%llu bytes)\n",
		       initrd_start, ia64_boot_param->initrd_size);
	}
#endif
}

static void __init
io_port_init (void)
{
	unsigned long phys_iobase;

	/*
	 * Set `iobase' based on the EFI memory map or, failing that, the
	 * value firmware left in ar.k0.  Note that the kernel itself does
	 * not use ar.k0 for port I/O: kernel accesses go through MMIO
	 * using the virtual mmio_base of the appropriate io_space[] entry.
	 */
	phys_iobase = efi_get_iobase();
	if (!phys_iobase) {
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, "
			"falling back to AR.KR0 (0x%lx)\n", phys_iobase);
	}
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
	num_io_spaces = 1;
}
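/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be setup.
 */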
static inline int __init
early_console_setup (char *cmdline)
{
	int earlycons = 0;

#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
	{
		extern int sn_serial_console_early_setup(void);
		if (!sn_serial_console_early_setup())
			earlycons++;
	}
#endif
#ifdef CONFIG_EFI_PCDP
	if (!efi_setup_pcdp_console(cmdline))
		earlycons++;
#endif
	if (!simcons_register())
		earlycons++;

	return (earlycons) ? 0 : -1;
}

static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
	/* If we register an early console, allow CPU 0 to printk */
	set_cpu_online(smp_processor_id(), true);
#endif
}

static __initdata int nomca;
static __init int setup_nomca(char *s)
{
	nomca = 1;
	return 0;
}
early_param("nomca", setup_nomca);

#ifdef CONFIG_CRASH_DUMP
int __init reserve_elfcorehdr(u64 *start, u64 *end)
{
	u64 length;

	/* We get the address from the kernel command line, but the size
	 * is extracted from the EFI tables.  Both address and size are
	 * required for reliability.
	 */
	if (!is_vmcore_usable())
		return -EINVAL;

	if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
		vmcore_unusable();
		return -EINVAL;
	}

	*start = (unsigned long)__va(elfcorehdr_addr);
	*end = *start + length;
	return 0;
}

#endif /* CONFIG_CRASH_DUMP */

void __init
setup_arch (char **cmdline_p)
{
	unw_init();

	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

	*cmdline_p = __va(ia64_boot_param->command_line);
	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);

	efi_init();
	io_port_init();

#ifdef CONFIG_IA64_GENERIC
	/* machvec needs to be parsed from the command line
	 * before parse_early_param() is called to ensure
	 * that ia64_mv is initialised before any command line
	 * settings may cause console setup to occur
	 */
	machvec_init_from_cmdline(*cmdline_p);
#endif

	parse_early_param();

	if (early_console_setup(*cmdline_p) == 0)
		mark_bsp_online();

#ifdef CONFIG_ACPI
	/* Initialize the ACPI boot-time table parser */
	acpi_table_init();
	early_acpi_boot_init();
# ifdef CONFIG_ACPI_NUMA
	acpi_numa_init();
	acpi_numa_fixup();
#  ifdef CONFIG_ACPI_HOTPLUG_CPU
	prefill_possible_map();
#  endif
	per_cpu_scan_finalize((cpumask_weight(&early_cpu_possible_map) == 0 ?
		32 : cpumask_weight(&early_cpu_possible_map)),
		additional_cpus > 0 ? additional_cpus : 0);
# endif
#endif /* CONFIG_ACPI */

#ifdef CONFIG_SMP
	smp_build_cpu_map();
#endif
	find_memory();

	/* process SAL system table: */
	ia64_sal_init(__va(efi.sal_systab));

#ifdef CONFIG_ITANIUM
	ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
#else
	{
		unsigned long num_phys_stacked;

		if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96)
			ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
	}
#endif

#ifdef CONFIG_SMP
	cpu_physical_id(0) = hard_smp_processor_id();
#endif

	cpu_init();	/* initialize the bootstrap CPU */
	mmu_context_init();	/* initialize context_id bitmap */

#ifdef CONFIG_VT
	if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
		conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
		/*
		 * Non-legacy systems may route legacy VGA MMIO range to system
		 * memory.  vga_con probes the MMIO hole, so memory looks like
		 * a VGA device to it.  The EFI memory map can tell us if it's
		 * memory so we can avoid this problem.
		 */
		if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
			conswitchp = &vga_con;
# endif
	}
#endif

	/* enable IA-64 Machine Check Abort Handling unless disabled */
	if (!nomca)
		ia64_mca_init();

	platform_setup(cmdline_p);
#ifndef CONFIG_IA64_HP_SIM
	check_sal_cache_flush();
#endif
	paging_init();

	clear_sched_clock_stable();
}
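/*
 * Display cpu info for all CPUs.
 */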
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
# define lpj	c->loops_per_jiffy
# define cpunum	c->cpu
#else
# define lpj	loops_per_jiffy
# define cpunum	0
#endif
	static struct {
		unsigned long mask;
		const char *feature_name;
	} feature_bits[] = {
		{ 1UL << 0, "branchlong" },
		{ 1UL << 1, "spontaneous deferral" },
		{ 1UL << 2, "16-byte atomic ops" }
	};
	char features[128], *cp, *sep;
	struct cpuinfo_ia64 *c = v;
	unsigned long mask;
	unsigned long proc_freq;
	int i, size;

	mask = c->features;

	/* build the feature string: */
	memcpy(features, "standard", 9);
	cp = features;
	size = sizeof(features);
	sep = "";
	for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
		if (mask & feature_bits[i].mask) {
			cp += snprintf(cp, size, "%s%s", sep,
				       feature_bits[i].feature_name);
			sep = ", ";
			mask &= ~feature_bits[i].mask;
			size = sizeof(features) - (cp - features);
		}
	}
	if (mask && size > 1) {
		/* print unknown features as a hex value: */
		snprintf(cp, size, "%s0x%lx", sep, mask);
	}

	proc_freq = cpufreq_quick_get(cpunum);
	if (!proc_freq)
		proc_freq = c->proc_freq / 1000;

	seq_printf(m,
		   "processor  : %d\n"
		   "vendor     : %s\n"
		   "arch       : IA-64\n"
		   "family     : %u\n"
		   "model      : %u\n"
		   "model name : %s\n"
		   "revision   : %u\n"
		   "archrev    : %u\n"
		   "features   : %s\n"
		   "cpu number : %lu\n"
		   "cpu regs   : %u\n"
		   "cpu MHz    : %lu.%03lu\n"
		   "itc MHz    : %lu.%06lu\n"
		   "BogoMIPS   : %lu.%02lu\n",
		   cpunum, c->vendor, c->family, c->model,
		   c->model_name, c->revision, c->archrev,
		   features, c->ppn, c->number,
		   proc_freq / 1000, proc_freq % 1000,
		   c->itc_freq / 1000000, c->itc_freq % 1000000,
		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
	seq_printf(m, "siblings   : %u\n",
		   cpumask_weight(&cpu_core_map[cpunum]));
	if (c->socket_id != -1)
		seq_printf(m, "physical id: %u\n", c->socket_id);
	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
		seq_printf(m,
			   "core id    : %u\n"
			   "thread id  : %u\n",
			   c->core_id, c->thread_id);
#endif
	seq_printf(m, "\n");

	return 0;
}

static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
	while (*pos < nr_cpu_ids && !cpu_online(*pos))
		++*pos;
#endif
	return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL;
}

static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void
c_stop (struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo
};

#define MAX_BRANDS	8
static char brandname[MAX_BRANDS][128];

static char *
get_model_name(__u8 family, __u8 model)
{
	static int overflow;
	char brand[128];
	int i;

	memcpy(brand, "Unknown", 8);
	if (ia64_pal_get_brand_info(brand)) {
		if (family == 0x7)
			memcpy(brand, "Merced", 7);
		else if (family == 0x1f) switch (model) {
			case 0: memcpy(brand, "McKinley", 9); break;
			case 1: memcpy(brand, "Madison", 8); break;
			case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
		}
	}
	for (i = 0; i < MAX_BRANDS; i++)
		if (strcmp(brandname[i], brand) == 0)
			return brandname[i];
	for (i = 0; i < MAX_BRANDS; i++)
		if (brandname[i][0] == '\0')
			return strcpy(brandname[i], brand);
	if (overflow++ == 0)
		printk(KERN_ERR
		       "%s: Table overflow. Some processor model information will be missing\n",
		       __func__);
	return "Unknown";
}

static void
identify_cpu (struct cpuinfo_ia64 *c)
{
	union {
		unsigned long bits[5];
		struct {
			/* id 0 & 1: */
			char vendor[16];

			/* id 2 */
			u64 ppn;		/* processor serial number */

			/* id 3: */
			unsigned number		:  8;
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;

			/* id 4: */
			u64 features;
		} field;
	} cpuid;
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	pal_status_t status;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
	int i;

	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
	c->cpu = smp_processor_id();

	/* below default values will be overwritten by identify_siblings()
	 * for Multi-Threading/Multi-Core capable CPUs
	 */
	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
	c->socket_id = -1;

	identify_siblings(c);

	if (c->threads_per_core > smp_num_siblings)
		smp_num_siblings = c->threads_per_core;
#endif
	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;
	c->model_name = get_model_name(c->family, c->model);

	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}
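/*
 * Do the following calculations:
 *
 * 1. the max. cache line size.
 * 2. the minimum of the i-cache stride sizes for "flush_icache_range()".
 * 3. the minimum of the cache stride sizes for "clflush_cache_range()".
 */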
static void
get_cache_info(void)
{
	unsigned long line_size, max = 1;
	unsigned long l, levels, unique_caches;
	pal_cache_config_info_t cci;
	long status;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __func__, status);
		max = SMP_CACHE_BYTES;
		/* Safest setup for "flush_icache_range()" */
		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
		/* Safest setup for "clflush_cache_range()" */
		ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
		goto out;
	}

	for (l = 0; l < levels; ++l) {
		/* cache_type (data_or_unified)=2 */
		status = ia64_pal_cache_config_info(l, 2, &cci);
		if (status != 0) {
			printk(KERN_ERR "%s: ia64_pal_cache_config_info"
			       "(l=%lu, 2) failed (status=%ld)\n",
			       __func__, l, status);
			max = SMP_CACHE_BYTES;
			/* The safest setup for "flush_icache_range()" */
			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			/* The safest setup for "clflush_cache_range()" */
			ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
			cci.pcci_unified = 1;
		} else {
			if (cci.pcci_stride < ia64_cache_stride_shift)
				ia64_cache_stride_shift = cci.pcci_stride;

			line_size = 1 << cci.pcci_line_size;
			if (line_size > max)
				max = line_size;
		}

		if (!cci.pcci_unified) {
			/* cache_type (instruction)=1 */
			status = ia64_pal_cache_config_info(l, 1, &cci);
			if (status != 0) {
				printk(KERN_ERR "%s: ia64_pal_cache_config_info"
				       "(l=%lu, 1) failed (status=%ld)\n",
				       __func__, l, status);
				/* The safest setup for "flush_icache_range()" */
				cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			}
		}
		if (cci.pcci_stride < ia64_i_cache_stride_shift)
			ia64_i_cache_stride_shift = cci.pcci_stride;
	}
  out:
	if (max > ia64_max_cacheline_size)
		ia64_max_cacheline_size = max;
}
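/*
 * cpu_init() initializes state that is per-CPU.  This function acts as a
 * 'CPU state barrier', nothing should get across.
 */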
void
cpu_init (void)
{
	extern void ia64_mmu_init(void *);
	static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();
#ifdef CONFIG_SMP
	/*
	 * insert boot cpu into sibling and core maps
	 * (must be done after per_cpu area is setup)
	 */
	if (smp_processor_id() == 0) {
		cpumask_set_cpu(0, &per_cpu(cpu_sibling_map, 0));
		cpumask_set_cpu(0, &cpu_core_map[0]);
	} else {
		/*
		 * Set ar.k3 to the physical per-CPU offset of this AP so
		 * that per-CPU variables can be addressed before the
		 * virtual per-CPU mapping is established.
		 */
		ia64_set_kr(IA64_KR_PER_CPU_DATA,
			    ia64_tpa(cpu_data) - (long) __per_cpu_start);
	}
#endif

	get_cache_info();

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() through the canonical per-cpu address.
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
	{
# define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (cpu_info->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
					      (iprv.v1 | 0x80), FEATURE_SET, 0);
		}
	}
#endif

	/* Clear the stack memory reserved for pt_regs: */
	memset(task_pt_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize the page-table base register to a global
	 * directory with all zeroes.  This ensures that we can handle
	 * TLB-misses to user address-space even before we created the
	 * first user address-space.  This may happen, e.g., due to
	 * aggressive use of lfetch.fault.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

	/*
	 * Initialize default control register to defer speculative faults except
	 * for those arising from TLB misses, which are not deferred.  The kernel
	 * MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses).
	 */
	ia64_setreg(_IA64_REG_CR_DCR, (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
				       | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);

	ia64_mmu_init(ia64_imva(cpu_data));
	ia64_mca_cpu_init(ia64_imva(cpu_data));

	/* Clear ITC so sched_clock() starts from zero on this CPU: */
	ia64_set_itc(0);

	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);

	/* drain any interrupts left pending by firmware: */
	while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
		ia64_eoi();

#ifdef CONFIG_SMP
	normal_xtp();
#endif

	/* set ia64_ctx.max_ctx to the maximum context supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0) {
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
		setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL);
	} else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}

	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	if (num_phys_stacked > max_num_phys_stacked) {
		ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8);
		max_num_phys_stacked = num_phys_stacked;
	}
	platform_cpu_init();
}

void __init
check_bugs (void)
{
	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
			       (unsigned long) __end___mckinley_e9_bundles);
}

static int __init run_dmi_scan(void)
{
	dmi_scan_machine();
	dmi_memdev_walk();
	dmi_set_dump_stack_arch_desc();
	return 0;
}
core_initcall(run_dmi_scan);