1
2
3
4
5
6
7
8
9
10#include <linux/cpumask.h>
11#include <linux/hardirq.h>
12#include <linux/proc_fs.h>
13#include <linux/threads.h>
14#include <linux/kernel.h>
15#include <linux/export.h>
16#include <linux/string.h>
17#include <linux/ctype.h>
18#include <linux/sched.h>
19#include <linux/timer.h>
20#include <linux/slab.h>
21#include <linux/cpu.h>
22#include <linux/init.h>
23#include <linux/io.h>
24#include <linux/pci.h>
25#include <linux/kdebug.h>
26#include <linux/delay.h>
27#include <linux/crash_dump.h>
28#include <linux/reboot.h>
29#include <linux/memory.h>
30#include <linux/acpi.h>
31#include <linux/efi.h>
32
33#include <asm/uv/uv_mmrs.h>
34#include <asm/uv/uv_hub.h>
35#include <asm/current.h>
36#include <asm/pgtable.h>
37#include <asm/uv/bios.h>
38#include <asm/uv/uv.h>
39#include <asm/apic.h>
40#include <asm/e820/api.h>
41#include <asm/ipi.h>
42#include <asm/smp.h>
43#include <asm/x86_init.h>
44#include <asm/nmi.h>
45
46DEFINE_PER_CPU(int, x2apic_extra_bits);
47
48static enum uv_system_type uv_system_type;
49static int uv_hubbed_system;
50static int uv_hubless_system;
51static u64 gru_start_paddr, gru_end_paddr;
52static u64 gru_dist_base, gru_first_node_paddr = -1LL, gru_last_node_paddr;
53static u64 gru_dist_lmask, gru_dist_umask;
54static union uvh_apicid uvh_apicid;
55
56
57static u8 oem_id[ACPI_OEM_ID_SIZE + 1];
58static u8 oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
59
60
61static struct {
62 unsigned int apicid_shift;
63 unsigned int apicid_mask;
64 unsigned int socketid_shift;
65 unsigned int pnode_mask;
66 unsigned int gpa_shift;
67 unsigned int gnode_shift;
68} uv_cpuid;
69
70int uv_min_hub_revision_id;
71EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
72
73unsigned int uv_apicid_hibits;
74EXPORT_SYMBOL_GPL(uv_apicid_hibits);
75
76static struct apic apic_x2apic_uv_x;
77static struct uv_hub_info_s uv_hub_info_node0;
78
79
80static int disable_uv_undefined_panic = 1;
81
/*
 * Handler for accesses to an MMR that is undefined on this hub revision.
 *
 * By default (disable_uv_undefined_panic == 1) only a critical message is
 * logged; clearing the flag turns the access into a panic() instead.
 * Returns ~0ul as the "read" value.
 *
 * NOTE(review): the likely() hint favors the non-default (panic) branch --
 * presumably deliberate for debug configurations, confirm.
 */
unsigned long uv_undefined(char *str)
{
	if (likely(!disable_uv_undefined_panic))
		panic("UV: error: undefined MMR: %s\n", str);
	else
		pr_crit("UV: error: undefined MMR: %s\n", str);

	return ~0ul;
}
EXPORT_SYMBOL(uv_undefined);
93
94static unsigned long __init uv_early_read_mmr(unsigned long addr)
95{
96 unsigned long val, *mmr;
97
98 mmr = early_ioremap(UV_LOCAL_MMR_BASE | addr, sizeof(*mmr));
99 val = *mmr;
100 early_iounmap(mmr, sizeof(*mmr));
101
102 return val;
103}
104
105static inline bool is_GRU_range(u64 start, u64 end)
106{
107 if (gru_dist_base) {
108 u64 su = start & gru_dist_umask;
109 u64 sl = start & gru_dist_lmask;
110 u64 eu = end & gru_dist_umask;
111 u64 el = end & gru_dist_lmask;
112
113
114 return (sl == gru_dist_base && el == gru_dist_base &&
115 su >= gru_first_node_paddr &&
116 su <= gru_last_node_paddr &&
117 eu == su);
118 } else {
119 return start >= gru_start_paddr && end <= gru_end_paddr;
120 }
121}
122
123static bool uv_is_untracked_pat_range(u64 start, u64 end)
124{
125 return is_ISA_range(start, end) || is_GRU_range(start, end);
126}
127
/*
 * Read the hub part number and revision from the local NODE_ID MMR,
 * record the minimum hub revision, and return this blade's physical
 * node id (pnode).
 */
static int __init early_get_pnodeid(void)
{
	union uvh_node_id_u node_id;
	union uvh_rh_gam_config_mmr_u m_n_config;
	int pnode;

	node_id.v = uv_early_read_mmr(UVH_NODE_ID);
	m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR);
	uv_min_hub_revision_id = node_id.s.revision;

	/* Bias the raw revision by the hub generation's revision base */
	switch (node_id.s.part_number) {
	case UV2_HUB_PART_NUMBER:
	case UV2_HUB_PART_NUMBER_X:
		uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
		break;
	case UV3_HUB_PART_NUMBER:
	case UV3_HUB_PART_NUMBER_X:
		uv_min_hub_revision_id += UV3_HUB_REVISION_BASE;
		break;

	case UV4_HUB_PART_NUMBER:
		uv_min_hub_revision_id += UV4_HUB_REVISION_BASE - 1;
		uv_cpuid.gnode_shift = 2; /* UV4 min gnode granularity */
		break;
	}

	uv_hub_info->hub_revision = uv_min_hub_revision_id;
	uv_cpuid.pnode_mask = (1 << m_n_config.s.n_skt) - 1;
	/* node_id counts both hub halves; >> 1 converts to pnode */
	pnode = (node_id.s.node_id >> 1) & uv_cpuid.pnode_mask;
	uv_cpuid.gpa_shift = 46;	/* Default until the GAM tables say otherwise */

	pr_info("UV: rev:%d part#:%x nodeid:%04x n_skt:%d pnmsk:%x pn:%x\n",
		node_id.s.revision, node_id.s.part_number, node_id.s.node_id,
		m_n_config.s.n_skt, uv_cpuid.pnode_mask, pnode);
	return pnode;
}
166
/*
 * Read the TSC sync state that the BIOS published in UVH_TSC_SYNC_MMR
 * and either mark TSC-async-resets as handled or mark the TSC unstable.
 */
static void __init uv_tsc_check_sync(void)
{
	u64 mmr;
	int sync_state;
	int mmr_shift;
	char *state;
	bool valid;

	/* The sync-state field location differs per hub generation */
	mmr = uv_early_read_mmr(UVH_TSC_SYNC_MMR);
	mmr_shift =
		is_uv1_hub() ? 0 :
		is_uv2_hub() ? UVH_TSC_SYNC_SHIFT_UV2K : UVH_TSC_SYNC_SHIFT;
	if (mmr_shift)
		sync_state = (mmr >> mmr_shift) & UVH_TSC_SYNC_MASK;
	else
		sync_state = 0;	/* UV1: no sync-state field available */

	switch (sync_state) {
	case UVH_TSC_SYNC_VALID:
		state = "in sync";
		valid = true;
		break;

	case UVH_TSC_SYNC_INVALID:
		state = "unstable";
		valid = false;
		break;
	default:
		/* Unknown BIOS encoding: trust the TSC by default */
		state = "unknown: assuming valid";
		valid = true;
		break;
	}
	pr_info("UV: TSC sync state from BIOS:0%d(%s)\n", sync_state, state);

	if (valid)
		mark_tsc_async_resets("UV BIOS");
	else
		mark_tsc_unstable("UV BIOS");
}
208
209
210
211#define SMT_LEVEL 0
212#define INVALID_TYPE 0
213#define SMT_TYPE 1
214#define CORE_TYPE 2
215#define LEAFB_SUBTYPE(ecx) (((ecx) >> 8) & 0xff)
216#define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f)
217
218static void set_x2apic_bits(void)
219{
220 unsigned int eax, ebx, ecx, edx, sub_index;
221 unsigned int sid_shift;
222
223 cpuid(0, &eax, &ebx, &ecx, &edx);
224 if (eax < 0xb) {
225 pr_info("UV: CPU does not have CPUID.11\n");
226 return;
227 }
228
229 cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
230 if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE)) {
231 pr_info("UV: CPUID.11 not implemented\n");
232 return;
233 }
234
235 sid_shift = BITS_SHIFT_NEXT_LEVEL(eax);
236 sub_index = 1;
237 do {
238 cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx);
239 if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) {
240 sid_shift = BITS_SHIFT_NEXT_LEVEL(eax);
241 break;
242 }
243 sub_index++;
244 } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
245
246 uv_cpuid.apicid_shift = 0;
247 uv_cpuid.apicid_mask = (~(-1 << sid_shift));
248 uv_cpuid.socketid_shift = sid_shift;
249}
250
/*
 * Determine how an APICID maps to a socket id: read the APICID layout
 * MMR on hub generations that have it, then derive shift/mask via
 * the CPUID extended topology leaf.
 */
static void __init early_get_apic_socketid_shift(void)
{
	if (is_uv2_hub() || is_uv3_hub())
		uvh_apicid.v = uv_early_read_mmr(UVH_APICID);

	set_x2apic_bits();

	pr_info("UV: apicid_shift:%d apicid_mask:0x%x\n", uv_cpuid.apicid_shift, uv_cpuid.apicid_mask);
	pr_info("UV: socketid_shift:%d pnode_mask:0x%x\n", uv_cpuid.socketid_shift, uv_cpuid.pnode_mask);
}
261
262
263
264
265
266
267static void __init uv_set_apicid_hibit(void)
268{
269 union uv1h_lb_target_physical_apic_id_mask_u apicid_mask;
270
271 if (is_uv1_hub()) {
272 apicid_mask.v = uv_early_read_mmr(UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK);
273 uv_apicid_hibits = apicid_mask.s1.bit_enables & UV_APICID_HIBIT_MASK;
274 }
275}
276
277static void __init uv_stringify(int len, char *to, char *from)
278{
279
280 strncpy(to, from, len-1);
281}
282
/*
 * APIC-driver probe hook, called with the MADT OEM ids.  Detects SGI UV
 * systems (hubbed "SGIn" or hubless "NSGIn" OEM ids), records system
 * type, hub revision and architecture masks, and selects the APIC mode.
 *
 * Returns non-zero when this UV x2apic driver should be used; 0 for
 * non-UV systems, hubless UV systems, and the UVX/UVL modes that use a
 * different APIC driver.
 */
static int __init uv_acpi_madt_oem_check(char *_oem_id, char *_oem_table_id)
{
	int pnodeid;
	int uv_apic;

	/* Keep bounded local copies of the BIOS-supplied id strings */
	uv_stringify(sizeof(oem_id), oem_id, _oem_id);
	uv_stringify(sizeof(oem_table_id), oem_table_id, _oem_table_id);

	if (strncmp(oem_id, "SGI", 3) != 0) {
		if (strncmp(oem_id, "NSGI", 4) != 0)
			return 0;	/* Not a UV system at all */

		/* "NSGI4" => hubless mask 0x11, other "NSGI" => 0x9 */
		if (strncmp(oem_id, "NSGI4", 5) == 0)
			uv_hubless_system = 0x11;

		else
			uv_hubless_system = 0x9;

		pr_info("UV: OEM IDs %s/%s, HUBLESS(0x%x)\n",
			oem_id, oem_table_id, uv_hubless_system);

		return 0;
	}

	if (numa_off) {
		pr_err("UV: NUMA is off, disabling UV support\n");
		return 0;
	}

	/* Set up early hub info field for 'hub_info' operations below */
	uv_cpu_info->p_uv_hub_info = &uv_hub_info_node0;

	/* Map OEM id suffix to a base hub revision; 0 => unsupported */
	uv_hub_info->hub_revision =
		!strncmp(oem_id, "SGI4", 4) ? UV4_HUB_REVISION_BASE :
		!strncmp(oem_id, "SGI3", 4) ? UV3_HUB_REVISION_BASE :
		!strcmp(oem_id, "SGI2") ? UV2_HUB_REVISION_BASE :
		!strcmp(oem_id, "SGI") ? UV1_HUB_REVISION_BASE : 0;

	if (uv_hub_info->hub_revision == 0)
		goto badbios;

	/* Encode hub generation as a bit mask for is_uv_hubbed() queries */
	switch (uv_hub_info->hub_revision) {
	case UV4_HUB_REVISION_BASE:
		uv_hubbed_system = 0x11;
		break;

	case UV3_HUB_REVISION_BASE:
		uv_hubbed_system = 0x9;
		break;

	case UV2_HUB_REVISION_BASE:
		uv_hubbed_system = 0x5;
		break;

	case UV1_HUB_REVISION_BASE:
		uv_hubbed_system = 0x3;
		break;
	}

	pnodeid = early_get_pnodeid();
	early_get_apic_socketid_shift();

	x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
	x86_platform.nmi_init = uv_nmi_init;

	/* The OEM table id selects the APIC addressing mode: */
	if (!strcmp(oem_table_id, "UVX")) {
		/* Standard x2apic mode (no extra APIC handling needed) */
		uv_system_type = UV_X2APIC;
		uv_apic = 0;

	} else if (!strcmp(oem_table_id, "UVH")) {
		/* Non-unique APICIDs: fold the pnode into the APICID */
		uv_system_type = UV_NON_UNIQUE_APIC;
		x86_platform.legacy.warm_reset = 0;
		__this_cpu_write(x2apic_extra_bits, pnodeid << uvh_apicid.s.pnode_shift);
		uv_set_apicid_hibit();
		uv_apic = 1;

	} else if (!strcmp(oem_table_id, "UVL")) {
		/* Legacy APIC mode */
		uv_system_type = UV_LEGACY_APIC;
		uv_apic = 0;

	} else {
		goto badbios;
	}

	pr_info("UV: OEM IDs %s/%s, System/HUB Types %d/%d, uv_apic %d\n", oem_id, oem_table_id, uv_system_type, uv_min_hub_revision_id, uv_apic);
	uv_tsc_check_sync();

	return uv_apic;

badbios:
	pr_err("UV: OEM_ID:%s OEM_TABLE_ID:%s\n", oem_id, oem_table_id);
	pr_err("Current BIOS not supported, update kernel and/or BIOS\n");
	BUG();
}
389
/* Return the detected UV system type (UV_NONE on non-UV systems) */
enum uv_system_type get_uv_system_type(void)
{
	return uv_system_type;
}

/* Non-zero when running on any UV system */
int is_uv_system(void)
{
	return uv_system_type != UV_NONE;
}
EXPORT_SYMBOL_GPL(is_uv_system);

/* Test 'uvtype' bits against the hubbed-system architecture mask */
int is_uv_hubbed(int uvtype)
{
	return (uv_hubbed_system & uvtype);
}
EXPORT_SYMBOL_GPL(is_uv_hubbed);

/* Test 'uvtype' bits against the hubless-system architecture mask */
int is_uv_hubless(int uvtype)
{
	return (uv_hubless_system & uvtype);
}
EXPORT_SYMBOL_GPL(is_uv_hubless);
412
413void **__uv_hub_info_list;
414EXPORT_SYMBOL_GPL(__uv_hub_info_list);
415
416DEFINE_PER_CPU(struct uv_cpu_info_s, __uv_cpu_info);
417EXPORT_PER_CPU_SYMBOL_GPL(__uv_cpu_info);
418
419short uv_possible_blades;
420EXPORT_SYMBOL_GPL(uv_possible_blades);
421
422unsigned long sn_rtc_cycles_per_second;
423EXPORT_SYMBOL(sn_rtc_cycles_per_second);
424
425
426static __initdata unsigned short *_node_to_pnode;
427static __initdata unsigned short _min_socket, _max_socket;
428static __initdata unsigned short _min_pnode, _max_pnode, _gr_table_len;
429static __initdata struct uv_gam_range_entry *uv_gre_table;
430static __initdata struct uv_gam_parameters *uv_gp_table;
431static __initdata unsigned short *_socket_to_node;
432static __initdata unsigned short *_socket_to_pnode;
433static __initdata unsigned short *_pnode_to_socket;
434
435static __initdata struct uv_gam_range_s *_gr_table;
436
437#define SOCK_EMPTY ((unsigned short)~0)
438
439extern int uv_hub_info_version(void)
440{
441 return UV_HUB_INFO_VERSION;
442}
443EXPORT_SYMBOL(uv_hub_info_version);
444
445
/* Default UV memory block size: 2GB */
static unsigned long mem_block_size = (2UL << 30);

/* Kernel parameter to specify the UV memory block size */
static int parse_mem_block_size(char *ptr)
{
	unsigned long size = memparse(ptr, NULL);

	/* Size will be rounded down by set_block_size() below */
	mem_block_size = size;
	return 0;
}
early_param("uv_memblksize", parse_mem_block_size);
458
459static __init int adj_blksize(u32 lgre)
460{
461 unsigned long base = (unsigned long)lgre << UV_GAM_RANGE_SHFT;
462 unsigned long size;
463
464 for (size = mem_block_size; size > MIN_MEMORY_BLOCK_SIZE; size >>= 1)
465 if (IS_ALIGNED(base, size))
466 break;
467
468 if (size >= mem_block_size)
469 return 0;
470
471 mem_block_size = size;
472 return 1;
473}
474
/*
 * Tell the memory-hotplug code which block size to use.  ffs() gives
 * the 1-based position of the lowest set bit, so order-1 is log2 of a
 * power-of-two mem_block_size.
 *
 * NOTE(review): ffs() takes an int while mem_block_size is unsigned
 * long; fine for the <= 2GB values used here, but a > 4GB block size
 * would be truncated -- confirm intended range.
 */
static __init void set_block_size(void)
{
	unsigned int order = ffs(mem_block_size);

	if (order) {
		/* adjusted blocksize is a power of 2 */
		set_memory_block_size_order(order - 1);
		pr_info("UV: mem_block_size set to 0x%lx\n", mem_block_size);
	} else {
		/* mem_block_size was zero: fall back to 2GB (order 31) */
		pr_err("UV: mem_block_size error with 0x%lx\n", mem_block_size);
		set_memory_block_size_order(31);
	}
}
489
490
/*
 * Build the GAM (global address map) range lookup table (_gr_table)
 * from the BIOS-supplied GAM range entry table, coalescing contiguous
 * ranges per socket and trimming the allocation to the entries used.
 */
static __init void build_uv_gr_table(void)
{
	struct uv_gam_range_entry *gre = uv_gre_table;
	struct uv_gam_range_s *grt;
	unsigned long last_limit = 0, ram_limit = 0;
	int bytes, i, sid, lsid = -1, indx = 0, lindx = -1;

	if (!gre)
		return;

	bytes = _gr_table_len * sizeof(struct uv_gam_range_s);
	grt = kzalloc(bytes, GFP_KERNEL);
	BUG_ON(!grt);
	_gr_table = grt;

	for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
		if (gre->type == UV_GAM_RANGE_TYPE_HOLE) {
			if (!ram_limit) {
				/* First hole marks the RAM/non-RAM boundary */
				ram_limit = last_limit;
				last_limit = gre->limit;
				lsid++;
				continue;
			}
			last_limit = gre->limit;
			pr_info("UV: extra hole in GAM RE table @%d\n", (int)(gre - uv_gre_table));
			continue;
		}
		if (_max_socket < gre->sockid) {
			pr_err("UV: GAM table sockid(%d) too large(>%d) @%d\n", gre->sockid, _max_socket, (int)(gre - uv_gre_table));
			continue;
		}
		sid = gre->sockid - _min_socket;
		if (lsid < sid) {
			/* New range for a new socket: */
			grt = &_gr_table[indx];
			grt->base = lindx;
			grt->nasid = gre->nasid;
			grt->limit = last_limit = gre->limit;
			lsid = sid;
			lindx = indx++;
			continue;
		}
		/* Same socket, still in RAM: extend if contiguous */
		if (lsid == sid && !ram_limit) {
			if (grt->limit == last_limit) {
				grt->limit = last_limit = gre->limit;
				continue;
			}
		}
		/* Non-contiguous RAM range: */
		if (!ram_limit) {
			grt++;
			grt->base = lindx;
			grt->nasid = gre->nasid;
			grt->limit = last_limit = gre->limit;
			continue;
		}
		/* Non-RAM range: base points at this entry itself */
		grt++;

		grt->base = grt - _gr_table;
		grt->nasid = gre->nasid;
		grt->limit = last_limit = gre->limit;
		lsid++;
	}

	/* Shrink the table allocation to the entries actually used */
	grt++;
	i = grt - _gr_table;
	if (i < _gr_table_len) {
		void *ret;

		bytes = i * sizeof(struct uv_gam_range_s);
		ret = krealloc(_gr_table, bytes, GFP_KERNEL);
		if (ret) {
			_gr_table = ret;
			_gr_table_len = i;
		}
	}

	/* Display the resulting range table: */
	for (i = 0, grt = _gr_table; i < _gr_table_len; i++, grt++) {
		unsigned long start, end;
		int gb = grt->base;

		/* base < 0 means the range starts at address 0 */
		start = gb < 0 ? 0 : (unsigned long)_gr_table[gb].limit << UV_GAM_RANGE_SHFT;
		end = (unsigned long)grt->limit << UV_GAM_RANGE_SHFT;

		pr_info("UV: GAM Range %2d %04x 0x%013lx-0x%013lx (%d)\n", i, grt->nasid, start, end, gb);
	}
}
584
585static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
586{
587 unsigned long val;
588 int pnode;
589
590 pnode = uv_apicid_to_pnode(phys_apicid);
591 phys_apicid |= uv_apicid_hibits;
592
593 val = (1UL << UVH_IPI_INT_SEND_SHFT) |
594 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
595 ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
596 APIC_DM_INIT;
597
598 uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
599
600 val = (1UL << UVH_IPI_INT_SEND_SHFT) |
601 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
602 ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
603 APIC_DM_STARTUP;
604
605 uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
606
607 return 0;
608}
609
610static void uv_send_IPI_one(int cpu, int vector)
611{
612 unsigned long apicid;
613 int pnode;
614
615 apicid = per_cpu(x86_cpu_to_apicid, cpu);
616 pnode = uv_apicid_to_pnode(apicid);
617 uv_hub_send_ipi(pnode, apicid, vector);
618}
619
620static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
621{
622 unsigned int cpu;
623
624 for_each_cpu(cpu, mask)
625 uv_send_IPI_one(cpu, vector);
626}
627
628static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
629{
630 unsigned int this_cpu = smp_processor_id();
631 unsigned int cpu;
632
633 for_each_cpu(cpu, mask) {
634 if (cpu != this_cpu)
635 uv_send_IPI_one(cpu, vector);
636 }
637}
638
639static void uv_send_IPI_allbutself(int vector)
640{
641 unsigned int this_cpu = smp_processor_id();
642 unsigned int cpu;
643
644 for_each_online_cpu(cpu) {
645 if (cpu != this_cpu)
646 uv_send_IPI_one(cpu, vector);
647 }
648}
649
650static void uv_send_IPI_all(int vector)
651{
652 uv_send_IPI_mask(cpu_online_mask, vector);
653}
654
/* All APIC ids are considered valid on UV: */
static int uv_apic_id_valid(u32 apicid)
{
	return 1;
}

static int uv_apic_id_registered(void)
{
	return 1;
}

/* No logical-destination setup needed: */
static void uv_init_apic_ldr(void)
{
}

/* Destination apicid carries the UV extra high bits, if any: */
static u32 apic_uv_calc_apicid(unsigned int cpu)
{
	return apic_default_calc_apicid(cpu) | uv_apicid_hibits;
}

/* OR this CPU's x2apic_extra_bits into a raw APIC id value */
static unsigned int x2apic_get_apic_id(unsigned long x)
{
	unsigned int id;

	/* Per-CPU extra bits make this unsafe when preemptible on SMP */
	WARN_ON(preemptible() && num_online_cpus() > 1);
	id = x | __this_cpu_read(x2apic_extra_bits);

	return id;
}

static u32 set_apic_id(unsigned int id)
{
	/* NOTE(review): should the x2apic extra bits be masked out here? */
	return id;
}

static unsigned int uv_read_apic_id(void)
{
	return x2apic_get_apic_id(apic_read(APIC_ID));
}

static int uv_phys_pkg_id(int initial_apicid, int index_msb)
{
	return uv_read_apic_id() >> index_msb;
}

static void uv_send_IPI_self(int vector)
{
	apic_write(APIC_SELF_IPI, vector);
}

/* Probe succeeds only if this driver was already selected by OEM check */
static int uv_probe(void)
{
	return apic == &apic_x2apic_uv_x;
}
709
/* The UV APIC driver, selected by uv_acpi_madt_oem_check() above */
static struct apic apic_x2apic_uv_x __ro_after_init = {

	.name				= "UV large system",
	.probe				= uv_probe,
	.acpi_madt_oem_check		= uv_acpi_madt_oem_check,
	.apic_id_valid			= uv_apic_id_valid,
	.apic_id_registered		= uv_apic_id_registered,

	.irq_delivery_mode		= dest_Fixed,
	.irq_dest_mode			= 0, /* Physical */

	.disable_esr			= 0,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= NULL,

	.init_apic_ldr			= uv_init_apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.phys_pkg_id			= uv_phys_pkg_id,

	.get_apic_id			= x2apic_get_apic_id,
	.set_apic_id			= set_apic_id,

	.calc_dest_apicid		= apic_uv_calc_apicid,

	.send_IPI			= uv_send_IPI_one,
	.send_IPI_mask			= uv_send_IPI_mask,
	.send_IPI_mask_allbutself	= uv_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= uv_send_IPI_allbutself,
	.send_IPI_all			= uv_send_IPI_all,
	.send_IPI_self			= uv_send_IPI_self,

	.wakeup_secondary_cpu		= uv_wakeup_secondary,
	.inquire_remote_apic		= NULL,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.eoi_write			= native_apic_msr_eoi_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};
757
/* Record this CPU's pnode in its per-CPU x2apic extra-bits word */
static void set_x2apic_extra_bits(int pnode)
{
	__this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
}
762
/* Three ALIAS210 overlay/redirect MMR pairs exist per hub */
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_LENGTH 3
#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT

/*
 * Find the low-memory alias region (an enabled overlay with base 0) and
 * return its redirect base address and size.  Sets *base = *size = 0
 * when no such region is configured.
 */
static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
{
	union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias;
	union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
	unsigned long m_redirect;
	unsigned long m_overlay;
	int i;

	for (i = 0; i < UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_LENGTH; i++) {
		switch (i) {	/* i is 0..2, every value handled below */
		case 0:
			m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR;
			m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR;
			break;
		case 1:
			m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR;
			m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR;
			break;
		case 2:
			m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR;
			m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR;
			break;
		}
		alias.v = uv_read_local_mmr(m_overlay);
		if (alias.s.enable && alias.s.base == 0) {
			*size = (1UL << alias.s.m_alias);
			redirect.v = uv_read_local_mmr(m_redirect);
			*base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
			return;
		}
	}
	*base = *size = 0;
}
799
/* Cache attribute for an extra kernel mapping: write-back or uncached */
enum map_type {map_wb, map_uc};

/*
 * Create an extra kernel mapping for a high MMR/GRU/MMIOH region:
 * base << pshift gives the start address, (1 << bshift) * (max_pnode+1)
 * the length.  A zero base is logged and skipped.
 */
static __init void map_high(char *id, unsigned long base, int pshift, int bshift, int max_pnode, enum map_type map_type)
{
	unsigned long bytes, paddr;

	paddr = base << pshift;
	bytes = (1UL << bshift) * (max_pnode + 1);
	if (!paddr) {
		pr_info("UV: Map %s_HI base address NULL\n", id);
		return;
	}
	pr_debug("UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, paddr + bytes);
	if (map_type == map_uc)
		init_extra_mapping_uc(paddr, bytes);
	else
		init_extra_mapping_wb(paddr, bytes);
}
818
/*
 * Map GRU space in distributed mode: each node has its own GRU block at
 * (pnode << m_val) | gru_dist_base.  Also records the masks and node
 * address range that is_GRU_range() uses.
 */
static __init void map_gru_distributed(unsigned long c)
{
	union uvh_rh_gam_gru_overlay_config_mmr_u gru;
	u64 paddr;
	unsigned long bytes;
	int nid;

	gru.v = c;

	/* Only the base bits covered by this mask are relevant here */
	gru_dist_base = gru.v & 0x000007fff0000000UL;
	if (!gru_dist_base) {
		pr_info("UV: Map GRU_DIST base address NULL\n");
		return;
	}

	bytes = 1UL << UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;
	/* lmask: in-node offset bits (minus block alignment); umask: node bits */
	gru_dist_lmask = ((1UL << uv_hub_info->m_val) - 1) & ~(bytes - 1);
	gru_dist_umask = ~((1UL << uv_hub_info->m_val) - 1);
	gru_dist_base &= gru_dist_lmask;

	/* Map each online node's GRU block WB and track the span of nodes */
	for_each_online_node(nid) {
		paddr = ((u64)uv_node_to_pnode(nid) << uv_hub_info->m_val) |
				gru_dist_base;
		init_extra_mapping_wb(paddr, bytes);
		gru_first_node_paddr = min(paddr, gru_first_node_paddr);
		gru_last_node_paddr = max(paddr, gru_last_node_paddr);
	}

	/* Keep only the upper (node-selector) bits for range checks */
	gru_first_node_paddr &= gru_dist_umask;
	gru_last_node_paddr &= gru_dist_umask;

	pr_debug("UV: Map GRU_DIST base 0x%016llx 0x%016llx - 0x%016llx\n", gru_dist_base, gru_first_node_paddr, gru_last_node_paddr);
}
854
/*
 * Map the GRU overlay space if enabled, either in UV3 distributed mode
 * or as a single contiguous window; records the window bounds used by
 * is_GRU_range().
 */
static __init void map_gru_high(int max_pnode)
{
	union uvh_rh_gam_gru_overlay_config_mmr_u gru;
	int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;
	unsigned long mask = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK;
	unsigned long base;

	gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
	if (!gru.s.enable) {
		pr_info("UV: GRU disabled\n");
		return;
	}

	/* UV3 with the mode bit set uses distributed GRU space */
	if (is_uv3_hub() && gru.s3.mode) {
		map_gru_distributed(gru.v);
		return;
	}

	base = (gru.v & mask) >> shift;
	map_high("GRU", base, shift, shift, max_pnode, map_wb);
	gru_start_paddr = ((u64)base << shift);
	gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
}
879
880static __init void map_mmr_high(int max_pnode)
881{
882 union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
883 int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;
884
885 mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
886 if (mmr.s.enable)
887 map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc);
888 else
889 pr_info("UV: MMR disabled\n");
890}
891
892
/*
 * UV3/UV4 MMIOH mapping: walk one of the two MMIOH redirect tables,
 * coalesce runs of consecutive entries that point at the same (valid)
 * NASID, log each run, and map the covered region uncached.
 */
static __init void map_mmioh_high_uv34(int index, int min_pnode, int max_pnode)
{
	unsigned long overlay;
	unsigned long mmr;
	unsigned long base;
	unsigned long nasid_mask;
	unsigned long m_overlay;
	int i, n, shift, m_io, max_io;
	int nasid, lnasid, fi, li;
	char *id;

	/* Select the MMIOH0 or MMIOH1 overlay/redirect register set */
	if (index == 0) {
		id = "MMIOH0";
		m_overlay = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR;
		overlay = uv_read_local_mmr(m_overlay);
		base = overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK;
		mmr = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR;
		m_io = (overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK)
			>> UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT;
		shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT;
		n = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH;
		nasid_mask = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK;
	} else {
		id = "MMIOH1";
		m_overlay = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR;
		overlay = uv_read_local_mmr(m_overlay);
		base = overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK;
		mmr = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR;
		m_io = (overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK)
			>> UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT;
		shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT;
		n = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH;
		nasid_mask = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK;
	}
	pr_info("UV: %s overlay 0x%lx base:0x%lx m_io:%d\n", id, overlay, base, m_io);
	if (!(overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK)) {
		pr_info("UV: %s disabled\n", id);
		return;
	}

	/* Convert pnodes to NASIDs (a NASID is twice the pnode here) */
	min_pnode *= 2;
	max_pnode *= 2;
	max_io = lnasid = fi = li = -1;

	for (i = 0; i < n; i++) {
		unsigned long m_redirect = mmr + i * 8;
		unsigned long redirect = uv_read_local_mmr(m_redirect);

		nasid = redirect & nasid_mask;
		if (i == 0)
			pr_info("UV: %s redirect base 0x%lx(@0x%lx) 0x%04x\n",
				id, redirect, m_redirect, nasid);

		/* NASIDs outside the pnode range are treated as invalid */
		if (nasid < min_pnode || max_pnode < nasid)
			nasid = -1;

		if (nasid == lnasid) {
			li = i;
			/* Keep extending the run unless this is the last entry */
			if (i != n-1)
				continue;
		}

		/* Flush the completed (or final) contiguous run */
		if (lnasid != -1 || (i == n-1 && nasid != -1)) {
			unsigned long addr1, addr2;
			int f, l;

			if (lnasid == -1) {
				f = l = i;
				lnasid = nasid;
			} else {
				f = fi;
				l = li;
			}
			addr1 = (base << shift) + f * (1ULL << m_io);
			addr2 = (base << shift) + (l + 1) * (1ULL << m_io);
			pr_info("UV: %s[%03d..%03d] NASID 0x%04x ADDR 0x%016lx - 0x%016lx\n", id, fi, li, lnasid, addr1, addr2);
			if (max_io < l)
				max_io = l;
		}
		fi = li = i;
		lnasid = nasid;
	}

	pr_info("UV: %s base:0x%lx shift:%d M_IO:%d MAX_IO:%d\n", id, base, shift, m_io, max_io);

	if (max_io >= 0)
		map_high(id, base, shift, m_io, max_io, map_uc);
}
985
/*
 * Map MMIOH space per hub generation: UV3/UV4 use the dual redirect
 * tables, UV1/UV2 a single overlay config MMR.
 */
static __init void map_mmioh_high(int min_pnode, int max_pnode)
{
	union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
	unsigned long mmr, base;
	int shift, enable, m_io, n_io;

	if (is_uv3_hub() || is_uv4_hub()) {
		/* Map both MMIOH regions: */
		map_mmioh_high_uv34(0, min_pnode, max_pnode);
		map_mmioh_high_uv34(1, min_pnode, max_pnode);
		return;
	}

	if (is_uv1_hub()) {
		mmr = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
		shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
		mmioh.v = uv_read_local_mmr(mmr);
		enable = !!mmioh.s1.enable;
		base = mmioh.s1.base;
		m_io = mmioh.s1.m_io;
		n_io = mmioh.s1.n_io;
	} else if (is_uv2_hub()) {
		mmr = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
		shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
		mmioh.v = uv_read_local_mmr(mmr);
		enable = !!mmioh.s2.enable;
		base = mmioh.s2.base;
		m_io = mmioh.s2.m_io;
		n_io = mmioh.s2.n_io;
	} else {
		return;
	}

	if (enable) {
		/* n_io bits limit how many pnodes the region can cover */
		max_pnode &= (1 << n_io) - 1;
		pr_info("UV: base:0x%lx shift:%d N_IO:%d M_IO:%d max_pnode:0x%x\n", base, shift, m_io, n_io, max_pnode);
		map_high("MMIOH", base, shift, m_io, max_pnode, map_uc);
	} else {
		pr_info("UV: MMIOH disabled\n");
	}
}
1027
/* Map the global MMR32 and local MMR windows uncached */
static __init void map_low_mmrs(void)
{
	init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
	init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
}
1033
1034static __init void uv_rtc_init(void)
1035{
1036 long status;
1037 u64 ticks_per_sec;
1038
1039 status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec);
1040
1041 if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
1042 pr_warn("UV: unable to determine platform RTC clock frequency, guessing.\n");
1043
1044
1045 sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
1046 } else {
1047 sn_rtc_cycles_per_second = ticks_per_sec;
1048 }
1049}
1050
1051
1052
1053
/* Periodic SCIR heartbeat: toggle the heartbeat bit and publish activity */
static void uv_heartbeat(struct timer_list *timer)
{
	unsigned char bits = uv_scir_info->state;

	/* Flip heartbeat bit: */
	bits ^= SCIR_CPU_HEARTBEAT;

	/* Are we the idle thread? */
	if (idle_cpu(raw_smp_processor_id()))
		bits &= ~SCIR_CPU_ACTIVITY;
	else
		bits |= SCIR_CPU_ACTIVITY;

	/* Update system controller interface reg: */
	uv_set_scir_bits(bits);

	/* Enable next timer period: */
	mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
}

/*
 * Arm the per-CPU SCIR heartbeat timer.  The loop re-runs with cpu = 0
 * so the boot CPU's heartbeat also gets enabled the first time any
 * other CPU comes up.
 */
static int uv_heartbeat_enable(unsigned int cpu)
{
	while (!uv_cpu_scir_info(cpu)->enabled) {
		struct timer_list *timer = &uv_cpu_scir_info(cpu)->timer;

		uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
		timer_setup(timer, uv_heartbeat, TIMER_PINNED);
		timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
		add_timer_on(timer, cpu);
		uv_cpu_scir_info(cpu)->enabled = 1;

		/* Also ensure that boot CPU is enabled: */
		cpu = 0;
	}
	return 0;
}
1090
#ifdef CONFIG_HOTPLUG_CPU
/* Stop the heartbeat timer when a CPU goes offline */
static int uv_heartbeat_disable(unsigned int cpu)
{
	if (uv_cpu_scir_info(cpu)->enabled) {
		uv_cpu_scir_info(cpu)->enabled = 0;
		del_timer(&uv_cpu_scir_info(cpu)->timer);
	}
	/* Park the SCIR state at all-ones while the CPU is offline */
	uv_set_cpu_scir_bits(cpu, 0xff);
	return 0;
}

/* Hook heartbeat enable/disable into the CPU hotplug state machine */
static __init void uv_scir_register_cpu_notifier(void)
{
	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/x2apic-uvx:online",
				  uv_heartbeat_enable, uv_heartbeat_disable);
}

#else /* !CONFIG_HOTPLUG_CPU */

static __init void uv_scir_register_cpu_notifier(void)
{
}

/* Without hotplug: arm heartbeats once for all online CPUs at late init */
static __init int uv_init_heartbeat(void)
{
	int cpu;

	if (is_uv_system()) {
		for_each_online_cpu(cpu)
			uv_heartbeat_enable(cpu);
	}

	return 0;
}

late_initcall(uv_init_heartbeat);

#endif /* !CONFIG_HOTPLUG_CPU */
1129
1130
1131int uv_set_vga_state(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags)
1132{
1133 int domain, bus, rc;
1134
1135 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
1136 return 0;
1137
1138 if ((command_bits & PCI_COMMAND_IO) == 0)
1139 return 0;
1140
1141 domain = pci_domain_nr(pdev->bus);
1142 bus = pdev->bus->number;
1143
1144 rc = uv_bios_set_legacy_vga_target(decode, domain, bus);
1145
1146 return rc;
1147}
1148
1149
1150
1151
1152
/*
 * Per-CPU init: bump this hub's online-CPU count and, in non-unique
 * APIC mode, install the pnode into the CPU's x2apic extra bits.
 */
void uv_cpu_init(void)
{
	/* CPU 0 initialization is handled elsewhere during system init */
	if (smp_processor_id() == 0)
		return;

	uv_hub_info->nr_online_cpus++;

	if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
		set_x2apic_extra_bits(uv_hub_info->pnode);
}
1164
/* M/N geometry of the global address space for this hub generation */
struct mn {
	unsigned char	m_val;		/* number of node-local address bits */
	unsigned char	n_val;		/* number of node-id bits */
	unsigned char	m_shift;	/* 64 - m_val (0 when m_val is 0) */
	unsigned char	n_lshift;	/* shift for building global addresses */
};

/* Read the M/N configuration MMRs, per hub generation */
static void get_mn(struct mn *mnp)
{
	union uvh_rh_gam_config_mmr_u m_n_config;
	union uv3h_gr0_gam_gr_config_u m_gr_config;

	/* Make sure the whole structure is well initialized: */
	memset(mnp, 0, sizeof(*mnp));

	m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);
	mnp->n_val = m_n_config.s.n_skt;

	if (is_uv4_hub()) {
		/* UV4: no m_val/n_lshift in these MMRs */
		mnp->m_val = 0;
		mnp->n_lshift = 0;
	} else if (is_uv3_hub()) {
		mnp->m_val = m_n_config.s3.m_skt;
		m_gr_config.v = uv_read_local_mmr(UV3H_GR0_GAM_GR_CONFIG);
		mnp->n_lshift = m_gr_config.s3.m_skt;
	} else if (is_uv2_hub()) {
		mnp->m_val = m_n_config.s2.m_skt;
		/* UV2 uses a fixed 39/40 n_lshift depending on m_val */
		mnp->n_lshift = mnp->m_val == 40 ? 40 : 39;
	} else if (is_uv1_hub()) {
		mnp->m_val = m_n_config.s1.m_skt;
		mnp->n_lshift = mnp->m_val;
	}
	mnp->m_shift = mnp->m_val ? 64 - mnp->m_val : 0;
}
1199
/*
 * Populate a hub info structure from the M/N geometry, the previously
 * decoded GAM tables (when present) and the local node id MMR.
 */
void __init uv_init_hub_info(struct uv_hub_info_s *hi)
{
	union uvh_node_id_u node_id;
	struct mn mn;

	get_mn(&mn);
	/* Without an m_val, fall back to the CPUID-derived gpa width */
	hi->gpa_mask = mn.m_val ?
		(1UL << (mn.m_val + mn.n_val)) - 1 :
		(1UL << uv_cpuid.gpa_shift) - 1;

	hi->m_val = mn.m_val;
	hi->n_val = mn.n_val;
	hi->m_shift = mn.m_shift;
	hi->n_lshift = mn.n_lshift ? mn.n_lshift : 0;
	hi->hub_revision = uv_hub_info->hub_revision;
	hi->pnode_mask = uv_cpuid.pnode_mask;
	hi->min_pnode = _min_pnode;
	hi->min_socket = _min_socket;
	hi->pnode_to_socket = _pnode_to_socket;
	hi->socket_to_node = _socket_to_node;
	hi->socket_to_pnode = _socket_to_pnode;
	hi->gr_table_len = _gr_table_len;
	hi->gr_table = _gr_table;

	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
	uv_cpuid.gnode_shift = max_t(unsigned int, uv_cpuid.gnode_shift, mn.n_val);
	hi->gnode_extra = (node_id.s.node_id & ~((1 << uv_cpuid.gnode_shift) - 1)) >> 1;
	if (mn.m_val)
		hi->gnode_upper = (u64)hi->gnode_extra << mn.m_val;

	if (uv_gp_table) {
		/* Prefer the BIOS GAM parameter table when available */
		hi->global_mmr_base = uv_gp_table->mmr_base;
		hi->global_mmr_shift = uv_gp_table->mmr_shift;
		hi->global_gru_base = uv_gp_table->gru_base;
		hi->global_gru_shift = uv_gp_table->gru_shift;
		hi->gpa_shift = uv_gp_table->gpa_shift;
		hi->gpa_mask = (1UL << hi->gpa_shift) - 1;
	} else {
		hi->global_mmr_base = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) & ~UV_MMR_ENABLE;
		hi->global_mmr_shift = _UV_GLOBAL_MMR64_PNODE_SHIFT;
	}

	get_lowmem_redirect(&hi->lowmem_remap_base, &hi->lowmem_remap_top);

	hi->apic_pnode_shift = uv_cpuid.socketid_shift;

	/* Show system specific info: */
	pr_info("UV: N:%d M:%d m_shift:%d n_lshift:%d\n", hi->n_val, hi->m_val, hi->m_shift, hi->n_lshift);
	pr_info("UV: gpa_mask/shift:0x%lx/%d pnode_mask:0x%x apic_pns:%d\n", hi->gpa_mask, hi->gpa_shift, hi->pnode_mask, hi->apic_pnode_shift);
	pr_info("UV: mmr_base/shift:0x%lx/%ld gru_base/shift:0x%lx/%ld\n", hi->global_mmr_base, hi->global_mmr_shift, hi->global_gru_base, hi->global_gru_shift);
	pr_info("UV: gnode_upper:0x%lx gnode_extra:0x%x\n", hi->gnode_upper, hi->gnode_extra);
}
1252
1253static void __init decode_gam_params(unsigned long ptr)
1254{
1255 uv_gp_table = (struct uv_gam_parameters *)ptr;
1256
1257 pr_info("UV: GAM Params...\n");
1258 pr_info("UV: mmr_base/shift:0x%llx/%d gru_base/shift:0x%llx/%d gpa_shift:%d\n",
1259 uv_gp_table->mmr_base, uv_gp_table->mmr_shift,
1260 uv_gp_table->gru_base, uv_gp_table->gru_shift,
1261 uv_gp_table->gpa_shift);
1262}
1263
1264static void __init decode_gam_rng_tbl(unsigned long ptr)
1265{
1266 struct uv_gam_range_entry *gre = (struct uv_gam_range_entry *)ptr;
1267 unsigned long lgre = 0;
1268 int index = 0;
1269 int sock_min = 999999, pnode_min = 99999;
1270 int sock_max = -1, pnode_max = -1;
1271
1272 uv_gre_table = gre;
1273 for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
1274 unsigned long size = ((unsigned long)(gre->limit - lgre)
1275 << UV_GAM_RANGE_SHFT);
1276 int order = 0;
1277 char suffix[] = " KMGTPE";
1278 int flag = ' ';
1279
1280 while (size > 9999 && order < sizeof(suffix)) {
1281 size /= 1024;
1282 order++;
1283 }
1284
1285
1286 if (gre->type == 1 || gre->type == 2)
1287 if (adj_blksize(lgre))
1288 flag = '*';
1289
1290 if (!index) {
1291 pr_info("UV: GAM Range Table...\n");
1292 pr_info("UV: # %20s %14s %6s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
1293 }
1294 pr_info("UV: %2d: 0x%014lx-0x%014lx%c %5lu%c %3d %04x %02x %02x\n",
1295 index++,
1296 (unsigned long)lgre << UV_GAM_RANGE_SHFT,
1297 (unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
1298 flag, size, suffix[order],
1299 gre->type, gre->nasid, gre->sockid, gre->pnode);
1300
1301
1302 lgre = gre->limit;
1303 if (sock_min > gre->sockid)
1304 sock_min = gre->sockid;
1305 if (sock_max < gre->sockid)
1306 sock_max = gre->sockid;
1307 if (pnode_min > gre->pnode)
1308 pnode_min = gre->pnode;
1309 if (pnode_max < gre->pnode)
1310 pnode_max = gre->pnode;
1311 }
1312 _min_socket = sock_min;
1313 _max_socket = sock_max;
1314 _min_pnode = pnode_min;
1315 _max_pnode = pnode_max;
1316 _gr_table_len = index;
1317
1318 pr_info("UV: GRT: %d entries, sockets(min:%x,max:%x) pnodes(min:%x,max:%x)\n", index, _min_socket, _max_socket, _min_pnode, _max_pnode);
1319}
1320
/*
 * Parse the BIOS-provided UVsystab and dispatch each entry to its decoder.
 *
 * Returns 0 on success (including the "no extended table required" case
 * for pre-UV4 systems), or -EINVAL if the table is missing or older than
 * UV_SYSTAB_VERSION_UV4_LATEST, in which case UV support is disabled.
 */
static int __init decode_uv_systab(void)
{
	struct uv_systab *st;
	int i;

	/* If system is uv3 or lower, there is no extended UVsystab */
	if (is_uv_hubbed(0xfffffe) < uv(4) && is_uv_hubless(0xfffffe) < uv(4))
		return 0;	/* No extended UVsystab required */

	st = uv_systab;
	if ((!st) || (st->revision < UV_SYSTAB_VERSION_UV4_LATEST)) {
		int rev = st ? st->revision : 0;

		pr_err("UV: BIOS UVsystab version(%x) mismatch, expecting(%x)\n", rev, UV_SYSTAB_VERSION_UV4_LATEST);
		pr_err("UV: Cannot support UV operations, switching to generic PC\n");
		uv_system_type = UV_NONE;

		return -EINVAL;
	}

	/* Entry offsets are relative to the start of the table itself. */
	for (i = 0; st->entry[i].type != UV_SYSTAB_TYPE_UNUSED; i++) {
		unsigned long ptr = st->entry[i].offset;

		if (!ptr)
			continue;

		ptr = ptr + (unsigned long)st;

		switch (st->entry[i].type) {
		case UV_SYSTAB_TYPE_GAM_PARAMS:
			decode_gam_params(ptr);
			break;

		case UV_SYSTAB_TYPE_GAM_RNG_TBL:
			decode_gam_rng_tbl(ptr);
			break;
		}
	}
	return 0;
}
1361
1362
1363
1364
1365
1366
1367static __init void boot_init_possible_blades(struct uv_hub_info_s *hub_info)
1368{
1369 int i, uv_pb = 0;
1370
1371 pr_info("UV: NODE_PRESENT_DEPTH = %d\n", UVH_NODE_PRESENT_TABLE_DEPTH);
1372 for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
1373 unsigned long np;
1374
1375 np = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
1376 if (np)
1377 pr_info("UV: NODE_PRESENT(%d) = 0x%016lx\n", i, np);
1378
1379 uv_pb += hweight64(np);
1380 }
1381 if (uv_possible_blades != uv_pb)
1382 uv_possible_blades = uv_pb;
1383}
1384
/*
 * Build the socket<->pnode and node->pnode conversion tables from the
 * BIOS GAM range table decoded earlier, then drop any table that turns
 * out to be a 1:1 identity map (faster lookups without it).
 */
static void __init build_socket_tables(void)
{
	struct uv_gam_range_entry *gre = uv_gre_table;
	int num, nump;
	int cpu, i, lnid;
	int minsock = _min_socket;
	int maxsock = _max_socket;
	int minpnode = _min_pnode;
	int maxpnode = _max_pnode;
	size_t bytes;

	if (!gre) {
		/* Pre-UV4 hubs have fixed translations; the table is optional. */
		if (is_uv1_hub() || is_uv2_hub() || is_uv3_hub()) {
			pr_info("UV: No UVsystab socket table, ignoring\n");
			return;
		}
		/* UV4+ cannot operate without BIOS address translations. */
		pr_crit("UV: Error: UVsystab address translations not available!\n");
		BUG();
	}

	/* Allocate the conversion tables and mark every slot empty. */
	num = maxsock - minsock + 1;
	bytes = num * sizeof(_socket_to_node[0]);
	_socket_to_node = kmalloc(bytes, GFP_KERNEL);
	_socket_to_pnode = kmalloc(bytes, GFP_KERNEL);

	nump = maxpnode - minpnode + 1;
	bytes = nump * sizeof(_pnode_to_socket[0]);
	_pnode_to_socket = kmalloc(bytes, GFP_KERNEL);
	BUG_ON(!_socket_to_node || !_socket_to_pnode || !_pnode_to_socket);

	for (i = 0; i < num; i++)
		_socket_to_node[i] = _socket_to_pnode[i] = SOCK_EMPTY;

	for (i = 0; i < nump; i++)
		_pnode_to_socket[i] = SOCK_EMPTY;

	/* Fill in pnode/node/addr conversion list values from the GAM table. */
	pr_info("UV: GAM Building socket/pnode conversion tables\n");
	for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
		if (gre->type == UV_GAM_RANGE_TYPE_HOLE)
			continue;
		i = gre->sockid - minsock;
		/* Duplicate entry for this socket: only the first one counts. */
		if (_socket_to_pnode[i] != SOCK_EMPTY)
			continue;
		_socket_to_pnode[i] = gre->pnode;

		i = gre->pnode - minpnode;
		_pnode_to_socket[i] = gre->sockid;

		pr_info("UV: sid:%02x type:%d nasid:%04x pn:%02x pn2s:%2x\n",
			gre->sockid, gre->type, gre->nasid,
			_socket_to_pnode[gre->sockid - minsock],
			_pnode_to_socket[gre->pnode - minpnode]);
	}

	/* Set socket -> node values (one entry per NUMA node, via its CPUs). */
	lnid = -1;
	for_each_present_cpu(cpu) {
		int nid = cpu_to_node(cpu);
		int apicid, sockid;

		/* Skip CPUs on a node already processed. */
		if (lnid == nid)
			continue;
		lnid = nid;
		apicid = per_cpu(x86_cpu_to_apicid, cpu);
		sockid = apicid >> uv_cpuid.socketid_shift;
		_socket_to_node[sockid - minsock] = nid;
		pr_info("UV: sid:%02x: apicid:%04x node:%2d\n",
			sockid, apicid, nid);
	}

	/* Set up physical blade (node) to pnode translation. */
	bytes = num_possible_nodes() * sizeof(_node_to_pnode[0]);
	_node_to_pnode = kmalloc(bytes, GFP_KERNEL);
	BUG_ON(!_node_to_pnode);

	for (lnid = 0; lnid < num_possible_nodes(); lnid++) {
		unsigned short sockid;

		for (sockid = minsock; sockid <= maxsock; sockid++) {
			if (lnid == _socket_to_node[sockid - minsock]) {
				_node_to_pnode[lnid] = _socket_to_pnode[sockid - minsock];
				break;
			}
		}
		/* Every node must map to some socket; otherwise BIOS data is bad. */
		if (sockid > maxsock) {
			pr_err("UV: socket for node %d not found!\n", lnid);
			BUG();
		}
	}

	/*
	 * If socket id == node (or == pnode) for all entries, the table is
	 * an identity map: free it and use direct translation instead.
	 */
	pr_info("UV: Checking socket->node/pnode for identity maps\n");
	if (minsock == 0) {
		for (i = 0; i < num; i++)
			if (_socket_to_node[i] == SOCK_EMPTY || i != _socket_to_node[i])
				break;
		if (i >= num) {
			kfree(_socket_to_node);
			_socket_to_node = NULL;
			pr_info("UV: 1:1 socket_to_node table removed\n");
		}
	}
	if (minsock == minpnode) {
		for (i = 0; i < num; i++)
			if (_socket_to_pnode[i] != SOCK_EMPTY &&
			    _socket_to_pnode[i] != i + minpnode)
				break;
		if (i >= num) {
			kfree(_socket_to_pnode);
			_socket_to_pnode = NULL;
			pr_info("UV: 1:1 socket_to_pnode table removed\n");
		}
	}
}
1505
1506
1507static void check_efi_reboot(void)
1508{
1509
1510 if (!efi_enabled(EFI_BOOT))
1511 reboot_type = BOOT_ACPI;
1512}
1513
1514
1515static int proc_hubbed_show(struct seq_file *file, void *data)
1516{
1517 seq_printf(file, "0x%x\n", uv_hubbed_system);
1518 return 0;
1519}
1520
1521static int proc_hubless_show(struct seq_file *file, void *data)
1522{
1523 seq_printf(file, "0x%x\n", uv_hubless_system);
1524 return 0;
1525}
1526
1527static int proc_oemid_show(struct seq_file *file, void *data)
1528{
1529 seq_printf(file, "%s/%s\n", oem_id, oem_table_id);
1530 return 0;
1531}
1532
1533static int proc_hubbed_open(struct inode *inode, struct file *file)
1534{
1535 return single_open(file, proc_hubbed_show, (void *)NULL);
1536}
1537
1538static int proc_hubless_open(struct inode *inode, struct file *file)
1539{
1540 return single_open(file, proc_hubless_show, (void *)NULL);
1541}
1542
1543static int proc_oemid_open(struct inode *inode, struct file *file)
1544{
1545 return single_open(file, proc_oemid_show, (void *)NULL);
1546}
1547
1548
/*
 * fops for the "hubbed"/"hubless" proc file.  Deliberately NOT const:
 * uv_setup_proc_files() fills in ->open at runtime depending on whether
 * the system is hubbed or hubless.
 */
static struct file_operations proc_version_fops = {
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1554
/* fops for the "oemid" proc file (fully static, hence const). */
static const struct file_operations proc_oemid_fops = {
	.open = proc_oemid_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1561
1562static __init void uv_setup_proc_files(int hubless)
1563{
1564 struct proc_dir_entry *pde;
1565 char *name = hubless ? "hubless" : "hubbed";
1566
1567 pde = proc_mkdir(UV_PROC_NODE, NULL);
1568 proc_create("oemid", 0, pde, &proc_oemid_fops);
1569 proc_create(name, 0, pde, &proc_version_fops);
1570 if (hubless)
1571 proc_version_fops.open = proc_hubless_open;
1572 else
1573 proc_version_fops.open = proc_hubbed_open;
1574}
1575
1576
1577static __init int uv_system_init_hubless(void)
1578{
1579 int rc;
1580
1581
1582 uv_nmi_setup_hubless();
1583
1584
1585 rc = uv_bios_init();
1586 if (rc < 0)
1587 return rc;
1588
1589
1590 rc = decode_uv_systab();
1591 if (rc < 0)
1592 return rc;
1593
1594
1595 if (rc >= 0)
1596 uv_setup_proc_files(1);
1597
1598 check_efi_reboot();
1599
1600 return rc;
1601}
1602
/*
 * Bring up a hubbed UV system: identify the hub generation, decode the
 * BIOS tables, build the per-node hub info list, wire up per-CPU hub
 * pointers, map the high MMR/GRU/MMIOH spaces and register platform hooks.
 * The ordering of these steps is significant (tables must exist before
 * uv_init_hub_info(), pnodes before the high mappings).
 */
static void __init uv_system_init_hub(void)
{
	struct uv_hub_info_s hub_info = {0};
	int bytes, cpu, nodeid;
	unsigned short min_pnode = 9999, max_pnode = 0;
	char *hub = is_uv4_hub() ? "UV400" :
		    is_uv3_hub() ? "UV300" :
		    is_uv2_hub() ? "UV2000/3000" :
		    is_uv1_hub() ? "UV100/1000" : NULL;

	if (!hub) {
		pr_err("UV: Unknown/unsupported UV hub\n");
		return;
	}
	pr_info("UV: Found %s hub\n", hub);

	map_low_mmrs();

	/* Get uv_systab for decoding: */
	uv_bios_init();

	/* If there's an UVsystab problem then abort UV init: */
	if (decode_uv_systab() < 0)
		return;

	build_socket_tables();
	build_uv_gr_table();
	set_block_size();
	uv_init_hub_info(&hub_info);
	uv_possible_blades = num_possible_nodes();
	if (!_node_to_pnode)
		boot_init_possible_blades(&hub_info);

	/* uv_num_possible_blades() is really the hub count: */
	pr_info("UV: Found %d hubs, %d nodes, %d CPUs\n", uv_num_possible_blades(), num_possible_nodes(), num_possible_cpus());

	uv_bios_get_sn_info(0, &uv_type, &sn_partition_id, &sn_coherency_id, &sn_region_size, &system_serial_number);
	hub_info.coherency_domain_number = sn_coherency_id;
	uv_rtc_init();

	bytes = sizeof(void *) * uv_num_possible_blades();
	__uv_hub_info_list = kzalloc(bytes, GFP_KERNEL);
	BUG_ON(!__uv_hub_info_list);

	bytes = sizeof(struct uv_hub_info_s);
	for_each_node(nodeid) {
		struct uv_hub_info_s *new_hub;

		if (__uv_hub_info_list[nodeid]) {
			pr_err("UV: Node %d UV HUB already initialized!?\n", nodeid);
			BUG();
		}

		/* Allocate new per-hub info list (node 0 uses the static one). */
		new_hub = (nodeid == 0) ? &uv_hub_info_node0 : kzalloc_node(bytes, GFP_KERNEL, nodeid);
		BUG_ON(!new_hub);
		__uv_hub_info_list[nodeid] = new_hub;
		new_hub = uv_hub_info_list(nodeid);
		BUG_ON(!new_hub);
		*new_hub = hub_info;

		/* Use information from GAM table if available: */
		if (_node_to_pnode)
			new_hub->pnode = _node_to_pnode[nodeid];
		else	/* Or fill in later when all CPUs are known: */
			new_hub->pnode = 0xffff;

		new_hub->numa_blade_id = uv_node_to_blade_id(nodeid);
		new_hub->memory_nid = -1;
		new_hub->nr_possible_cpus = 0;
		new_hub->nr_online_cpus = 0;
	}

	/* Initialize per-CPU info: */
	for_each_possible_cpu(cpu) {
		int apicid = per_cpu(x86_cpu_to_apicid, cpu);
		int numa_node_id;
		unsigned short pnode;

		nodeid = cpu_to_node(cpu);
		numa_node_id = numa_cpu_node(cpu);
		pnode = uv_apicid_to_pnode(apicid);

		uv_cpu_info_per(cpu)->p_uv_hub_info = uv_hub_info_list(nodeid);
		uv_cpu_info_per(cpu)->blade_cpu_id = uv_cpu_hub_info(cpu)->nr_possible_cpus++;
		if (uv_cpu_hub_info(cpu)->memory_nid == -1)
			uv_cpu_hub_info(cpu)->memory_nid = cpu_to_node(cpu);

		/* Init memoryless node (fill its pnode via a CPU on another node): */
		if (nodeid != numa_node_id &&
		    uv_hub_info_list(numa_node_id)->pnode == 0xffff)
			uv_hub_info_list(numa_node_id)->pnode = pnode;
		else if (uv_cpu_hub_info(cpu)->pnode == 0xffff)
			uv_cpu_hub_info(cpu)->pnode = pnode;

		uv_cpu_scir_info(cpu)->offset = uv_scir_offset(apicid);
	}

	for_each_node(nodeid) {
		unsigned short pnode = uv_hub_info_list(nodeid)->pnode;

		/* Add pnode info for pre-GAM list nodes without CPUs: */
		if (pnode == 0xffff) {
			unsigned long paddr;

			paddr = node_start_pfn(nodeid) << PAGE_SHIFT;
			pnode = uv_gpa_to_pnode(uv_soc_phys_ram_to_gpa(paddr));
			uv_hub_info_list(nodeid)->pnode = pnode;
		}
		min_pnode = min(pnode, min_pnode);
		max_pnode = max(pnode, max_pnode);
		pr_info("UV: UVHUB node:%2d pn:%02x nrcpus:%d\n",
			nodeid,
			uv_hub_info_list(nodeid)->pnode,
			uv_hub_info_list(nodeid)->nr_possible_cpus);
	}

	pr_info("UV: min_pnode:%02x max_pnode:%02x\n", min_pnode, max_pnode);
	map_gru_high(max_pnode);
	map_mmr_high(max_pnode);
	map_mmioh_high(min_pnode, max_pnode);

	uv_nmi_setup();
	uv_cpu_init();
	uv_scir_register_cpu_notifier();
	uv_setup_proc_files(0);

	/* Register Legacy VGA I/O redirection handler: */
	pci_register_set_vga_state(uv_set_vga_state);

	check_efi_reboot();
}
1735
1736
1737
1738
1739
1740void __init uv_system_init(void)
1741{
1742 if (likely(!is_uv_system() && !is_uv_hubless(1)))
1743 return;
1744
1745 if (is_uv_system())
1746 uv_system_init_hub();
1747 else
1748 uv_system_init_hubless();
1749}
1750
1751apic_driver(apic_x2apic_uv_x);
1752