// SPDX-License-Identifier: GPL-2.0
/*
 *  SMP related functions
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Denis Joseph Barrow,
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
#include <asm/topology.h>
#include "entry.h"

enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

static DEFINE_PER_CPU(struct cpu *, cpu_device);

struct pcpu {
	struct lowcore *lowcore;	/* lowcore page(s) for the cpu */
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	unsigned long ec_clk;		/* sigp timestamp for ec_xxx */
	signed char state;		/* physical cpu state */
	signed char polarization;	/* physical polarization */
	u16 address;			/* physical cpu address */
};

static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);

#ifdef CONFIG_CRASH_DUMP
__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif

static unsigned int smp_max_threads __initdata = -1U;

static int __init early_nosmt(char *s)
{
	smp_max_threads = 1;
	return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
	get_option(&s, &smp_max_threads);
	return 0;
}
early_param("smt", early_smt);

/*
 * The smp_cpu_state_mutex must be held when changing the state or
 * polarization member of a pcpu data structure within the pcpu_devices
 * array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}

static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}

static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 uninitialized_var(status);

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}

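/*
 * Signal an "event call" to a cpu: record ec_bit in the target's
 * ec_mask and, if it was not already pending, kick the cpu with an
 * external call (running cpu) or an emergency signal (waiting cpu).
 */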
static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
		return;
	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu->ec_clk = get_tod_clock_fast();
	pcpu_sigp_retry(pcpu, order, 0);
}

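/*
 * Allocate lowcore, nodat stack and async stack for a cpu and install
 * the prefix register. The boot cpu reuses its early lowcore and nodat
 * stack and only gets a fresh async stack.
 */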
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, nodat_stack;
	struct lowcore *lc;

	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore = (struct lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
		nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
		if (!pcpu->lowcore || !nodat_stack)
			goto out;
	} else {
		nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
	}
	async_stack = stack_alloc();
	if (!async_stack)
		goto out;
	lc = pcpu->lowcore;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = async_stack + STACK_INIT_OFFSET;
	lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
	if (nmi_alloc_per_cpu(lc))
		goto out_async;
	if (vdso_alloc_per_cpu(lc))
		goto out_mcesa;
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
	return 0;

out_mcesa:
	nmi_free_per_cpu(lc);
out_async:
	stack_free(async_stack);
out:
	if (pcpu != &pcpu_devices[0]) {
		free_pages(nodat_stack, THREAD_SIZE_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
	return -ENOMEM;
}

#ifdef CONFIG_HOTPLUG_CPU

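/* Undo pcpu_alloc_lowcore(): reset the prefix and free stacks and lowcore. */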
static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	unsigned long async_stack, nodat_stack, lowcore;

	nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
	async_stack = pcpu->lowcore->async_stack - STACK_INIT_OFFSET;
	lowcore = (unsigned long) pcpu->lowcore;

	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
	vdso_free_per_cpu(pcpu->lowcore);
	nmi_free_per_cpu(pcpu->lowcore);
	stack_free(async_stack);
	if (pcpu == &pcpu_devices[0])
		return;
	free_pages(nodat_stack, THREAD_SIZE_ORDER);
	free_pages(lowcore, LC_ORDER);
}

#endif /* CONFIG_HOTPLUG_CPU */

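/*
 * Initialize the lowcore of a secondary cpu from the boot cpu: per-cpu
 * offset, kernel ASCE, machine flags, control and access registers and
 * the facility lists.
 */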
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct lowcore *lc = pcpu->lowcore;

	cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(lc->stfle_fac_list));
	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
	       sizeof(lc->alt_stfle_fac_list));
	arch_spin_lock_setup(cpu);
}

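/*
 * Attach a task (the idle task during cpu bringup) to a cpu: set up the
 * kernel stack pointer, current task and cputime accounting fields in
 * the target's lowcore.
 */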
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct lowcore *lc = pcpu->lowcore;

	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long) tsk;
	lc->lpp = LPP_MAGIC;
	lc->current_pid = tsk->pid;
	lc->user_timer = tsk->thread.user_timer;
	lc->guest_timer = tsk->thread.guest_timer;
	lc->system_timer = tsk->thread.system_timer;
	lc->hardirq_timer = tsk->thread.hardirq_timer;
	lc->softirq_timer = tsk->thread.softirq_timer;
	lc->steal_timer = 0;
}

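/* Start func on a stopped cpu via the sigp restart order. */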
static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct lowcore *lc = pcpu->lowcore;

	lc->restart_stack = lc->nodat_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void __pcpu_delegate(void (*func)(void*), void *data)
{
	func(data);
}

static void __no_sanitize_address pcpu_delegate(struct pcpu *pcpu,
						void (*func)(void *),
						void *data, unsigned long stack)
{
	struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
	unsigned long source_cpu = stap();

	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	if (pcpu->address == source_cpu)
		CALL_ON_STACK(__pcpu_delegate, stack, 2, func, data);
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	mem_assign_absolute(lc->restart_stack, stack);
	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
	mem_assign_absolute(lc->restart_data, (unsigned long) data);
	mem_assign_absolute(lc->restart_source, source_cpu);
	__bpon();
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
		: "0", "1", "cc");
	for (;;) ;
}

/*
 * Enable additional logical cpus for multi-threading.
 */
static int pcpu_set_smt(unsigned int mtid)
{
	int cc;

	if (smp_cpu_mtid == mtid)
		return 0;
	cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
	if (cc == 0) {
		smp_cpu_mtid = mtid;
		smp_cpu_mt_shift = 0;
		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
			smp_cpu_mt_shift++;
		pcpu_devices[0].address = stap();
	}
	return cc;
}

/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	struct lowcore *lc = pcpu_devices->lowcore;

	if (pcpu_devices[0].address == stap())
		lc = &S390_lowcore;

	pcpu_delegate(&pcpu_devices[0], func, data,
		      lc->nodat_stack);
}

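/* Translate a physical cpu address into a logical cpu number, or -1. */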
int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}

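/*
 * A cpu counts as preempted if it is neither in enabled wait nor, per
 * the sigp sense running order, currently backed by a physical cpu.
 */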
bool arch_vcpu_is_preempted(int cpu)
{
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return false;
	if (pcpu_running(pcpu_devices + cpu))
		return false;
	return true;
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);

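/*
 * Yield the cpu: prefer a directed yield to the hypervisor in favor of
 * the target cpu (diag 0x9c), fall back to an undirected yield of the
 * remaining time slice (diag 0x44).
 */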
void smp_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C) {
		diag_stat_inc_norecursion(DIAG_STAT_X09C);
		asm volatile("diag %0,0,0x9c"
			     : : "d" (pcpu_devices[cpu].address));
	} else if (MACHINE_HAS_DIAG44) {
		diag_stat_inc_norecursion(DIAG_STAT_X044);
		asm volatile("diag 0,0,0x44");
	}
}

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
void notrace smp_emergency_stop(void)
{
	cpumask_t cpumask;
	u64 end;
	int cpu;

	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, &cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, &cpumask);
		if (cpumask_empty(&cpumask))
			break;
		cpu_relax();
	}
}
NOKPROBE_SYMBOL(smp_emergency_stop);

/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();

	if (oops_in_progress)
		smp_emergency_stop();

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		pcpu_sigp_retry(pcpu_devices + cpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu_devices + cpu))
			cpu_relax();
	}
}

/*
 * This is the main routine where commands issued by other
 * cpus can be handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);

#ifdef CONFIG_CRASH_DUMP

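/*
 * Store the register state of a remote cpu into the save areas of its
 * lowcore, including the additional status (vector or guarded storage
 * facility) if the machine provides it.
 */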
int smp_store_status(int cpu)
{
	struct pcpu *pcpu = pcpu_devices + cpu;
	unsigned long pa;

	pa = __pa(&pcpu->lowcore->floating_pt_save_area);
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
		return 0;
	pa = __pa(pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK);
	if (MACHINE_HAS_GS)
		pa |= pcpu->lowcore->mcesad & MCESA_LC_MASK;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	return 0;
}

/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp dump
 *    condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump and the old kernel did not store the CPU state,
 *    or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 * 4) kdump and the old kernel stored the CPU state
 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 *    This case does not exist for s390 anymore, setup_arch explicitly
 *    deactivates the elfcorehdr= kernel parameter
 */
static __init void smp_save_cpu_vxrs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	__vector128 *vxrs = (__vector128 *) page;

	if (is_boot_cpu)
		vxrs = boot_cpu_vector_save_area;
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, page);
	save_area_add_vxrs(sa, vxrs);
}

static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	void *regs = (void *) page;

	if (is_boot_cpu)
		copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page);
	save_area_add_regs(sa, regs);
}

void __init smp_save_dump_cpus(void)
{
	int addr, boot_cpu_addr, max_cpu_addr;
	struct save_area *sa;
	unsigned long page;
	bool is_boot_cpu;

	if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
		/* No previous system present, normal boot. */
		return;
	/* Allocate a page as dumping area for the store status sigps */
	page = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 1UL << 31);
	/* Set multi-threading state to the previous system. */
	pcpu_set_smt(sclp.mtid_prev);
	boot_cpu_addr = stap();
	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
	for (addr = 0; addr <= max_cpu_addr; addr++) {
		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
		    SIGP_CC_NOT_OPERATIONAL)
			continue;
		is_boot_cpu = (addr == boot_cpu_addr);
		/* Allocate save area */
		sa = save_area_alloc(is_boot_cpu);
		if (!sa)
			panic("could not allocate memory for save area\n");
		if (MACHINE_HAS_VX)
			/* Get the vector registers */
			smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
		/*
		 * For a zfcp dump OLDMEM_BASE == NULL and the registers
		 * of the boot CPU are stored in the HSA. To retrieve
		 * these registers an SCLP request is required which is
		 * done by drivers/s390/char/zcore.c:init_cpu_info()
		 */
		if (!is_boot_cpu || OLDMEM_BASE)
			/* Get the CPU registers */
			smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
	}
	memblock_free(page, PAGE_SIZE);
	diag308_reset();
	pcpu_set_smt(0);
}
#endif /* CONFIG_CRASH_DUMP */

void smp_cpu_set_polarization(int cpu, int val)
{
	pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return pcpu_devices[cpu].polarization;
}

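/*
 * Retrieve the list of cpu cores from the SCLP. If that fails, fall
 * back to probing each possible cpu address with the sigp sense order.
 */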
static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
	static int use_sigp_detection;
	int address;

	if (use_sigp_detection || sclp_get_core_info(info, early)) {
		use_sigp_detection = 1;
		for (address = 0;
		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
		     address += (1U << smp_cpu_mt_shift)) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->core[info->configured].core_id =
				address >> smp_cpu_mt_shift;
			info->configured++;
		}
		info->combined = info->configured;
	}
}

static int smp_add_present_cpu(int cpu);

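/*
 * Walk the core list and make every not-yet-present core (and all of
 * its threads) present. Returns the number of logical cpus added.
 */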
static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
{
	struct pcpu *pcpu;
	cpumask_t avail;
	int cpu, nr, i, j;
	u16 address;

	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	cpu = cpumask_first(&avail);
	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
		if (sclp.has_core_type && info->core[i].type != boot_core_type)
			continue;
		address = info->core[i].core_id << smp_cpu_mt_shift;
		for (j = 0; j <= smp_cpu_mtid; j++) {
			if (pcpu_find_address(cpu_present_mask, address + j))
				continue;
			pcpu = pcpu_devices + cpu;
			pcpu->address = address + j;
			pcpu->state =
				(cpu >= info->configured*(smp_cpu_mtid + 1)) ?
				CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
			set_cpu_present(cpu, true);
			if (sysfs_add && smp_add_present_cpu(cpu) != 0)
				set_cpu_present(cpu, false);
			else
				nr++;
			cpu = cpumask_next(cpu, &avail);
			if (cpu >= nr_cpu_ids)
				break;
		}
	}
	return nr;
}

void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_core_info *info;
	u16 address;

	/* Get CPU information */
	info = memblock_alloc(sizeof(*info), 8);
	smp_get_core_info(info, 1);
	/* Find boot CPU type */
	if (sclp.has_core_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->core[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_core_type = info->core[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

	/* Set multi-threading state for the current system */
	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
	pcpu_set_smt(mtid);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (sclp.has_core_type &&
		    info->core[cpu].type != boot_core_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

	/* Add CPUs present at boot */
	get_online_cpus();
	__smp_rescan_cpus(info, 0);
	put_online_cpus();
	memblock_free_early((unsigned long)info, sizeof(*info));
}

static void smp_init_secondary(void)
{
	int cpu = smp_processor_id();

	S390_lowcore.last_update_clock = get_tod_clock();
	restore_access_regs(S390_lowcore.access_regs_save_area);
	cpu_init();
	preempt_disable();
	init_cpu_timer();
	vtime_init();
	pfault_init();
	notify_cpu_starting(smp_processor_id());
	if (topology_cpu_dedicated(cpu))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
	set_cpu_online(smp_processor_id(), true);
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/*
 *	Activate a secondary processor.
 */
static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
{
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	CALL_ON_STACK(smp_init_secondary, S390_lowcore.kernel_stack, 0);
}

/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu;
	int base, i, rc;

	pcpu = pcpu_devices + cpu;
	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	base = smp_get_base_cpu(cpu);
	for (i = 0; i <= smp_cpu_mtid; i++) {
		if (base + i < nr_cpu_ids)
			if (cpu_online(base + i))
				break;
	}
	/*
	 * If this is the first CPU of the core to get online
	 * do an initial CPU reset.
	 */
	if (i > smp_cpu_mtid &&
	    pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	unsigned long cregs[16];

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	set_cpu_online(smp_processor_id(), false);
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0] &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6] &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	__bpon();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

#endif /* CONFIG_HOTPLUG_CPU */

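/*
 * Derive the set of possible cpus from the SCLP core and thread limits,
 * capped by the smt=/nosmt= and possible_cpus= kernel parameters.
 */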
void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp_max, cpu;

	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
	sclp_max = min(smp_max_threads, sclp_max);
	sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp_max);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
}

void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	WARN_ON(!cpu_present(0) || !cpu_online(0));
	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
	pcpu_devices[0].address = stap();
	S390_lowcore.cpu_nr = 0;
	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
	S390_lowcore.spinlock_index = 0;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

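/*
 * Configure ("1") or deconfigure ("0") a standby core via the SCLP.
 * All threads of the core must be offline, and cpu 0 is never changed.
 */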
static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc, i;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	cpu = smp_get_base_cpu(cpu);
	if (cpu == 0)
		goto out;
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
			goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&dev_attr_configure.attr,
#endif
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int smp_cpu_online(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
}
static int smp_cpu_pre_down(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
	return 0;
}

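/* Register a present cpu with the driver core and add its sysfs files. */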
static int smp_add_present_cpu(int cpu)
{
	struct device *s;
	struct cpu *c;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	per_cpu(cpu_device, cpu) = c;
	s = &c->dev;
	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
	struct sclp_core_info *info;
	int nr;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	smp_get_core_info(info, 0);
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, 1);
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = lock_device_hotplug_sysfs();
	if (rc)
		return rc;
	rc = smp_rescan_cpus();
	unlock_device_hotplug();
	return rc ? rc : count;
}
static DEVICE_ATTR_WO(rescan);
#endif /* CONFIG_HOTPLUG_CPU */

static int __init s390_smp_init(void)
{
	int cpu, rc = 0;

#ifdef CONFIG_HOTPLUG_CPU
	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
#endif
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			goto out;
	}

	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
			       smp_cpu_online, smp_cpu_pre_down);
	rc = rc <= 0 ? rc : 0;
out:
	return rc;
}
subsys_initcall(s390_smp_init);
1206