/*
 *  SMP related functions
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Denis Joseph Barrow,
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/crash_dump.h>
#include <linux/memblock.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include "entry.h"

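/* Signaling bits exchanged between cpus, stored in pcpu->ec_mask. */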
enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

static DEFINE_PER_CPU(struct cpu *, cpu_device);

struct pcpu {
	struct lowcore *lowcore;	/* lowcore page(s) for the cpu */
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	unsigned long ec_clk;		/* sigp timestamp for ec_xxx */
	signed char state;		/* physical cpu state */
	signed char polarization;	/* physical polarization */
	u16 address;			/* physical cpu address */
};

static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);

#ifdef CONFIG_CRASH_DUMP
__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif

static unsigned int smp_max_threads __initdata = -1U;

static int __init early_nosmt(char *s)
{
	smp_max_threads = 1;
	return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
	get_option(&s, &smp_max_threads);
	return 0;
}
early_param("smt", early_smt);

/*
 * The smp_cpu_state_mutex must be held when changing the state or
 * polarization member of a pcpu data structure within the pcpu_devices
 * array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions for sigp instructions
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}

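/*
 * Retry a sigp order while the target cpu signals busy; back off with a
 * short delay once the first few immediate retries have failed.
 */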
static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}

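/* Test whether the target cpu is in the stopped or check-stop state. */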
static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 uninitialized_var(status);

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}

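/*
 * Set an ec_xxx bit for the target cpu and kick it with an external
 * call or an emergency signal, unless the bit was already pending.
 */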
static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
		return;
	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu->ec_clk = get_tod_clock_fast();
	pcpu_sigp_retry(pcpu, order, 0);
}

#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)

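/*
 * Allocate and set up lowcore, async stack and panic stack for a cpu.
 * The boot cpu reuses the areas it is already running on.
 */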
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, panic_stack;
	struct lowcore *lc;

	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore = (struct lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
		async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		panic_stack = __get_free_page(GFP_KERNEL);
		if (!pcpu->lowcore || !panic_stack || !async_stack)
			goto out;
	} else {
		async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
		panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
	}
	lc = pcpu->lowcore;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
	lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	if (MACHINE_HAS_VX)
		lc->vector_save_area_addr =
			(unsigned long) &lc->vector_save_area;
	if (vdso_alloc_per_cpu(lc))
		goto out;
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
	return 0;
out:
	if (pcpu != &pcpu_devices[0]) {
		free_page(panic_stack);
		free_pages(async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
	return -ENOMEM;
}

#ifdef CONFIG_HOTPLUG_CPU

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
	vdso_free_per_cpu(pcpu->lowcore);
	if (pcpu == &pcpu_devices[0])
		return;
	free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
	free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
	free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
}

#endif

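/* Initialize the lowcore of a secondary cpu from the state of the boot cpu. */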
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct lowcore *lc = pcpu->lowcore;

	cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
}

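/*
 * Point the lowcore of the target cpu at the kernel stack and thread
 * info of the task that will run on it.
 */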
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct lowcore *lc = pcpu->lowcore;
	struct thread_info *ti = task_thread_info(tsk);

	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->thread_info = (unsigned long) task_thread_info(tsk);
	lc->current_task = (unsigned long) tsk;
	lc->lpp = LPP_MAGIC;
	lc->current_pid = tsk->pid;
	lc->user_timer = ti->user_timer;
	lc->system_timer = ti->system_timer;
	lc->steal_timer = 0;
}

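/* Make the target cpu execute func(data) via sigp restart. */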
static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct lowcore *lc = pcpu->lowcore;

	lc->restart_stack = lc->kernel_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
			  void *data, unsigned long stack)
{
	struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
	unsigned long source_cpu = stap();

	__load_psw_mask(PSW_KERNEL_BITS);
	if (pcpu->address == source_cpu)
		func(data);	/* should not return */
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	mem_assign_absolute(lc->restart_stack, stack);
	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
	mem_assign_absolute(lc->restart_data, (unsigned long) data);
	mem_assign_absolute(lc->restart_source, source_cpu);
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
		: "0", "1", "cc");
	for (;;) ;
}

/*
 * Enable additional logical cpus for multi-threading.
 */
static int pcpu_set_smt(unsigned int mtid)
{
	int cc;

	if (smp_cpu_mtid == mtid)
		return 0;
	cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
	if (cc == 0) {
		smp_cpu_mtid = mtid;
		smp_cpu_mt_shift = 0;
		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
			smp_cpu_mt_shift++;
		pcpu_devices[0].address = stap();
	}
	return cc;
}

/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	pcpu_delegate(&pcpu_devices[0], func, data,
		      pcpu_devices->lowcore->panic_stack -
		      PANIC_FRAME_OFFSET + PAGE_SIZE);
}

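/* Translate a physical cpu address into a logical cpu number. */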
int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}

int smp_vcpu_scheduled(int cpu)
{
	return pcpu_running(pcpu_devices + cpu);
}

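/*
 * Yield the cpu: a directed yield to the given cpu with diag 0x9c if
 * available, otherwise an undirected yield with diag 0x44.
 */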
void smp_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C) {
		diag_stat_inc_norecursion(DIAG_STAT_X09C);
		asm volatile("diag %0,0,0x9c"
			     : : "d" (pcpu_devices[cpu].address));
	} else if (MACHINE_HAS_DIAG44) {
		diag_stat_inc_norecursion(DIAG_STAT_X044);
		asm volatile("diag 0,0,0x44");
	}
}

/*
 * Send an emergency signal to all cpus in cpumask and wait up to one
 * second for them to stop.
 */
static void smp_emergency_stop(cpumask_t *cpumask)
{
	u64 end;
	int cpu;

	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, cpumask);
		if (cpumask_empty(cpumask))
			break;
		cpu_relax();
	}
}

/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	cpumask_t cpumask;
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	if (oops_in_progress)
		smp_emergency_stop(&cpumask);

	/* stop all processors */
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu))
			cpu_relax();
	}
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);

#ifdef CONFIG_CRASH_DUMP

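/*
 * Store the register state (and vector registers, if available) of a
 * remote cpu into its lowcore save areas for a crash dump.
 */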
int smp_store_status(int cpu)
{
	struct pcpu *pcpu = pcpu_devices + cpu;
	unsigned long pa;

	pa = __pa(&pcpu->lowcore->floating_pt_save_area);
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	if (!MACHINE_HAS_VX)
		return 0;
	pa = __pa(pcpu->lowcore->vector_save_area_addr);
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	return 0;
}

/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp dump
 *    condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump and the old kernel did not store the CPU state,
 *    or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 * 4) kdump and the old kernel stored the CPU state
 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 *    This case does not exist for s390 anymore, setup_arch explicitly
 *    deactivates the elfcorehdr= kernel parameter
 */
static __init void smp_save_cpu_vxrs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	__vector128 *vxrs = (__vector128 *) page;

	if (is_boot_cpu)
		vxrs = boot_cpu_vector_save_area;
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, page);
	save_area_add_vxrs(sa, vxrs);
}

static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	void *regs = (void *) page;

	if (is_boot_cpu)
		copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page);
	save_area_add_regs(sa, regs);
}

void __init smp_save_dump_cpus(void)
{
	int addr, boot_cpu_addr, max_cpu_addr;
	struct save_area *sa;
	unsigned long page;
	bool is_boot_cpu;

	if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
		/* No previous system present, normal boot. */
		return;
	/* Allocate a page as dumping area for the store status sigps */
	page = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 1UL << 31);
	/* Set multi-threading state to the previous system. */
	pcpu_set_smt(sclp.mtid_prev);
	boot_cpu_addr = stap();
	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
	for (addr = 0; addr <= max_cpu_addr; addr++) {
		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
		    SIGP_CC_NOT_OPERATIONAL)
			continue;
		is_boot_cpu = (addr == boot_cpu_addr);
		/* Allocate save area */
		sa = save_area_alloc(is_boot_cpu);
		if (!sa)
			panic("could not allocate memory for save area\n");
		if (MACHINE_HAS_VX)
			/* Get the vector registers */
			smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
		/*
		 * For a zfcp dump OLDMEM_BASE == NULL and the registers
		 * of the boot CPU are stored in the HSA. To retrieve
		 * these registers an SCLP request is required which is
		 * done by drivers/s390/char/zcore.c:init_cpu_info()
		 */
		if (!is_boot_cpu || OLDMEM_BASE)
			/* Get the CPU registers */
			smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
	}
	memblock_free(page, PAGE_SIZE);
	diag308_reset();
	pcpu_set_smt(0);
}
#endif

void smp_cpu_set_polarization(int cpu, int val)
{
	pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return pcpu_devices[cpu].polarization;
}

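/*
 * Retrieve the core info via SCLP; if that fails once, fall back to
 * probing all cpu addresses with sigp sense from then on.
 */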
static struct sclp_core_info *smp_get_core_info(void)
{
	static int use_sigp_detection;
	struct sclp_core_info *info;
	int address;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info && (use_sigp_detection || sclp_get_core_info(info))) {
		use_sigp_detection = 1;
		for (address = 0;
		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
		     address += (1U << smp_cpu_mt_shift)) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->core[info->configured].core_id =
				address >> smp_cpu_mt_shift;
			info->configured++;
		}
		info->combined = info->configured;
	}
	return info;
}

static int smp_add_present_cpu(int cpu);

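/*
 * Mark detected cpus as present; newly added cpus start out in the
 * standby or configured state depending on the core info.
 */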
static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
{
	struct pcpu *pcpu;
	cpumask_t avail;
	int cpu, nr, i, j;
	u16 address;

	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	cpu = cpumask_first(&avail);
	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
		if (sclp.has_core_type && info->core[i].type != boot_core_type)
			continue;
		address = info->core[i].core_id << smp_cpu_mt_shift;
		for (j = 0; j <= smp_cpu_mtid; j++) {
			if (pcpu_find_address(cpu_present_mask, address + j))
				continue;
			pcpu = pcpu_devices + cpu;
			pcpu->address = address + j;
			pcpu->state =
				(cpu >= info->configured*(smp_cpu_mtid + 1)) ?
				CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
			set_cpu_present(cpu, true);
			if (sysfs_add && smp_add_present_cpu(cpu) != 0)
				set_cpu_present(cpu, false);
			else
				nr++;
			cpu = cpumask_next(cpu, &avail);
			if (cpu >= nr_cpu_ids)
				break;
		}
	}
	return nr;
}

static void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_core_info *info;
	u16 address;

	/* Get CPU information */
	info = smp_get_core_info();
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");

	/* Find boot CPU type */
	if (sclp.has_core_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->core[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_core_type = info->core[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

	/* Set multi-threading state for the current system */
	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
	pcpu_set_smt(mtid);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (sclp.has_core_type &&
		    info->core[cpu].type != boot_core_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

	/* Add CPUs present at boot */
	get_online_cpus();
	__smp_rescan_cpus(info, 0);
	put_online_cpus();
	kfree(info);
}

/*
 * Activate a secondary processor; entered via sigp restart on the
 * freshly started cpu.
 */
static void smp_start_secondary(void *cpuvoid)
{
	S390_lowcore.last_update_clock = get_tod_clock();
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	restore_access_regs(S390_lowcore.access_regs_save_area);
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	cpu_init();
	preempt_disable();
	init_cpu_timer();
	vtime_init();
	pfault_init();
	notify_cpu_starting(smp_processor_id());
	set_cpu_online(smp_processor_id(), true);
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu;
	int base, i, rc;

	pcpu = pcpu_devices + cpu;
	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	base = cpu - (cpu % (smp_cpu_mtid + 1));
	for (i = 0; i <= smp_cpu_mtid; i++) {
		if (base + i < nr_cpu_ids)
			if (cpu_online(base + i))
				break;
	}
	/*
	 * If this is the first CPU of the core to get online
	 * do an initial CPU reset.
	 */
	if (i > smp_cpu_mtid &&
	    pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	unsigned long cregs[16];

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	set_cpu_online(smp_processor_id(), false);
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0] &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6] &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

#endif

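/*
 * Derive the possible cpu mask from the "possible_cpus=" kernel
 * parameter and the cpu/thread limits reported by SCLP.
 */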
void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp_max, cpu;

	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
	sclp_max = min(smp_max_threads, sclp_max);
	sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp_max);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
	smp_detect_cpus();
}

void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->address = stap();
	pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
	set_cpu_present(0, true);
	set_cpu_online(0, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
	S390_lowcore.cpu_nr = 0;
	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc, i;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	cpu -= cpu % (smp_cpu_mtid + 1);
	if (cpu == 0)
		goto out;
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
			goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&dev_attr_configure.attr,
#endif
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

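/*
 * Hotplug notifier: create the per-cpu idle attributes when a cpu
 * comes online and remove them again when it goes away.
 */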
static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
			  void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct device *s = &per_cpu(cpu_device, cpu)->dev;
	int err = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		break;
	case CPU_DEAD:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return notifier_from_errno(err);
}

static int smp_add_present_cpu(int cpu)
{
	struct device *s;
	struct cpu *c;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	per_cpu(cpu_device, cpu) = c;
	s = &c->dev;
	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (cpu_online(cpu)) {
		rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		if (rc)
			goto out_online;
	}
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	if (cpu_online(cpu))
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
out_online:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
	struct sclp_core_info *info;
	int nr;

	info = smp_get_core_info();
	if (!info)
		return -ENOMEM;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, 1);
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
#endif

static int __init s390_smp_init(void)
{
	int cpu, rc = 0;

#ifdef CONFIG_HOTPLUG_CPU
	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
#endif
	cpu_notifier_register_begin();
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			goto out;
	}

	__hotcpu_notifier(smp_cpu_notify, 0);

out:
	cpu_notifier_register_done();
	return rc;
}
subsys_initcall(s390_smp_init);