/*
 *  SMP related functions
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Denis Joseph Barrow,
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/crash_dump.h>
#include <linux/memblock.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include "entry.h"

enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

static DEFINE_PER_CPU(struct cpu *, cpu_device);

struct pcpu {
	struct _lowcore *lowcore;	/* lowcore page(s) for the cpu */
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	signed char state;		/* physical cpu state */
	signed char polarization;	/* physical polarization */
	u16 address;			/* physical cpu address */
};

static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);

static unsigned int smp_max_threads __initdata = -1U;

static int __init early_nosmt(char *s)
{
	smp_max_threads = 1;
	return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
	get_option(&s, &smp_max_threads);
	return 0;
}
early_param("smt", early_smt);

/*
 * The smp_cpu_state_mutex must be held when changing the state or
 * polarization member of a pcpu data structure within the pcpu_devices
 * array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm,
				    u32 *status)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}
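
/*
 * Retry a sigp order on a single cpu, busy-looping while the order is
 * rejected with condition code "busy" and backing off with udelay()
 * after the first few attempts.
 */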
static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}

static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 uninitialized_var(status);

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}

static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
		return;
	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu_sigp_retry(pcpu, order, 0);
}

#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
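
/*
 * Allocate and initialize the lowcore, the async stack and the panic
 * stack for a cpu; the boot cpu reuses the lowcore and stacks it is
 * already running on. The new lowcore is made known to the hardware
 * with the sigp set-prefix order.
 */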
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, panic_stack;
	struct _lowcore *lc;

	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
		async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		panic_stack = __get_free_page(GFP_KERNEL);
		if (!pcpu->lowcore || !panic_stack || !async_stack)
			goto out;
	} else {
		async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
		panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
	}
	lc = pcpu->lowcore;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
	lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	if (MACHINE_HAS_VX)
		lc->vector_save_area_addr =
			(unsigned long) &lc->vector_save_area;
	if (vdso_alloc_per_cpu(lc))
		goto out;
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
	return 0;
out:
	if (pcpu != &pcpu_devices[0]) {
		free_page(panic_stack);
		free_pages(async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
	return -ENOMEM;
}

#ifdef CONFIG_HOTPLUG_CPU

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
	vdso_free_per_cpu(pcpu->lowcore);
	if (pcpu == &pcpu_devices[0])
		return;
	free_page(pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET);
	free_pages(pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET, ASYNC_ORDER);
	free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
}

#endif /* CONFIG_HOTPLUG_CPU */
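
/*
 * Initialize the lowcore of a secondary cpu with the state it needs to
 * enter the kernel: attach the cpu to init_mm and copy the control
 * registers, access registers and facility list of the current cpu.
 */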
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct _lowcore *lc = pcpu->lowcore;

	if (MACHINE_HAS_TLB_LC)
		cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	atomic_inc(&init_mm.context.attach_count);
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
}
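
/*
 * Attach a task to a (stopped) cpu: point the lowcore at the kernel
 * stack, thread info and cpu timers of tsk.
 */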
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct _lowcore *lc = pcpu->lowcore;
	struct thread_info *ti = task_thread_info(tsk);

	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->thread_info = (unsigned long) task_thread_info(tsk);
	lc->current_task = (unsigned long) tsk;
	lc->lpp = LPP_MAGIC;
	lc->current_pid = tsk->pid;
	lc->user_timer = ti->user_timer;
	lc->system_timer = ti->system_timer;
	lc->steal_timer = 0;
}
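
/*
 * Start func on a stopped cpu by setting up its restart parameters and
 * issuing a sigp restart.
 */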
static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct _lowcore *lc = pcpu->lowcore;

	lc->restart_stack = lc->kernel_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
			  void *data, unsigned long stack)
{
	struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
	unsigned long source_cpu = stap();

	__load_psw_mask(PSW_KERNEL_BITS);
	if (pcpu->address == source_cpu)
		func(data);	/* should not return */
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	mem_assign_absolute(lc->restart_stack, stack);
	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
	mem_assign_absolute(lc->restart_data, (unsigned long) data);
	mem_assign_absolute(lc->restart_source, source_cpu);
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
		: "0", "1", "cc");
	for (;;) ;
}

/*
 * Enable additional logical cpus for multi-threading.
 */
static int pcpu_set_smt(unsigned int mtid)
{
	register unsigned long reg1 asm ("1") = (unsigned long) mtid;
	int cc;

	if (smp_cpu_mtid == mtid)
		return 0;
	asm volatile(
		"	sigp	%1,0,%2	# sigp set multi-threading\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc) : "d" (reg1), "K" (SIGP_SET_MULTI_THREADING)
		: "cc");
	if (cc == 0) {
		smp_cpu_mtid = mtid;
		smp_cpu_mt_shift = 0;
		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
			smp_cpu_mt_shift++;
		pcpu_devices[0].address = stap();
	}
	return cc;
}

/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	pcpu_delegate(&pcpu_devices[0], func, data,
		      pcpu_devices->lowcore->panic_stack -
		      PANIC_FRAME_OFFSET + PAGE_SIZE);
}
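
/* Map a physical cpu address to its logical cpu number, -1 if unknown. */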
int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}

int smp_vcpu_scheduled(int cpu)
{
	return pcpu_running(pcpu_devices + cpu);
}
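
/*
 * Give the hypervisor a hint to run the target cpu (diag 0x9c) or, if
 * that is not available, give up the current cpu's time slice (diag 0x44).
 */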
void smp_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C) {
		diag_stat_inc_norecursion(DIAG_STAT_X09C);
		asm volatile("diag %0,0,0x9c"
			     : : "d" (pcpu_devices[cpu].address));
	} else if (MACHINE_HAS_DIAG44) {
		diag_stat_inc_norecursion(DIAG_STAT_X044);
		asm volatile("diag 0,0,0x44");
	}
}

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
static void smp_emergency_stop(cpumask_t *cpumask)
{
	u64 end;
	int cpu;

	end = get_tod_clock() + (1000000UL << 12);	/* one second timeout */
	for_each_cpu(cpu, cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, cpumask);
		if (cpumask_empty(cpumask))
			break;
		cpu_relax();
	}
}

/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	cpumask_t cpumask;
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	if (oops_in_progress)
		smp_emergency_stop(&cpumask);

	/* stop all processors */
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu))
			cpu_relax();
	}
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);

#ifdef CONFIG_CRASH_DUMP
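
/*
 * Collect the state of one cpu of the previous system into the extended
 * save area: the boot cpu's registers are taken from the old system's
 * save area, all other cpus are stopped with sigp stop-and-store-status
 * first; the vector registers are added if the machine has them.
 */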
static void __init __smp_store_cpu_state(struct save_area_ext *sa_ext,
					 u16 address, int is_boot_cpu)
{
	void *lc = (void *)(unsigned long) store_prefix();
	unsigned long vx_sa;

	if (is_boot_cpu) {
		/* Copy the registers of the boot cpu. */
		copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa),
				 SAVE_AREA_BASE - PAGE_SIZE, 0);
		if (MACHINE_HAS_VX)
			save_vx_regs_safe(sa_ext->vx_regs);
		return;
	}
	/* Get the registers of a non-boot cpu. */
	__pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL);
	memcpy_real(&sa_ext->sa, lc + SAVE_AREA_BASE, sizeof(sa_ext->sa));
	if (!MACHINE_HAS_VX)
		return;
	/* Get the VX registers */
	vx_sa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!vx_sa)
		panic("could not allocate memory for VX save area\n");
	__pcpu_sigp_relax(address, SIGP_STORE_ADDITIONAL_STATUS, vx_sa, NULL);
	memcpy(sa_ext->vx_regs, (void *) vx_sa, sizeof(sa_ext->vx_regs));
	memblock_free(vx_sa, PAGE_SIZE);
}
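
/*
 * Stop a cpu and store its status (and vector registers) for a dump of
 * the running system.
 */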
int smp_store_status(int cpu)
{
	unsigned long vx_sa;
	struct pcpu *pcpu;

	pcpu = pcpu_devices + cpu;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS,
			      0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	if (!MACHINE_HAS_VX)
		return 0;
	vx_sa = __pa(pcpu->lowcore->vector_save_area_addr);
	__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
			  vx_sa, NULL);
	return 0;
}

#endif /* CONFIG_CRASH_DUMP */

/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp dump: the boot CPU state is in the absolute lowcore
 *    of the memory stored in the HSA, all other CPUs are collected with
 *    sigp stop-and-store-status.
 * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory): the
 *    boot loader stored the boot CPU registers in the absolute lowcore
 *    of the old memory, all other CPUs are collected with sigp
 *    stop-and-store-status.
 * 3) kdump where the old kernel did not store the CPU state, or
 *    stand-alone kdump for DASD: like case 2, the boot CPU registers
 *    come from the memory of the old system.
 * 4) kdump where the old kernel stored the CPU state: everything is in
 *    ELF sections in the memory of the old system and is picked up by
 *    the crash_dump code via elfcorehdr_addr; nothing to do here.
 */
void __init smp_save_dump_cpus(void)
{
#ifdef CONFIG_CRASH_DUMP
	int addr, cpu, boot_cpu_addr, max_cpu_addr;
	struct save_area_ext *sa_ext;
	bool is_boot_cpu;

	if (is_kdump_kernel())
		/* Previous system stored the CPU states. Nothing to do. */
		return;
	if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
		/* No previous system present, normal boot. */
		return;
	/* Set multi-threading state to the previous system. */
	pcpu_set_smt(sclp.mtid_prev);
	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
	for (cpu = 0, addr = 0; addr <= max_cpu_addr; addr++) {
		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0, NULL) ==
		    SIGP_CC_NOT_OPERATIONAL)
			continue;
		cpu += 1;
	}
	dump_save_areas.areas = (void *)memblock_alloc(sizeof(void *) * cpu, 8);
	dump_save_areas.count = cpu;
	boot_cpu_addr = stap();
	for (cpu = 0, addr = 0; addr <= max_cpu_addr; addr++) {
		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0, NULL) ==
		    SIGP_CC_NOT_OPERATIONAL)
			continue;
		sa_ext = (void *) memblock_alloc(sizeof(*sa_ext), 8);
		dump_save_areas.areas[cpu] = sa_ext;
		if (!sa_ext)
			panic("could not allocate memory for save area\n");
		is_boot_cpu = (addr == boot_cpu_addr);
		cpu += 1;
		if (is_boot_cpu && !OLDMEM_BASE)
			/* Skip boot CPU for standard zfcp dump. */
			continue;
		/* Get state for this CPU. */
		__smp_store_cpu_state(sa_ext, addr, is_boot_cpu);
	}
	diag308_reset();
	pcpu_set_smt(0);
#endif /* CONFIG_CRASH_DUMP */
}
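
/*
 * Accessors for the cpu polarization set by the topology code; updates
 * are serialized with smp_cpu_state_mutex.
 */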
void smp_cpu_set_polarization(int cpu, int val)
{
	pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return pcpu_devices[cpu].polarization;
}
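
/*
 * Retrieve the core topology from the SCLP layer; if that fails, fall
 * back (permanently) to probing each possible cpu address with sigp
 * sense.
 */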
static struct sclp_core_info *smp_get_core_info(void)
{
	static int use_sigp_detection;
	struct sclp_core_info *info;
	int address;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info && (use_sigp_detection || sclp_get_core_info(info))) {
		use_sigp_detection = 1;
		for (address = 0;
		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
		     address += (1U << smp_cpu_mt_shift)) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->core[info->configured].core_id =
				address >> smp_cpu_mt_shift;
			info->configured++;
		}
		info->combined = info->configured;
	}
	return info;
}

static int smp_add_present_cpu(int cpu);
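
/*
 * Turn the detected cores into present logical cpus: every thread of
 * every core of the boot cpu type that is not present yet gets a logical
 * cpu number. Returns the number of cpus added; callers hold the cpu
 * hotplug lock.
 */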
static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
{
	struct pcpu *pcpu;
	cpumask_t avail;
	int cpu, nr, i, j;
	u16 address;

	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	cpu = cpumask_first(&avail);
	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
		if (sclp.has_core_type && info->core[i].type != boot_core_type)
			continue;
		address = info->core[i].core_id << smp_cpu_mt_shift;
		for (j = 0; j <= smp_cpu_mtid; j++) {
			if (pcpu_find_address(cpu_present_mask, address + j))
				continue;
			pcpu = pcpu_devices + cpu;
			pcpu->address = address + j;
			pcpu->state =
				(cpu >= info->configured*(smp_cpu_mtid + 1)) ?
				CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
			set_cpu_present(cpu, true);
			if (sysfs_add && smp_add_present_cpu(cpu) != 0)
				set_cpu_present(cpu, false);
			else
				nr++;
			cpu = cpumask_next(cpu, &avail);
			if (cpu >= nr_cpu_ids)
				break;
		}
	}
	return nr;
}

static void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_core_info *info;
	u16 address;

	/* Get CPU information */
	info = smp_get_core_info();
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");

	/* Find boot CPU type */
	if (sclp.has_core_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->core[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_core_type = info->core[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

	/* Set multi-threading state for the current system */
	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
	pcpu_set_smt(mtid);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (sclp.has_core_type &&
		    info->core[cpu].type != boot_core_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

	/* Add CPUs present at boot */
	get_online_cpus();
	__smp_rescan_cpus(info, 0);
	put_online_cpus();
	kfree(info);
}

/*
 *	Activate a secondary processor.
 */
static void smp_start_secondary(void *cpuvoid)
{
	S390_lowcore.last_update_clock = get_tod_clock();
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	restore_access_regs(S390_lowcore.access_regs_save_area);
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	cpu_init();
	preempt_disable();
	init_cpu_timer();
	vtime_init();
	pfault_init();
	notify_cpu_starting(smp_processor_id());
	set_cpu_online(smp_processor_id(), true);
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_ONLINE);
}

/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu;
	int base, i, rc;

	pcpu = pcpu_devices + cpu;
	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	base = cpu - (cpu % (smp_cpu_mtid + 1));
	for (i = 0; i <= smp_cpu_mtid; i++) {
		if (base + i < nr_cpu_ids)
			if (cpu_online(base + i))
				break;
	}
	/*
	 * If this is the first CPU of the core to get online
	 * do an initial CPU reset.
	 */
	if (i > smp_cpu_mtid &&
	    pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu) || !cpu_active(cpu))
		cpu_relax();
	return 0;
}

static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	unsigned long cregs[16];

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	set_cpu_online(smp_processor_id(), false);
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0]  &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6]  &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	atomic_dec(&init_mm.context.attach_count);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	if (MACHINE_HAS_TLB_LC)
		cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

#endif /* CONFIG_HOTPLUG_CPU */
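
/*
 * Size the possible cpu mask from the "possible_cpus=" kernel parameter
 * and the SCLP-reported maximum number of cores and threads.
 */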
void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp_max, cpu;

	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
	sclp_max = min(smp_max_threads, sclp_max);
	sclp_max = sclp.max_cores * sclp_max ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp_max);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
	smp_detect_cpus();
}

void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->address = stap();
	pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
	set_cpu_present(0, true);
	set_cpu_online(0, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
	S390_lowcore.cpu_nr = 0;
	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc, i;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	cpu -= cpu % (smp_cpu_mtid + 1);
	if (cpu == 0)
		goto out;
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
			goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&dev_attr_configure.attr,
#endif
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
			  void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct device *s = &per_cpu(cpu_device, cpu)->dev;
	int err = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		break;
	case CPU_DEAD:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return notifier_from_errno(err);
}
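
/* Register the cpu device and its sysfs attributes for a present cpu. */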
static int smp_add_present_cpu(int cpu)
{
	struct device *s;
	struct cpu *c;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	per_cpu(cpu_device, cpu) = c;
	s = &c->dev;
	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (cpu_online(cpu)) {
		rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		if (rc)
			goto out_online;
	}
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	if (cpu_online(cpu))
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
out_online:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
	struct sclp_core_info *info;
	int nr;

	info = smp_get_core_info();
	if (!info)
		return -ENOMEM;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, 1);
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */

static int __init s390_smp_init(void)
{
	int cpu, rc = 0;

#ifdef CONFIG_HOTPLUG_CPU
	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
#endif
	cpu_notifier_register_begin();
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			goto out;
	}

	__hotcpu_notifier(smp_cpu_notify, 0);

out:
	cpu_notifier_register_done();
	return rc;
}
subsys_initcall(s390_smp_init);