linux/arch/arm64/kernel/smp.c
/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/arm_sdei.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>
#include <linux/kexec.h>

#include <asm/alternative.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/daifflags.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

/*
 * As of 2.5, kernels no longer have an init_tasks structure,
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack.
 */
struct secondary_data secondary_data;
/* Number of CPUs which aren't online, but looping in kernel text. */
int cpus_stuck_in_kernel;

enum ipi_msg_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CPU_STOP,
        IPI_CPU_CRASH_STOP,
        IPI_TIMER,
        IPI_IRQ_WORK,
        IPI_WAKEUP
};

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_kill(unsigned int cpu);
#else
static inline int op_cpu_kill(unsigned int cpu)
{
        return -ENOSYS;
}
#endif


/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
        if (cpu_ops[cpu]->cpu_boot)
                return cpu_ops[cpu]->cpu_boot(cpu);

        return -EOPNOTSUPP;
}

static DECLARE_COMPLETION(cpu_running);
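/*
 * Set when a secondary CPU does not support the 52-bit VA configuration
 * in use; checked in __cpu_up() below to explain a failed bring-up.
 */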
bool va52mismatch __ro_after_init;

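/*
 * Bring a secondary CPU into the kernel: hand it its idle task and stack,
 * kick it via the cpu_ops boot method and wait for it to come online.
 */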
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int ret;
        long status;

        /*
         * We need to tell the secondary core where to find its stack and the
         * page tables.
         */
        secondary_data.task = idle;
        secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
        update_cpu_boot_status(CPU_MMU_OFF);
        __flush_dcache_area(&secondary_data, sizeof(secondary_data));

        /*
         * Now bring the CPU into our world.
         */
        ret = boot_secondary(cpu, idle);
        if (ret == 0) {
                /*
                 * CPU was successfully started, wait for it to come online or
                 * time out.
                 */
                wait_for_completion_timeout(&cpu_running,
                                            msecs_to_jiffies(1000));

                if (!cpu_online(cpu)) {
                        pr_crit("CPU%u: failed to come online\n", cpu);

                        if (IS_ENABLED(CONFIG_ARM64_USER_VA_BITS_52) && va52mismatch)
                                pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);

                        ret = -EIO;
                }
        } else {
                pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
        }

        secondary_data.task = NULL;
        secondary_data.stack = NULL;
        status = READ_ONCE(secondary_data.status);
        if (ret && status) {

                if (status == CPU_MMU_OFF)
                        status = READ_ONCE(__early_cpu_boot_status);

                switch (status) {
                default:
                        pr_err("CPU%u: failed in unknown state : 0x%lx\n",
                                        cpu, status);
                        break;
                case CPU_KILL_ME:
                        if (!op_cpu_kill(cpu)) {
                                pr_crit("CPU%u: died during early boot\n", cpu);
                                break;
                        }
                        pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
                        /* Fall through */
                case CPU_STUCK_IN_KERNEL:
                        pr_crit("CPU%u: is stuck in kernel\n", cpu);
                        cpus_stuck_in_kernel++;
                        break;
                case CPU_PANIC_KERNEL:
                        panic("CPU%u detected unsupported configuration\n", cpu);
                }
        }

        return ret;
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage notrace void secondary_start_kernel(void)
{
        u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
        struct mm_struct *mm = &init_mm;
        unsigned int cpu;

        cpu = task_cpu(current);
        set_my_cpu_offset(per_cpu_offset(cpu));

        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        mmgrab(mm);
        current->active_mm = mm;

        /*
         * TTBR0 is only used for the identity mapping at this stage. Make it
         * point to zero page to avoid speculatively fetching new entries.
         */
        cpu_uninstall_idmap();

        preempt_disable();
        trace_hardirqs_off();

        /*
         * If the system has established the capabilities, make sure
         * this CPU ticks all of those. If it doesn't, the CPU will
         * fail to come online.
         */
        check_local_cpu_capabilities();

        if (cpu_ops[cpu]->cpu_postboot)
                cpu_ops[cpu]->cpu_postboot();

        /*
         * Log the CPU info before it is marked online and might get read.
         */
        cpuinfo_store_cpu();

        /*
         * Enable GIC and timers.
         */
        notify_cpu_starting(cpu);

        store_cpu_topology(cpu);
        numa_add_cpu(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue.  Wait for
         * the CPU migration code to notice that the CPU is online
         * before we continue.
         */
        pr_info("CPU%u: Booted secondary processor 0x%010lx [0x%08x]\n",
                                         cpu, (unsigned long)mpidr,
                                         read_cpuid_id());
        update_cpu_boot_status(CPU_BOOT_SUCCESS);
        set_cpu_online(cpu, true);
        complete(&cpu_running);

        local_daif_restore(DAIF_PROCCTX);

        /*
         * OK, it's off to the idle thread for us
         */
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
        /*
         * If we don't have a cpu_die method, abort before we reach the point
         * of no return. CPU0 may not have cpu_ops, so test for it.
         */
        if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
                return -EOPNOTSUPP;

        /*
         * We may need to abort a hot unplug for some other mechanism-specific
         * reason.
         */
        if (cpu_ops[cpu]->cpu_disable)
                return cpu_ops[cpu]->cpu_disable(cpu);

        return 0;
}

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        int ret;

        ret = op_cpu_disable(cpu);
        if (ret)
                return ret;

        remove_cpu_topology(cpu);
        numa_remove_cpu(cpu);

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        irq_migrate_all_off_this_cpu();

        return 0;
}

static int op_cpu_kill(unsigned int cpu)
{
        /*
         * If we have no means of synchronising with the dying CPU, then assume
         * that it is really dead. We can only wait for an arbitrary length of
         * time and hope that it's dead, so let's skip the wait and just hope.
         */
        if (!cpu_ops[cpu]->cpu_kill)
                return 0;

        return cpu_ops[cpu]->cpu_kill(cpu);
}

/*
 * Called on the thread which is asking for a CPU to be shut down;
 * waits until shutdown has completed, or times out.
 */
void __cpu_die(unsigned int cpu)
{
        int err;

        if (!cpu_wait_death(cpu, 5)) {
                pr_crit("CPU%u: cpu didn't die\n", cpu);
                return;
        }
        pr_notice("CPU%u: shutdown\n", cpu);

        /*
         * Now that the dying CPU is beyond the point of no return w.r.t.
         * in-kernel synchronisation, try to get the firmware to help us
         * verify that it has really left the kernel before we consider
         * clobbering anything it might still be using.
         */
        err = op_cpu_kill(cpu);
        if (err)
                pr_warn("CPU%d may not have shut down cleanly: %d\n",
                        cpu, err);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 */
void cpu_die(void)
{
        unsigned int cpu = smp_processor_id();

        idle_task_exit();

        local_daif_mask();

        /* Tell __cpu_die() that this CPU is now safe to dispose of */
        (void)cpu_report_death();

        /*
         * Actually shut down the CPU. This must never fail. The specific hotplug
         * mechanism must perform all required cache maintenance to ensure that
         * no dirty lines are lost in the process of shutting down the CPU.
         */
        cpu_ops[cpu]->cpu_die(cpu);

        BUG();
}
#endif

/*
 * Kill the calling secondary CPU, early in bringup before it is turned
 * online.
 */
void cpu_die_early(void)
{
        int cpu = smp_processor_id();

        pr_crit("CPU%d: will not boot\n", cpu);

        /* Mark this CPU absent */
        set_cpu_present(cpu, 0);

#ifdef CONFIG_HOTPLUG_CPU
        update_cpu_boot_status(CPU_KILL_ME);
        /* Check if we can park ourselves */
        if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
                cpu_ops[cpu]->cpu_die(cpu);
#endif
        update_cpu_boot_status(CPU_STUCK_IN_KERNEL);

        cpu_park_loop();
}

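/*
 * Report whether all CPUs started at EL2 or EL1, and warn (tainting the
 * kernel) if they came up in inconsistent exception levels.
 */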
static void __init hyp_mode_check(void)
{
        if (is_hyp_mode_available())
                pr_info("CPU: All CPU(s) started at EL2\n");
        else if (is_hyp_mode_mismatched())
                WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
                           "CPU: CPUs started in inconsistent modes");
        else
                pr_info("CPU: All CPU(s) started at EL1\n");
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
        setup_cpu_features();
        hyp_mode_check();
        apply_alternatives_all();
        mark_linear_text_alias_ro();
}

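/*
 * Prepare the boot CPU: set up its per-cpu offset and record its cpuinfo
 * before any other CPU is brought up.
 */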
void __init smp_prepare_boot_cpu(void)
{
        set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
        /*
         * Initialise the static keys early as they may be enabled by the
         * cpufeature code.
         */
        jump_label_init();
        cpuinfo_store_boot_cpu();
}

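/*
 * Read and validate the MPIDR ("reg" property) of a DT cpu node; returns
 * INVALID_HWID if the property is missing or malformed.
 */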
static u64 __init of_get_cpu_mpidr(struct device_node *dn)
{
        const __be32 *cell;
        u64 hwid;

        /*
         * A cpu node with missing "reg" property is
         * considered invalid to build a cpu_logical_map
         * entry.
         */
        cell = of_get_property(dn, "reg", NULL);
        if (!cell) {
                pr_err("%pOF: missing reg property\n", dn);
                return INVALID_HWID;
        }

        hwid = of_read_number(cell, of_n_addr_cells(dn));
        /*
         * Non affinity bits must be set to 0 in the DT
         */
        if (hwid & ~MPIDR_HWID_BITMASK) {
                pr_err("%pOF: invalid reg property\n", dn);
                return INVALID_HWID;
        }
        return hwid;
}

/*
 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
 * entries and check for duplicates. If any is found just ignore the
 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
 * matching valid MPIDR values.
 */
static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
{
        unsigned int i;

        for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
                if (cpu_logical_map(i) == hwid)
                        return true;
        return false;
}

/*
 * Initialize cpu operations for a logical cpu and
 * set it in the possible mask on success
 */
static int __init smp_cpu_setup(int cpu)
{
        if (cpu_read_ops(cpu))
                return -ENODEV;

        if (cpu_ops[cpu]->cpu_init(cpu))
                return -ENODEV;

        set_cpu_possible(cpu, true);

        return 0;
}

static bool bootcpu_valid __initdata;
static unsigned int cpu_count = 1;

#ifdef CONFIG_ACPI
static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS];

struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu)
{
        return &cpu_madt_gicc[cpu];
}

/*
 * acpi_map_gic_cpu_interface - parse processor MADT entry
 *
 * Carry out sanity checks on MADT processor entry and initialize
 * cpu_logical_map on success
 */
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
        u64 hwid = processor->arm_mpidr;

        if (!(processor->flags & ACPI_MADT_ENABLED)) {
                pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
                return;
        }

        if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
                pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
                return;
        }

        if (is_mpidr_duplicate(cpu_count, hwid)) {
                pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
                return;
        }

        /* Check if GICC structure of boot CPU is available in the MADT */
        if (cpu_logical_map(0) == hwid) {
                if (bootcpu_valid) {
                        pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
                               hwid);
                        return;
                }
                bootcpu_valid = true;
                cpu_madt_gicc[0] = *processor;
                return;
        }

        if (cpu_count >= NR_CPUS)
                return;

        /* map the logical cpu id to cpu MPIDR */
        cpu_logical_map(cpu_count) = hwid;

        cpu_madt_gicc[cpu_count] = *processor;

        /*
         * Set-up the ACPI parking protocol cpu entries
         * while initializing the cpu_logical_map to
         * avoid parsing MADT entries multiple times for
         * nothing (ie a valid cpu_logical_map entry should
         * contain a valid parking protocol data set to
         * initialize the cpu if the parking protocol is
         * the only available enable method).
         */
        acpi_set_mailbox_entry(cpu_count, processor);

        cpu_count++;
}

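/*
 * MADT parse callback: validate a GICC entry and hand it to
 * acpi_map_gic_cpu_interface().
 */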
static int __init
acpi_parse_gic_cpu_interface(union acpi_subtable_headers *header,
                             const unsigned long end)
{
        struct acpi_madt_generic_interrupt *processor;

        processor = (struct acpi_madt_generic_interrupt *)header;
        if (BAD_MADT_GICC_ENTRY(processor, end))
                return -EINVAL;

        acpi_table_print_madt_entry(&header->common);

        acpi_map_gic_cpu_interface(processor);

        return 0;
}

static void __init acpi_parse_and_init_cpus(void)
{
        int i;

        /*
         * do a walk of MADT to determine how many CPUs
         * we have including disabled CPUs, and get information
         * we need for SMP init.
         */
        acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
                                      acpi_parse_gic_cpu_interface, 0);

        /*
         * In ACPI, SMP and CPU NUMA information is provided in separate
         * static tables, namely the MADT and the SRAT.
         *
         * Thus, it is simpler to first create the cpu logical map through
         * an MADT walk and then map the logical cpus to their node ids
         * as separate steps.
         */
        acpi_map_cpus_to_nodes();

        for (i = 0; i < nr_cpu_ids; i++)
                early_map_cpu_to_node(i, acpi_numa_get_nid(i));
}
#else
#define acpi_parse_and_init_cpus(...)   do { } while (0)
#endif

/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
static void __init of_parse_and_init_cpus(void)
{
        struct device_node *dn;

        for_each_node_by_type(dn, "cpu") {
                u64 hwid = of_get_cpu_mpidr(dn);

                if (hwid == INVALID_HWID)
                        goto next;

                if (is_mpidr_duplicate(cpu_count, hwid)) {
                        pr_err("%pOF: duplicate cpu reg properties in the DT\n",
                                dn);
                        goto next;
                }

                /*
                 * The numbering scheme requires that the boot CPU
                 * must be assigned logical id 0. Record it so that
                 * the logical map built from DT is validated and can
                 * be used.
                 */
                if (hwid == cpu_logical_map(0)) {
                        if (bootcpu_valid) {
                                pr_err("%pOF: duplicate boot cpu reg property in DT\n",
                                        dn);
                                goto next;
                        }

                        bootcpu_valid = true;
                        early_map_cpu_to_node(0, of_node_to_nid(dn));

                        /*
                         * cpu_logical_map has already been
                         * initialized and the boot cpu doesn't need
                         * the enable-method so continue without
                         * incrementing cpu.
                         */
                        continue;
                }

                if (cpu_count >= NR_CPUS)
                        goto next;

                pr_debug("cpu logical map 0x%llx\n", hwid);
                cpu_logical_map(cpu_count) = hwid;

                early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
next:
                cpu_count++;
        }
}

/*
 * Enumerate the possible CPU set from the device tree or ACPI and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
        int i;

        if (acpi_disabled)
                of_parse_and_init_cpus();
        else
                acpi_parse_and_init_cpus();

        if (cpu_count > nr_cpu_ids)
                pr_warn("Number of cores (%d) exceeds configured maximum of %u - clipping\n",
                        cpu_count, nr_cpu_ids);

        if (!bootcpu_valid) {
                pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
                return;
        }

        /*
         * We need to set the cpu_logical_map entries before enabling
         * the cpus so that cpu processor description entries (DT cpu nodes
         * and ACPI MADT entries) can be retrieved by matching the cpu hwid
         * with entries in cpu_logical_map while initializing the cpus.
         * If the cpu set-up fails, invalidate the cpu_logical_map entry.
         */
        for (i = 1; i < nr_cpu_ids; i++) {
                if (cpu_logical_map(i) != INVALID_HWID) {
                        if (smp_cpu_setup(i))
                                cpu_logical_map(i) = INVALID_HWID;
                }
        }
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        int err;
        unsigned int cpu;
        unsigned int this_cpu;

        init_cpu_topology();

        this_cpu = smp_processor_id();
        store_cpu_topology(this_cpu);
        numa_store_cpu_info(this_cpu);
        numa_add_cpu(this_cpu);

        /*
         * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
         * secondary CPUs present.
         */
        if (max_cpus == 0)
                return;

        /*
         * Initialise the present map (which describes the set of CPUs
         * actually populated at the present time) and release the
         * secondaries from the bootloader.
         */
        for_each_possible_cpu(cpu) {

                per_cpu(cpu_number, cpu) = cpu;

                if (cpu == smp_processor_id())
                        continue;

                if (!cpu_ops[cpu])
                        continue;

                err = cpu_ops[cpu]->cpu_prepare(cpu);
                if (err)
                        continue;

                set_cpu_present(cpu, true);
                numa_store_cpu_info(cpu);
        }
}

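/*
 * Cross-call backend used to raise IPIs; installed by the interrupt
 * controller driver (typically the GIC) via set_smp_cross_call().
 */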
void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
        __smp_cross_call = fn;
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)  [x] = s
        S(IPI_RESCHEDULE, "Rescheduling interrupts"),
        S(IPI_CALL_FUNC, "Function call interrupts"),
        S(IPI_CPU_STOP, "CPU stop interrupts"),
        S(IPI_CPU_CRASH_STOP, "CPU stop (for crash dump) interrupts"),
        S(IPI_TIMER, "Timer broadcast interrupts"),
        S(IPI_IRQ_WORK, "IRQ work interrupts"),
        S(IPI_WAKEUP, "CPU wake-up interrupts"),
};

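/* Trace and raise an IPI via the registered cross-call backend. */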
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
        trace_ipi_raise(target, ipi_types[ipinr]);
        __smp_cross_call(target, ipinr);
}

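/* Dump the per-CPU IPI counters, one row per IPI type. */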
void show_ipi_list(struct seq_file *p, int prec)
{
        unsigned int cpu, i;

        for (i = 0; i < NR_IPI; i++) {
                seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
                           prec >= 4 ? " " : "");
                for_each_online_cpu(cpu)
                        seq_printf(p, "%10u ",
                                   __get_irq_stat(cpu, ipi_irqs[i]));
                seq_printf(p, "      %s\n", ipi_types[i]);
        }
}

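/* Total number of IPIs handled by @cpu across all IPI types. */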
u64 smp_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = 0;
        int i;

        for (i = 0; i < NR_IPI; i++)
                sum += __get_irq_stat(cpu, ipi_irqs[i]);

        return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_WAKEUP);
}
#endif

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
        if (__smp_cross_call)
                smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
        set_cpu_online(cpu, false);

        local_daif_mask();
        sdei_mask_local_cpu();

        while (1)
                cpu_relax();
}

#ifdef CONFIG_KEXEC_CORE
static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
#endif

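/*
 * Handle IPI_CPU_CRASH_STOP: save this CPU's registers for the crash dump,
 * acknowledge the request, then park the CPU.
 */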
static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_KEXEC_CORE
        crash_save_cpu(regs, cpu);

        atomic_dec(&waiting_for_crash_ipi);

        local_irq_disable();
        sdei_mask_local_cpu();

#ifdef CONFIG_HOTPLUG_CPU
        if (cpu_ops[cpu]->cpu_die)
                cpu_ops[cpu]->cpu_die(cpu);
#endif

        /* just in case */
        cpu_park_loop();
#endif
}

/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();
        struct pt_regs *old_regs = set_irq_regs(regs);

        if ((unsigned)ipinr < NR_IPI) {
                trace_ipi_entry_rcuidle(ipi_types[ipinr]);
                __inc_irq_stat(cpu, ipi_irqs[ipinr]);
        }

        switch (ipinr) {
        case IPI_RESCHEDULE:
                scheduler_ipi();
                break;

        case IPI_CALL_FUNC:
                irq_enter();
                generic_smp_call_function_interrupt();
                irq_exit();
                break;

        case IPI_CPU_STOP:
                irq_enter();
                ipi_cpu_stop(cpu);
                irq_exit();
                break;

        case IPI_CPU_CRASH_STOP:
                if (IS_ENABLED(CONFIG_KEXEC_CORE)) {
                        irq_enter();
                        ipi_cpu_crash_stop(cpu, regs);

                        unreachable();
                }
                break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        case IPI_TIMER:
                irq_enter();
                tick_receive_broadcast();
                irq_exit();
                break;
#endif

#ifdef CONFIG_IRQ_WORK
        case IPI_IRQ_WORK:
                irq_enter();
                irq_work_run();
                irq_exit();
                break;
#endif

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
        case IPI_WAKEUP:
                WARN_ONCE(!acpi_parking_protocol_valid(cpu),
                          "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
                          cpu);
                break;
#endif

        default:
                pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
                break;
        }

        if ((unsigned)ipinr < NR_IPI)
                trace_ipi_exit_rcuidle(ipi_types[ipinr]);
        set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_TIMER);
}
#endif

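/*
 * Stop all other CPUs: send them IPI_CPU_STOP, then wait up to one second
 * for them to go offline.
 */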
void smp_send_stop(void)
{
        unsigned long timeout;

        if (num_online_cpus() > 1) {
                cpumask_t mask;

                cpumask_copy(&mask, cpu_online_mask);
                cpumask_clear_cpu(smp_processor_id(), &mask);

                if (system_state <= SYSTEM_RUNNING)
                        pr_crit("SMP: stopping secondary CPUs\n");
                smp_cross_call(&mask, IPI_CPU_STOP);
        }

        /* Wait up to one second for other CPUs to stop */
        timeout = USEC_PER_SEC;
        while (num_online_cpus() > 1 && timeout--)
                udelay(1);

        if (num_online_cpus() > 1)
                pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
                           cpumask_pr_args(cpu_online_mask));

        sdei_mask_local_cpu();
}

#ifdef CONFIG_KEXEC_CORE
void crash_smp_send_stop(void)
{
        static int cpus_stopped;
        cpumask_t mask;
        unsigned long timeout;

        /*
         * This function can be called twice in the panic path, but we only
         * execute it once.
         */
        if (cpus_stopped)
                return;

        cpus_stopped = 1;

        if (num_online_cpus() == 1) {
                sdei_mask_local_cpu();
                return;
        }

        cpumask_copy(&mask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &mask);

        atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);

        pr_crit("SMP: stopping secondary CPUs\n");
        smp_cross_call(&mask, IPI_CPU_CRASH_STOP);

        /* Wait up to one second for other CPUs to stop */
        timeout = USEC_PER_SEC;
        while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
                udelay(1);

        if (atomic_read(&waiting_for_crash_ipi) > 0)
                pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
                           cpumask_pr_args(&mask));

        sdei_mask_local_cpu();
}

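/* True if any CPU failed to acknowledge the crash-stop IPI in time. */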
bool smp_crash_stop_failed(void)
{
        return (atomic_read(&waiting_for_crash_ipi) > 0);
}
#endif

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}

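/*
 * Check whether the current CPU's cpu_ops provide a cpu_die method (used
 * below to decide whether secondaries might be left spinning in the kernel).
 */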
static bool have_cpu_die(void)
{
#ifdef CONFIG_HOTPLUG_CPU
        int any_cpu = raw_smp_processor_id();

        if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die)
                return true;
#endif
        return false;
}

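/*
 * True if any CPU may still be executing in kernel text: either a CPU that
 * failed hot-unplug, or spin-table secondaries that can never be offlined.
 */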
bool cpus_are_stuck_in_kernel(void)
{
        bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());

        return !!cpus_stuck_in_kernel || smp_spin_tables;
}