linux/arch/powerpc/kernel/smp.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * SMP support for ppc.
   4 *
   5 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
   6 * deal of code from the sparc and intel versions.
   7 *
   8 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
   9 *
  10 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
  11 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
  12 */
  13
  14#undef DEBUG
  15
  16#include <linux/kernel.h>
  17#include <linux/export.h>
  18#include <linux/sched/mm.h>
  19#include <linux/sched/task_stack.h>
  20#include <linux/sched/topology.h>
  21#include <linux/smp.h>
  22#include <linux/interrupt.h>
  23#include <linux/delay.h>
  24#include <linux/init.h>
  25#include <linux/spinlock.h>
  26#include <linux/cache.h>
  27#include <linux/err.h>
  28#include <linux/device.h>
  29#include <linux/cpu.h>
  30#include <linux/notifier.h>
  31#include <linux/topology.h>
  32#include <linux/profile.h>
  33#include <linux/processor.h>
  34#include <linux/random.h>
  35#include <linux/stackprotector.h>
  36#include <linux/pgtable.h>
  37
  38#include <asm/ptrace.h>
  39#include <linux/atomic.h>
  40#include <asm/irq.h>
  41#include <asm/hw_irq.h>
  42#include <asm/kvm_ppc.h>
  43#include <asm/dbell.h>
  44#include <asm/page.h>
  45#include <asm/prom.h>
  46#include <asm/smp.h>
  47#include <asm/time.h>
  48#include <asm/machdep.h>
  49#include <asm/cputhreads.h>
  50#include <asm/cputable.h>
  51#include <asm/mpic.h>
  52#include <asm/vdso_datapage.h>
  53#ifdef CONFIG_PPC64
  54#include <asm/paca.h>
  55#endif
  56#include <asm/vdso.h>
  57#include <asm/debug.h>
  58#include <asm/kexec.h>
  59#include <asm/asm-prototypes.h>
  60#include <asm/cpu_has_feature.h>
  61#include <asm/ftrace.h>
  62#include <asm/kup.h>
  63
  64#ifdef DEBUG
  65#include <asm/udbg.h>
  66#define DBG(fmt...) udbg_printf(fmt)
  67#else
  68#define DBG(fmt...)
  69#endif
  70
  71#ifdef CONFIG_HOTPLUG_CPU
  72/* State of each CPU during hotplug phases */
  73static DEFINE_PER_CPU(int, cpu_state) = { 0 };
  74#endif
  75
  76struct task_struct *secondary_current;
  77bool has_big_cores;
  78bool coregroup_enabled;
  79bool thread_group_shares_l2;
  80
  81DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
  82DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
  83DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
  84DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
  85DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map);
  86
  87EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
  88EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
  89EXPORT_PER_CPU_SYMBOL(cpu_core_map);
  90EXPORT_SYMBOL_GPL(has_big_cores);
  91
  92enum {
  93#ifdef CONFIG_SCHED_SMT
  94        smt_idx,
  95#endif
  96        cache_idx,
  97        mc_idx,
  98        die_idx,
  99};
 100
 101#define MAX_THREAD_LIST_SIZE    8
 102#define THREAD_GROUP_SHARE_L1   1
 103#define THREAD_GROUP_SHARE_L2   2
 104struct thread_groups {
 105        unsigned int property;
 106        unsigned int nr_groups;
 107        unsigned int threads_per_group;
 108        unsigned int thread_list[MAX_THREAD_LIST_SIZE];
 109};
 110
 111/* Maximum number of properties that groups of threads within a core can share */
 112#define MAX_THREAD_GROUP_PROPERTIES 2
 113
 114struct thread_groups_list {
 115        unsigned int nr_properties;
 116        struct thread_groups property_tgs[MAX_THREAD_GROUP_PROPERTIES];
 117};
 118
 119static struct thread_groups_list tgl[NR_CPUS] __initdata;
 120/*
 121 * On big-core systems, thread_group_l1_cache_map for each CPU corresponds to
 122 * the set of its siblings that share the L1-cache.
 123 */
 124DEFINE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map);
 125
 126/*
 127 * On some big-core systems, thread_group_l2_cache_map for each CPU
 128 * corresponds to the set of its siblings within the core that share the
 129 * L2-cache.
 130 */
 131DEFINE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map);
 132
 133/* SMP operations for this machine */
 134struct smp_ops_t *smp_ops;
 135
 136/* Can't be static due to PowerMac hackery */
 137volatile unsigned int cpu_callin_map[NR_CPUS];
 138
 139int smt_enabled_at_boot = 1;
 140
 141/*
 142 * Returns 1 if the specified cpu should be brought up during boot.
 143 * Used to inhibit booting threads if they've been disabled or
 144 * limited on the command line.
 145 */
 146int smp_generic_cpu_bootable(unsigned int nr)
 147{
 148        /* Special case - we inhibit secondary thread startup
 149         * during boot if the user requests it.
 150         */
 151        if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
 152                if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
 153                        return 0;
 154                if (smt_enabled_at_boot
 155                    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
 156                        return 0;
 157        }
 158
 159        return 1;
 160}
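/*
 * Worked example, for illustration only (values assume an SMT8 core with
 * threads 0..7, i.e. cpu_thread_in_core() in 0..7):
 *
 *	smt_enabled_at_boot == 0 ("smt=off"): only thread 0 of each core boots.
 *	smt_enabled_at_boot == 2 ("smt=2"):   threads 0 and 1 boot, 2..7 do not.
 *	smt_enabled_at_boot == 8:             all eight threads are bootable.
 *
 * Once the system reaches SYSTEM_RUNNING the check above no longer applies,
 * so threads excluded at boot can still be onlined later via hotplug.
 */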
 161
 162
 163#ifdef CONFIG_PPC64
 164int smp_generic_kick_cpu(int nr)
 165{
 166        if (nr < 0 || nr >= nr_cpu_ids)
 167                return -EINVAL;
 168
 169        /*
 170         * The processor is currently spinning, waiting for the
 171         * cpu_start field to become non-zero. After we set cpu_start,
 172         * the processor will continue on to secondary_start.
 173         */
 174        if (!paca_ptrs[nr]->cpu_start) {
 175                paca_ptrs[nr]->cpu_start = 1;
 176                smp_mb();
 177                return 0;
 178        }
 179
 180#ifdef CONFIG_HOTPLUG_CPU
 181        /*
 182         * Ok it's not there, so it might be soft-unplugged, let's
 183         * try to bring it back
 184         */
 185        generic_set_cpu_up(nr);
 186        smp_wmb();
 187        smp_send_reschedule(nr);
 188#endif /* CONFIG_HOTPLUG_CPU */
 189
 190        return 0;
 191}
 192#endif /* CONFIG_PPC64 */
 193
 194static irqreturn_t call_function_action(int irq, void *data)
 195{
 196        generic_smp_call_function_interrupt();
 197        return IRQ_HANDLED;
 198}
 199
 200static irqreturn_t reschedule_action(int irq, void *data)
 201{
 202        scheduler_ipi();
 203        return IRQ_HANDLED;
 204}
 205
 206#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 207static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
 208{
 209        timer_broadcast_interrupt();
 210        return IRQ_HANDLED;
 211}
 212#endif
 213
 214#ifdef CONFIG_NMI_IPI
 215static irqreturn_t nmi_ipi_action(int irq, void *data)
 216{
 217        smp_handle_nmi_ipi(get_irq_regs());
 218        return IRQ_HANDLED;
 219}
 220#endif
 221
 222static irq_handler_t smp_ipi_action[] = {
 223        [PPC_MSG_CALL_FUNCTION] =  call_function_action,
 224        [PPC_MSG_RESCHEDULE] = reschedule_action,
 225#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 226        [PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
 227#endif
 228#ifdef CONFIG_NMI_IPI
 229        [PPC_MSG_NMI_IPI] = nmi_ipi_action,
 230#endif
 231};
 232
 233/*
 234 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 235 * than going through the call function infrastructure, and strongly
 236 * serialized, so it is more appropriate for debugging.
 237 */
 238const char *smp_ipi_name[] = {
 239        [PPC_MSG_CALL_FUNCTION] =  "ipi call function",
 240        [PPC_MSG_RESCHEDULE] = "ipi reschedule",
 241#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 242        [PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
 243#endif
 244#ifdef CONFIG_NMI_IPI
 245        [PPC_MSG_NMI_IPI] = "nmi ipi",
 246#endif
 247};
 248
 249/* Optional function to request an IPI, for controllers with >= 4 IPIs */
 250int smp_request_message_ipi(int virq, int msg)
 251{
 252        int err;
 253
 254        if (msg < 0 || msg > PPC_MSG_NMI_IPI)
 255                return -EINVAL;
 256#ifndef CONFIG_NMI_IPI
 257        if (msg == PPC_MSG_NMI_IPI)
 258                return 1;
 259#endif
 260
 261        err = request_irq(virq, smp_ipi_action[msg],
 262                          IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
 263                          smp_ipi_name[msg], NULL);
 264        WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
 265                virq, smp_ipi_name[msg], err);
 266
 267        return err;
 268}
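/*
 * Usage sketch, for illustration only (my_ipi_virqs[] below is a placeholder,
 * not an interface defined in this file): a controller that exposes one
 * hardware IPI per message type could register them as
 *
 *	for (msg = PPC_MSG_CALL_FUNCTION; msg <= PPC_MSG_NMI_IPI; msg++)
 *		smp_request_message_ipi(my_ipi_virqs[msg], msg);
 *
 * Controllers with fewer IPIs instead provide smp_ops->cause_ipi and use the
 * muxed-IPI path below (CONFIG_PPC_SMP_MUXED_IPI).
 */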
 269
 270#ifdef CONFIG_PPC_SMP_MUXED_IPI
 271struct cpu_messages {
 272        long messages;                  /* current messages */
 273};
 274static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
 275
 276void smp_muxed_ipi_set_message(int cpu, int msg)
 277{
 278        struct cpu_messages *info = &per_cpu(ipi_message, cpu);
 279        char *message = (char *)&info->messages;
 280
 281        /*
 282         * Order previous accesses before accesses in the IPI handler.
 283         */
 284        smp_mb();
 285        message[msg] = 1;
 286}
 287
 288void smp_muxed_ipi_message_pass(int cpu, int msg)
 289{
 290        smp_muxed_ipi_set_message(cpu, msg);
 291
 292        /*
 293         * cause_ipi functions are required to include a full barrier
 294         * before doing whatever causes the IPI.
 295         */
 296        smp_ops->cause_ipi(cpu);
 297}
 298
 299#ifdef __BIG_ENDIAN__
 300#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
 301#else
 302#define IPI_MESSAGE(A) (1uL << (8 * (A)))
 303#endif
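/*
 * Worked example of this encoding (assuming BITS_PER_LONG == 64): each message
 * owns one byte of the per-cpu 'messages' long, and smp_muxed_ipi_set_message()
 * stores 1 into byte 'msg'. For msg == PPC_MSG_RESCHEDULE (1):
 *
 *	little-endian: IPI_MESSAGE(1) = 1UL << 8        = 0x0000000000000100
 *	big-endian:    IPI_MESSAGE(1) = 1UL << (56 - 8) = 0x0001000000000000
 *
 * In both cases the set bit falls in message[1], so the xchg() in
 * smp_ipi_demux_relaxed() sees exactly the bytes that were written.
 */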
 304
 305irqreturn_t smp_ipi_demux(void)
 306{
 307        mb();   /* order any irq clear */
 308
 309        return smp_ipi_demux_relaxed();
 310}
 311
 312/* sync-free variant. Callers should ensure synchronization */
 313irqreturn_t smp_ipi_demux_relaxed(void)
 314{
 315        struct cpu_messages *info;
 316        unsigned long all;
 317
 318        info = this_cpu_ptr(&ipi_message);
 319        do {
 320                all = xchg(&info->messages, 0);
 321#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
 322                /*
 323                 * Must check for PPC_MSG_RM_HOST_ACTION messages
 324                 * before PPC_MSG_CALL_FUNCTION messages because when
 325                 * a VM is destroyed, we call kick_all_cpus_sync()
 326                 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
 327                 * messages have completed before we free any VCPUs.
 328                 */
 329                if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
 330                        kvmppc_xics_ipi_action();
 331#endif
 332                if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
 333                        generic_smp_call_function_interrupt();
 334                if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
 335                        scheduler_ipi();
 336#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 337                if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
 338                        timer_broadcast_interrupt();
 339#endif
 340#ifdef CONFIG_NMI_IPI
 341                if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
 342                        nmi_ipi_action(0, NULL);
 343#endif
 344        } while (info->messages);
 345
 346        return IRQ_HANDLED;
 347}
 348#endif /* CONFIG_PPC_SMP_MUXED_IPI */
 349
 350static inline void do_message_pass(int cpu, int msg)
 351{
 352        if (smp_ops->message_pass)
 353                smp_ops->message_pass(cpu, msg);
 354#ifdef CONFIG_PPC_SMP_MUXED_IPI
 355        else
 356                smp_muxed_ipi_message_pass(cpu, msg);
 357#endif
 358}
 359
 360void smp_send_reschedule(int cpu)
 361{
 362        if (likely(smp_ops))
 363                do_message_pass(cpu, PPC_MSG_RESCHEDULE);
 364}
 365EXPORT_SYMBOL_GPL(smp_send_reschedule);
 366
 367void arch_send_call_function_single_ipi(int cpu)
 368{
 369        do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
 370}
 371
 372void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 373{
 374        unsigned int cpu;
 375
 376        for_each_cpu(cpu, mask)
 377                do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
 378}
 379
 380#ifdef CONFIG_NMI_IPI
 381
 382/*
 383 * "NMI IPI" system.
 384 *
 385 * NMI IPIs may not be recoverable, so should not be used as an ongoing part
 386 * of a running system. They can be used for crash, debug, halt/reboot, etc.
 387 *
 388 * The IPI call waits with interrupts disabled until all targets enter the
 389 * NMI handler, then returns. Subsequent IPIs can be issued before targets
 390 * have returned from their handlers, so there is no guarantee about
 391 * concurrency or re-entrancy.
 392 *
 393 * A new NMI can be issued before all targets exit the handler.
 394 *
 395 * The IPI call may time out without all targets entering the NMI handler.
 396 * In that case, there is some logic to recover (and ignore subsequent
 397 * NMI interrupts that may eventually be raised), but the platform interrupt
 398 * handler may not be able to distinguish this from other exception causes,
 399 * which may cause a crash.
 400 */
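/*
 * Typical call pattern, for illustration (mirroring the debugger/crash users
 * further down in this file): the caller supplies a handler that runs on each
 * target CPU from NMI context plus a timeout in microseconds, e.g.
 *
 *	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, handler_fn, 1000000);
 *
 * handler_fn is a placeholder name here. The call returns 0 if some target
 * failed to enter the handler before the timeout (or smp_ops is not set up),
 * and 1 otherwise.
 */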
 401
 402static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
 403static struct cpumask nmi_ipi_pending_mask;
 404static bool nmi_ipi_busy = false;
 405static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
 406
 407static void nmi_ipi_lock_start(unsigned long *flags)
 408{
 409        raw_local_irq_save(*flags);
 410        hard_irq_disable();
 411        while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
 412                raw_local_irq_restore(*flags);
 413                spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
 414                raw_local_irq_save(*flags);
 415                hard_irq_disable();
 416        }
 417}
 418
 419static void nmi_ipi_lock(void)
 420{
 421        while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
 422                spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
 423}
 424
 425static void nmi_ipi_unlock(void)
 426{
 427        smp_mb();
 428        WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
 429        atomic_set(&__nmi_ipi_lock, 0);
 430}
 431
 432static void nmi_ipi_unlock_end(unsigned long *flags)
 433{
 434        nmi_ipi_unlock();
 435        raw_local_irq_restore(*flags);
 436}
 437
 438/*
 439 * Platform NMI handler calls this to ack an NMI IPI
 440 */
 441int smp_handle_nmi_ipi(struct pt_regs *regs)
 442{
 443        void (*fn)(struct pt_regs *) = NULL;
 444        unsigned long flags;
 445        int me = raw_smp_processor_id();
 446        int ret = 0;
 447
 448        /*
 449         * Unexpected NMIs are possible here because the interrupt may not
 450         * be able to distinguish NMI IPIs from other types of NMIs, or
 451         * because the caller may have timed out.
 452         */
 453        nmi_ipi_lock_start(&flags);
 454        if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
 455                cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
 456                fn = READ_ONCE(nmi_ipi_function);
 457                WARN_ON_ONCE(!fn);
 458                ret = 1;
 459        }
 460        nmi_ipi_unlock_end(&flags);
 461
 462        if (fn)
 463                fn(regs);
 464
 465        return ret;
 466}
 467
 468static void do_smp_send_nmi_ipi(int cpu, bool safe)
 469{
 470        if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
 471                return;
 472
 473        if (cpu >= 0) {
 474                do_message_pass(cpu, PPC_MSG_NMI_IPI);
 475        } else {
 476                int c;
 477
 478                for_each_online_cpu(c) {
 479                        if (c == raw_smp_processor_id())
 480                                continue;
 481                        do_message_pass(c, PPC_MSG_NMI_IPI);
 482                }
 483        }
 484}
 485
 486/*
 487 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 488 * - fn is the target callback function.
 489 * - delay_us > 0 is the delay before giving up waiting for targets to
 490 *   begin executing the handler, == 0 specifies indefinite delay.
 491 */
 492static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
 493                                u64 delay_us, bool safe)
 494{
 495        unsigned long flags;
 496        int me = raw_smp_processor_id();
 497        int ret = 1;
 498
 499        BUG_ON(cpu == me);
 500        BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);
 501
 502        if (unlikely(!smp_ops))
 503                return 0;
 504
 505        nmi_ipi_lock_start(&flags);
 506        while (nmi_ipi_busy) {
 507                nmi_ipi_unlock_end(&flags);
 508                spin_until_cond(!nmi_ipi_busy);
 509                nmi_ipi_lock_start(&flags);
 510        }
 511        nmi_ipi_busy = true;
 512        nmi_ipi_function = fn;
 513
 514        WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));
 515
 516        if (cpu < 0) {
 517                /* ALL_OTHERS */
 518                cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
 519                cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
 520        } else {
 521                cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
 522        }
 523
 524        nmi_ipi_unlock();
 525
 526        /* Interrupts remain hard disabled */
 527
 528        do_smp_send_nmi_ipi(cpu, safe);
 529
 530        nmi_ipi_lock();
 531        /* nmi_ipi_busy is set here, so unlock/lock is okay */
 532        while (!cpumask_empty(&nmi_ipi_pending_mask)) {
 533                nmi_ipi_unlock();
 534                udelay(1);
 535                nmi_ipi_lock();
 536                if (delay_us) {
 537                        delay_us--;
 538                        if (!delay_us)
 539                                break;
 540                }
 541        }
 542
 543        if (!cpumask_empty(&nmi_ipi_pending_mask)) {
 544                /* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
 545                ret = 0;
 546                cpumask_clear(&nmi_ipi_pending_mask);
 547        }
 548
 549        nmi_ipi_function = NULL;
 550        nmi_ipi_busy = false;
 551
 552        nmi_ipi_unlock_end(&flags);
 553
 554        return ret;
 555}
 556
 557int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
 558{
 559        return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
 560}
 561
 562int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
 563{
 564        return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
 565}
 566#endif /* CONFIG_NMI_IPI */
 567
 568#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 569void tick_broadcast(const struct cpumask *mask)
 570{
 571        unsigned int cpu;
 572
 573        for_each_cpu(cpu, mask)
 574                do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
 575}
 576#endif
 577
 578#ifdef CONFIG_DEBUGGER
 579void debugger_ipi_callback(struct pt_regs *regs)
 580{
 581        debugger_ipi(regs);
 582}
 583
 584void smp_send_debugger_break(void)
 585{
 586        smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
 587}
 588#endif
 589
 590#ifdef CONFIG_KEXEC_CORE
 591void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
 592{
 593        int cpu;
 594
 595        smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
 596        if (kdump_in_progress() && crash_wake_offline) {
 597                for_each_present_cpu(cpu) {
 598                        if (cpu_online(cpu))
 599                                continue;
 600                        /*
 601                         * crash_ipi_callback will wait for
 602                         * all cpus, including offline CPUs.
 603                         * We don't care about nmi_ipi_function.
 604                         * Offline cpus will jump straight into
 605                         * crash_ipi_callback, so we can skip the
 606                         * entire NMI dance and the wait for
 607                         * cpus to clear the pending mask, etc.
 608                         */
 609                        do_smp_send_nmi_ipi(cpu, false);
 610                }
 611        }
 612}
 613#endif
 614
 615#ifdef CONFIG_NMI_IPI
 616static void nmi_stop_this_cpu(struct pt_regs *regs)
 617{
 618        /*
 619         * IRQs are already hard disabled by smp_handle_nmi_ipi().
 620         */
 621        spin_begin();
 622        while (1)
 623                spin_cpu_relax();
 624}
 625
 626void smp_send_stop(void)
 627{
 628        smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
 629}
 630
 631#else /* CONFIG_NMI_IPI */
 632
 633static void stop_this_cpu(void *dummy)
 634{
 635        hard_irq_disable();
 636        spin_begin();
 637        while (1)
 638                spin_cpu_relax();
 639}
 640
 641void smp_send_stop(void)
 642{
 643        static bool stopped = false;
 644
 645        /*
 646         * Prevent waiting on csd lock from a previous smp_send_stop.
 647         * This is racy, but in general callers try to do the right
 648         * thing and only fire off one smp_send_stop (e.g., see
 649         * kernel/panic.c)
 650         */
 651        if (stopped)
 652                return;
 653
 654        stopped = true;
 655
 656        smp_call_function(stop_this_cpu, NULL, 0);
 657}
 658#endif /* CONFIG_NMI_IPI */
 659
 660struct task_struct *current_set[NR_CPUS];
 661
 662static void smp_store_cpu_info(int id)
 663{
 664        per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
 665#ifdef CONFIG_PPC_FSL_BOOK3E
 666        per_cpu(next_tlbcam_idx, id)
 667                = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
 668#endif
 669}
 670
 671/*
 672 * Relationships between CPUs are maintained in a set of per-cpu cpumasks, so
 673 * rather than just passing around the cpumask we pass around a function that
 674 * returns that cpumask for the given CPU.
 675 */
 676static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
 677{
 678        cpumask_set_cpu(i, get_cpumask(j));
 679        cpumask_set_cpu(j, get_cpumask(i));
 680}
 681
 682#ifdef CONFIG_HOTPLUG_CPU
 683static void set_cpus_unrelated(int i, int j,
 684                struct cpumask *(*get_cpumask)(int))
 685{
 686        cpumask_clear_cpu(i, get_cpumask(j));
 687        cpumask_clear_cpu(j, get_cpumask(i));
 688}
 689#endif
 690
 691/*
 692 * Extends set_cpus_related. Instead of setting one CPU at a time in dstmask,
 693 * OR in the whole srcmask in one shot. dstmask should be a superset of srcmask.
 694 */
 695static void or_cpumasks_related(int i, int j, struct cpumask *(*srcmask)(int),
 696                                struct cpumask *(*dstmask)(int))
 697{
 698        struct cpumask *mask;
 699        int k;
 700
 701        mask = srcmask(j);
 702        for_each_cpu(k, srcmask(i))
 703                cpumask_or(dstmask(k), dstmask(k), mask);
 704
 705        if (i == j)
 706                return;
 707
 708        mask = srcmask(i);
 709        for_each_cpu(k, srcmask(j))
 710                cpumask_or(dstmask(k), dstmask(k), mask);
 711}
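/*
 * Worked example, for illustration: with srcmask == cpu_sibling_mask and
 * dstmask == cpu_l2_cache_mask, suppose cpu_sibling_mask(4) = {4,5,6,7} and
 * cpu_sibling_mask(8) = {8,9,10,11}. Then or_cpumasks_related(4, 8, ...)
 * ORs {8,9,10,11} into the L2 mask of CPUs 4..7 and {4,5,6,7} into the L2
 * mask of CPUs 8..11, so every thread of either core ends up with all eight
 * CPUs in its L2 mask (given the masks already contained their own siblings).
 */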
 712
 713/*
 714 * parse_thread_groups: Parses the "ibm,thread-groups" device tree
 715 *                      property for the CPU device node @dn and stores
 716 *                      the parsed output in the thread_groups_list
 717 *                      structure @tglp.
 718 *
 719 * @dn: The device node of the CPU device.
 720 * @tglp: Pointer to a thread group list structure into which the parsed
 721 *      output of "ibm,thread-groups" is stored.
 722 *
 723 * ibm,thread-groups[0..N-1] array defines which group of threads in
 724 * the CPU-device node can be grouped together based on the property.
 725 *
 726 * This array can represent thread groupings for multiple properties.
 727 *
 728 * ibm,thread-groups[i + 0] tells us the property based on which the
 729 * threads are being grouped together. If this value is 1, it implies
 730 * that the threads in the same group share the L1 and translation cache. If
 731 * the value is 2, it implies that the threads in the same group share
 732 * the same L2 cache.
 733 *
 734 * ibm,thread-groups[i+1] tells us how many such thread groups exist for the
 735 * property ibm,thread-groups[i]
 736 *
 737 * ibm,thread-groups[i+2] tells us the number of threads in each such
 738 * group.
 739 * Suppose k = (ibm,thread-groups[i+1] * ibm,thread-groups[i+2]), then,
 740 *
 741 * ibm,thread-groups[i+3..i+k+2] is the list of threads identified by
 742 * "ibm,ppc-interrupt-server#s" arranged as per their membership in
 743 * the grouping.
 744 *
 745 * Example:
 746 * If "ibm,thread-groups" = [1,2,4,8,10,12,14,9,11,13,15,2,2,4,8,10,12,14,9,11,13,15]
 747 * This can be decomposed into two consecutive arrays:
 748 * a) [1,2,4,8,10,12,14,9,11,13,15]
 749 * b) [2,2,4,8,10,12,14,9,11,13,15]
 750 *
 751 * wherein,
 752 *
 753 * a) indicates that Property "1" is shared by "2" groups,
 754 *  each with "4" threads. The "ibm,ppc-interrupt-server#s" of
 755 *  the first group is {8,10,12,14} and the
 756 *  "ibm,ppc-interrupt-server#s" of the second group is
 757 *  {9,11,13,15}. Property "1" indicates that the threads in the
 758 *  group share the L1 cache, translation cache and instruction data
 759 *  flow.
 760 *
 761 * b) indicates that Property "2" is shared by "2" groups,
 762 *  each with "4" threads. The "ibm,ppc-interrupt-server#s" of
 763 *  the first group is {8,10,12,14} and the
 764 *  "ibm,ppc-interrupt-server#s" of the second group is
 765 *  {9,11,13,15}. Property "2" indicates that the threads in each
 766 *  group share the L2-cache.
 767 *
 768 * Returns 0 on success, -EINVAL if the property does not exist,
 769 * -ENODATA if the property does not have a value, and -EOVERFLOW if the
 770 * property data isn't large enough.
 771 */
 772static int parse_thread_groups(struct device_node *dn,
 773                               struct thread_groups_list *tglp)
 774{
 775        unsigned int property_idx = 0;
 776        u32 *thread_group_array;
 777        size_t total_threads;
 778        int ret = 0, count;
 779        u32 *thread_list;
 780        int i = 0;
 781
 782        count = of_property_count_u32_elems(dn, "ibm,thread-groups");
 783        thread_group_array = kcalloc(count, sizeof(u32), GFP_KERNEL);
 784        ret = of_property_read_u32_array(dn, "ibm,thread-groups",
 785                                         thread_group_array, count);
 786        if (ret)
 787                goto out_free;
 788
 789        while (i < count && property_idx < MAX_THREAD_GROUP_PROPERTIES) {
 790                int j;
 791                struct thread_groups *tg = &tglp->property_tgs[property_idx++];
 792
 793                tg->property = thread_group_array[i];
 794                tg->nr_groups = thread_group_array[i + 1];
 795                tg->threads_per_group = thread_group_array[i + 2];
 796                total_threads = tg->nr_groups * tg->threads_per_group;
 797
 798                thread_list = &thread_group_array[i + 3];
 799
 800                for (j = 0; j < total_threads; j++)
 801                        tg->thread_list[j] = thread_list[j];
 802                i = i + 3 + total_threads;
 803        }
 804
 805        tglp->nr_properties = property_idx;
 806
 807out_free:
 808        kfree(thread_group_array);
 809        return ret;
 810}
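/*
 * For illustration, parsing the example property from the comment above,
 *
 *	"ibm,thread-groups" = [1,2,4,8,10,12,14,9,11,13,15,
 *			       2,2,4,8,10,12,14,9,11,13,15]
 *
 * leaves @tglp with nr_properties = 2 and
 *
 *	property_tgs[0] = { .property = 1, .nr_groups = 2, .threads_per_group = 4,
 *			    .thread_list = {8,10,12,14,9,11,13,15} };
 *	property_tgs[1] = { .property = 2, .nr_groups = 2, .threads_per_group = 4,
 *			    .thread_list = {8,10,12,14,9,11,13,15} };
 */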
 811
 812/*
 813 * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
 814 *                              that @cpu belongs to.
 815 *
 816 * @cpu : The logical CPU whose thread group is being searched.
 817 * @tg : The thread-group structure of the CPU node which @cpu belongs
 818 *       to.
 819 *
 820 * Returns the index into tg->thread_list that points to the start
 821 * of the thread_group that @cpu belongs to.
 822 *
 823 * Returns -1 if cpu doesn't belong to any of the groups pointed to by
 824 * tg->thread_list.
 825 */
 826static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
 827{
 828        int hw_cpu_id = get_hard_smp_processor_id(cpu);
 829        int i, j;
 830
 831        for (i = 0; i < tg->nr_groups; i++) {
 832                int group_start = i * tg->threads_per_group;
 833
 834                for (j = 0; j < tg->threads_per_group; j++) {
 835                        int idx = group_start + j;
 836
 837                        if (tg->thread_list[idx] == hw_cpu_id)
 838                                return group_start;
 839                }
 840        }
 841
 842        return -1;
 843}
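/*
 * Worked example, for illustration: with the thread_groups parsed above
 * (nr_groups = 2, threads_per_group = 4,
 *  thread_list = {8,10,12,14,9,11,13,15}), a CPU whose hardware id is 12
 * falls in the first group, so this returns 0; a CPU whose hardware id is 11
 * falls in the second group, so this returns 4. A hardware id that appears
 * nowhere in thread_list returns -1.
 */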
 844
 845static struct thread_groups *__init get_thread_groups(int cpu,
 846                                                      int group_property,
 847                                                      int *err)
 848{
 849        struct device_node *dn = of_get_cpu_node(cpu, NULL);
 850        struct thread_groups_list *cpu_tgl = &tgl[cpu];
 851        struct thread_groups *tg = NULL;
 852        int i;
 853        *err = 0;
 854
 855        if (!dn) {
 856                *err = -ENODATA;
 857                return NULL;
 858        }
 859
 860        if (!cpu_tgl->nr_properties) {
 861                *err = parse_thread_groups(dn, cpu_tgl);
 862                if (*err)
 863                        goto out;
 864        }
 865
 866        for (i = 0; i < cpu_tgl->nr_properties; i++) {
 867                if (cpu_tgl->property_tgs[i].property == group_property) {
 868                        tg = &cpu_tgl->property_tgs[i];
 869                        break;
 870                }
 871        }
 872
 873        if (!tg)
 874                *err = -EINVAL;
 875out:
 876        of_node_put(dn);
 877        return tg;
 878}
 879
 880static int __init init_thread_group_cache_map(int cpu, int cache_property)
 881
 882{
 883        int first_thread = cpu_first_thread_sibling(cpu);
 884        int i, cpu_group_start = -1, err = 0;
 885        struct thread_groups *tg = NULL;
 886        cpumask_var_t *mask = NULL;
 887
 888        if (cache_property != THREAD_GROUP_SHARE_L1 &&
 889            cache_property != THREAD_GROUP_SHARE_L2)
 890                return -EINVAL;
 891
 892        tg = get_thread_groups(cpu, cache_property, &err);
 893        if (!tg)
 894                return err;
 895
 896        cpu_group_start = get_cpu_thread_group_start(cpu, tg);
 897
 898        if (unlikely(cpu_group_start == -1)) {
 899                WARN_ON_ONCE(1);
 900                return -ENODATA;
 901        }
 902
 903        if (cache_property == THREAD_GROUP_SHARE_L1)
 904                mask = &per_cpu(thread_group_l1_cache_map, cpu);
 905        else if (cache_property == THREAD_GROUP_SHARE_L2)
 906                mask = &per_cpu(thread_group_l2_cache_map, cpu);
 907
 908        zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu));
 909
 910        for (i = first_thread; i < first_thread + threads_per_core; i++) {
 911                int i_group_start = get_cpu_thread_group_start(i, tg);
 912
 913                if (unlikely(i_group_start == -1)) {
 914                        WARN_ON_ONCE(1);
 915                        return -ENODATA;
 916                }
 917
 918                if (i_group_start == cpu_group_start)
 919                        cpumask_set_cpu(i, *mask);
 920        }
 921
 922        return 0;
 923}
 924
 925static bool shared_caches;
 926
 927#ifdef CONFIG_SCHED_SMT
 928/* Flags for the SMT sched-domain level; CPU_FTR_ASYM_SMT adds SD_ASYM_PACKING */
 929static int powerpc_smt_flags(void)
 930{
 931        int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
 932
 933        if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
 934                printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
 935                flags |= SD_ASYM_PACKING;
 936        }
 937        return flags;
 938}
 939#endif
 940
 941/*
 942 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
 943 * This topology makes it *much* cheaper to migrate tasks between adjacent cores
 944 * since the migrated task remains cache hot. We want to take advantage of this
 945 * at the scheduler level so an extra topology level is required.
 946 */
 947static int powerpc_shared_cache_flags(void)
 948{
 949        return SD_SHARE_PKG_RESOURCES;
 950}
 951
 952/*
 953 * We can't just pass cpu_l2_cache_mask() directly because it
 954 * returns a non-const pointer and the compiler barfs on that.
 955 */
 956static const struct cpumask *shared_cache_mask(int cpu)
 957{
 958        return per_cpu(cpu_l2_cache_map, cpu);
 959}
 960
 961#ifdef CONFIG_SCHED_SMT
 962static const struct cpumask *smallcore_smt_mask(int cpu)
 963{
 964        return cpu_smallcore_mask(cpu);
 965}
 966#endif
 967
 968static struct cpumask *cpu_coregroup_mask(int cpu)
 969{
 970        return per_cpu(cpu_coregroup_map, cpu);
 971}
 972
 973static bool has_coregroup_support(void)
 974{
 975        return coregroup_enabled;
 976}
 977
 978static const struct cpumask *cpu_mc_mask(int cpu)
 979{
 980        return cpu_coregroup_mask(cpu);
 981}
 982
 983static struct sched_domain_topology_level powerpc_topology[] = {
 984#ifdef CONFIG_SCHED_SMT
 985        { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
 986#endif
 987        { shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
 988        { cpu_mc_mask, SD_INIT_NAME(MC) },
 989        { cpu_cpu_mask, SD_INIT_NAME(DIE) },
 990        { NULL, },
 991};
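/*
 * For illustration: on a Power9 system with big cores and L2 shared between
 * the two small cores of a pair, fixup_topology() below typically ends up
 * with
 *
 *	SMT   -> small-core siblings                    (smallcore_smt_mask)
 *	CACHE -> all threads sharing an L2              (shared_cache_mask)
 *	MC    -> coregroup, when firmware describes one (cpu_mc_mask)
 *	DIE   -> all CPUs of the NUMA node              (cpu_cpu_mask)
 *
 * When coregroups are not supported, the MC mask is aliased to the CACHE
 * mask so the redundant level can be collapsed.
 */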
 992
 993static int __init init_big_cores(void)
 994{
 995        int cpu;
 996
 997        for_each_possible_cpu(cpu) {
 998                int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1);
 999
1000                if (err)
1001                        return err;
1002
1003                zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
1004                                        GFP_KERNEL,
1005                                        cpu_to_node(cpu));
1006        }
1007
1008        has_big_cores = true;
1009
1010        for_each_possible_cpu(cpu) {
1011                int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L2);
1012
1013                if (err)
1014                        return err;
1015        }
1016
1017        thread_group_shares_l2 = true;
1018        pr_debug("L2 cache only shared by the threads in the small core\n");
1019        return 0;
1020}
1021
1022void __init smp_prepare_cpus(unsigned int max_cpus)
1023{
1024        unsigned int cpu;
1025
1026        DBG("smp_prepare_cpus\n");
1027
1028        /* 
1029         * setup_cpu may need to be called on the boot cpu. We haven't
1030         * spun any cpus up yet, but let's be paranoid.
1031         */
1032        BUG_ON(boot_cpuid != smp_processor_id());
1033
1034        /* Fixup boot cpu */
1035        smp_store_cpu_info(boot_cpuid);
1036        cpu_callin_map[boot_cpuid] = 1;
1037
1038        for_each_possible_cpu(cpu) {
1039                zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
1040                                        GFP_KERNEL, cpu_to_node(cpu));
1041                zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
1042                                        GFP_KERNEL, cpu_to_node(cpu));
1043                zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
1044                                        GFP_KERNEL, cpu_to_node(cpu));
1045                if (has_coregroup_support())
1046                        zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
1047                                                GFP_KERNEL, cpu_to_node(cpu));
1048
1049#ifdef CONFIG_NEED_MULTIPLE_NODES
1050                /*
1051                 * numa_node_id() works after this.
1052                 */
1053                if (cpu_present(cpu)) {
1054                        set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
1055                        set_cpu_numa_mem(cpu,
1056                                local_memory_node(numa_cpu_lookup_table[cpu]));
1057                }
1058#endif
1059                /*
1060                 * cpu_core_map is no longer updated incrementally; it is kept
1061                 * only because it has long been exported. It will only hold a
1062                 * snapshot of cpu_cpu_mask.
1063                 */
1064                cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
1065        }
1066
1067        /* Init the cpumasks so the boot CPU is related to itself */
1068        cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
1069        cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
1070
1071        if (has_coregroup_support())
1072                cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
1073
1074        init_big_cores();
1075        if (has_big_cores) {
1076                cpumask_set_cpu(boot_cpuid,
1077                                cpu_smallcore_mask(boot_cpuid));
1078        }
1079
1080        if (smp_ops && smp_ops->probe)
1081                smp_ops->probe();
1082}
1083
1084void smp_prepare_boot_cpu(void)
1085{
1086        BUG_ON(smp_processor_id() != boot_cpuid);
1087#ifdef CONFIG_PPC64
1088        paca_ptrs[boot_cpuid]->__current = current;
1089#endif
1090        set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
1091        current_set[boot_cpuid] = current;
1092}
1093
1094#ifdef CONFIG_HOTPLUG_CPU
1095
1096int generic_cpu_disable(void)
1097{
1098        unsigned int cpu = smp_processor_id();
1099
1100        if (cpu == boot_cpuid)
1101                return -EBUSY;
1102
1103        set_cpu_online(cpu, false);
1104#ifdef CONFIG_PPC64
1105        vdso_data->processorCount--;
1106#endif
1107        /* Update affinity of all IRQs previously aimed at this CPU */
1108        irq_migrate_all_off_this_cpu();
1109
1110        /*
1111         * Depending on the details of the interrupt controller, it's possible
1112         * that one of the interrupts we just migrated away from this CPU is
1113         * actually already pending on this CPU. If we leave it in that state
1114         * the interrupt will never be EOI'ed, and will never fire again. So
1115         * temporarily enable interrupts here, to allow any pending interrupt to
1116         * be received (and EOI'ed), before we take this CPU offline.
1117         */
1118        local_irq_enable();
1119        mdelay(1);
1120        local_irq_disable();
1121
1122        return 0;
1123}
1124
1125void generic_cpu_die(unsigned int cpu)
1126{
1127        int i;
1128
1129        for (i = 0; i < 100; i++) {
1130                smp_rmb();
1131                if (is_cpu_dead(cpu))
1132                        return;
1133                msleep(100);
1134        }
1135        printk(KERN_ERR "CPU%d didn't die...\n", cpu);
1136}
1137
1138void generic_set_cpu_dead(unsigned int cpu)
1139{
1140        per_cpu(cpu_state, cpu) = CPU_DEAD;
1141}
1142
1143/*
1144 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
1145 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
1146 * which makes the delay in generic_cpu_die() not happen.
1147 */
1148void generic_set_cpu_up(unsigned int cpu)
1149{
1150        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
1151}
1152
1153int generic_check_cpu_restart(unsigned int cpu)
1154{
1155        return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
1156}
1157
1158int is_cpu_dead(unsigned int cpu)
1159{
1160        return per_cpu(cpu_state, cpu) == CPU_DEAD;
1161}
1162
1163static bool secondaries_inhibited(void)
1164{
1165        return kvm_hv_mode_active();
1166}
1167
1168#else /* HOTPLUG_CPU */
1169
1170#define secondaries_inhibited()         0
1171
1172#endif
1173
1174static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
1175{
1176#ifdef CONFIG_PPC64
1177        paca_ptrs[cpu]->__current = idle;
1178        paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
1179                                 THREAD_SIZE - STACK_FRAME_OVERHEAD;
1180#endif
1181        idle->cpu = cpu;
1182        secondary_current = current_set[cpu] = idle;
1183}
1184
1185int __cpu_up(unsigned int cpu, struct task_struct *tidle)
1186{
1187        int rc, c;
1188
1189        /*
1190         * Don't allow secondary threads to come online if inhibited
1191         */
1192        if (threads_per_core > 1 && secondaries_inhibited() &&
1193            cpu_thread_in_subcore(cpu))
1194                return -EBUSY;
1195
1196        if (smp_ops == NULL ||
1197            (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
1198                return -EINVAL;
1199
1200        cpu_idle_thread_init(cpu, tidle);
1201
1202        /*
1203         * The platform might need to allocate resources prior to bringing
1204         * up the CPU
1205         */
1206        if (smp_ops->prepare_cpu) {
1207                rc = smp_ops->prepare_cpu(cpu);
1208                if (rc)
1209                        return rc;
1210        }
1211
1212        /* Make sure callin-map entry is 0 (can be left over from a CPU
1213         * hotplug)
1214         */
1215        cpu_callin_map[cpu] = 0;
1216
1217        /* The information for processor bringup must
1218         * be written out to main store before we release
1219         * the processor.
1220         */
1221        smp_mb();
1222
1223        /* wake up cpus */
1224        DBG("smp: kicking cpu %d\n", cpu);
1225        rc = smp_ops->kick_cpu(cpu);
1226        if (rc) {
1227                pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
1228                return rc;
1229        }
1230
1231        /*
1232         * Wait to see if the cpu made a callin (is actually up).
1233         * Use this value that I found through experimentation.
1234         * -- Cort
1235         */
1236        if (system_state < SYSTEM_RUNNING)
1237                for (c = 50000; c && !cpu_callin_map[cpu]; c--)
1238                        udelay(100);
1239#ifdef CONFIG_HOTPLUG_CPU
1240        else
1241                /*
1242                 * CPUs can take much longer to come up in the
1243                 * hotplug case.  Wait five seconds.
1244                 */
1245                for (c = 5000; c && !cpu_callin_map[cpu]; c--)
1246                        msleep(1);
1247#endif
1248
1249        if (!cpu_callin_map[cpu]) {
1250                printk(KERN_ERR "Processor %u is stuck.\n", cpu);
1251                return -ENOENT;
1252        }
1253
1254        DBG("Processor %u found.\n", cpu);
1255
1256        if (smp_ops->give_timebase)
1257                smp_ops->give_timebase();
1258
1259        /* Wait until cpu puts itself in the online & active maps */
1260        spin_until_cond(cpu_online(cpu));
1261
1262        return 0;
1263}
1264
1265/* Return the value of the reg property corresponding to the given
1266 * logical cpu.
1267 */
1268int cpu_to_core_id(int cpu)
1269{
1270        struct device_node *np;
1271        const __be32 *reg;
1272        int id = -1;
1273
1274        np = of_get_cpu_node(cpu, NULL);
1275        if (!np)
1276                goto out;
1277
1278        reg = of_get_property(np, "reg", NULL);
1279        if (!reg)
1280                goto out;
1281
1282        id = be32_to_cpup(reg);
1283out:
1284        of_node_put(np);
1285        return id;
1286}
1287EXPORT_SYMBOL_GPL(cpu_to_core_id);
1288
1289/* Helper routines for cpu to core mapping */
1290int cpu_core_index_of_thread(int cpu)
1291{
1292        return cpu >> threads_shift;
1293}
1294EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
1295
1296int cpu_first_thread_of_core(int core)
1297{
1298        return core << threads_shift;
1299}
1300EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
1301
1302/* Must be called when no change can occur to cpu_present_mask,
1303 * i.e. during cpu online or offline.
1304 */
1305static struct device_node *cpu_to_l2cache(int cpu)
1306{
1307        struct device_node *np;
1308        struct device_node *cache;
1309
1310        if (!cpu_present(cpu))
1311                return NULL;
1312
1313        np = of_get_cpu_node(cpu, NULL);
1314        if (np == NULL)
1315                return NULL;
1316
1317        cache = of_find_next_cache_node(np);
1318
1319        of_node_put(np);
1320
1321        return cache;
1322}
1323
1324static bool update_mask_by_l2(int cpu, cpumask_var_t *mask)
1325{
1326        struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
1327        struct device_node *l2_cache, *np;
1328        int i;
1329
1330        if (has_big_cores)
1331                submask_fn = cpu_smallcore_mask;
1332
1333        /*
1334         * If the threads in a thread-group share L2 cache, then the
1335         * L2-mask can be obtained from thread_group_l2_cache_map.
1336         */
1337        if (thread_group_shares_l2) {
1338                cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu));
1339
1340                for_each_cpu(i, per_cpu(thread_group_l2_cache_map, cpu)) {
1341                        if (cpu_online(i))
1342                                set_cpus_related(i, cpu, cpu_l2_cache_mask);
1343                }
1344
1345                /* Verify that L1-cache siblings are a subset of L2 cache-siblings */
1346                if (!cpumask_equal(submask_fn(cpu), cpu_l2_cache_mask(cpu)) &&
1347                    !cpumask_subset(submask_fn(cpu), cpu_l2_cache_mask(cpu))) {
1348                        pr_warn_once("CPU %d : Inconsistent L1 and L2 cache siblings\n",
1349                                     cpu);
1350                }
1351
1352                return true;
1353        }
1354
1355        l2_cache = cpu_to_l2cache(cpu);
1356        if (!l2_cache || !*mask) {
1357                /* Assume only core siblings share cache with this CPU */
1358                for_each_cpu(i, submask_fn(cpu))
1359                        set_cpus_related(cpu, i, cpu_l2_cache_mask);
1360
1361                return false;
1362        }
1363
1364        cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
1365
1366        /* Update l2-cache mask with all the CPUs that are part of submask */
1367        or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask);
1368
1369        /* Skip all CPUs already part of current CPU l2-cache mask */
1370        cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(cpu));
1371
1372        for_each_cpu(i, *mask) {
1373                /*
1374                 * When updating the masks, the current CPU has not yet been
1375                 * marked online, but we still need to update the cache masks.
1376                 */
1377                np = cpu_to_l2cache(i);
1378
1379                /* Skip all CPUs already part of current CPU l2-cache */
1380                if (np == l2_cache) {
1381                        or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask);
1382                        cpumask_andnot(*mask, *mask, submask_fn(i));
1383                } else {
1384                        cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(i));
1385                }
1386
1387                of_node_put(np);
1388        }
1389        of_node_put(l2_cache);
1390
1391        return true;
1392}
1393
1394#ifdef CONFIG_HOTPLUG_CPU
1395static void remove_cpu_from_masks(int cpu)
1396{
1397        struct cpumask *(*mask_fn)(int) = cpu_sibling_mask;
1398        int i;
1399
1400        if (shared_caches)
1401                mask_fn = cpu_l2_cache_mask;
1402
1403        for_each_cpu(i, mask_fn(cpu)) {
1404                set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
1405                set_cpus_unrelated(cpu, i, cpu_sibling_mask);
1406                if (has_big_cores)
1407                        set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
1408        }
1409
1410        if (has_coregroup_support()) {
1411                for_each_cpu(i, cpu_coregroup_mask(cpu))
1412                        set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
1413        }
1414}
1415#endif
1416
1417static inline void add_cpu_to_smallcore_masks(int cpu)
1418{
1419        int i;
1420
1421        if (!has_big_cores)
1422                return;
1423
1424        cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));
1425
1426        for_each_cpu(i, per_cpu(thread_group_l1_cache_map, cpu)) {
1427                if (cpu_online(i))
1428                        set_cpus_related(i, cpu, cpu_smallcore_mask);
1429        }
1430}
1431
1432static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
1433{
1434        struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
1435        int coregroup_id = cpu_to_coregroup_id(cpu);
1436        int i;
1437
1438        if (shared_caches)
1439                submask_fn = cpu_l2_cache_mask;
1440
1441        if (!*mask) {
1442                /* Assume only siblings are part of this CPU's coregroup */
1443                for_each_cpu(i, submask_fn(cpu))
1444                        set_cpus_related(cpu, i, cpu_coregroup_mask);
1445
1446                return;
1447        }
1448
1449        cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
1450
1451        /* Update coregroup mask with all the CPUs that are part of submask */
1452        or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask);
1453
1454        /* Skip all CPUs already part of coregroup mask */
1455        cpumask_andnot(*mask, *mask, cpu_coregroup_mask(cpu));
1456
1457        for_each_cpu(i, *mask) {
1458                /* Skip all CPUs not part of this coregroup */
1459                if (coregroup_id == cpu_to_coregroup_id(i)) {
1460                        or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask);
1461                        cpumask_andnot(*mask, *mask, submask_fn(i));
1462                } else {
1463                        cpumask_andnot(*mask, *mask, cpu_coregroup_mask(i));
1464                }
1465        }
1466}
1467
1468static void add_cpu_to_masks(int cpu)
1469{
1470        int first_thread = cpu_first_thread_sibling(cpu);
1471        cpumask_var_t mask;
1472        int i;
1473
1474        /*
1475         * This CPU will not be in the online mask yet so we need to manually
1476         * add it to its own thread sibling mask.
1477         */
1478        cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
1479
1480        for (i = first_thread; i < first_thread + threads_per_core; i++)
1481                if (cpu_online(i))
1482                        set_cpus_related(i, cpu, cpu_sibling_mask);
1483
1484        add_cpu_to_smallcore_masks(cpu);
1485
1486        /* In CPU-hotplug path, hence use GFP_ATOMIC */
1487        alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
1488        update_mask_by_l2(cpu, &mask);
1489
1490        if (has_coregroup_support())
1491                update_coregroup_mask(cpu, &mask);
1492
1493        free_cpumask_var(mask);
1494}
1495
1496/* Activate a secondary processor. */
1497void start_secondary(void *unused)
1498{
1499        unsigned int cpu = raw_smp_processor_id();
1500
1501        mmgrab(&init_mm);
1502        current->active_mm = &init_mm;
1503
1504        smp_store_cpu_info(cpu);
1505        set_dec(tb_ticks_per_jiffy);
1506        rcu_cpu_starting(cpu);
1507        preempt_disable();
1508        cpu_callin_map[cpu] = 1;
1509
1510        if (smp_ops->setup_cpu)
1511                smp_ops->setup_cpu(cpu);
1512        if (smp_ops->take_timebase)
1513                smp_ops->take_timebase();
1514
1515        secondary_cpu_time_init();
1516
1517#ifdef CONFIG_PPC64
1518        if (system_state == SYSTEM_RUNNING)
1519                vdso_data->processorCount++;
1520
1521        vdso_getcpu_init();
1522#endif
1523        /* Update topology CPU masks */
1524        add_cpu_to_masks(cpu);
1525
1526        /*
1527         * Check for any shared caches. Note that this must be done on a
1528         * per-core basis because one core in the pair might be disabled.
1529         */
1530        if (!shared_caches) {
1531                struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
1532                struct cpumask *mask = cpu_l2_cache_mask(cpu);
1533
1534                if (has_big_cores)
1535                        sibling_mask = cpu_smallcore_mask;
1536
1537                if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu)))
1538                        shared_caches = true;
1539        }
1540
1541        set_numa_node(numa_cpu_lookup_table[cpu]);
1542        set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
1543
1544        smp_wmb();
1545        notify_cpu_starting(cpu);
1546        set_cpu_online(cpu, true);
1547
1548        boot_init_stack_canary();
1549
1550        local_irq_enable();
1551
1552        /* We can enable ftrace for secondary cpus now */
1553        this_cpu_enable_ftrace();
1554
1555        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
1556
1557        BUG();
1558}
1559
1560int setup_profiling_timer(unsigned int multiplier)
1561{
1562        return 0;
1563}
1564
1565static void fixup_topology(void)
1566{
1567        int i;
1568
1569#ifdef CONFIG_SCHED_SMT
1570        if (has_big_cores) {
1571                pr_info("Big cores detected but using small core scheduling\n");
1572                powerpc_topology[smt_idx].mask = smallcore_smt_mask;
1573        }
1574#endif
1575
1576        if (!has_coregroup_support())
1577                powerpc_topology[mc_idx].mask = powerpc_topology[cache_idx].mask;
1578
1579        /*
1580         * Try to consolidate topology levels here instead of
1581         * letting the scheduler degenerate them.
1582         * - Don't consolidate if masks are different.
1583         * - Don't consolidate if sd_flags exist and are different.
1584         */
1585        for (i = 1; i <= die_idx; i++) {
1586                if (powerpc_topology[i].mask != powerpc_topology[i - 1].mask)
1587                        continue;
1588
1589                if (powerpc_topology[i].sd_flags && powerpc_topology[i - 1].sd_flags &&
1590                                powerpc_topology[i].sd_flags != powerpc_topology[i - 1].sd_flags)
1591                        continue;
1592
1593                if (!powerpc_topology[i - 1].sd_flags)
1594                        powerpc_topology[i - 1].sd_flags = powerpc_topology[i].sd_flags;
1595
1596                powerpc_topology[i].mask = powerpc_topology[i + 1].mask;
1597                powerpc_topology[i].sd_flags = powerpc_topology[i + 1].sd_flags;
1598#ifdef CONFIG_SCHED_DEBUG
1599                powerpc_topology[i].name = powerpc_topology[i + 1].name;
1600#endif
1601        }
1602}
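/*
 * For illustration: if CACHE and MC end up with identical masks (e.g. no
 * coregroup support, so the assignment above aliased them) and compatible
 * sd_flags, the loop above copies the next level's mask/flags/name over MC,
 * leaving an effective SMT -> CACHE -> DIE hierarchy rather than making the
 * scheduler build and then degenerate a redundant MC level.
 */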
1603
1604void __init smp_cpus_done(unsigned int max_cpus)
1605{
1606        /*
1607         * We are running pinned to the boot CPU, see rest_init().
1608         */
1609        if (smp_ops && smp_ops->setup_cpu)
1610                smp_ops->setup_cpu(boot_cpuid);
1611
1612        if (smp_ops && smp_ops->bringup_done)
1613                smp_ops->bringup_done();
1614
1615        dump_numa_cpu_topology();
1616
1617        fixup_topology();
1618        set_sched_topology(powerpc_topology);
1619}
1620
1621#ifdef CONFIG_HOTPLUG_CPU
1622int __cpu_disable(void)
1623{
1624        int cpu = smp_processor_id();
1625        int err;
1626
1627        if (!smp_ops->cpu_disable)
1628                return -ENOSYS;
1629
1630        this_cpu_disable_ftrace();
1631
1632        err = smp_ops->cpu_disable();
1633        if (err)
1634                return err;
1635
1636        /* Update sibling maps */
1637        remove_cpu_from_masks(cpu);
1638
1639        return 0;
1640}
1641
1642void __cpu_die(unsigned int cpu)
1643{
1644        if (smp_ops->cpu_die)
1645                smp_ops->cpu_die(cpu);
1646}
1647
1648void arch_cpu_idle_dead(void)
1649{
1650        sched_preempt_enable_no_resched();
1651
1652        /*
1653         * Disable ftrace on the down path. This will be re-enabled by
1654         * start_secondary() via start_secondary_resume() below
1655         */
1656        this_cpu_disable_ftrace();
1657
1658        if (smp_ops->cpu_offline_self)
1659                smp_ops->cpu_offline_self();
1660
1661        /* If we return, we re-enter start_secondary */
1662        start_secondary_resume();
1663}
1664
1665#endif
1666