linux/arch/powerpc/kernel/setup_64.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * 
   4 * Common boot and setup code.
   5 *
   6 * Copyright (C) 2001 PPC64 Team, IBM Corp
   7 */
   8
   9#include <linux/export.h>
  10#include <linux/string.h>
  11#include <linux/sched.h>
  12#include <linux/init.h>
  13#include <linux/kernel.h>
  14#include <linux/reboot.h>
  15#include <linux/delay.h>
  16#include <linux/initrd.h>
  17#include <linux/seq_file.h>
  18#include <linux/ioport.h>
  19#include <linux/console.h>
  20#include <linux/utsname.h>
  21#include <linux/tty.h>
  22#include <linux/root_dev.h>
  23#include <linux/notifier.h>
  24#include <linux/cpu.h>
  25#include <linux/unistd.h>
  26#include <linux/serial.h>
  27#include <linux/serial_8250.h>
  28#include <linux/memblock.h>
  29#include <linux/pci.h>
  30#include <linux/lockdep.h>
  31#include <linux/memory.h>
  32#include <linux/nmi.h>
  33#include <linux/pgtable.h>
  34
  35#include <asm/debugfs.h>
  36#include <asm/io.h>
  37#include <asm/kdump.h>
  38#include <asm/prom.h>
  39#include <asm/processor.h>
  40#include <asm/smp.h>
  41#include <asm/elf.h>
  42#include <asm/machdep.h>
  43#include <asm/paca.h>
  44#include <asm/time.h>
  45#include <asm/cputable.h>
  46#include <asm/dt_cpu_ftrs.h>
  47#include <asm/sections.h>
  48#include <asm/btext.h>
  49#include <asm/nvram.h>
  50#include <asm/setup.h>
  51#include <asm/rtas.h>
  52#include <asm/iommu.h>
  53#include <asm/serial.h>
  54#include <asm/cache.h>
  55#include <asm/page.h>
  56#include <asm/mmu.h>
  57#include <asm/firmware.h>
  58#include <asm/xmon.h>
  59#include <asm/udbg.h>
  60#include <asm/kexec.h>
  61#include <asm/code-patching.h>
  62#include <asm/livepatch.h>
  63#include <asm/opal.h>
  64#include <asm/cputhreads.h>
  65#include <asm/hw_irq.h>
  66#include <asm/feature-fixups.h>
  67#include <asm/kup.h>
  68#include <asm/early_ioremap.h>
  69#include <asm/pgalloc.h>
  70
  71#include "setup.h"
  72
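     /*
      * Number of secondary CPUs still sitting in the early spin loop;
      * smp_release_cpus() below waits (briefly) for this to reach zero.
      */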
  73int spinning_secondaries;
  74u64 ppc64_pft_size;
  75
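     /*
      * Conservative 64-byte cache block defaults; the real cache geometry is
      * filled in later by initialize_cache_info() from the device tree (or
      * hard-coded for the known-broken POWER8 firmware case).
      */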
  76struct ppc64_caches ppc64_caches = {
  77        .l1d = {
  78                .block_size = 0x40,
  79                .log_block_size = 6,
  80        },
  81        .l1i = {
  82                .block_size = 0x40,
  83                .log_block_size = 6
  84        },
  85};
  86EXPORT_SYMBOL_GPL(ppc64_caches);
  87
  88#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
  89void __init setup_tlb_core_data(void)
  90{
  91        int cpu;
  92
  93        BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);
  94
  95        for_each_possible_cpu(cpu) {
  96                int first = cpu_first_thread_sibling(cpu);
  97
  98                /*
  99                 * If we boot via kdump on a non-primary thread,
 100                 * make sure we point at the thread that actually
 101                 * set up this TLB.
 102                 */
 103                if (cpu_first_thread_sibling(boot_cpuid) == first)
 104                        first = boot_cpuid;
 105
 106                paca_ptrs[cpu]->tcd_ptr = &paca_ptrs[first]->tcd;
 107
 108                /*
 109                 * If we have threads, we need either tlbsrx.
 110                 * or e6500 tablewalk mode, or else TLB handlers
 111                 * will be racy and could produce duplicate entries.
 112                 * Should we panic instead?
 113                 */
 114                WARN_ONCE(smt_enabled_at_boot >= 2 &&
 115                          !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
 116                          book3e_htw_mode != PPC_HTW_E6500,
 117                          "%s: unsupported MMU configuration\n", __func__);
 118        }
 119}
 120#endif
 121
 122#ifdef CONFIG_SMP
 123
 124static char *smt_enabled_cmdline;
 125
 126/* Look for ibm,smt-enabled OF option */
 127void __init check_smt_enabled(void)
 128{
 129        struct device_node *dn;
 130        const char *smt_option;
 131
 132        /* Default to enabling all threads */
 133        smt_enabled_at_boot = threads_per_core;
 134
 135        /* Allow the command line to overrule the OF option */
 136        if (smt_enabled_cmdline) {
 137                if (!strcmp(smt_enabled_cmdline, "on"))
 138                        smt_enabled_at_boot = threads_per_core;
 139                else if (!strcmp(smt_enabled_cmdline, "off"))
 140                        smt_enabled_at_boot = 0;
 141                else {
 142                        int smt;
 143                        int rc;
 144
 145                        rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
 146                        if (!rc)
 147                                smt_enabled_at_boot =
 148                                        min(threads_per_core, smt);
 149                }
 150        } else {
 151                dn = of_find_node_by_path("/options");
 152                if (dn) {
 153                        smt_option = of_get_property(dn, "ibm,smt-enabled",
 154                                                     NULL);
 155
 156                        if (smt_option) {
 157                                if (!strcmp(smt_option, "on"))
 158                                        smt_enabled_at_boot = threads_per_core;
 159                                else if (!strcmp(smt_option, "off"))
 160                                        smt_enabled_at_boot = 0;
 161                        }
 162
 163                        of_node_put(dn);
 164                }
 165        }
 166}
 167
 168/* Look for smt-enabled= cmdline option */
 169static int __init early_smt_enabled(char *p)
 170{
 171        smt_enabled_cmdline = p;
 172        return 0;
 173}
 174early_param("smt-enabled", early_smt_enabled);
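     /*
      * Example: booting with "smt-enabled=2" caps each core at two threads,
      * i.e. smt_enabled_at_boot = min(threads_per_core, 2).
      */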
 175
 176#endif /* CONFIG_SMP */
 177
 178/** Fix up paca fields required for the boot cpu */
 179static void __init fixup_boot_paca(void)
 180{
 181        /* The boot cpu is started */
 182        get_paca()->cpu_start = 1;
 183        /* Allow percpu accesses to work until we setup percpu data */
 184        get_paca()->data_offset = 0;
 185        /* Mark interrupts disabled in PACA */
 186        irq_soft_mask_set(IRQS_DISABLED);
 187}
 188
 189static void __init configure_exceptions(void)
 190{
 191        /*
 192         * Setup the trampolines from the lowmem exception vectors
 193         * to the kdump kernel when not using a relocatable kernel.
 194         */
 195        setup_kdump_trampoline();
 196
 197        /* Under a PAPR hypervisor, we need hypercalls */
 198        if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
 199                /* Enable AIL if possible */
 200                if (!pseries_enable_reloc_on_exc()) {
 201                        init_task.thread.fscr &= ~FSCR_SCV;
 202                        cur_cpu_spec->cpu_user_features2 &= ~PPC_FEATURE2_SCV;
 203                }
 204
 205                /*
 206                 * Tell the hypervisor that we want our exceptions to
 207                 * be taken in little endian mode.
 208                 *
 209                 * We don't call this for big endian as our calling convention
 210                 * makes us always enter in BE, and the call may fail under
 211                 * some circumstances with kdump.
 212                 */
 213#ifdef __LITTLE_ENDIAN__
 214                pseries_little_endian_exceptions();
 215#endif
 216        } else {
 217                /* Set endian mode using OPAL */
 218                if (firmware_has_feature(FW_FEATURE_OPAL))
 219                        opal_configure_cores();
 220
 221                /* AIL on native is done in cpu_ready_for_interrupts() */
 222        }
 223}
 224
 225static void cpu_ready_for_interrupts(void)
 226{
 227        /*
 228         * Enable AIL if supported, and we are in hypervisor mode. This
 229         * is called once for every processor.
 230         *
 231         * If we are not in hypervisor mode the job is done once for
 232         * the whole partition in configure_exceptions().
 233         */
 234        if (cpu_has_feature(CPU_FTR_HVMODE) &&
 235            cpu_has_feature(CPU_FTR_ARCH_207S)) {
 236                unsigned long lpcr = mfspr(SPRN_LPCR);
 237                mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
 238        }
 239
 240        /*
 241         * Set HFSCR:TM based on CPU features:
 242         * In the special case of TM no suspend (P9N DD2.1), Linux is
 243         * told TM is off via the dt-ftrs but told to (partially) use
 244         * it via OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED. So HFSCR[TM]
 245         * will be off from dt-ftrs but we need to turn it on for the
 246         * no suspend case.
 247         */
 248        if (cpu_has_feature(CPU_FTR_HVMODE)) {
 249                if (cpu_has_feature(CPU_FTR_TM_COMP))
 250                        mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) | HFSCR_TM);
 251                else
 252                        mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
 253        }
 254
 255        /* Set IR and DR in PACA MSR */
 256        get_paca()->kernel_msr = MSR_KERNEL;
 257}
 258
 259unsigned long spr_default_dscr = 0;
 260
 261void __init record_spr_defaults(void)
 262{
 263        if (early_cpu_has_feature(CPU_FTR_DSCR))
 264                spr_default_dscr = mfspr(SPRN_DSCR);
 265}
 266
 267/*
 268 * Early initialization entry point. This is called by head.S
 269 * with MMU translation disabled. We rely on the "feature" of
 270 * the CPU that ignores the top 2 bits of the address in real
 271 * mode so we can access kernel globals normally provided we
 272 * only toy with things in the RMO region. From here, we do
  273 * some early parsing of the device-tree to set up our MEMBLOCK
 274 * data structures, and allocate & initialize the hash table
 275 * and segment tables so we can start running with translation
 276 * enabled.
 277 *
 278 * It is this function which will call the probe() callback of
 279 * the various platform types and copy the matching one to the
 280 * global ppc_md structure. Your platform can eventually do
 281 * some very early initializations from the probe() routine, but
 282 * this is not recommended, be very careful as, for example, the
 283 * device-tree is not accessible via normal means at this point.
 284 */
 285
 286void __init __nostackprotector early_setup(unsigned long dt_ptr)
 287{
 288        static __initdata struct paca_struct boot_paca;
 289
 290        /* -------- printk is _NOT_ safe to use here ! ------- */
 291
 292        /*
 293         * Assume we're on cpu 0 for now.
 294         *
 295         * We need to load a PACA very early for a few reasons.
 296         *
 297         * The stack protector canary is stored in the paca, so as soon as we
 298         * call any stack protected code we need r13 pointing somewhere valid.
 299         *
 300         * If we are using kcov it will call in_task() in its instrumentation,
 301         * which relies on the current task from the PACA.
 302         *
 303         * dt_cpu_ftrs_init() calls into generic OF/fdt code, as well as
 304         * printk(), which can trigger both stack protector and kcov.
 305         *
 306         * percpu variables and spin locks also use the paca.
 307         *
 308         * So set up a temporary paca. It will be replaced below once we know
 309         * what CPU we are on.
 310         */
 311        initialise_paca(&boot_paca, 0);
 312        setup_paca(&boot_paca);
 313        fixup_boot_paca();
 314
 315        /* -------- printk is now safe to use ------- */
 316
 317        /* Try new device tree based feature discovery ... */
 318        if (!dt_cpu_ftrs_init(__va(dt_ptr)))
 319                /* Otherwise use the old style CPU table */
 320                identify_cpu(0, mfspr(SPRN_PVR));
 321
 322        /* Enable early debugging if any specified (see udbg.h) */
 323        udbg_early_init();
 324
 325        udbg_printf(" -> %s(), dt_ptr: 0x%lx\n", __func__, dt_ptr);
 326
 327        /*
 328         * Do early initialization using the flattened device
 329         * tree, such as retrieving the physical memory map or
 330         * calculating/retrieving the hash table size.
 331         */
 332        early_init_devtree(__va(dt_ptr));
 333
 334        /* Now we know the logical id of our boot cpu, setup the paca. */
 335        if (boot_cpuid != 0) {
 336                /* Poison paca_ptrs[0] again if it's not the boot cpu */
 337                memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0]));
 338        }
 339        setup_paca(paca_ptrs[boot_cpuid]);
 340        fixup_boot_paca();
 341
 342        /*
  343         * Configure exception handlers. This includes setting up trampolines
 344         * if needed, setting exception endian mode, etc...
 345         */
 346        configure_exceptions();
 347
 348        /*
 349         * Configure Kernel Userspace Protection. This needs to happen before
 350         * feature fixups for platforms that implement this using features.
 351         */
 352        setup_kup();
 353
 354        /* Apply all the dynamic patching */
 355        apply_feature_fixups();
 356        setup_feature_keys();
 357
 358        early_ioremap_setup();
 359
 360        /* Initialize the hash table or TLB handling */
 361        early_init_mmu();
 362
 363        /*
 364         * After firmware and early platform setup code has set things up,
 365         * we note the SPR values for configurable control/performance
 366         * registers, and use those as initial defaults.
 367         */
 368        record_spr_defaults();
 369
 370        /*
 371         * At this point, we can let interrupts switch to virtual mode
 372         * (the MMU has been setup), so adjust the MSR in the PACA to
 373         * have IR and DR set and enable AIL if it exists
 374         */
 375        cpu_ready_for_interrupts();
 376
 377        /*
 378         * We enable ftrace here, but since we only support DYNAMIC_FTRACE, it
 379         * will only actually get enabled on the boot cpu much later once
 380         * ftrace itself has been initialized.
 381         */
 382        this_cpu_enable_ftrace();
 383
 384        udbg_printf(" <- %s()\n", __func__);
 385
 386#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
 387        /*
 388         * This needs to be done *last* (after the above udbg_printf() even)
 389         *
 390         * Right after we return from this function, we turn on the MMU
 391         * which means the real-mode access trick that btext does will
  392         * no longer work; it needs to switch to using a real MMU
  393         * mapping. This call will ensure that it does.
 394         */
 395        btext_map();
 396#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
 397}
 398
 399#ifdef CONFIG_SMP
 400void early_setup_secondary(void)
 401{
 402        /* Mark interrupts disabled in PACA */
 403        irq_soft_mask_set(IRQS_DISABLED);
 404
 405        /* Initialize the hash table or TLB handling */
 406        early_init_mmu_secondary();
 407
 408        /* Perform any KUP setup that is per-cpu */
 409        setup_kup();
 410
 411        /*
 412         * At this point, we can let interrupts switch to virtual mode
 413         * (the MMU has been setup), so adjust the MSR in the PACA to
 414         * have IR and DR set.
 415         */
 416        cpu_ready_for_interrupts();
 417}
 418
 419#endif /* CONFIG_SMP */
 420
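     /*
      * Arch override of the generic panic_smp_self_stop(): a CPU that hits
      * panic() while another CPU already owns the panic parks here forever
      * with interrupts hard disabled.
      */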
 421void panic_smp_self_stop(void)
 422{
 423        hard_irq_disable();
 424        spin_begin();
 425        while (1)
 426                spin_cpu_relax();
 427}
 428
 429#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
 430static bool use_spinloop(void)
 431{
 432        if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
 433                /*
 434                 * See comments in head_64.S -- not all platforms insert
 435                 * secondaries at __secondary_hold and wait at the spin
 436                 * loop.
 437                 */
 438                if (firmware_has_feature(FW_FEATURE_OPAL))
 439                        return false;
 440                return true;
 441        }
 442
 443        /*
 444         * When book3e boots from kexec, the ePAPR spin table does
 445         * not get used.
 446         */
 447        return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
 448}
 449
 450void smp_release_cpus(void)
 451{
 452        unsigned long *ptr;
 453        int i;
 454
 455        if (!use_spinloop())
 456                return;
 457
 458        /* All secondary cpus are spinning on a common spinloop, release them
 459         * all now so they can start to spin on their individual paca
  460         * spinloops. For non-SMP kernels, the secondary cpus never get out
 461         * of the common spinloop.
 462         */
 463
 464        ptr  = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
 465                        - PHYSICAL_START);
 466        *ptr = ppc_function_entry(generic_secondary_smp_init);
 467
 468        /* And wait a bit for them to catch up */
 469        for (i = 0; i < 100000; i++) {
 470                mb();
 471                HMT_low();
 472                if (spinning_secondaries == 0)
 473                        break;
 474                udelay(1);
 475        }
 476        pr_debug("spinning_secondaries = %d\n", spinning_secondaries);
 477}
 478#endif /* CONFIG_SMP || CONFIG_KEXEC_CORE */
 479
 480/*
 481 * Initialize some remaining members of the ppc64_caches and systemcfg
 482 * structures
 483 * (at least until we get rid of them completely). This is mostly some
  484 * cache information about the CPU that will be used by cache flush
  485 * routines and/or provided to userland.
 486 */
 487
 488static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
 489                            u32 bsize, u32 sets)
 490{
 491        info->size = size;
 492        info->sets = sets;
 493        info->line_size = lsize;
 494        info->block_size = bsize;
 495        info->log_block_size = __ilog2(bsize);
 496        if (bsize)
 497                info->blocks_per_page = PAGE_SIZE / bsize;
 498        else
 499                info->blocks_per_page = 0;
 500
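             /*
              * parse_cache_info() uses sets == 0 to mean fully associative,
              * so report the associativity as the 0xffff sentinel here.
              */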
 501        if (sets == 0)
 502                info->assoc = 0xffff;
 503        else
 504                info->assoc = size / (sets * lsize);
 505}
 506
 507static bool __init parse_cache_info(struct device_node *np,
 508                                    bool icache,
 509                                    struct ppc_cache_info *info)
 510{
 511        static const char *ipropnames[] __initdata = {
 512                "i-cache-size",
 513                "i-cache-sets",
 514                "i-cache-block-size",
 515                "i-cache-line-size",
 516        };
 517        static const char *dpropnames[] __initdata = {
 518                "d-cache-size",
 519                "d-cache-sets",
 520                "d-cache-block-size",
 521                "d-cache-line-size",
 522        };
 523        const char **propnames = icache ? ipropnames : dpropnames;
 524        const __be32 *sizep, *lsizep, *bsizep, *setsp;
 525        u32 size, lsize, bsize, sets;
 526        bool success = true;
 527
 528        size = 0;
 529        sets = -1u;
 530        lsize = bsize = cur_cpu_spec->dcache_bsize;
 531        sizep = of_get_property(np, propnames[0], NULL);
 532        if (sizep != NULL)
 533                size = be32_to_cpu(*sizep);
 534        setsp = of_get_property(np, propnames[1], NULL);
 535        if (setsp != NULL)
 536                sets = be32_to_cpu(*setsp);
 537        bsizep = of_get_property(np, propnames[2], NULL);
 538        lsizep = of_get_property(np, propnames[3], NULL);
 539        if (bsizep == NULL)
 540                bsizep = lsizep;
 541        if (lsizep == NULL)
 542                lsizep = bsizep;
 543        if (lsizep != NULL)
 544                lsize = be32_to_cpu(*lsizep);
 545        if (bsizep != NULL)
 546                bsize = be32_to_cpu(*bsizep);
 547        if (sizep == NULL || bsizep == NULL || lsizep == NULL)
 548                success = false;
 549
 550        /*
 551         * OF is weird .. it represents fully associative caches
 552         * as "1 way" which doesn't make much sense and doesn't
 553         * leave room for direct mapped. We'll assume that 0
 554         * in OF means direct mapped for that reason.
 555         */
 556        if (sets == 1)
 557                sets = 0;
 558        else if (sets == 0)
 559                sets = 1;
 560
 561        init_cache_info(info, size, lsize, bsize, sets);
 562
 563        return success;
 564}
 565
 566void __init initialize_cache_info(void)
 567{
 568        struct device_node *cpu = NULL, *l2, *l3 = NULL;
 569        u32 pvr;
 570
 571        /*
 572         * All shipping POWER8 machines have a firmware bug that
 573         * puts incorrect information in the device-tree. This will
 574         * be (hopefully) fixed for future chips but for now hard
 575         * code the values if we are running on one of these
 576         */
 577        pvr = PVR_VER(mfspr(SPRN_PVR));
 578        if (pvr == PVR_POWER8 || pvr == PVR_POWER8E ||
 579            pvr == PVR_POWER8NVL) {
 580                                                /* size    lsize   blk  sets */
 581                init_cache_info(&ppc64_caches.l1i, 0x8000,   128,  128, 32);
 582                init_cache_info(&ppc64_caches.l1d, 0x10000,  128,  128, 64);
 583                init_cache_info(&ppc64_caches.l2,  0x80000,  128,  0,   512);
 584                init_cache_info(&ppc64_caches.l3,  0x800000, 128,  0,   8192);
 585        } else
 586                cpu = of_find_node_by_type(NULL, "cpu");
 587
 588        /*
 589         * We're assuming *all* of the CPUs have the same
 590         * d-cache and i-cache sizes... -Peter
 591         */
 592        if (cpu) {
 593                if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
 594                        pr_warn("Argh, can't find dcache properties !\n");
 595
 596                if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
 597                        pr_warn("Argh, can't find icache properties !\n");
 598
 599                /*
 600                 * Try to find the L2 and L3 if any. Assume they are
 601                 * unified and use the D-side properties.
 602                 */
 603                l2 = of_find_next_cache_node(cpu);
 604                of_node_put(cpu);
 605                if (l2) {
 606                        parse_cache_info(l2, false, &ppc64_caches.l2);
 607                        l3 = of_find_next_cache_node(l2);
 608                        of_node_put(l2);
 609                }
 610                if (l3) {
 611                        parse_cache_info(l3, false, &ppc64_caches.l3);
 612                        of_node_put(l3);
 613                }
 614        }
 615
 616        /* For use by binfmt_elf */
 617        dcache_bsize = ppc64_caches.l1d.block_size;
 618        icache_bsize = ppc64_caches.l1i.block_size;
 619
 620        cur_cpu_spec->dcache_bsize = dcache_bsize;
 621        cur_cpu_spec->icache_bsize = icache_bsize;
 622}
 623
 624/*
 625 * This returns the limit below which memory accesses to the linear
  626 * mapping are guaranteed not to cause an architectural exception (e.g.,
 627 * TLB or SLB miss fault).
 628 *
  629 * This is used to allocate PACAs and various interrupt stacks that
  630 * are accessed early in interrupt handlers that must not cause
 631 * re-entrant interrupts.
 632 */
 633__init u64 ppc64_bolted_size(void)
 634{
 635#ifdef CONFIG_PPC_BOOK3E
 636        /* Freescale BookE bolts the entire linear mapping */
 637        /* XXX: BookE ppc64_rma_limit setup seems to disagree? */
 638        if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E))
 639                return linear_map_top;
 640        /* Other BookE, we assume the first GB is bolted */
 641        return 1ul << 30;
 642#else
 643        /* BookS radix, does not take faults on linear mapping */
 644        if (early_radix_enabled())
 645                return ULONG_MAX;
 646
 647        /* BookS hash, the first segment is bolted */
 648        if (early_mmu_has_feature(MMU_FTR_1T_SEGMENT))
 649                return 1UL << SID_SHIFT_1T;
 650        return 1UL << SID_SHIFT;
 651#endif
 652}
 653
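     /*
      * Allocate one THREAD_SIZE stack, THREAD_ALIGN aligned, below @limit,
      * preferring memory on @cpu's node.
      */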
 654static void *__init alloc_stack(unsigned long limit, int cpu)
 655{
 656        void *ptr;
 657
 658        BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);
 659
 660        ptr = memblock_alloc_try_nid(THREAD_SIZE, THREAD_ALIGN,
 661                                     MEMBLOCK_LOW_LIMIT, limit,
 662                                     early_cpu_to_node(cpu));
 663        if (!ptr)
 664                panic("cannot allocate stacks");
 665
 666        return ptr;
 667}
 668
 669void __init irqstack_early_init(void)
 670{
 671        u64 limit = ppc64_bolted_size();
 672        unsigned int i;
 673
 674        /*
 675         * Interrupt stacks must be in the first segment since we
 676         * cannot afford to take SLB misses on them. They are not
 677         * accessed in realmode.
 678         */
 679        for_each_possible_cpu(i) {
 680                softirq_ctx[i] = alloc_stack(limit, i);
 681                hardirq_ctx[i] = alloc_stack(limit, i);
 682        }
 683}
 684
 685#ifdef CONFIG_PPC_BOOK3E
 686void __init exc_lvl_early_init(void)
 687{
 688        unsigned int i;
 689
 690        for_each_possible_cpu(i) {
 691                void *sp;
 692
 693                sp = alloc_stack(ULONG_MAX, i);
 694                critirq_ctx[i] = sp;
 695                paca_ptrs[i]->crit_kstack = sp + THREAD_SIZE;
 696
 697                sp = alloc_stack(ULONG_MAX, i);
 698                dbgirq_ctx[i] = sp;
 699                paca_ptrs[i]->dbg_kstack = sp + THREAD_SIZE;
 700
 701                sp = alloc_stack(ULONG_MAX, i);
 702                mcheckirq_ctx[i] = sp;
 703                paca_ptrs[i]->mc_kstack = sp + THREAD_SIZE;
 704        }
 705
 706        if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
 707                patch_exception(0x040, exc_debug_debug_book3e);
 708}
 709#endif
 710
 711/*
 712 * Stack space used when we detect a bad kernel stack pointer, and
 713 * early in SMP boots before relocation is enabled. Exclusive emergency
 714 * stack for machine checks.
 715 */
 716void __init emergency_stack_init(void)
 717{
 718        u64 limit, mce_limit;
 719        unsigned int i;
 720
 721        /*
 722         * Emergency stacks must be under 256MB, we cannot afford to take
 723         * SLB misses on them. The ABI also requires them to be 128-byte
 724         * aligned.
 725         *
 726         * Since we use these as temporary stacks during secondary CPU
 727         * bringup, machine check, system reset, and HMI, we need to get
 728         * at them in real mode. This means they must also be within the RMO
 729         * region.
 730         *
 731         * The IRQ stacks allocated elsewhere in this file are zeroed and
 732         * initialized in kernel/irq.c. These are initialized here in order
 733         * to have emergency stacks available as early as possible.
 734         */
 735        limit = mce_limit = min(ppc64_bolted_size(), ppc64_rma_size);
 736
 737        /*
 738         * Machine check on pseries calls rtas, but can't use the static
 739         * rtas_args due to a machine check hitting while the lock is held.
  740         * RTAS args have to be below 4GB, so the machine check stack is
  741         * limited to 4GB so that the args can be placed on the stack.
 742         */
 743        if (firmware_has_feature(FW_FEATURE_LPAR) && mce_limit > SZ_4G)
 744                mce_limit = SZ_4G;
 745
 746        for_each_possible_cpu(i) {
 747                paca_ptrs[i]->emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;
 748
 749#ifdef CONFIG_PPC_BOOK3S_64
 750                /* emergency stack for NMI exception handling. */
 751                paca_ptrs[i]->nmi_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;
 752
 753                /* emergency stack for machine check exception handling. */
 754                paca_ptrs[i]->mc_emergency_sp = alloc_stack(mce_limit, i) + THREAD_SIZE;
 755#endif
 756        }
 757}
 758
 759#ifdef CONFIG_SMP
 760/**
 761 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 762 * @cpu: cpu to allocate for
 763 * @size: size allocation in bytes
 764 * @align: alignment
 765 *
 766 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 767 * does the right thing for NUMA regardless of the current
 768 * configuration.
 769 *
 770 * RETURNS:
 771 * Pointer to the allocated area on success, NULL on failure.
 772 */
 773static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
 774                                        size_t align)
 775{
 776        const unsigned long goal = __pa(MAX_DMA_ADDRESS);
 777#ifdef CONFIG_NEED_MULTIPLE_NODES
 778        int node = early_cpu_to_node(cpu);
 779        void *ptr;
 780
 781        if (!node_online(node) || !NODE_DATA(node)) {
 782                ptr = memblock_alloc_from(size, align, goal);
 783                pr_info("cpu %d has no node %d or node-local memory\n",
 784                        cpu, node);
 785                pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
 786                         cpu, size, __pa(ptr));
 787        } else {
 788                ptr = memblock_alloc_try_nid(size, align, goal,
 789                                             MEMBLOCK_ALLOC_ACCESSIBLE, node);
 790                pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
 791                         "%016lx\n", cpu, size, node, __pa(ptr));
 792        }
 793        return ptr;
 794#else
 795        return memblock_alloc_from(size, align, goal);
 796#endif
 797}
 798
 799static void __init pcpu_free_bootmem(void *ptr, size_t size)
 800{
 801        memblock_free(__pa(ptr), size);
 802}
 803
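     /*
      * Report CPUs on the same node as "local" so the percpu first-chunk
      * allocator groups their units together.
      */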
 804static int pcpu_cpu_distance(unsigned int from, unsigned int to)
 805{
 806        if (early_cpu_to_node(from) == early_cpu_to_node(to))
 807                return LOCAL_DISTANCE;
 808        else
 809                return REMOTE_DISTANCE;
 810}
 811
 812unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
 813EXPORT_SYMBOL(__per_cpu_offset);
 814
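     /*
      * Build the intermediate page-table levels (and the PTE page) needed to
      * map a percpu page at @addr; used by the page-based first-chunk
      * fallback in setup_per_cpu_areas().
      */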
 815static void __init pcpu_populate_pte(unsigned long addr)
 816{
 817        pgd_t *pgd = pgd_offset_k(addr);
 818        p4d_t *p4d;
 819        pud_t *pud;
 820        pmd_t *pmd;
 821
 822        p4d = p4d_offset(pgd, addr);
 823        if (p4d_none(*p4d)) {
 824                pud_t *new;
 825
 826                new = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
 827                if (!new)
 828                        goto err_alloc;
 829                p4d_populate(&init_mm, p4d, new);
 830        }
 831
 832        pud = pud_offset(p4d, addr);
 833        if (pud_none(*pud)) {
 834                pmd_t *new;
 835
 836                new = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
 837                if (!new)
 838                        goto err_alloc;
 839                pud_populate(&init_mm, pud, new);
 840        }
 841
 842        pmd = pmd_offset(pud, addr);
 843        if (!pmd_present(*pmd)) {
 844                pte_t *new;
 845
 846                new = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE);
 847                if (!new)
 848                        goto err_alloc;
 849                pmd_populate_kernel(&init_mm, pmd, new);
 850        }
 851
 852        return;
 853
 854err_alloc:
 855        panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
 856              __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
 857}
 858
 859
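     /*
      * Carve out the per-cpu areas (embed-in-linear-map first, page mapping
      * as a fallback) and record each CPU's offset in its paca, which is
      * what 64-bit per-cpu accesses use.
      */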
 860void __init setup_per_cpu_areas(void)
 861{
 862        const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
 863        size_t atom_size;
 864        unsigned long delta;
 865        unsigned int cpu;
 866        int rc = -EINVAL;
 867
 868        /*
 869         * Linear mapping is one of 4K, 1M and 16M.  For 4K, no need
 870         * to group units.  For larger mappings, use 1M atom which
 871         * should be large enough to contain a number of units.
 872         */
 873        if (mmu_linear_psize == MMU_PAGE_4K)
 874                atom_size = PAGE_SIZE;
 875        else
 876                atom_size = 1 << 20;
 877
 878        if (pcpu_chosen_fc != PCPU_FC_PAGE) {
 879                rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
 880                                            pcpu_alloc_bootmem, pcpu_free_bootmem);
 881                if (rc)
 882                        pr_warn("PERCPU: %s allocator failed (%d), "
 883                                "falling back to page size\n",
 884                                pcpu_fc_names[pcpu_chosen_fc], rc);
 885        }
 886
 887        if (rc < 0)
 888                rc = pcpu_page_first_chunk(0, pcpu_alloc_bootmem, pcpu_free_bootmem,
 889                                           pcpu_populate_pte);
 890        if (rc < 0)
 891                panic("cannot initialize percpu area (err=%d)", rc);
 892
 893        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
 894        for_each_possible_cpu(cpu) {
 895                __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
 896                paca_ptrs[cpu]->data_offset = __per_cpu_offset[cpu];
 897        }
 898}
 899#endif
 900
 901#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
 902unsigned long memory_block_size_bytes(void)
 903{
 904        if (ppc_md.memory_block_size)
 905                return ppc_md.memory_block_size();
 906
 907        return MIN_MEMORY_BLOCK_SIZE;
 908}
 909#endif
 910
 911#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
 912struct ppc_pci_io ppc_pci_io;
 913EXPORT_SYMBOL(ppc_pci_io);
 914#endif
 915
 916#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
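     /*
      * watchdog_thresh is in seconds; the perf-based detector wants its
      * sample period in CPU cycles, hence frequency * threshold.
      */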
 917u64 hw_nmi_get_sample_period(int watchdog_thresh)
 918{
 919        return ppc_proc_freq * watchdog_thresh;
 920}
 921#endif
 922
 923/*
 924 * The perf based hardlockup detector breaks PMU event based branches, so
 925 * disable it by default. Book3S has a soft-nmi hardlockup detector based
 926 * on the decrementer interrupt, so it does not suffer from this problem.
 927 *
 928 * It is likely to get false positives in VM guests, so disable it there
 929 * by default too.
 930 */
 931static int __init disable_hardlockup_detector(void)
 932{
 933#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
 934        hardlockup_detector_disable();
 935#else
 936        if (firmware_has_feature(FW_FEATURE_LPAR))
 937                hardlockup_detector_disable();
 938#endif
 939
 940        return 0;
 941}
 942early_initcall(disable_hardlockup_detector);
 943
 944#ifdef CONFIG_PPC_BOOK3S_64
 945static enum l1d_flush_type enabled_flush_types;
 946static void *l1d_flush_fallback_area;
 947static bool no_rfi_flush;
 948static bool no_entry_flush;
 949static bool no_uaccess_flush;
 950bool rfi_flush;
 951bool entry_flush;
 952bool uaccess_flush;
 953DEFINE_STATIC_KEY_FALSE(uaccess_flush_key);
 954EXPORT_SYMBOL(uaccess_flush_key);
 955
 956static int __init handle_no_rfi_flush(char *p)
 957{
 958        pr_info("rfi-flush: disabled on command line.");
 959        no_rfi_flush = true;
 960        return 0;
 961}
 962early_param("no_rfi_flush", handle_no_rfi_flush);
 963
 964static int __init handle_no_entry_flush(char *p)
 965{
 966        pr_info("entry-flush: disabled on command line.");
 967        no_entry_flush = true;
 968        return 0;
 969}
 970early_param("no_entry_flush", handle_no_entry_flush);
 971
 972static int __init handle_no_uaccess_flush(char *p)
 973{
 974        pr_info("uaccess-flush: disabled on command line.");
 975        no_uaccess_flush = true;
 976        return 0;
 977}
 978early_param("no_uaccess_flush", handle_no_uaccess_flush);
 979
 980/*
  981 * The RFI flush is not KPTI, but because users will see documentation that
  982 * says to use nopti, we hijack that option here to also disable the RFI flush.
 983 */
 984static int __init handle_no_pti(char *p)
 985{
 986        pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
 987        handle_no_rfi_flush(NULL);
 988        return 0;
 989}
 990early_param("nopti", handle_no_pti);
 991
 992static void do_nothing(void *unused)
 993{
 994        /*
 995         * We don't need to do the flush explicitly, just enter+exit kernel is
 996         * sufficient, the RFI exit handlers will do the right thing.
 997         */
 998}
 999
1000void rfi_flush_enable(bool enable)
1001{
1002        if (enable) {
1003                do_rfi_flush_fixups(enabled_flush_types);
1004                on_each_cpu(do_nothing, NULL, 1);
1005        } else
1006                do_rfi_flush_fixups(L1D_FLUSH_NONE);
1007
1008        rfi_flush = enable;
1009}
1010
1011void entry_flush_enable(bool enable)
1012{
1013        if (enable) {
1014                do_entry_flush_fixups(enabled_flush_types);
1015                on_each_cpu(do_nothing, NULL, 1);
1016        } else {
1017                do_entry_flush_fixups(L1D_FLUSH_NONE);
1018        }
1019
1020        entry_flush = enable;
1021}
1022
1023void uaccess_flush_enable(bool enable)
1024{
1025        if (enable) {
1026                do_uaccess_flush_fixups(enabled_flush_types);
1027                static_branch_enable(&uaccess_flush_key);
1028                on_each_cpu(do_nothing, NULL, 1);
1029        } else {
1030                static_branch_disable(&uaccess_flush_key);
1031                do_uaccess_flush_fixups(L1D_FLUSH_NONE);
1032        }
1033
1034        uaccess_flush = enable;
1035}
1036
1037static void __ref init_fallback_flush(void)
1038{
1039        u64 l1d_size, limit;
1040        int cpu;
1041
1042        /* Only allocate the fallback flush area once (at boot time). */
1043        if (l1d_flush_fallback_area)
1044                return;
1045
1046        l1d_size = ppc64_caches.l1d.size;
1047
1048        /*
1049         * If there is no d-cache-size property in the device tree, l1d_size
1050         * could be zero. That leads to the loop in the asm wrapping around to
1051         * 2^64-1, and then walking off the end of the fallback area and
1052         * eventually causing a page fault which is fatal. Just default to
1053         * something vaguely sane.
1054         */
1055        if (!l1d_size)
1056                l1d_size = (64 * 1024);
1057
1058        limit = min(ppc64_bolted_size(), ppc64_rma_size);
1059
1060        /*
1061         * Align to L1d size, and size it at 2x L1d size, to catch possible
1062         * hardware prefetch runoff. We don't have a recipe for load patterns to
1063         * reliably avoid the prefetcher.
1064         */
1065        l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
1066                                                l1d_size, MEMBLOCK_LOW_LIMIT,
1067                                                limit, NUMA_NO_NODE);
1068        if (!l1d_flush_fallback_area)
1069                panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n",
1070                      __func__, l1d_size * 2, l1d_size, &limit);
1071
1072
1073        for_each_possible_cpu(cpu) {
1074                struct paca_struct *paca = paca_ptrs[cpu];
1075                paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
1076                paca->l1d_flush_size = l1d_size;
1077        }
1078}
1079
1080void setup_rfi_flush(enum l1d_flush_type types, bool enable)
1081{
1082        if (types & L1D_FLUSH_FALLBACK) {
1083                pr_info("rfi-flush: fallback displacement flush available\n");
1084                init_fallback_flush();
1085        }
1086
1087        if (types & L1D_FLUSH_ORI)
1088                pr_info("rfi-flush: ori type flush available\n");
1089
1090        if (types & L1D_FLUSH_MTTRIG)
1091                pr_info("rfi-flush: mttrig type flush available\n");
1092
1093        enabled_flush_types = types;
1094
1095        if (!cpu_mitigations_off() && !no_rfi_flush)
1096                rfi_flush_enable(enable);
1097}
1098
1099void setup_entry_flush(bool enable)
1100{
1101        if (cpu_mitigations_off())
1102                return;
1103
1104        if (!no_entry_flush)
1105                entry_flush_enable(enable);
1106}
1107
1108void setup_uaccess_flush(bool enable)
1109{
1110        if (cpu_mitigations_off())
1111                return;
1112
1113        if (!no_uaccess_flush)
1114                uaccess_flush_enable(enable);
1115}
1116
1117#ifdef CONFIG_DEBUG_FS
1118static int rfi_flush_set(void *data, u64 val)
1119{
1120        bool enable;
1121
1122        if (val == 1)
1123                enable = true;
1124        else if (val == 0)
1125                enable = false;
1126        else
1127                return -EINVAL;
1128
1129        /* Only do anything if we're changing state */
1130        if (enable != rfi_flush)
1131                rfi_flush_enable(enable);
1132
1133        return 0;
1134}
1135
1136static int rfi_flush_get(void *data, u64 *val)
1137{
1138        *val = rfi_flush ? 1 : 0;
1139        return 0;
1140}
1141
1142DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
1143
1144static int entry_flush_set(void *data, u64 val)
1145{
1146        bool enable;
1147
1148        if (val == 1)
1149                enable = true;
1150        else if (val == 0)
1151                enable = false;
1152        else
1153                return -EINVAL;
1154
1155        /* Only do anything if we're changing state */
1156        if (enable != entry_flush)
1157                entry_flush_enable(enable);
1158
1159        return 0;
1160}
1161
1162static int entry_flush_get(void *data, u64 *val)
1163{
1164        *val = entry_flush ? 1 : 0;
1165        return 0;
1166}
1167
1168DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n");
1169
1170static int uaccess_flush_set(void *data, u64 val)
1171{
1172        bool enable;
1173
1174        if (val == 1)
1175                enable = true;
1176        else if (val == 0)
1177                enable = false;
1178        else
1179                return -EINVAL;
1180
1181        /* Only do anything if we're changing state */
1182        if (enable != uaccess_flush)
1183                uaccess_flush_enable(enable);
1184
1185        return 0;
1186}
1187
1188static int uaccess_flush_get(void *data, u64 *val)
1189{
1190        *val = uaccess_flush ? 1 : 0;
1191        return 0;
1192}
1193
1194DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n");
1195
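     /*
      * With debugfs mounted (typically /sys/kernel/debug), e.g.
      * "echo 0 > /sys/kernel/debug/powerpc/rfi_flush" toggles the RFI flush
      * at runtime via the _set/_get handlers above.
      */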
1196static __init int rfi_flush_debugfs_init(void)
1197{
1198        debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
1199        debugfs_create_file("entry_flush", 0600, powerpc_debugfs_root, NULL, &fops_entry_flush);
1200        debugfs_create_file("uaccess_flush", 0600, powerpc_debugfs_root, NULL, &fops_uaccess_flush);
1201        return 0;
1202}
1203device_initcall(rfi_flush_debugfs_init);
1204#endif
1205#endif /* CONFIG_PPC_BOOK3S_64 */
1206