linux/arch/powerpc/kernel/setup_64.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 */

#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/memblock.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memory.h>
#include <linux/nmi.h>
#include <linux/pgtable.h>

#include <asm/kvm_guest.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/code-patching.h>
#include <asm/livepatch.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/hw_irq.h>
#include <asm/feature-fixups.h>
#include <asm/kup.h>
#include <asm/early_ioremap.h>
#include <asm/pgalloc.h>
#include <asm/asm-prototypes.h>

#include "setup.h"

int spinning_secondaries;
u64 ppc64_pft_size;

struct ppc64_caches ppc64_caches = {
        .l1d = {
                .block_size = 0x40,
                .log_block_size = 6,
        },
        .l1i = {
                .block_size = 0x40,
                .log_block_size = 6
        },
};
EXPORT_SYMBOL_GPL(ppc64_caches);

#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
void __init setup_tlb_core_data(void)
{
        int cpu;

        BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);

        for_each_possible_cpu(cpu) {
                int first = cpu_first_thread_sibling(cpu);

                /*
                 * If we boot via kdump on a non-primary thread,
                 * make sure we point at the thread that actually
                 * set up this TLB.
                 */
                if (cpu_first_thread_sibling(boot_cpuid) == first)
                        first = boot_cpuid;

                paca_ptrs[cpu]->tcd_ptr = &paca_ptrs[first]->tcd;

                /*
                 * If we have threads, we need either tlbsrx.
                 * or e6500 tablewalk mode, or else TLB handlers
                 * will be racy and could produce duplicate entries.
                 * Should we panic instead?
                 */
                WARN_ONCE(smt_enabled_at_boot >= 2 &&
                          !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
                          book3e_htw_mode != PPC_HTW_E6500,
                          "%s: unsupported MMU configuration\n", __func__);
        }
}
#endif

#ifdef CONFIG_SMP

static char *smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
void __init check_smt_enabled(void)
{
        struct device_node *dn;
        const char *smt_option;

        /* Default to enabling all threads */
        smt_enabled_at_boot = threads_per_core;

        /* Allow the command line to overrule the OF option */
        if (smt_enabled_cmdline) {
                if (!strcmp(smt_enabled_cmdline, "on"))
                        smt_enabled_at_boot = threads_per_core;
                else if (!strcmp(smt_enabled_cmdline, "off"))
                        smt_enabled_at_boot = 0;
                else {
                        int smt;
                        int rc;

                        rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
                        if (!rc)
                                smt_enabled_at_boot =
                                        min(threads_per_core, smt);
                }
        } else {
                dn = of_find_node_by_path("/options");
                if (dn) {
                        smt_option = of_get_property(dn, "ibm,smt-enabled",
                                                     NULL);

                        if (smt_option) {
                                if (!strcmp(smt_option, "on"))
                                        smt_enabled_at_boot = threads_per_core;
                                else if (!strcmp(smt_option, "off"))
                                        smt_enabled_at_boot = 0;
                        }

                        of_node_put(dn);
                }
        }
}

/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
        smt_enabled_cmdline = p;
        return 0;
}
early_param("smt-enabled", early_smt_enabled);

#endif /* CONFIG_SMP */

/** Fix up paca fields required for the boot cpu */
static void __init fixup_boot_paca(void)
{
        /* The boot cpu is started */
        get_paca()->cpu_start = 1;
        /* Allow percpu accesses to work until we setup percpu data */
        get_paca()->data_offset = 0;
        /* Mark interrupts disabled in PACA */
        irq_soft_mask_set(IRQS_DISABLED);
}

static void __init configure_exceptions(void)
{
        /*
         * Setup the trampolines from the lowmem exception vectors
         * to the kdump kernel when not using a relocatable kernel.
         */
        setup_kdump_trampoline();

        /* Under a PAPR hypervisor, we need hypercalls */
        if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
                /* Enable AIL if possible */
                if (!pseries_enable_reloc_on_exc()) {
                        init_task.thread.fscr &= ~FSCR_SCV;
                        cur_cpu_spec->cpu_user_features2 &= ~PPC_FEATURE2_SCV;
                }

                /*
                 * Tell the hypervisor that we want our exceptions to
                 * be taken in little endian mode.
                 *
                 * We don't call this for big endian as our calling convention
                 * makes us always enter in BE, and the call may fail under
                 * some circumstances with kdump.
                 */
#ifdef __LITTLE_ENDIAN__
                pseries_little_endian_exceptions();
#endif
        } else {
                /* Set endian mode using OPAL */
                if (firmware_has_feature(FW_FEATURE_OPAL))
                        opal_configure_cores();

                /* AIL on native is done in cpu_ready_for_interrupts() */
        }
}

static void cpu_ready_for_interrupts(void)
{
        /*
         * Enable AIL if supported, and we are in hypervisor mode. This
         * is called once for every processor.
         *
         * If we are not in hypervisor mode the job is done once for
         * the whole partition in configure_exceptions().
         */
        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                unsigned long lpcr = mfspr(SPRN_LPCR);
                unsigned long new_lpcr = lpcr;

                if (cpu_has_feature(CPU_FTR_ARCH_31)) {
                        /* P10 DD1 does not have HAIL */
                        if (pvr_version_is(PVR_POWER10) &&
                                        (mfspr(SPRN_PVR) & 0xf00) == 0x100)
                                new_lpcr |= LPCR_AIL_3;
                        else
                                new_lpcr |= LPCR_HAIL;
                } else if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
                        new_lpcr |= LPCR_AIL_3;
                }

                if (new_lpcr != lpcr)
                        mtspr(SPRN_LPCR, new_lpcr);
        }

        /*
         * Set HFSCR:TM based on CPU features:
         * In the special case of TM no suspend (P9N DD2.1), Linux is
         * told TM is off via the dt-ftrs but told to (partially) use
         * it via OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED. So HFSCR[TM]
         * will be off from dt-ftrs but we need to turn it on for the
         * no suspend case.
         */
        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                if (cpu_has_feature(CPU_FTR_TM_COMP))
                        mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) | HFSCR_TM);
                else
                        mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
        }

        /* Set IR and DR in PACA MSR */
        get_paca()->kernel_msr = MSR_KERNEL;
}

unsigned long spr_default_dscr = 0;

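/*
 * Snapshot boot-time values of configurable SPRs (currently only DSCR)
 * so they can be used as the initial defaults later on.
 */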
static void __init record_spr_defaults(void)
{
        if (early_cpu_has_feature(CPU_FTR_DSCR))
                spr_default_dscr = mfspr(SPRN_DSCR);
}

/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally, provided we
 * only touch things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our MEMBLOCK
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * enabled.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can do some very early
 * initializations from the probe() routine, but this is not
 * recommended; be very careful as, for example, the device-tree
 * is not accessible via normal means at this point.
 */

void __init early_setup(unsigned long dt_ptr)
{
        static __initdata struct paca_struct boot_paca;

        /* -------- printk is _NOT_ safe to use here ! ------- */

        /*
         * Assume we're on cpu 0 for now.
         *
         * We need to load a PACA very early for a few reasons.
         *
         * The stack protector canary is stored in the paca, so as soon as we
         * call any stack protected code we need r13 pointing somewhere valid.
         *
         * If we are using kcov it will call in_task() in its instrumentation,
         * which relies on the current task from the PACA.
         *
         * dt_cpu_ftrs_init() calls into generic OF/fdt code, as well as
         * printk(), which can trigger both stack protector and kcov.
         *
         * percpu variables and spin locks also use the paca.
         *
         * So set up a temporary paca. It will be replaced below once we know
         * what CPU we are on.
         */
        initialise_paca(&boot_paca, 0);
        setup_paca(&boot_paca);
        fixup_boot_paca();

        /* -------- printk is now safe to use ------- */

        /* Try new device tree based feature discovery ... */
        if (!dt_cpu_ftrs_init(__va(dt_ptr)))
                /* Otherwise use the old style CPU table */
                identify_cpu(0, mfspr(SPRN_PVR));

        /* Enable early debugging if any specified (see udbg.h) */
        udbg_early_init();

        udbg_printf(" -> %s(), dt_ptr: 0x%lx\n", __func__, dt_ptr);

        /*
         * Do early initialization using the flattened device
         * tree, such as retrieving the physical memory map or
         * calculating/retrieving the hash table size.
         */
        early_init_devtree(__va(dt_ptr));

        /* Now we know the logical id of our boot cpu, setup the paca. */
        if (boot_cpuid != 0) {
                /* Poison paca_ptrs[0] again if it's not the boot cpu */
                memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0]));
        }
        setup_paca(paca_ptrs[boot_cpuid]);
        fixup_boot_paca();

        /*
         * Configure exception handlers. This includes setting up trampolines
         * if needed, setting exception endian mode, etc...
         */
        configure_exceptions();

        /*
         * Configure Kernel Userspace Protection. This needs to happen before
         * feature fixups for platforms that implement this using features.
         */
        setup_kup();

        /* Apply all the dynamic patching */
        apply_feature_fixups();
        setup_feature_keys();

        /* Initialize the hash table or TLB handling */
        early_init_mmu();

        early_ioremap_setup();

        /*
         * After firmware and early platform setup code has set things up,
         * we note the SPR values for configurable control/performance
         * registers, and use those as initial defaults.
         */
        record_spr_defaults();

        /*
         * At this point, we can let interrupts switch to virtual mode
         * (the MMU has been setup), so adjust the MSR in the PACA to
         * have IR and DR set and enable AIL if it exists
         */
        cpu_ready_for_interrupts();

        /*
         * We enable ftrace here, but since we only support DYNAMIC_FTRACE, it
         * will only actually get enabled on the boot cpu much later once
         * ftrace itself has been initialized.
         */
        this_cpu_enable_ftrace();

        udbg_printf(" <- %s()\n", __func__);

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
        /*
         * This needs to be done *last* (after the above udbg_printf() even)
         *
         * Right after we return from this function, we turn on the MMU
         * which means the real-mode access trick that btext does will
         * no longer work, it needs to switch to using a real MMU
         * mapping. This call will ensure that it does
         */
        btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}

#ifdef CONFIG_SMP
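/*
 * Early per-CPU setup for secondary CPUs; mirrors the relevant parts
 * of early_setup() for CPUs that come up after the boot CPU.
 */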
void early_setup_secondary(void)
{
        /* Mark interrupts disabled in PACA */
        irq_soft_mask_set(IRQS_DISABLED);

        /* Initialize the hash table or TLB handling */
        early_init_mmu_secondary();

        /* Perform any KUP setup that is per-cpu */
        setup_kup();

        /*
         * At this point, we can let interrupts switch to virtual mode
         * (the MMU has been setup), so adjust the MSR in the PACA to
         * have IR and DR set.
         */
        cpu_ready_for_interrupts();
}

#endif /* CONFIG_SMP */

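/*
 * Stop this CPU during a panic: hard-disable interrupts and spin
 * forever at low SMT priority.
 */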
void panic_smp_self_stop(void)
{
        hard_irq_disable();
        spin_begin();
        while (1)
                spin_cpu_relax();
}

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
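/*
 * Return true if secondary CPUs are expected to be waiting in the
 * __secondary_hold spin loop and need releasing by smp_release_cpus().
 */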
static bool use_spinloop(void)
{
        if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
                /*
                 * See comments in head_64.S -- not all platforms insert
                 * secondaries at __secondary_hold and wait at the spin
                 * loop.
                 */
                if (firmware_has_feature(FW_FEATURE_OPAL))
                        return false;
                return true;
        }

        /*
         * When book3e boots from kexec, the ePAPR spin table does
         * not get used.
         */
        return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
}

void smp_release_cpus(void)
{
        unsigned long *ptr;
        int i;

        if (!use_spinloop())
                return;

        /* All secondary cpus are spinning on a common spinloop, release them
         * all now so they can start to spin on their individual paca
         * spinloops. For non SMP kernels, the secondary cpus never get out
         * of the common spinloop.
         */

        ptr  = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
                        - PHYSICAL_START);
        *ptr = ppc_function_entry(generic_secondary_smp_init);

        /* And wait a bit for them to catch up */
        for (i = 0; i < 100000; i++) {
                mb();
                HMT_low();
                if (spinning_secondaries == 0)
                        break;
                udelay(1);
        }
        pr_debug("spinning_secondaries = %d\n", spinning_secondaries);
}
#endif /* CONFIG_SMP || CONFIG_KEXEC_CORE */

/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures (at least until we get rid of them completely). This is
 * mostly cache information about the CPU that will be used by cache
 * flush routines and/or provided to userland.
 */

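/*
 * Fill in a ppc_cache_info from the raw geometry values. A sets value
 * of 0 is treated as fully associative (assoc = 0xffff).
 */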
static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
                            u32 bsize, u32 sets)
{
        info->size = size;
        info->sets = sets;
        info->line_size = lsize;
        info->block_size = bsize;
        info->log_block_size = __ilog2(bsize);
        if (bsize)
                info->blocks_per_page = PAGE_SIZE / bsize;
        else
                info->blocks_per_page = 0;

        if (sets == 0)
                info->assoc = 0xffff;
        else
                info->assoc = size / (sets * lsize);
}

static bool __init parse_cache_info(struct device_node *np,
                                    bool icache,
                                    struct ppc_cache_info *info)
{
        static const char *ipropnames[] __initdata = {
                "i-cache-size",
                "i-cache-sets",
                "i-cache-block-size",
                "i-cache-line-size",
        };
        static const char *dpropnames[] __initdata = {
                "d-cache-size",
                "d-cache-sets",
                "d-cache-block-size",
                "d-cache-line-size",
        };
        const char **propnames = icache ? ipropnames : dpropnames;
        const __be32 *sizep, *lsizep, *bsizep, *setsp;
        u32 size, lsize, bsize, sets;
        bool success = true;

        size = 0;
        sets = -1u;
        lsize = bsize = cur_cpu_spec->dcache_bsize;
        sizep = of_get_property(np, propnames[0], NULL);
        if (sizep != NULL)
                size = be32_to_cpu(*sizep);
        setsp = of_get_property(np, propnames[1], NULL);
        if (setsp != NULL)
                sets = be32_to_cpu(*setsp);
        bsizep = of_get_property(np, propnames[2], NULL);
        lsizep = of_get_property(np, propnames[3], NULL);
        if (bsizep == NULL)
                bsizep = lsizep;
        if (lsizep == NULL)
                lsizep = bsizep;
        if (lsizep != NULL)
                lsize = be32_to_cpu(*lsizep);
        if (bsizep != NULL)
                bsize = be32_to_cpu(*bsizep);
        if (sizep == NULL || bsizep == NULL || lsizep == NULL)
                success = false;

        /*
         * OF is weird .. it represents fully associative caches
         * as "1 way" which doesn't make much sense and doesn't
         * leave room for direct mapped. We'll assume that 0
         * in OF means direct mapped for that reason.
         */
        if (sets == 1)
                sets = 0;
        else if (sets == 0)
                sets = 1;

        init_cache_info(info, size, lsize, bsize, sets);

        return success;
}

void __init initialize_cache_info(void)
{
        struct device_node *cpu = NULL, *l2, *l3 = NULL;
        u32 pvr;

        /*
         * All shipping POWER8 machines have a firmware bug that
         * puts incorrect information in the device-tree. This will
         * be (hopefully) fixed for future chips but for now hard
         * code the values if we are running on one of these
         */
        pvr = PVR_VER(mfspr(SPRN_PVR));
        if (pvr == PVR_POWER8 || pvr == PVR_POWER8E ||
            pvr == PVR_POWER8NVL) {
                                                /* size    lsize   blk  sets */
                init_cache_info(&ppc64_caches.l1i, 0x8000,   128,  128, 32);
                init_cache_info(&ppc64_caches.l1d, 0x10000,  128,  128, 64);
                init_cache_info(&ppc64_caches.l2,  0x80000,  128,  0,   512);
                init_cache_info(&ppc64_caches.l3,  0x800000, 128,  0,   8192);
        } else
                cpu = of_find_node_by_type(NULL, "cpu");

        /*
         * We're assuming *all* of the CPUs have the same
         * d-cache and i-cache sizes... -Peter
         */
        if (cpu) {
                if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
                        pr_warn("Argh, can't find dcache properties !\n");

                if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
                        pr_warn("Argh, can't find icache properties !\n");

                /*
                 * Try to find the L2 and L3 if any. Assume they are
                 * unified and use the D-side properties.
                 */
                l2 = of_find_next_cache_node(cpu);
                of_node_put(cpu);
                if (l2) {
                        parse_cache_info(l2, false, &ppc64_caches.l2);
                        l3 = of_find_next_cache_node(l2);
                        of_node_put(l2);
                }
                if (l3) {
                        parse_cache_info(l3, false, &ppc64_caches.l3);
                        of_node_put(l3);
                }
        }

        /* For use by binfmt_elf */
        dcache_bsize = ppc64_caches.l1d.block_size;
        icache_bsize = ppc64_caches.l1i.block_size;

        cur_cpu_spec->dcache_bsize = dcache_bsize;
        cur_cpu_spec->icache_bsize = icache_bsize;
}

/*
 * This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause an architectural exception (e.g.,
 * TLB or SLB miss fault).
 *
 * This is used to allocate PACAs and various interrupt stacks that
 * are accessed early in interrupt handlers that must not cause
 * re-entrant interrupts.
 */
__init u64 ppc64_bolted_size(void)
{
#ifdef CONFIG_PPC_BOOK3E
        /* Freescale BookE bolts the entire linear mapping */
        /* XXX: BookE ppc64_rma_limit setup seems to disagree? */
        if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E))
                return linear_map_top;
        /* Other BookE, we assume the first GB is bolted */
        return 1ul << 30;
#else
        /* BookS radix, does not take faults on linear mapping */
        if (early_radix_enabled())
                return ULONG_MAX;

        /* BookS hash, the first segment is bolted */
        if (early_mmu_has_feature(MMU_FTR_1T_SEGMENT))
                return 1UL << SID_SHIFT_1T;
        return 1UL << SID_SHIFT;
#endif
}

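/*
 * Allocate a THREAD_SIZE stack below @limit, preferring the memory
 * node of @cpu. Panics on failure, since boot cannot continue without
 * these stacks.
 */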
static void *__init alloc_stack(unsigned long limit, int cpu)
{
        void *ptr;

        BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);

        ptr = memblock_alloc_try_nid(THREAD_SIZE, THREAD_ALIGN,
                                     MEMBLOCK_LOW_LIMIT, limit,
                                     early_cpu_to_node(cpu));
        if (!ptr)
                panic("cannot allocate stacks");

        return ptr;
}

void __init irqstack_early_init(void)
{
        u64 limit = ppc64_bolted_size();
        unsigned int i;

        /*
         * Interrupt stacks must be in the first segment since we
         * cannot afford to take SLB misses on them. They are not
         * accessed in realmode.
         */
        for_each_possible_cpu(i) {
                softirq_ctx[i] = alloc_stack(limit, i);
                hardirq_ctx[i] = alloc_stack(limit, i);
        }
}

#ifdef CONFIG_PPC_BOOK3E
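/*
 * Allocate the Book3E critical, debug and machine check exception
 * stacks and point the per-CPU paca entries at them.
 */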
void __init exc_lvl_early_init(void)
{
        unsigned int i;

        for_each_possible_cpu(i) {
                void *sp;

                sp = alloc_stack(ULONG_MAX, i);
                critirq_ctx[i] = sp;
                paca_ptrs[i]->crit_kstack = sp + THREAD_SIZE;

                sp = alloc_stack(ULONG_MAX, i);
                dbgirq_ctx[i] = sp;
                paca_ptrs[i]->dbg_kstack = sp + THREAD_SIZE;

                sp = alloc_stack(ULONG_MAX, i);
                mcheckirq_ctx[i] = sp;
                paca_ptrs[i]->mc_kstack = sp + THREAD_SIZE;
        }

        if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
                patch_exception(0x040, exc_debug_debug_book3e);
}
#endif

/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
void __init emergency_stack_init(void)
{
        u64 limit, mce_limit;
        unsigned int i;

        /*
         * Emergency stacks must be under 256MB, we cannot afford to take
         * SLB misses on them. The ABI also requires them to be 128-byte
         * aligned.
         *
         * Since we use these as temporary stacks during secondary CPU
         * bringup, machine check, system reset, and HMI, we need to get
         * at them in real mode. This means they must also be within the RMO
         * region.
         *
         * The IRQ stacks allocated elsewhere in this file are zeroed and
         * initialized in kernel/irq.c. These are initialized here in order
         * to have emergency stacks available as early as possible.
         */
        limit = mce_limit = min(ppc64_bolted_size(), ppc64_rma_size);

        /*
         * Machine check on pseries calls rtas, but can't use the static
         * rtas_args due to a machine check hitting while the lock is held.
         * rtas args have to be under 4GB, so the machine check stack is
         * limited to 4GB so args can be put on stack.
         */
        if (firmware_has_feature(FW_FEATURE_LPAR) && mce_limit > SZ_4G)
                mce_limit = SZ_4G;

        for_each_possible_cpu(i) {
                paca_ptrs[i]->emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;

#ifdef CONFIG_PPC_BOOK3S_64
                /* emergency stack for NMI exception handling. */
                paca_ptrs[i]->nmi_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;

                /* emergency stack for machine check exception handling. */
                paca_ptrs[i]->mc_emergency_sp = alloc_stack(mce_limit, i) + THREAD_SIZE;
#endif
        }
}

#ifdef CONFIG_SMP
/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size of allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
                                        size_t align)
{
        const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NUMA
        int node = early_cpu_to_node(cpu);
        void *ptr;

        if (!node_online(node) || !NODE_DATA(node)) {
                ptr = memblock_alloc_from(size, align, goal);
                pr_info("cpu %d has no node %d or node-local memory\n",
                        cpu, node);
                pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
                         cpu, size, __pa(ptr));
        } else {
                ptr = memblock_alloc_try_nid(size, align, goal,
                                             MEMBLOCK_ALLOC_ACCESSIBLE, node);
                pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
                         "%016lx\n", cpu, size, node, __pa(ptr));
        }
        return ptr;
#else
        return memblock_alloc_from(size, align, goal);
#endif
}

static void __init pcpu_free_bootmem(void *ptr, size_t size)
{
        memblock_free(__pa(ptr), size);
}

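/*
 * Distance hint for the percpu first-chunk allocator: CPUs on the same
 * node are LOCAL_DISTANCE apart, everything else is REMOTE_DISTANCE.
 */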
static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
        if (early_cpu_to_node(from) == early_cpu_to_node(to))
                return LOCAL_DISTANCE;
        else
                return REMOTE_DISTANCE;
}

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

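/*
 * Make sure the kernel page tables are populated down to the PTE level
 * for @addr, allocating any missing levels from memblock. Used by the
 * page-based percpu first chunk fallback.
 */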
static void __init pcpu_populate_pte(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        p4d = p4d_offset(pgd, addr);
        if (p4d_none(*p4d)) {
                pud_t *new;

                new = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
                if (!new)
                        goto err_alloc;
                p4d_populate(&init_mm, p4d, new);
        }

        pud = pud_offset(p4d, addr);
        if (pud_none(*pud)) {
                pmd_t *new;

                new = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
                if (!new)
                        goto err_alloc;
                pud_populate(&init_mm, pud, new);
        }

        pmd = pmd_offset(pud, addr);
        if (!pmd_present(*pmd)) {
                pte_t *new;

                new = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE);
                if (!new)
                        goto err_alloc;
                pmd_populate_kernel(&init_mm, pmd, new);
        }

        return;

err_alloc:
        panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
              __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
}


void __init setup_per_cpu_areas(void)
{
        const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
        size_t atom_size;
        unsigned long delta;
        unsigned int cpu;
        int rc = -EINVAL;

        /*
         * Linear mapping is one of 4K, 1M and 16M.  For 4K, no need
         * to group units.  For larger mappings, use 1M atom which
         * should be large enough to contain a number of units.
         */
        if (mmu_linear_psize == MMU_PAGE_4K)
                atom_size = PAGE_SIZE;
        else
                atom_size = 1 << 20;

        if (pcpu_chosen_fc != PCPU_FC_PAGE) {
                rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
                                            pcpu_alloc_bootmem, pcpu_free_bootmem);
                if (rc)
                        pr_warn("PERCPU: %s allocator failed (%d), "
                                "falling back to page size\n",
                                pcpu_fc_names[pcpu_chosen_fc], rc);
        }

        if (rc < 0)
                rc = pcpu_page_first_chunk(0, pcpu_alloc_bootmem, pcpu_free_bootmem,
                                           pcpu_populate_pte);
        if (rc < 0)
                panic("cannot initialize percpu area (err=%d)", rc);

        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
        for_each_possible_cpu(cpu) {
                __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
                paca_ptrs[cpu]->data_offset = __per_cpu_offset[cpu];
        }
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
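/*
 * Memory hotplug block size: use the platform hook if one is provided,
 * otherwise fall back to the generic minimum.
 */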
unsigned long memory_block_size_bytes(void)
{
        if (ppc_md.memory_block_size)
                return ppc_md.memory_block_size();

        return MIN_MEMORY_BLOCK_SIZE;
}
#endif

#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
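/*
 * Sample period for the perf-based hardlockup detector: watchdog_thresh
 * seconds worth of processor cycles.
 */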
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
        return ppc_proc_freq * watchdog_thresh;
}
#endif

/*
 * The perf based hardlockup detector breaks PMU event based branches, so
 * disable it by default. Book3S has a soft-nmi hardlockup detector based
 * on the decrementer interrupt, so it does not suffer from this problem.
 *
 * It is likely to get false positives in KVM guests, so disable it there
 * by default too. PowerVM will not stop or arbitrarily oversubscribe
 * CPUs, but give a minimum regular allotment even with SPLPAR, so enable
 * the detector for non-KVM guests, which are assumed to be running under
 * PowerVM.
 */
static int __init disable_hardlockup_detector(void)
{
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
        hardlockup_detector_disable();
#else
        if (firmware_has_feature(FW_FEATURE_LPAR)) {
                if (is_kvm_guest())
                        hardlockup_detector_disable();
        }
#endif

        return 0;
}
early_initcall(disable_hardlockup_detector);