linux/arch/powerpc/kernel/setup_32.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common prep/pmac/chrp boot and setup code.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/tty.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/nvram.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/cputable.h>
#include <asm/bootx.h>
#include <asm/btext.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/pmac_feature.h>
#include <asm/sections.h>
#include <asm/nvram.h>
#include <asm/xmon.h>
#include <asm/time.h>
#include <asm/serial.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>
#include <asm/cpu_has_feature.h>
#include <asm/asm-prototypes.h>
#include <asm/kdump.h>
#include <asm/feature-fixups.h>

#include "setup.h"

#define DBG(fmt...)

extern void bootx_init(unsigned long r4, unsigned long phys);

int boot_cpuid_phys;
EXPORT_SYMBOL_GPL(boot_cpuid_phys);

int smp_hw_index[NR_CPUS];
EXPORT_SYMBOL(smp_hw_index);

unsigned long ISA_DMA_THRESHOLD;
unsigned int DMA_MODE_READ;
unsigned int DMA_MODE_WRITE;

EXPORT_SYMBOL(DMA_MODE_READ);
EXPORT_SYMBOL(DMA_MODE_WRITE);

/*
 * This is run before start_kernel().  The kernel has been relocated
 * and we are running with enough of the MMU enabled to have our
 * proper kernel virtual addresses.
 *
 * We do the initial parsing of the flat device tree and prepare
 * for the MMU to be fully initialized.
 */
notrace void __init machine_init(u64 dt_ptr)
{
        unsigned int *addr = (unsigned int *)patch_site_addr(&patch__memset_nocache);
        unsigned long insn;

        /* Configure static keys first, now that we're relocated. */
        setup_feature_keys();

        /* Enable early debugging if any specified (see udbg.h) */
        udbg_early_init();
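
        /*
         * memcpy() and memset() start out skipping their cache-optimised
         * (dcbz-based) paths with an unconditional branch.  Now that the
         * caches are usable, patch memcpy()'s skip out entirely and make
         * memset()'s conditional, so dcbz is only used when zeroing.
         */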
        patch_instruction_site(&patch__memcpy_nocache, PPC_INST_NOP);

        insn = create_cond_branch(addr, branch_target(addr), 0x820000);
        patch_instruction(addr, insn);  /* replace b by bne cr0 */

        /* Do some early initialization based on the flat device tree */
        early_init_devtree(__va(dt_ptr));

        early_init_mmu();

        setup_kdump_trampoline();
}

/* Handles the "l2cr=xxxx" command-line option */
static int __init ppc_setup_l2cr(char *str)
{
        if (cpu_has_feature(CPU_FTR_L2CR)) {
                unsigned long val = simple_strtoul(str, NULL, 0);
                printk(KERN_INFO "l2cr set to %lx\n", val);
                _set_L2CR(0);           /* force invalidate by disabling the cache */
                _set_L2CR(val);         /* then set and enable it */
        }
        return 1;
}
__setup("l2cr=", ppc_setup_l2cr);

/* Handles the "l3cr=xxxx" command-line option */
static int __init ppc_setup_l3cr(char *str)
{
        if (cpu_has_feature(CPU_FTR_L3CR)) {
                unsigned long val = simple_strtoul(str, NULL, 0);
                printk(KERN_INFO "l3cr set to %lx\n", val);
                _set_L3CR(val);         /* set and enable it */
        }
        return 1;
}
__setup("l3cr=", ppc_setup_l3cr);

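/*
 * Late (arch_initcall) hook: clear the boot progress display and run
 * the platform's init routine, if it provides one.
 */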
static int __init ppc_init(void)
{
        /* clear the progress line */
        if (ppc_md.progress)
                ppc_md.progress("             ", 0xffff);

        /* call platform init */
        if (ppc_md.init != NULL) {
                ppc_md.init();
        }
        return 0;
}
arch_initcall(ppc_init);

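/* Allocate a THREAD_SIZE-aligned stack from memblock; panic if boot memory is exhausted. */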
static void *__init alloc_stack(void)
{
        void *ptr = memblock_alloc(THREAD_SIZE, THREAD_SIZE);

        if (!ptr)
                panic("cannot allocate %d bytes for stack at %pS\n",
                      THREAD_SIZE, (void *)_RET_IP_);

        return ptr;
}

void __init irqstack_early_init(void)
{
        unsigned int i;

        /* interrupt stacks must be in lowmem; we get that for free on ppc32
         * as the memblock is limited to lowmem by default */
        for_each_possible_cpu(i) {
                softirq_ctx[i] = alloc_stack();
                hardirq_ctx[i] = alloc_stack();
        }
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
void __init exc_lvl_early_init(void)
{
        unsigned int i, hw_cpu;

        /* interrupt stacks must be in lowmem; we get that for free on ppc32
         * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
        for_each_possible_cpu(i) {
#ifdef CONFIG_SMP
                hw_cpu = get_hard_smp_processor_id(i);
#else
                hw_cpu = 0;
#endif

                critirq_ctx[hw_cpu] = alloc_stack();
#ifdef CONFIG_BOOKE
                dbgirq_ctx[hw_cpu] = alloc_stack();
                mcheckirq_ctx[hw_cpu] = alloc_stack();
#endif
        }
}
#endif

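/*
 * Pick a power_save (idle) callback to match the CPU's doze/nap
 * capability: ppc6xx_idle on Book3S-32, e500_idle on E500.
 */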
void __init setup_power_save(void)
{
#ifdef CONFIG_PPC_BOOK3S_32
        if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
            cpu_has_feature(CPU_FTR_CAN_NAP))
                ppc_md.power_save = ppc6xx_idle;
#endif

#ifdef CONFIG_E500
        if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
            cpu_has_feature(CPU_FTR_CAN_NAP))
                ppc_md.power_save = e500_idle;
#endif
}

__init void initialize_cache_info(void)
{
        /*
         * Set cache line size based on type of cpu as a default.
         * Systems with OF can look in the properties on the cpu node(s)
         * for a possibly more accurate value.
         */
        dcache_bsize = cur_cpu_spec->dcache_bsize;
        icache_bsize = cur_cpu_spec->icache_bsize;
        ucache_bsize = 0;
        if (cpu_has_feature(CPU_FTR_UNIFIED_ID_CACHE))
                ucache_bsize = icache_bsize = dcache_bsize;
}