linux/arch/arm/kernel/setup.c
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
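/*
 * Buffer for the "fpe=" boot parameter. Note the argument is copied
 * verbatim below: it is silently truncated to eight bytes and is not
 * NUL-terminated when it fills the buffer.
 */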
char fpe_type[8];

static int __init fpe_setup(char *line)
{
        memcpy(fpe_type, line, 8);
        return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(const struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

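/*
 * Small three-word stacks for the IRQ, abort and undefined-instruction
 * exception modes; the exception entry code spills a few registers here
 * before switching to the SVC stack.
 */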
struct stack {
        u32 irq[3];
        u32 abt[3];
        u32 und[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

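/*
 * Run-time endianness probe: reading the union back as a char yields
 * its first byte in memory, i.e. 'l' on little-endian and 'b' on
 * big-endian CPUs.
 */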
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
        {
                .name = "Video RAM",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel code",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
        {
                .name = "reserved",
                .start = 0x3bc,
                .end = 0x3be,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x378,
                .end = 0x37f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x278,
                .end = 0x27f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

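/* Human-readable architecture names, indexed by the CPU_ARCH_* values. */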
static const char *proc_arch[] = {
        "undefined/unknown",
        "3",
        "4",
        "4T",
        "5",
        "5T",
        "5TE",
        "5TEJ",
        "6TEJ",
        "7",
        "7M",
        "?(12)",
        "?(13)",
        "?(14)",
        "?(15)",
        "?(16)",
        "?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
        return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
        int cpu_arch;

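        /*
         * The main ID register layout differs between pre-ARM7, ARM7
         * and post-ARM7 parts; for post-ARM7 parts an architecture
         * field of 0xf selects the revised CPUID scheme probed below.
         */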
        if ((read_cpuid_id() & 0x0008f000) == 0) {
                cpu_arch = CPU_ARCH_UNKNOWN;
        } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
                cpu_arch = (read_cpuid_id() >> 16) & 7;
                if (cpu_arch)
                        cpu_arch += CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
                unsigned int mmfr0;

                /* Revised CPUID format. Read the Memory Model Feature
                 * Register 0 and check for VMSAv7 or PMSAv7 */
                asm("mrc        p15, 0, %0, c0, c1, 4"
                    : "=r" (mmfr0));
                if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
                    (mmfr0 & 0x000000f0) >= 0x00000030)
                        cpu_arch = CPU_ARCH_ARMv7;
                else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
                         (mmfr0 & 0x000000f0) == 0x00000020)
                        cpu_arch = CPU_ARCH_ARMv6;
                else
                        cpu_arch = CPU_ARCH_UNKNOWN;
        } else
                cpu_arch = CPU_ARCH_UNKNOWN;

        return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
        BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

        return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
        int aliasing_icache;
        unsigned int id_reg, num_sets, line_size;

        /* PIPT caches never alias. */
        if (icache_is_pipt())
                return 0;

        /* arch specifies the register format */
        switch (arch) {
        case CPU_ARCH_ARMv7:
                asm("mcr        p15, 2, %0, c0, c0, 0 @ set CSSELR"
                    : /* No output operands */
                    : "r" (1));
                isb();
                asm("mrc        p15, 1, %0, c0, c0, 0 @ read CCSIDR"
                    : "=r" (id_reg));
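                /*
                 * CCSIDR.LineSize encodes log2(words per line) minus
                 * two, and NumSets is stored minus one, hence the
                 * adjustments below.
                 */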
                line_size = 4 << ((id_reg & 0x7) + 2);
                num_sets = ((id_reg >> 13) & 0x7fff) + 1;
                aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
                break;
        case CPU_ARCH_ARMv6:
                aliasing_icache = read_cpuid_cachetype() & (1 << 11);
                break;
        default:
                /* I-cache aliases will be handled by D-cache aliasing code */
                aliasing_icache = 0;
        }

        return aliasing_icache;
}

static void __init cacheid_init(void)
{
        unsigned int arch = cpu_architecture();

        if (arch == CPU_ARCH_ARMv7M) {
                cacheid = 0;
        } else if (arch >= CPU_ARCH_ARMv6) {
                unsigned int cachetype = read_cpuid_cachetype();
                if ((cachetype & (7 << 29)) == 4 << 29) {
                        /* ARMv7 register format */
                        arch = CPU_ARCH_ARMv7;
                        cacheid = CACHEID_VIPT_NONALIASING;
                        switch (cachetype & (3 << 14)) {
                        case (1 << 14):
                                cacheid |= CACHEID_ASID_TAGGED;
                                break;
                        case (3 << 14):
                                cacheid |= CACHEID_PIPT;
                                break;
                        }
                } else {
                        arch = CPU_ARCH_ARMv6;
                        if (cachetype & (1 << 23))
                                cacheid = CACHEID_VIPT_ALIASING;
                        else
                                cacheid = CACHEID_VIPT_NONALIASING;
                }
                if (cpu_has_aliasing_icache(arch))
                        cacheid |= CACHEID_VIPT_I_ALIASING;
        } else {
                cacheid = CACHEID_VIVT;
        }

        printk("CPU: %s data cache, %s instruction cache\n",
                cache_is_vivt() ? "VIVT" :
                cache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
                cache_is_vivt() ? "VIVT" :
                icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
                icache_is_vipt_aliasing() ? "VIPT aliasing" :
                icache_is_pipt() ? "PIPT" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
        extern void printascii(const char *);
        char buf[256];
        va_list ap;

        va_start(ap, str);
        vsnprintf(buf, sizeof(buf), str, ap);
        va_end(ap);

#ifdef CONFIG_DEBUG_LL
        printascii(buf);
#endif
        printk("%s", buf);
}

static void __init cpuid_init_hwcaps(void)
{
        unsigned int divide_instrs, vmsa;

        if (cpu_architecture() < CPU_ARCH_ARMv7)
                return;

        divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

        switch (divide_instrs) {
        case 2:
                elf_hwcap |= HWCAP_IDIVA;
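                /* fall through: ARM-mode divide implies Thumb-mode divide */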
        case 1:
                elf_hwcap |= HWCAP_IDIVT;
        }

        /* LPAE implies atomic ldrd/strd instructions */
        vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
        if (vmsa >= 5)
                elf_hwcap |= HWCAP_LPAE;
}

static void __init feat_v6_fixup(void)
{
        int id = read_cpuid_id();

        if ((id & 0xff0f0000) != 0x41070000)
                return;

        /*
         * HWCAP_TLS is available only on 1136 r1p0 and later,
         * see also kuser_get_tls_init.
         */
        if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
                elf_hwcap &= ~HWCAP_TLS;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
        unsigned int cpu = smp_processor_id();
        struct stack *stk = &stacks[cpu];

        if (cpu >= NR_CPUS) {
                printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
                BUG();
        }

        /*
         * This only works on resume and secondary cores. For booting on the
         * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
         */
        set_my_cpu_offset(per_cpu_offset(cpu));

        cpu_proc_init();

        /*
         * Define the placement constraint for the inline asm directive below.
         * In Thumb-2, msr with an immediate value is not allowed.
         */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC     "r"
#else
#define PLC     "I"
#endif

        /*
         * setup stacks for re-entrant exception handlers
         */
        __asm__ (
        "msr    cpsr_c, %1\n\t"
        "add    r14, %0, %2\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %3\n\t"
        "add    r14, %0, %4\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %5\n\t"
        "add    r14, %0, %6\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %7"
            :
            : "r" (stk),
              PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
#endif
}

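/*
 * Map of logical CPU numbers to hardware (MPIDR affinity) IDs; entries
 * start out as MPIDR_INVALID until the boot code fills them in.
 */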
u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
        int i;
        u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
        u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

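        /*
         * Make the booting CPU logical CPU 0, and give whichever CPU
         * previously occupied slot 0 the booting CPU's old index.
         */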
        cpu_logical_map(0) = cpu;
        for (i = 1; i < nr_cpu_ids; ++i)
                cpu_logical_map(i) = i == cpu ? 0 : i;

        /*
         * Clear __my_cpu_offset on the boot CPU to avoid a hang caused
         * by using per-cpu variables too early; lockdep, for example,
         * accesses per-cpu variables inside lock_release().
         */
        set_my_cpu_offset(0);

        printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute the shifts required at each
 *                        affinity level to build a linear index from an
 *                        MPIDR value. The resulting algorithm is a
 *                        collision-free hash carried out through
 *                        shifting and ORing.
 */
static void __init smp_build_mpidr_hash(void)
{
        u32 i, affinity;
        u32 fs[3], bits[3], ls, mask = 0;
        /*
         * Pre-scan the list of MPIDRs and filter out bits that do
         * not contribute to affinity levels, i.e. they never toggle.
         */
        for_each_possible_cpu(i)
                mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
        pr_debug("mask of set bits 0x%x\n", mask);
        /*
         * Find and stash the last and first bit set at all affinity levels to
         * check how many bits are required to represent them.
         */
        for (i = 0; i < 3; i++) {
                affinity = MPIDR_AFFINITY_LEVEL(mask, i);
                /*
                 * Find the MSB and LSB positions to determine how many
                 * bits are required to express the affinity level.
                 */
                ls = fls(affinity);
                fs[i] = affinity ? ffs(affinity) - 1 : 0;
                bits[i] = ls - fs[i];
        }
        /*
         * An index can be created from the MPIDR by isolating the
         * significant bits at each affinity level and shifting them in
         * order to compress the 24-bit value space into a smaller set
         * of values. This is equivalent to hashing the MPIDR through
         * shifting and ORing. It is a collision-free hash, though not
         * minimal, since some levels might contain a number of CPUs
         * that is not an exact power of 2 and their bit representation
         * might contain holes, e.g. MPIDR[7:0] = {0x2, 0x80}.
         */
        mpidr_hash.shift_aff[0] = fs[0];
        mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
        mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
                                                (bits[1] + bits[0]);
        mpidr_hash.mask = mask;
        mpidr_hash.bits = bits[2] + bits[1] + bits[0];
        pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
                                mpidr_hash.shift_aff[0],
                                mpidr_hash.shift_aff[1],
                                mpidr_hash.shift_aff[2],
                                mpidr_hash.mask,
                                mpidr_hash.bits);
        /*
         * 4x is an arbitrary value used to warn on a hash table much bigger
         * than expected on most systems.
         */
        if (mpidr_hash_size() > 4 * num_possible_cpus())
                pr_warn("Large number of MPIDR hash buckets detected\n");
        sync_cache_w(&mpidr_hash);
}
#endif

static void __init setup_processor(void)
{
        struct proc_info_list *list;

        /*
         * locate processor in the list of supported processor
         * types.  The linker builds this table for us from the
         * entries in arch/arm/mm/proc-*.S
         */
        list = lookup_processor_type(read_cpuid_id());
        if (!list) {
                printk("CPU configuration botched (ID %08x), unable "
                       "to continue.\n", read_cpuid_id());
                while (1);
        }

        cpu_name = list->cpu_name;
        __cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
        processor = *list->proc;
#endif
#ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
        cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
        cpu_cache = *list->cache;
#endif

        printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
               cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
               proc_arch[cpu_architecture()], cr_alignment);

        snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
                 list->arch_name, ENDIANNESS);
        snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
                 list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;

        cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif

        feat_v6_fixup();

        cacheid_init();
        cpu_init();
}

void __init dump_machine_table(void)
{
        const struct machine_desc *p;

        early_print("Available machine support:\n\nID (hex)\tNAME\n");
        for_each_machine_desc(p)
                early_print("%08x\t%s\n", p->nr, p->name);

        early_print("\nPlease check your kernel config and/or bootloader.\n");

        while (true)
                /* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
{
        struct membank *bank = &meminfo.bank[meminfo.nr_banks];

        if (meminfo.nr_banks >= NR_BANKS) {
                printk(KERN_CRIT "NR_BANKS too low, "
                        "ignoring memory at 0x%08llx\n", (long long)start);
                return -EINVAL;
        }

        /*
         * Ensure that start/size are aligned to a page boundary.
         * Size is appropriately rounded down, start is rounded up.
         */
        size -= start & ~PAGE_MASK;
        bank->start = PAGE_ALIGN(start);

#ifndef CONFIG_ARM_LPAE
        if (bank->start + size < bank->start) {
                printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
                        "32-bit physical address space\n", (long long)start);
                /*
                 * To ensure bank->start + bank->size is representable in
                 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
                 * This means we lose a page after masking.
                 */
                size = ULONG_MAX - bank->start;
        }
#endif

        bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

        /*
         * Check whether this memory region still has a non-zero size
         * after page alignment; reject it if not.
         */
        if (bank->size == 0)
                return -EINVAL;

        meminfo.nr_banks++;
        return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
        static int usermem __initdata = 0;
        phys_addr_t size;
        phys_addr_t start;
        char *endp;

        /*
         * If the user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                usermem = 1;
                meminfo.nr_banks = 0;
        }

        start = PHYS_OFFSET;
        size  = memparse(p, &endp);
        if (*endp == '@')
                start = memparse(endp + 1, NULL);

        arm_add_memory(start, size);

        return 0;
}
early_param("mem", early_mem);

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
        struct memblock_region *region;
        struct resource *res;

        kernel_code.start   = virt_to_phys(_text);
        kernel_code.end     = virt_to_phys(_etext - 1);
        kernel_data.start   = virt_to_phys(_sdata);
        kernel_data.end     = virt_to_phys(_end - 1);

        for_each_memblock(memory, region) {
                res = alloc_bootmem_low(sizeof(*res));
                res->name  = "System RAM";
                res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
                res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                request_resource(&iomem_resource, res);

                if (kernel_code.start >= res->start &&
                    kernel_code.end <= res->end)
                        request_resource(res, &kernel_code);
                if (kernel_data.start >= res->start &&
                    kernel_data.end <= res->end)
                        request_resource(res, &kernel_data);
        }

        if (mdesc->video_start) {
                video_ram.start = mdesc->video_start;
                video_ram.end   = mdesc->video_end;
                request_resource(&iomem_resource, &video_ram);
        }

        /*
         * Some machines can never possess lp0, lp1 or lp2, so only
         * reserve those legacy parallel-port ranges when the machine
         * descriptor requests it.
         */
        if (mdesc->reserve_lp0)
                request_resource(&ioport_resource, &lp0);
        if (mdesc->reserve_lp1)
                request_resource(&ioport_resource, &lp1);
        if (mdesc->reserve_lp2)
                request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
        .orig_video_lines       = 30,
        .orig_video_cols        = 80,
        .orig_video_mode        = 0,
        .orig_video_ega_bx      = 0,
        .orig_video_isVGA       = 1,
        .orig_video_points      = 8
};
#endif

static int __init customize_machine(void)
{
        /*
         * Customize platform devices, or add new ones. On DT-based
         * machines, fall back to populating the machine from the
         * device tree when no init_machine callback is provided;
         * otherwise every machine would need one.
         */
        if (machine_desc->init_machine)
                machine_desc->init_machine();
#ifdef CONFIG_OF
        else
                of_platform_populate(NULL, of_default_bus_match_table,
                                        NULL, NULL);
#endif
        return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
        if (machine_desc->init_late)
                machine_desc->init_late();
        return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
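/* Size of lowmem in bytes, used to scale the "crashkernel=" request. */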
static inline unsigned long long get_total_mem(void)
{
        unsigned long total;

        total = max_low_pfn - min_low_pfn;
        return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel="
 * kernel command line parameter. The reserved memory is used by a dump
 * capture kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
        unsigned long long crash_size, crash_base;
        unsigned long long total_mem;
        int ret;

        total_mem = get_total_mem();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret)
                return;

        ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
        if (ret < 0) {
                printk(KERN_WARNING "crashkernel reservation failed - "
                       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
                return;
        }

        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
               "for crashkernel (System RAM: %ldMB)\n",
               (unsigned long)(crash_size >> 20),
               (unsigned long)(crash_base >> 20),
               (unsigned long)(total_mem >> 20));

        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
        insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

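/* Order memory banks by ascending start pfn for the sort() in setup_arch(). */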
static int __init meminfo_cmp(const void *_a, const void *_b)
{
        const struct membank *a = _a, *b = _b;
        long cmp = bank_pfn_start(a) - bank_pfn_start(b);
        return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
        sync_boot_mode();

        if (is_hyp_mode_available()) {
                pr_info("CPU: All CPU(s) started in HYP mode.\n");
                pr_info("CPU: Virtualization extensions available.\n");
        } else if (is_hyp_mode_mismatched()) {
                pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
                        __boot_cpu_mode & MODE_MASK);
                pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
        } else
                pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
        const struct machine_desc *mdesc;

        setup_processor();
        mdesc = setup_machine_fdt(__atags_pointer);
        if (!mdesc)
                mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
        machine_desc = mdesc;
        machine_name = mdesc->name;

        setup_dma_zone(mdesc);

        if (mdesc->reboot_mode != REBOOT_HARD)
                reboot_mode = mdesc->reboot_mode;

        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code   = (unsigned long) _etext;
        init_mm.end_data   = (unsigned long) _edata;
        init_mm.brk        = (unsigned long) _end;

        /* populate cmd_line too for later use, preserving boot_command_line */
        strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = cmd_line;

        parse_early_param();

        sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
        sanity_check_meminfo();
        arm_memblock_init(&meminfo, mdesc);

        paging_init(mdesc);
        request_standard_resources(mdesc);

        if (mdesc->restart)
                arm_pm_restart = mdesc->restart;

        unflatten_device_tree();

        arm_dt_init_cpu_maps();
        psci_init();
#ifdef CONFIG_SMP
        if (is_smp()) {
                if (!mdesc->smp_init || !mdesc->smp_init()) {
                        if (psci_smp_available())
                                smp_set_ops(&psci_smp_ops);
                        else if (mdesc->smp)
                                smp_set_ops(mdesc->smp);
                }
                smp_init_cpus();
                smp_build_mpidr_hash();
        }
#endif

        if (!is_smp())
                hyp_mode_check();

        reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
        handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif

        if (mdesc->init_early)
                mdesc->init_early();
}


static int __init topology_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
                cpuinfo->cpu.hotpluggable = 1;
                register_cpu(&cpuinfo->cpu, cpu);
        }

        return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
        struct proc_dir_entry *res;

        res = proc_mkdir("cpu", NULL);
        if (!res)
                return -ENOMEM;
        return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
        "swp",
        "half",
        "thumb",
        "26bit",
        "fastmult",
        "fpa",
        "vfp",
        "edsp",
        "java",
        "iwmmxt",
        "crunch",
        "thumbee",
        "neon",
        "vfpv3",
        "vfpv3d16",
        "tls",
        "vfpv4",
        "idiva",
        "idivt",
        "vfpd32",
        "lpae",
        NULL
};

static int c_show(struct seq_file *m, void *v)
{
        int i, j;
        u32 cpuid;

        for_each_online_cpu(i) {
                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
                cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
                seq_printf(m, "model name\t: %s rev %d (%s)\n",
                           cpu_name, cpuid & 15, elf_platform);

                /* dump out the processor features */
                seq_puts(m, "Features\t: ");

                for (j = 0; hwcap_str[j]; j++)
                        if (elf_hwcap & (1 << j))
                                seq_printf(m, "%s ", hwcap_str[j]);

                seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
                seq_printf(m, "CPU architecture: %s\n",
                           proc_arch[cpu_architecture()]);

                if ((cpuid & 0x0008f000) == 0x00000000) {
                        /* pre-ARM7 */
                        seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
                } else {
                        if ((cpuid & 0x0008f000) == 0x00007000) {
                                /* ARM7 */
                                seq_printf(m, "CPU variant\t: 0x%02x\n",
                                           (cpuid >> 16) & 127);
                        } else {
                                /* post-ARM7 */
                                seq_printf(m, "CPU variant\t: 0x%x\n",
                                           (cpuid >> 20) & 15);
                        }
                        seq_printf(m, "CPU part\t: 0x%03x\n",
                                   (cpuid >> 4) & 0xfff);
                }
                seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
        }

        seq_printf(m, "Hardware\t: %s\n", machine_name);
        seq_printf(m, "Revision\t: %04x\n", system_rev);
        seq_printf(m, "Serial\t\t: %08x%08x\n",
                   system_serial_high, system_serial_low);

        return 0;
}

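/*
 * /proc/cpuinfo is produced as a single record: c_start() yields one
 * non-NULL token at position 0 and c_next() ends the sequence.
 */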
static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show
};