/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
        memcpy(fpe_type, line, 8);
        return 1;
}

__setup("fpe=", fpe_setup);
#endif
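
/*
 * Note: __setup() registers a kernel command-line handler, so a
 * "fpe=..." option from the boot loader is copied into fpe_type before
 * the floating point emulator initialises.  Returning 1 marks the
 * option as consumed.
 */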

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *,
                              struct proc_info_list *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
        u32 irq[3];
        u32 abt[3];
        u32 und[3];
        u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
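
/*
 * The cast above reads the least significant byte of the long: byte 0
 * ('l') on a little-endian CPU, byte 3 ('b') on a big-endian one, so
 * ENDIANNESS evaluates to 'l' or 'b' accordingly.
 */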

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
        {
                .name = "Video RAM",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel code",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
        {
                .name = "reserved",
                .start = 0x3bc,
                .end = 0x3be,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x378,
                .end = 0x37f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x278,
                .end = 0x27f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        }
};
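
/*
 * 0x3bc, 0x378 and 0x278 are the legacy PC parallel port (LPT) I/O
 * ranges; machines that can carry such a port reserve them via the
 * reserve_lp0/1/2 flags in their machine descriptor (see
 * request_standard_resources() below).
 */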

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
        "undefined/unknown",
        "3",
        "4",
        "4T",
        "5",
        "5T",
        "5TE",
        "5TEJ",
        "6TEJ",
        "7",
        "7M",
        "?(12)",
        "?(13)",
        "?(14)",
        "?(15)",
        "?(16)",
        "?(17)",
};
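
/*
 * proc_arch[] is indexed by the CPU_ARCH_* constants, so the order of
 * these strings must match that enumeration; the result shows up in
 * the boot banner and in the "CPU architecture" line of /proc/cpuinfo.
 */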

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
        return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
        int cpu_arch;

        if ((read_cpuid_id() & 0x0008f000) == 0) {
                cpu_arch = CPU_ARCH_UNKNOWN;
        } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
                cpu_arch = (read_cpuid_id() >> 16) & 7;
                if (cpu_arch)
                        cpu_arch += CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
                unsigned int mmfr0;

                /* Revised CPUID format. Read the Memory Model Feature
                 * Register 0 and check for VMSAv7 or PMSAv7 */
                asm("mrc        p15, 0, %0, c0, c1, 4"
                    : "=r" (mmfr0));
                if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
                    (mmfr0 & 0x000000f0) >= 0x00000030)
                        cpu_arch = CPU_ARCH_ARMv7;
                else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
                         (mmfr0 & 0x000000f0) == 0x00000020)
                        cpu_arch = CPU_ARCH_ARMv6;
                else
                        cpu_arch = CPU_ARCH_UNKNOWN;
        } else
                cpu_arch = CPU_ARCH_UNKNOWN;

        return cpu_arch;
}
#endif
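
/*
 * Decoding notes: the main ID register's architecture field (bits
 * [19:16]) reads 0xf on "revised CPUID" cores, which is why those fall
 * through to MMFR0: its VMSA/PMSA support fields (bits [3:0] and
 * [7:4]) then distinguish ARMv6 (value 2) from ARMv7 (value 3 or
 * higher), per the checks above.
 */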

int __pure cpu_architecture(void)
{
        BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

        return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
        int aliasing_icache;
        unsigned int id_reg, num_sets, line_size;

        /* PIPT caches never alias. */
        if (icache_is_pipt())
                return 0;

        /* arch specifies the register format */
        switch (arch) {
        case CPU_ARCH_ARMv7:
                asm("mcr        p15, 2, %0, c0, c0, 0 @ set CSSELR"
                    : /* No output operands */
                    : "r" (1));
                isb();
                asm("mrc        p15, 1, %0, c0, c0, 0 @ read CCSIDR"
                    : "=r" (id_reg));
                line_size = 4 << ((id_reg & 0x7) + 2);
                num_sets = ((id_reg >> 13) & 0x7fff) + 1;
                aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
                break;
        case CPU_ARCH_ARMv6:
                aliasing_icache = read_cpuid_cachetype() & (1 << 11);
                break;
        default:
                /* I-cache aliases will be handled by D-cache aliasing code */
                aliasing_icache = 0;
        }

        return aliasing_icache;
}
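
/*
 * line_size * num_sets is the size of one cache way, i.e. the span of
 * virtual index bits.  Worked example (illustrative figures): a 32KB,
 * 4-way L1 I-cache with 64-byte lines has 128 sets, so 64 * 128 = 8KB
 * per way; with 4KB pages that exceeds PAGE_SIZE, so the I-cache can
 * alias.  Writing 1 to CSSELR first selects the level 1 instruction
 * cache, so that CCSIDR describes the right cache.
 */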

static void __init cacheid_init(void)
{
        unsigned int arch = cpu_architecture();

        if (arch == CPU_ARCH_ARMv7M) {
                cacheid = 0;
        } else if (arch >= CPU_ARCH_ARMv6) {
                unsigned int cachetype = read_cpuid_cachetype();
                if ((cachetype & (7 << 29)) == 4 << 29) {
                        /* ARMv7 register format */
                        arch = CPU_ARCH_ARMv7;
                        cacheid = CACHEID_VIPT_NONALIASING;
                        switch (cachetype & (3 << 14)) {
                        case (1 << 14):
                                cacheid |= CACHEID_ASID_TAGGED;
                                break;
                        case (3 << 14):
                                cacheid |= CACHEID_PIPT;
                                break;
                        }
                } else {
                        arch = CPU_ARCH_ARMv6;
                        if (cachetype & (1 << 23))
                                cacheid = CACHEID_VIPT_ALIASING;
                        else
                                cacheid = CACHEID_VIPT_NONALIASING;
                }
                if (cpu_has_aliasing_icache(arch))
                        cacheid |= CACHEID_VIPT_I_ALIASING;
        } else {
                cacheid = CACHEID_VIVT;
        }

        pr_info("CPU: %s data cache, %s instruction cache\n",
                cache_is_vivt() ? "VIVT" :
                cache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
                cache_is_vivt() ? "VIVT" :
                icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
                icache_is_vipt_aliasing() ? "VIPT aliasing" :
                icache_is_pipt() ? "PIPT" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
        extern void printascii(const char *);
        char buf[256];
        va_list ap;

        va_start(ap, str);
        vsnprintf(buf, sizeof(buf), str, ap);
        va_end(ap);

#ifdef CONFIG_DEBUG_LL
        printascii(buf);
#endif
        printk("%s", buf);
}

static void __init cpuid_init_hwcaps(void)
{
        unsigned int divide_instrs, vmsa;

        if (cpu_architecture() < CPU_ARCH_ARMv7)
                return;

        divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

        switch (divide_instrs) {
        case 2:
                elf_hwcap |= HWCAP_IDIVA;
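                /* fall through: ISAR0.Divide == 2 means SDIV/UDIV exist
                 * in both ARM and Thumb state, so the Thumb-only hwcap
                 * below applies as well */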
        case 1:
                elf_hwcap |= HWCAP_IDIVT;
        }

        /* LPAE implies atomic ldrd/strd instructions */
        vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
        if (vmsa >= 5)
                elf_hwcap |= HWCAP_LPAE;
}

static void __init elf_hwcap_fixup(void)
{
        unsigned id = read_cpuid_id();
        unsigned sync_prim;

        /*
         * HWCAP_TLS is available only on 1136 r1p0 and later,
         * see also kuser_get_tls_init.
         */
        if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
            ((id >> 20) & 3) == 0) {
                elf_hwcap &= ~HWCAP_TLS;
                return;
        }

        /* Verify if CPUID scheme is implemented */
        if ((id & 0x000f0000) != 0x000f0000)
                return;

        /*
         * If the CPU supports LDREX/STREX and LDREXB/STREXB,
         * avoid advertising SWP; it may not be atomic with
         * multiprocessing cores.
         */
        sync_prim = ((read_cpuid_ext(CPUID_EXT_ISAR3) >> 8) & 0xf0) |
                    ((read_cpuid_ext(CPUID_EXT_ISAR4) >> 20) & 0x0f);
        if (sync_prim >= 0x13)
                elf_hwcap &= ~HWCAP_SWP;
}
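
/*
 * sync_prim packs ISAR3.SynchPrim (bits [15:12]) into the high nibble
 * and ISAR4.SynchPrim_frac (bits [23:20]) into the low one; 0x13 is
 * the combination documented to include the byte, halfword and
 * doubleword exclusives, at which point SWP is no longer needed.
 */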

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
        unsigned int cpu = smp_processor_id();
        struct stack *stk = &stacks[cpu];

        if (cpu >= NR_CPUS) {
                pr_crit("CPU%u: bad primary CPU number\n", cpu);
                BUG();
        }

        /*
         * This only works on resume and secondary cores. For booting on the
         * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
         */
        set_my_cpu_offset(per_cpu_offset(cpu));

        cpu_proc_init();

        /*
         * Define the placement constraint for the inline asm directive below.
         * In Thumb-2, msr with an immediate value is not allowed.
         */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC     "r"
#else
#define PLC     "I"
#endif

        /*
         * setup stacks for re-entrant exception handlers
         */
        __asm__ (
        "msr    cpsr_c, %1\n\t"
        "add    r14, %0, %2\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %3\n\t"
        "add    r14, %0, %4\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %5\n\t"
        "add    r14, %0, %6\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %7\n\t"
        "add    r14, %0, %8\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %9"
            :
            : "r" (stk),
              PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
              "I" (offsetof(struct stack, fiq[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
#endif
}
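
/*
 * The asm block above switches in turn into IRQ, ABT, UND and FIQ mode
 * (with IRQs and FIQs masked), points each mode's banked SP at its
 * three-word slot in struct stack, and finally drops back into SVC
 * mode.  SP itself cannot be named in the clobber list, hence the
 * staging through r14.
 */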

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
        int i;
        u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
        u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

        cpu_logical_map(0) = cpu;
        for (i = 1; i < nr_cpu_ids; ++i)
                cpu_logical_map(i) = i == cpu ? 0 : i;

        /*
         * clear __my_cpu_offset on boot CPU to avoid hang caused by
         * using percpu variable early, for example, lockdep will
         * access percpu variable inside lock_release
         */
        set_my_cpu_offset(0);

        pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}
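
/*
 * The loop above simply swaps logical CPU 0 with whichever hardware
 * CPU we booted on: e.g. if the boot CPU has Aff0 == 2, the logical
 * map becomes { 2, 1, 0, 3, ... }, so the boot CPU is always logical 0.
 */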

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *                        level in order to build a linear index from an
 *                        MPIDR value. Resulting algorithm is a collision
 *                        free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
        u32 i, affinity;
        u32 fs[3], bits[3], ls, mask = 0;
        /*
         * Pre-scan the list of MPIDRs and filter out bits that do
         * not contribute to affinity levels, i.e. they never toggle.
         */
        for_each_possible_cpu(i)
                mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
        pr_debug("mask of set bits 0x%x\n", mask);
        /*
         * Find and stash the last and first bit set at all affinity levels to
         * check how many bits are required to represent them.
         */
        for (i = 0; i < 3; i++) {
                affinity = MPIDR_AFFINITY_LEVEL(mask, i);
                /*
                 * Find the MSB and LSB bit positions to determine how
                 * many bits are required to express the affinity level.
                 */
                ls = fls(affinity);
                fs[i] = affinity ? ffs(affinity) - 1 : 0;
                bits[i] = ls - fs[i];
        }
        /*
         * An index can be created from the MPIDR by isolating the
         * significant bits at each affinity level and by shifting
         * them in order to compress the 24-bit value space to a
         * compressed set of values. This is equivalent to hashing
         * the MPIDR through shifting and ORing. It is a collision free
         * hash though not minimal since some levels might contain a number
         * of CPUs that is not an exact power of 2 and their bit
         * representation might contain holes, e.g. MPIDR[7:0] = {0x2, 0x80}.
         */
        mpidr_hash.shift_aff[0] = fs[0];
        mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
        mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
                                                (bits[1] + bits[0]);
        mpidr_hash.mask = mask;
        mpidr_hash.bits = bits[2] + bits[1] + bits[0];
        pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
                                mpidr_hash.shift_aff[0],
                                mpidr_hash.shift_aff[1],
                                mpidr_hash.shift_aff[2],
                                mpidr_hash.mask,
                                mpidr_hash.bits);
        /*
         * 4x is an arbitrary value used to warn on a hash table much bigger
         * than expected on most systems.
         */
        if (mpidr_hash_size() > 4 * num_possible_cpus())
                pr_warn("Large number of MPIDR hash buckets detected\n");
        sync_cache_w(&mpidr_hash);
}
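
/*
 * Worked example: four CPUs with MPIDRs 0x000, 0x001, 0x100 and 0x101
 * (two clusters of two) give mask == 0x101, so bits[0] = bits[1] = 1,
 * shift_aff[0] = 0 and shift_aff[1] = 7; the hash then maps the four
 * MPIDRs to the dense indices 0, 1, 2, 3 with no collisions.
 */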
#endif

static void __init setup_processor(void)
{
        struct proc_info_list *list;

        /*
         * locate processor in the list of supported processor
         * types.  The linker builds this table for us from the
         * entries in arch/arm/mm/proc-*.S
         */
        list = lookup_processor_type(read_cpuid_id());
        if (!list) {
                pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
                       read_cpuid_id());
                while (1);
        }

        cpu_name = list->cpu_name;
        __cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
        processor = *list->proc;
#endif
#ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
        cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
        cpu_cache = *list->cache;
#endif

        pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
                cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
                proc_arch[cpu_architecture()], get_cr());

        snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
                 list->arch_name, ENDIANNESS);
        snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
                 list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;

        cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
        init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
        erratum_a15_798181_init();

        elf_hwcap_fixup();

        cacheid_init();
        cpu_init();
}

void __init dump_machine_table(void)
{
        const struct machine_desc *p;

        early_print("Available machine support:\n\nID (hex)\tNAME\n");
        for_each_machine_desc(p)
                early_print("%08x\t%s\n", p->nr, p->name);

        early_print("\nPlease check your kernel config and/or bootloader.\n");

        while (true)
                /* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
        u64 aligned_start;

        /*
         * Ensure that start/size are aligned to a page boundary.
         * Size is rounded down, start is rounded up.
         */
        aligned_start = PAGE_ALIGN(start);
        if (aligned_start > start + size)
                size = 0;
        else
                size -= aligned_start - start;

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
        if (aligned_start > ULONG_MAX) {
                pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
                        (long long)start);
                return -EINVAL;
        }

        if (aligned_start + size > ULONG_MAX) {
                pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
                        (long long)start);
                /*
                 * To ensure bank->start + bank->size is representable in
                 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
                 * This means we lose a page after masking.
                 */
                size = ULONG_MAX - aligned_start;
        }
#endif

        if (aligned_start < PHYS_OFFSET) {
                if (aligned_start + size <= PHYS_OFFSET) {
                        pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
                                aligned_start, aligned_start + size);
                        return -EINVAL;
                }

                pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
                        aligned_start, (u64)PHYS_OFFSET);

                size -= PHYS_OFFSET - aligned_start;
                aligned_start = PHYS_OFFSET;
        }

        start = aligned_start;
        size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

        /*
         * Check whether this memory region has non-zero size or
         * invalid node number.
         */
        if (size == 0)
                return -EINVAL;

        memblock_add(start, size);
        return 0;
}
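
/*
 * Example of the rounding above: start = 0x80000800, size = 0x1000
 * gives aligned_start = 0x80001000 and size = 0x800, which the final
 * page mask reduces to 0; regions smaller than a page after alignment
 * are therefore rejected with -EINVAL.
 */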

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */

static int __init early_mem(char *p)
{
        static int usermem __initdata = 0;
        u64 size;
        u64 start;
        char *endp;

        /*
         * If the user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                usermem = 1;
                memblock_remove(memblock_start_of_DRAM(),
                        memblock_end_of_DRAM() - memblock_start_of_DRAM());
        }

        start = PHYS_OFFSET;
        size  = memparse(p, &endp);
        if (*endp == '@')
                start = memparse(endp + 1, NULL);

        arm_add_memory(start, size);

        return 0;
}
early_param("mem", early_mem);
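
/*
 * e.g. "mem=512M@0x80000000" registers 512MB at physical 0x80000000.
 * The first mem= option discards everything the firmware described, so
 * several options can be chained to build up a sparse memory map.
 */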

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
        struct memblock_region *region;
        struct resource *res;

        kernel_code.start   = virt_to_phys(_text);
        kernel_code.end     = virt_to_phys(_etext - 1);
        kernel_data.start   = virt_to_phys(_sdata);
        kernel_data.end     = virt_to_phys(_end - 1);

        for_each_memblock(memory, region) {
                res = memblock_virt_alloc(sizeof(*res), 0);
                res->name  = "System RAM";
                res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
                res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                request_resource(&iomem_resource, res);

                if (kernel_code.start >= res->start &&
                    kernel_code.end <= res->end)
                        request_resource(res, &kernel_code);
                if (kernel_data.start >= res->start &&
                    kernel_data.end <= res->end)
                        request_resource(res, &kernel_data);
        }

        if (mdesc->video_start) {
                video_ram.start = mdesc->video_start;
                video_ram.end   = mdesc->video_end;
                request_resource(&iomem_resource, &video_ram);
        }

        /*
         * Some machines can never have lp0, lp1 or lp2 at all, so only
         * reserve the ranges when the machine descriptor asks for them.
         */
        if (mdesc->reserve_lp0)
                request_resource(&ioport_resource, &lp0);
        if (mdesc->reserve_lp1)
                request_resource(&ioport_resource, &lp1);
        if (mdesc->reserve_lp2)
                request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
        .orig_video_lines       = 30,
        .orig_video_cols        = 80,
        .orig_video_mode        = 0,
        .orig_video_ega_bx      = 0,
        .orig_video_isVGA       = 1,
        .orig_video_points      = 8
};
#endif

static int __init customize_machine(void)
{
        /*
         * Customizes platform devices, or adds new ones.
         * On DT based machines, we fall back to populating the
         * machine from the device tree if no callback is provided;
         * otherwise we would always need an init_machine callback.
         */
        of_iommu_init();
        if (machine_desc->init_machine)
                machine_desc->init_machine();
#ifdef CONFIG_OF
        else
                of_platform_populate(NULL, of_default_bus_match_table,
                                        NULL, NULL);
#endif
        return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
        if (machine_desc->init_late)
                machine_desc->init_late();
        return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
        unsigned long total;

        total = max_low_pfn - min_low_pfn;
        return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory for the crash kernel
 *
 * This function reserves the memory area given by the "crashkernel="
 * kernel command line parameter. The reserved memory is used by a dump
 * capture kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
        unsigned long long crash_size, crash_base;
        unsigned long long total_mem;
        int ret;

        total_mem = get_total_mem();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret)
                return;

        ret = memblock_reserve(crash_base, crash_size);
        if (ret < 0) {
                pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
                        (unsigned long)crash_base);
                return;
        }

        pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
                (unsigned long)(crash_size >> 20),
                (unsigned long)(crash_base >> 20),
                (unsigned long)(total_mem >> 20));

        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
        insert_resource(&iomem_resource, &crashk_res);
}
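
/*
 * e.g. "crashkernel=64M@0x30000000" reserves 64MB at physical
 * 0x30000000; inserting crashk_res makes the region visible in
 * /proc/iomem, where kexec-tools looks for it when loading the dump
 * capture kernel.
 */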
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
        sync_boot_mode();

        if (is_hyp_mode_available()) {
                pr_info("CPU: All CPU(s) started in HYP mode.\n");
                pr_info("CPU: Virtualization extensions available.\n");
        } else if (is_hyp_mode_mismatched()) {
                pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
                        __boot_cpu_mode & MODE_MASK);
                pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
        } else
                pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
        const struct machine_desc *mdesc;

        setup_processor();
        mdesc = setup_machine_fdt(__atags_pointer);
        if (!mdesc)
                mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
        machine_desc = mdesc;
        machine_name = mdesc->name;
        dump_stack_set_arch_desc("%s", mdesc->name);

        if (mdesc->reboot_mode != REBOOT_HARD)
                reboot_mode = mdesc->reboot_mode;

        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code   = (unsigned long) _etext;
        init_mm.end_data   = (unsigned long) _edata;
        init_mm.brk        = (unsigned long) _end;

        /* populate cmd_line too for later use, preserving boot_command_line */
        strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = cmd_line;

        parse_early_param();

        early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
        setup_dma_zone(mdesc);
        sanity_check_meminfo();
        arm_memblock_init(mdesc);

        paging_init(mdesc);
        request_standard_resources(mdesc);

        if (mdesc->restart)
                arm_pm_restart = mdesc->restart;

        unflatten_device_tree();

        arm_dt_init_cpu_maps();
        psci_init();
#ifdef CONFIG_SMP
        if (is_smp()) {
                if (!mdesc->smp_init || !mdesc->smp_init()) {
                        if (psci_smp_available())
                                smp_set_ops(&psci_smp_ops);
                        else if (mdesc->smp)
                                smp_set_ops(mdesc->smp);
                }
                smp_init_cpus();
                smp_build_mpidr_hash();
        }
#endif

        if (!is_smp())
                hyp_mode_check();

        reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
        handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif

        if (mdesc->init_early)
                mdesc->init_early();
}


static int __init topology_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
                cpuinfo->cpu.hotpluggable = 1;
                register_cpu(&cpuinfo->cpu, cpu);
        }

        return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
        struct proc_dir_entry *res;

        res = proc_mkdir("cpu", NULL);
        if (!res)
                return -ENOMEM;
        return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
        "swp",
        "half",
        "thumb",
        "26bit",
        "fastmult",
        "fpa",
        "vfp",
        "edsp",
        "java",
        "iwmmxt",
        "crunch",
        "thumbee",
        "neon",
        "vfpv3",
        "vfpv3d16",
        "tls",
        "vfpv4",
        "idiva",
        "idivt",
        "vfpd32",
        "lpae",
        "evtstrm",
        NULL
};

static const char *hwcap2_str[] = {
        "aes",
        "pmull",
        "sha1",
        "sha2",
        "crc32",
        NULL
};
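
/*
 * The position of each string must match the bit position of the
 * corresponding HWCAP/HWCAP2 flag, since c_show() below prints entry j
 * whenever bit j is set in elf_hwcap or elf_hwcap2.  These are the
 * names user space sees in the "Features" line of /proc/cpuinfo.
 */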

static int c_show(struct seq_file *m, void *v)
{
        int i, j;
        u32 cpuid;

        for_each_online_cpu(i) {
                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
                cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
                seq_printf(m, "model name\t: %s rev %d (%s)\n",
                           cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                           per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
                           (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                           loops_per_jiffy / (500000/HZ),
                           (loops_per_jiffy / (5000/HZ)) % 100);
#endif
                /* dump out the processor features */
                seq_puts(m, "Features\t: ");

                for (j = 0; hwcap_str[j]; j++)
                        if (elf_hwcap & (1 << j))
                                seq_printf(m, "%s ", hwcap_str[j]);

                for (j = 0; hwcap2_str[j]; j++)
                        if (elf_hwcap2 & (1 << j))
                                seq_printf(m, "%s ", hwcap2_str[j]);

                seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
                seq_printf(m, "CPU architecture: %s\n",
                           proc_arch[cpu_architecture()]);

                if ((cpuid & 0x0008f000) == 0x00000000) {
                        /* pre-ARM7 */
                        seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
                } else {
                        if ((cpuid & 0x0008f000) == 0x00007000) {
                                /* ARM7 */
                                seq_printf(m, "CPU variant\t: 0x%02x\n",
                                           (cpuid >> 16) & 127);
                        } else {
                                /* post-ARM7 */
                                seq_printf(m, "CPU variant\t: 0x%x\n",
                                           (cpuid >> 20) & 15);
                        }
                        seq_printf(m, "CPU part\t: 0x%03x\n",
                                   (cpuid >> 4) & 0xfff);
                }
                seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
        }

        seq_printf(m, "Hardware\t: %s\n", machine_name);
        seq_printf(m, "Revision\t: %04x\n", system_rev);
        seq_printf(m, "Serial\t\t: %08x%08x\n",
                   system_serial_high, system_serial_low);

        return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}
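
/*
 * c_start() yields a single dummy element and c_next() ends the
 * iteration immediately, so the seq_file core calls c_show() exactly
 * once per read; c_show() itself loops over the online CPUs.
 */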

const struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show
};