/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
        memcpy(fpe_type, line, 8);
        return 1;
}

__setup("fpe=", fpe_setup);
#endif
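
/*
 * Example (illustrative): booting with "fpe=nwfpe" makes fpe_setup()
 * above copy "nwfpe" into fpe_type; the configured FP emulator can then
 * consult that string when it initialises.
 */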

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *,
                              struct proc_info_list *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
        u32 irq[3];
        u32 abt[3];
        u32 und[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
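
/*
 * How the endian test above works: the union overlays the bytes
 * 'l','?','?','b' onto an unsigned long; casting that long to char
 * keeps its low-order byte, which is the first byte in memory ('l')
 * on a little-endian CPU and the last one ('b') on a big-endian CPU.
 */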

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
        {
                .name = "Video RAM",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel code",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
        {
                .name = "reserved",
                .start = 0x3bc,
                .end = 0x3be,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x378,
                .end = 0x37f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x278,
                .end = 0x27f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
        "undefined/unknown",
        "3",
        "4",
        "4T",
        "5",
        "5T",
        "5TE",
        "5TEJ",
        "6TEJ",
        "7",
        "7M",
        "?(12)",
        "?(13)",
        "?(14)",
        "?(15)",
        "?(16)",
        "?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
        return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
        int cpu_arch;

        if ((read_cpuid_id() & 0x0008f000) == 0) {
                cpu_arch = CPU_ARCH_UNKNOWN;
        } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
                cpu_arch = (read_cpuid_id() >> 16) & 7;
                if (cpu_arch)
                        cpu_arch += CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
                unsigned int mmfr0;

                /* Revised CPUID format. Read the Memory Model Feature
                 * Register 0 and check for VMSAv7 or PMSAv7 */
                asm("mrc        p15, 0, %0, c0, c1, 4"
                    : "=r" (mmfr0));
                if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
                    (mmfr0 & 0x000000f0) >= 0x00000030)
                        cpu_arch = CPU_ARCH_ARMv7;
                else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
                         (mmfr0 & 0x000000f0) == 0x00000020)
                        cpu_arch = CPU_ARCH_ARMv6;
                else
                        cpu_arch = CPU_ARCH_UNKNOWN;
        } else
                cpu_arch = CPU_ARCH_UNKNOWN;

        return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
        BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

        return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
        int aliasing_icache;
        unsigned int id_reg, num_sets, line_size;

        /* PIPT caches never alias. */
        if (icache_is_pipt())
                return 0;

        /* arch specifies the register format */
        switch (arch) {
        case CPU_ARCH_ARMv7:
                asm("mcr        p15, 2, %0, c0, c0, 0 @ set CSSELR"
                    : /* No output operands */
                    : "r" (1));
                isb();
                asm("mrc        p15, 1, %0, c0, c0, 0 @ read CCSIDR"
                    : "=r" (id_reg));
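                /*
                 * CCSIDR decode: LineSize encodes log2(words per line)
                 * minus 2, NumSets encodes sets minus 1.  Worked example
                 * (hypothetical 32KB 4-way I-cache with 32-byte lines):
                 * LineSize = 1, so line_size = 4 << 3 = 32; NumSets =
                 * 255, so num_sets = 256; 32 * 256 = 8KB exceeds the 4KB
                 * page, i.e. the cache index spills past the page offset
                 * and the I-cache can alias.
                 */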
                line_size = 4 << ((id_reg & 0x7) + 2);
                num_sets = ((id_reg >> 13) & 0x7fff) + 1;
                aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
                break;
        case CPU_ARCH_ARMv6:
                aliasing_icache = read_cpuid_cachetype() & (1 << 11);
                break;
        default:
                /* I-cache aliases will be handled by D-cache aliasing code */
                aliasing_icache = 0;
        }

        return aliasing_icache;
}

static void __init cacheid_init(void)
{
        unsigned int arch = cpu_architecture();

        if (arch == CPU_ARCH_ARMv7M) {
                cacheid = 0;
        } else if (arch >= CPU_ARCH_ARMv6) {
                unsigned int cachetype = read_cpuid_cachetype();
                if ((cachetype & (7 << 29)) == 4 << 29) {
                        /* ARMv7 register format */
                        arch = CPU_ARCH_ARMv7;
                        cacheid = CACHEID_VIPT_NONALIASING;
                        switch (cachetype & (3 << 14)) {
                        case (1 << 14):
                                cacheid |= CACHEID_ASID_TAGGED;
                                break;
                        case (3 << 14):
                                cacheid |= CACHEID_PIPT;
                                break;
                        }
                } else {
                        arch = CPU_ARCH_ARMv6;
                        if (cachetype & (1 << 23))
                                cacheid = CACHEID_VIPT_ALIASING;
                        else
                                cacheid = CACHEID_VIPT_NONALIASING;
                }
                if (cpu_has_aliasing_icache(arch))
                        cacheid |= CACHEID_VIPT_I_ALIASING;
        } else {
                cacheid = CACHEID_VIVT;
        }

        pr_info("CPU: %s data cache, %s instruction cache\n",
                cache_is_vivt() ? "VIVT" :
                cache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
                cache_is_vivt() ? "VIVT" :
                icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
                icache_is_vipt_aliasing() ? "VIPT aliasing" :
                icache_is_pipt() ? "PIPT" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
        extern void printascii(const char *);
        char buf[256];
        va_list ap;

        va_start(ap, str);
        vsnprintf(buf, sizeof(buf), str, ap);
        va_end(ap);

#ifdef CONFIG_DEBUG_LL
        printascii(buf);
#endif
        printk("%s", buf);
}

static void __init cpuid_init_hwcaps(void)
{
        unsigned int divide_instrs, vmsa;

        if (cpu_architecture() < CPU_ARCH_ARMv7)
                return;

        /*
         * ID_ISAR0[27:24] describes hardware divide support:
         * 1 = SDIV/UDIV in Thumb only, 2 = in both ARM and Thumb.
         */
        divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

        switch (divide_instrs) {
        case 2:
                elf_hwcap |= HWCAP_IDIVA;
                /* fall through - ARM-mode divide implies Thumb divide */
        case 1:
                elf_hwcap |= HWCAP_IDIVT;
        }

        /* LPAE implies atomic ldrd/strd instructions */
        vmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf;
        if (vmsa >= 5)
                elf_hwcap |= HWCAP_LPAE;
}

static void __init elf_hwcap_fixup(void)
{
        unsigned id = read_cpuid_id();
        unsigned sync_prim;

        /*
         * HWCAP_TLS is available only on 1136 r1p0 and later,
         * see also kuser_get_tls_init.
         */
        if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
            ((id >> 20) & 3) == 0) {
                elf_hwcap &= ~HWCAP_TLS;
                return;
        }

        /* Check whether the revised CPUID scheme is implemented */
        if ((id & 0x000f0000) != 0x000f0000)
                return;

        /*
         * If the CPU supports LDREX/STREX and LDREXB/STREXB,
         * avoid advertising SWP; it may not be atomic with
         * multiprocessing cores.  sync_prim packs the SynchPrim
         * field from ISAR3 into the high nibble and the SynchPrim
         * fraction field from ISAR4 into the low nibble.
         */
        sync_prim = ((read_cpuid_ext(CPUID_EXT_ISAR3) >> 8) & 0xf0) |
                    ((read_cpuid_ext(CPUID_EXT_ISAR4) >> 20) & 0x0f);
        if (sync_prim >= 0x13)
                elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
        unsigned int cpu = smp_processor_id();
        struct stack *stk = &stacks[cpu];

        if (cpu >= NR_CPUS) {
                pr_crit("CPU%u: bad primary CPU number\n", cpu);
                BUG();
        }

        /*
         * This only works on resume and secondary cores. For booting on the
         * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
         */
        set_my_cpu_offset(per_cpu_offset(cpu));

        cpu_proc_init();

        /*
         * Define the placement constraint for the inline asm directive below.
         * In Thumb-2, msr with an immediate value is not allowed.
         */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC     "r"
#else
#define PLC     "I"
#endif

        /*
         * setup stacks for re-entrant exception handlers
         */
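        /*
         * A sketch of what follows: each "msr cpsr_c" switches to IRQ,
         * ABT or UND mode with IRQs and FIQs masked, the banked sp for
         * that mode is pointed at the matching three-word stack above,
         * and the final "msr cpsr_c" drops back into SVC mode.
         */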
        __asm__ (
        "msr    cpsr_c, %1\n\t"
        "add    r14, %0, %2\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %3\n\t"
        "add    r14, %0, %4\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %5\n\t"
        "add    r14, %0, %6\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %7"
            :
            : "r" (stk),
              PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
        int i;
        u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
        u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

        cpu_logical_map(0) = cpu;
        for (i = 1; i < nr_cpu_ids; ++i)
                cpu_logical_map(i) = i == cpu ? 0 : i;
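
        /*
         * The swap above pins the booting CPU at logical index 0.  For
         * example (hypothetical), booting on physical CPU 2 of a
         * four-CPU system leaves the logical map as {2, 1, 0, 3}.
         */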

        /*
         * clear __my_cpu_offset on boot CPU to avoid hang caused by
         * using percpu variable early, for example, lockdep will
         * access percpu variable inside lock_release
         */
        set_my_cpu_offset(0);

        pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *                        level in order to build a linear index from an
 *                        MPIDR value. Resulting algorithm is a collision
 *                        free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
        u32 i, affinity;
        u32 fs[3], bits[3], ls, mask = 0;
        /*
         * Pre-scan the list of MPIDRs and filter out bits that do
         * not contribute to affinity levels, i.e. they never toggle.
         */
        for_each_possible_cpu(i)
                mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
        pr_debug("mask of set bits 0x%x\n", mask);
        /*
         * Find and stash the last and first bit set at all affinity levels to
         * check how many bits are required to represent them.
         */
        for (i = 0; i < 3; i++) {
                affinity = MPIDR_AFFINITY_LEVEL(mask, i);
                /*
                 * Find the positions of the most and least significant
                 * set bits to determine how many bits are required
                 * to express the affinity level.
                 */
                ls = fls(affinity);
                fs[i] = affinity ? ffs(affinity) - 1 : 0;
                bits[i] = ls - fs[i];
        }
        /*
         * An index can be created from the MPIDR by isolating the
         * significant bits at each affinity level and by shifting
         * them in order to compress the 24-bit value space to a
         * compressed set of values. This is equivalent to hashing
         * the MPIDR through shifting and ORing. It is a collision-free
         * hash, though not minimal, since some levels might contain a number
         * of CPUs that is not an exact power of 2 and their bit
         * representation might contain holes, e.g. MPIDR[7:0] = {0x2, 0x80}.
         */
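        /*
         * Worked example (hypothetical): MPIDRs {0x000, 0x001, 0x100,
         * 0x101} give mask = 0x101, so aff0 and aff1 contribute one bit
         * each.  Then shift_aff[0] = 0 and shift_aff[1] = 8 + 0 - 1 = 7,
         * i.e. bit 8 of the MPIDR is squeezed down next to bit 0,
         * hashing the four CPUs to indices 0..3 with no collisions.
         */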
        mpidr_hash.shift_aff[0] = fs[0];
        mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
        mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
                                                (bits[1] + bits[0]);
        mpidr_hash.mask = mask;
        mpidr_hash.bits = bits[2] + bits[1] + bits[0];
        pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
                                mpidr_hash.shift_aff[0],
                                mpidr_hash.shift_aff[1],
                                mpidr_hash.shift_aff[2],
                                mpidr_hash.mask,
                                mpidr_hash.bits);
        /*
         * 4x is an arbitrary value used to warn on a hash table much bigger
         * than expected on most systems.
         */
        if (mpidr_hash_size() > 4 * num_possible_cpus())
                pr_warn("Large number of MPIDR hash buckets detected\n");
        sync_cache_w(&mpidr_hash);
}
#endif

static void __init setup_processor(void)
{
        struct proc_info_list *list;

        /*
         * locate processor in the list of supported processor
         * types.  The linker builds this table for us from the
         * entries in arch/arm/mm/proc-*.S
         */
        list = lookup_processor_type(read_cpuid_id());
        if (!list) {
                pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
                       read_cpuid_id());
                while (1);
        }

        cpu_name = list->cpu_name;
        __cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
        processor = *list->proc;
#endif
#ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
        cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
        cpu_cache = *list->cache;
#endif

        pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
                cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
                proc_arch[cpu_architecture()], get_cr());

        snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
                 list->arch_name, ENDIANNESS);
        snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
                 list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;

        cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
        init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
        erratum_a15_798181_init();

        elf_hwcap_fixup();

        cacheid_init();
        cpu_init();
}

void __init dump_machine_table(void)
{
        const struct machine_desc *p;

        early_print("Available machine support:\n\nID (hex)\tNAME\n");
        for_each_machine_desc(p)
                early_print("%08x\t%s\n", p->nr, p->name);

        early_print("\nPlease check your kernel config and/or bootloader.\n");

        while (true)
                /* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
        u64 aligned_start;

        /*
         * Ensure that start/size are aligned to a page boundary.
         * Size is appropriately rounded down, start is rounded up.
         */
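        /*
         * Example (illustrative): start = 0x80000800, size = 0x100000
         * becomes aligned_start = 0x80001000 with size reduced by the
         * 0x800 bytes skipped, then rounded down to 0xff000 below.
         */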
        size -= start & ~PAGE_MASK;
        aligned_start = PAGE_ALIGN(start);

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
        if (aligned_start > ULONG_MAX) {
                pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
                        (long long)start);
                return -EINVAL;
        }

        if (aligned_start + size > ULONG_MAX) {
                pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
                        (long long)start);
                /*
                 * To ensure bank->start + bank->size is representable in
                 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
                 * This means we lose a page after masking.
                 */
                size = ULONG_MAX - aligned_start;
        }
#endif

        if (aligned_start < PHYS_OFFSET) {
                if (aligned_start + size <= PHYS_OFFSET) {
                        pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
                                aligned_start, aligned_start + size);
                        return -EINVAL;
                }

                pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
                        aligned_start, (u64)PHYS_OFFSET);

                size -= PHYS_OFFSET - aligned_start;
                aligned_start = PHYS_OFFSET;
        }

        start = aligned_start;
        size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

        /*
         * Reject regions left with no usable size after alignment.
         */
        if (size == 0)
                return -EINVAL;

        memblock_add(start, size);
        return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
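/*
 * Example (illustrative): "mem=64M@0xc0000000" registers a single 64MB
 * bank starting at physical address 0xc0000000; with no "@start" part
 * the bank is assumed to start at PHYS_OFFSET.
 */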

static int __init early_mem(char *p)
{
        static int usermem __initdata = 0;
        u64 size;
        u64 start;
        char *endp;

        /*
         * If the user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                usermem = 1;
                memblock_remove(memblock_start_of_DRAM(),
                        memblock_end_of_DRAM() - memblock_start_of_DRAM());
        }

        start = PHYS_OFFSET;
        size  = memparse(p, &endp);
        if (*endp == '@')
                start = memparse(endp + 1, NULL);

        arm_add_memory(start, size);

        return 0;
}
early_param("mem", early_mem);

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
        struct memblock_region *region;
        struct resource *res;

        kernel_code.start   = virt_to_phys(_text);
        kernel_code.end     = virt_to_phys(_etext - 1);
        kernel_data.start   = virt_to_phys(_sdata);
        kernel_data.end     = virt_to_phys(_end - 1);

        for_each_memblock(memory, region) {
                res = memblock_virt_alloc(sizeof(*res), 0);
                res->name  = "System RAM";
                res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
                res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                request_resource(&iomem_resource, res);

                if (kernel_code.start >= res->start &&
                    kernel_code.end <= res->end)
                        request_resource(res, &kernel_code);
                if (kernel_data.start >= res->start &&
                    kernel_data.end <= res->end)
                        request_resource(res, &kernel_data);
        }

        if (mdesc->video_start) {
                video_ram.start = mdesc->video_start;
                video_ram.end   = mdesc->video_end;
                request_resource(&iomem_resource, &video_ram);
        }

        /*
         * Some machines can never have lp0, lp1 or lp2 (the legacy
         * parallel port ranges), so only claim the ones the machine
         * record reserves.
         */
        if (mdesc->reserve_lp0)
                request_resource(&ioport_resource, &lp0);
        if (mdesc->reserve_lp1)
                request_resource(&ioport_resource, &lp1);
        if (mdesc->reserve_lp2)
                request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines      = 30,
 .orig_video_cols       = 80,
 .orig_video_mode       = 0,
 .orig_video_ega_bx     = 0,
 .orig_video_isVGA      = 1,
 .orig_video_points     = 8
};
#endif

static int __init customize_machine(void)
{
        /*
         * Customize platform devices, or add new ones.  On DT-based
         * machines we fall back to populating the machine from the
         * device tree if no callback is provided; otherwise we would
         * always need an init_machine callback.
         */
        if (machine_desc->init_machine)
                machine_desc->init_machine();
#ifdef CONFIG_OF
        else
                of_platform_populate(NULL, of_default_bus_match_table,
                                        NULL, NULL);
#endif
        return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
        if (machine_desc->init_late)
                machine_desc->init_late();
        return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
        unsigned long total;

        total = max_low_pfn - min_low_pfn;
        return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves a memory area for the crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by a dump capture
 * kernel when the primary kernel crashes.
 */
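/*
 * Example (illustrative): "crashkernel=64M@0x30000000" requests a 64MB
 * reservation starting at physical address 0x30000000.
 */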
static void __init reserve_crashkernel(void)
{
        unsigned long long crash_size, crash_base;
        unsigned long long total_mem;
        int ret;

        total_mem = get_total_mem();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret)
                return;

        ret = memblock_reserve(crash_base, crash_size);
        if (ret < 0) {
                pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
                        (unsigned long)crash_base);
                return;
        }

        pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
                (unsigned long)(crash_size >> 20),
                (unsigned long)(crash_base >> 20),
                (unsigned long)(total_mem >> 20));

        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
        insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
        sync_boot_mode();

        if (is_hyp_mode_available()) {
                pr_info("CPU: All CPU(s) started in HYP mode.\n");
                pr_info("CPU: Virtualization extensions available.\n");
        } else if (is_hyp_mode_mismatched()) {
                pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
                        __boot_cpu_mode & MODE_MASK);
                pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
        } else
                pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
        const struct machine_desc *mdesc;

        setup_processor();
        mdesc = setup_machine_fdt(__atags_pointer);
        if (!mdesc)
                mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
        machine_desc = mdesc;
        machine_name = mdesc->name;

        if (mdesc->reboot_mode != REBOOT_HARD)
                reboot_mode = mdesc->reboot_mode;

        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code   = (unsigned long) _etext;
        init_mm.end_data   = (unsigned long) _edata;
        init_mm.brk        = (unsigned long) _end;

        /* populate cmd_line too for later use, preserving boot_command_line */
        strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = cmd_line;

        parse_early_param();

        early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
        setup_dma_zone(mdesc);
        sanity_check_meminfo();
        arm_memblock_init(mdesc);

        paging_init(mdesc);
        request_standard_resources(mdesc);

        if (mdesc->restart)
                arm_pm_restart = mdesc->restart;

        unflatten_device_tree();

        arm_dt_init_cpu_maps();
        psci_init();
#ifdef CONFIG_SMP
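        /*
         * SMP ops are chosen in priority order below: a machine
         * smp_init() hook that returns true wins outright, then PSCI
         * ops if the firmware provides them, then the machine's
         * static smp ops.
         */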
        if (is_smp()) {
                if (!mdesc->smp_init || !mdesc->smp_init()) {
                        if (psci_smp_available())
                                smp_set_ops(&psci_smp_ops);
                        else if (mdesc->smp)
                                smp_set_ops(mdesc->smp);
                }
                smp_init_cpus();
                smp_build_mpidr_hash();
        }
#endif

        if (!is_smp())
                hyp_mode_check();

        reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
        handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif

        if (mdesc->init_early)
                mdesc->init_early();
}


static int __init topology_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
                cpuinfo->cpu.hotpluggable = 1;
                register_cpu(&cpuinfo->cpu, cpu);
        }

        return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
        struct proc_dir_entry *res;

        res = proc_mkdir("cpu", NULL);
        if (!res)
                return -ENOMEM;
        return 0;
}
fs_initcall(proc_cpu_init);
#endif

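/*
 * Order matters: hwcap_str[j] names bit j of elf_hwcap, mirroring the
 * HWCAP_* bit definitions, so c_show() below can walk the array with a
 * shifting bit mask.  hwcap2_str does the same for elf_hwcap2.
 */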
static const char *hwcap_str[] = {
        "swp",
        "half",
        "thumb",
        "26bit",
        "fastmult",
        "fpa",
        "vfp",
        "edsp",
        "java",
        "iwmmxt",
        "crunch",
        "thumbee",
        "neon",
        "vfpv3",
        "vfpv3d16",
        "tls",
        "vfpv4",
        "idiva",
        "idivt",
        "vfpd32",
        "lpae",
        "evtstrm",
        NULL
};

static const char *hwcap2_str[] = {
        "aes",
        "pmull",
        "sha1",
        "sha2",
        "crc32",
        NULL
};

static int c_show(struct seq_file *m, void *v)
{
        int i, j;
        u32 cpuid;

        for_each_online_cpu(i) {
                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
                cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
                seq_printf(m, "model name\t: %s rev %d (%s)\n",
                           cpu_name, cpuid & 15, elf_platform);

                /* dump out the processor features */
                seq_puts(m, "Features\t: ");

                for (j = 0; hwcap_str[j]; j++)
                        if (elf_hwcap & (1 << j))
                                seq_printf(m, "%s ", hwcap_str[j]);

                for (j = 0; hwcap2_str[j]; j++)
                        if (elf_hwcap2 & (1 << j))
                                seq_printf(m, "%s ", hwcap2_str[j]);

                seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
                seq_printf(m, "CPU architecture: %s\n",
                           proc_arch[cpu_architecture()]);

                if ((cpuid & 0x0008f000) == 0x00000000) {
                        /* pre-ARM7 */
                        seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
                } else {
                        if ((cpuid & 0x0008f000) == 0x00007000) {
                                /* ARM7 */
                                seq_printf(m, "CPU variant\t: 0x%02x\n",
                                           (cpuid >> 16) & 127);
                        } else {
                                /* post-ARM7 */
                                seq_printf(m, "CPU variant\t: 0x%x\n",
                                           (cpuid >> 20) & 15);
                        }
                        seq_printf(m, "CPU part\t: 0x%03x\n",
                                   (cpuid >> 4) & 0xfff);
                }
                seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
        }

        seq_printf(m, "Hardware\t: %s\n", machine_name);
        seq_printf(m, "Revision\t: %04x\n", system_rev);
        seq_printf(m, "Serial\t\t: %08x%08x\n",
                   system_serial_high, system_serial_low);

        return 0;
}

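/*
 * The whole of /proc/cpuinfo is produced by a single c_show() pass, so
 * the iterator below yields exactly one opaque, non-NULL token and then
 * stops: c_start() returns (void *)1 only for position 0, and c_next()
 * always ends the sequence.
 */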
static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show
};