linux/arch/arm/kernel/setup.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/arch/arm/kernel/setup.c
   4 *
   5 *  Copyright (C) 1995-2001 Russell King
   6 */
   7#include <linux/efi.h>
   8#include <linux/export.h>
   9#include <linux/kernel.h>
  10#include <linux/stddef.h>
  11#include <linux/ioport.h>
  12#include <linux/delay.h>
  13#include <linux/utsname.h>
  14#include <linux/initrd.h>
  15#include <linux/console.h>
  16#include <linux/seq_file.h>
  17#include <linux/screen_info.h>
  18#include <linux/of_platform.h>
  19#include <linux/init.h>
  20#include <linux/kexec.h>
  21#include <linux/libfdt.h>
  22#include <linux/of_fdt.h>
  23#include <linux/cpu.h>
  24#include <linux/interrupt.h>
  25#include <linux/smp.h>
  26#include <linux/proc_fs.h>
  27#include <linux/memblock.h>
  28#include <linux/bug.h>
  29#include <linux/compiler.h>
  30#include <linux/sort.h>
  31#include <linux/psci.h>
  32
  33#include <asm/unified.h>
  34#include <asm/cp15.h>
  35#include <asm/cpu.h>
  36#include <asm/cputype.h>
  37#include <asm/efi.h>
  38#include <asm/elf.h>
  39#include <asm/early_ioremap.h>
  40#include <asm/fixmap.h>
  41#include <asm/procinfo.h>
  42#include <asm/psci.h>
  43#include <asm/sections.h>
  44#include <asm/setup.h>
  45#include <asm/smp_plat.h>
  46#include <asm/mach-types.h>
  47#include <asm/cacheflush.h>
  48#include <asm/cachetype.h>
  49#include <asm/tlbflush.h>
  50#include <asm/xen/hypervisor.h>
  51
  52#include <asm/prom.h>
  53#include <asm/mach/arch.h>
  54#include <asm/mach/irq.h>
  55#include <asm/mach/time.h>
  56#include <asm/system_info.h>
  57#include <asm/system_misc.h>
  58#include <asm/traps.h>
  59#include <asm/unwind.h>
  60#include <asm/memblock.h>
  61#include <asm/virt.h>
  62#include <asm/kasan.h>
  63
  64#include "atags.h"
  65
  66
  67#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
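     /*
      * Raw value of the "fpe=" boot parameter (at most 8 bytes are kept);
      * it is consulted by the floating point emulator, e.g. "fpe=nwfpe".
      */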
  68char fpe_type[8];
  69
  70static int __init fpe_setup(char *line)
  71{
  72        memcpy(fpe_type, line, 8);
  73        return 1;
  74}
  75
  76__setup("fpe=", fpe_setup);
  77#endif
  78
  79extern void init_default_cache_policy(unsigned long);
  80extern void paging_init(const struct machine_desc *desc);
  81extern void early_mm_init(const struct machine_desc *);
  82extern void adjust_lowmem_bounds(void);
  83extern enum reboot_mode reboot_mode;
  84extern void setup_dma_zone(const struct machine_desc *desc);
  85
  86unsigned int processor_id;
  87EXPORT_SYMBOL(processor_id);
  88unsigned int __machine_arch_type __read_mostly;
  89EXPORT_SYMBOL(__machine_arch_type);
  90unsigned int cacheid __read_mostly;
  91EXPORT_SYMBOL(cacheid);
  92
  93unsigned int __atags_pointer __initdata;
  94
  95unsigned int system_rev;
  96EXPORT_SYMBOL(system_rev);
  97
  98const char *system_serial;
  99EXPORT_SYMBOL(system_serial);
 100
 101unsigned int system_serial_low;
 102EXPORT_SYMBOL(system_serial_low);
 103
 104unsigned int system_serial_high;
 105EXPORT_SYMBOL(system_serial_high);
 106
 107unsigned int elf_hwcap __read_mostly;
 108EXPORT_SYMBOL(elf_hwcap);
 109
 110unsigned int elf_hwcap2 __read_mostly;
 111EXPORT_SYMBOL(elf_hwcap2);
 112
 113
 114#ifdef MULTI_CPU
 115struct processor processor __ro_after_init;
 116#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
 117struct processor *cpu_vtable[NR_CPUS] = {
 118        [0] = &processor,
 119};
 120#endif
 121#endif
 122#ifdef MULTI_TLB
 123struct cpu_tlb_fns cpu_tlb __ro_after_init;
 124#endif
 125#ifdef MULTI_USER
 126struct cpu_user_fns cpu_user __ro_after_init;
 127#endif
 128#ifdef MULTI_CACHE
 129struct cpu_cache_fns cpu_cache __ro_after_init;
 130#endif
 131#ifdef CONFIG_OUTER_CACHE
 132struct outer_cache_fns outer_cache __ro_after_init;
 133EXPORT_SYMBOL(outer_cache);
 134#endif
 135
 136/*
 137 * Cached cpu_architecture() result for use by assembler code.
 138 * C code should use the cpu_architecture() function instead of accessing this
 139 * variable directly.
 140 */
 141int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
 142
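     /*
      * Small per-mode stacks: the exception vector entry code only needs a
      * few words of scratch space in IRQ, ABT, UND and FIQ mode before it
      * switches over to the SVC stack.
      */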
 143struct stack {
 144        u32 irq[3];
 145        u32 abt[3];
 146        u32 und[3];
 147        u32 fiq[3];
 148} ____cacheline_aligned;
 149
 150#ifndef CONFIG_CPU_V7M
 151static struct stack stacks[NR_CPUS];
 152#endif
 153
 154char elf_platform[ELF_PLATFORM_SIZE];
 155EXPORT_SYMBOL(elf_platform);
 156
 157static const char *cpu_name;
 158static const char *machine_name;
 159static char __initdata cmd_line[COMMAND_LINE_SIZE];
 160const struct machine_desc *machine_desc __initdata;
 161
 162static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
 163#define ENDIANNESS ((char)endian_test.l)
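     /*
      * ENDIANNESS reads the least significant byte of the long above, so it
      * evaluates to 'l' on little-endian and 'b' on big-endian kernels; it is
      * appended to the machine and ELF platform strings (e.g. "armv7l").
      */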
 164
 165DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
 166
 167/*
 168 * Standard memory resources
 169 */
 170static struct resource mem_res[] = {
 171        {
 172                .name = "Video RAM",
 173                .start = 0,
 174                .end = 0,
 175                .flags = IORESOURCE_MEM
 176        },
 177        {
 178                .name = "Kernel code",
 179                .start = 0,
 180                .end = 0,
 181                .flags = IORESOURCE_SYSTEM_RAM
 182        },
 183        {
 184                .name = "Kernel data",
 185                .start = 0,
 186                .end = 0,
 187                .flags = IORESOURCE_SYSTEM_RAM
 188        }
 189};
 190
 191#define video_ram   mem_res[0]
 192#define kernel_code mem_res[1]
 193#define kernel_data mem_res[2]
 194
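     /*
      * Legacy PC-style parallel port I/O windows; they are only claimed when
      * the machine descriptor sets reserve_lp0/lp1/lp2.
      */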
 195static struct resource io_res[] = {
 196        {
 197                .name = "reserved",
 198                .start = 0x3bc,
 199                .end = 0x3be,
 200                .flags = IORESOURCE_IO | IORESOURCE_BUSY
 201        },
 202        {
 203                .name = "reserved",
 204                .start = 0x378,
 205                .end = 0x37f,
 206                .flags = IORESOURCE_IO | IORESOURCE_BUSY
 207        },
 208        {
 209                .name = "reserved",
 210                .start = 0x278,
 211                .end = 0x27f,
 212                .flags = IORESOURCE_IO | IORESOURCE_BUSY
 213        }
 214};
 215
 216#define lp0 io_res[0]
 217#define lp1 io_res[1]
 218#define lp2 io_res[2]
 219
 220static const char *proc_arch[] = {
 221        "undefined/unknown",
 222        "3",
 223        "4",
 224        "4T",
 225        "5",
 226        "5T",
 227        "5TE",
 228        "5TEJ",
 229        "6TEJ",
 230        "7",
 231        "7M",
 232        "?(12)",
 233        "?(13)",
 234        "?(14)",
 235        "?(15)",
 236        "?(16)",
 237        "?(17)",
 238};
 239
 240#ifdef CONFIG_CPU_V7M
 241static int __get_cpu_architecture(void)
 242{
 243        return CPU_ARCH_ARMv7M;
 244}
 245#else
 246static int __get_cpu_architecture(void)
 247{
 248        int cpu_arch;
 249
 250        if ((read_cpuid_id() & 0x0008f000) == 0) {
 251                cpu_arch = CPU_ARCH_UNKNOWN;
 252        } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
 253                cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
 254        } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
 255                cpu_arch = (read_cpuid_id() >> 16) & 7;
 256                if (cpu_arch)
 257                        cpu_arch += CPU_ARCH_ARMv3;
 258        } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
 259                /* Revised CPUID format. Read the Memory Model Feature
 260                 * Register 0 and check for VMSAv7 or PMSAv7 */
 261                unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
 262                if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
 263                    (mmfr0 & 0x000000f0) >= 0x00000030)
 264                        cpu_arch = CPU_ARCH_ARMv7;
 265                else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
 266                         (mmfr0 & 0x000000f0) == 0x00000020)
 267                        cpu_arch = CPU_ARCH_ARMv6;
 268                else
 269                        cpu_arch = CPU_ARCH_UNKNOWN;
 270        } else
 271                cpu_arch = CPU_ARCH_UNKNOWN;
 272
 273        return cpu_arch;
 274}
 275#endif
 276
 277int __pure cpu_architecture(void)
 278{
 279        BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
 280
 281        return __cpu_architecture;
 282}
 283
 284static int cpu_has_aliasing_icache(unsigned int arch)
 285{
 286        int aliasing_icache;
 287        unsigned int id_reg, num_sets, line_size;
 288
 289        /* PIPT caches never alias. */
 290        if (icache_is_pipt())
 291                return 0;
 292
 293        /* arch specifies the register format */
 294        switch (arch) {
 295        case CPU_ARCH_ARMv7:
 296                set_csselr(CSSELR_ICACHE | CSSELR_L1);
 297                isb();
 298                id_reg = read_ccsidr();
 299                line_size = 4 << ((id_reg & 0x7) + 2);
 300                num_sets = ((id_reg >> 13) & 0x7fff) + 1;
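                     /*
                      * line_size * num_sets is the size of one cache way; if a
                      * way is larger than a page, the set index uses virtual
                      * address bits above PAGE_SHIFT and the VIPT I-cache can
                      * alias.
                      */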
 301                aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
 302                break;
 303        case CPU_ARCH_ARMv6:
 304                aliasing_icache = read_cpuid_cachetype() & (1 << 11);
 305                break;
 306        default:
 307                /* I-cache aliases will be handled by D-cache aliasing code */
 308                aliasing_icache = 0;
 309        }
 310
 311        return aliasing_icache;
 312}
 313
 314static void __init cacheid_init(void)
 315{
 316        unsigned int arch = cpu_architecture();
 317
 318        if (arch >= CPU_ARCH_ARMv6) {
 319                unsigned int cachetype = read_cpuid_cachetype();
 320
 321                if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
 322                        cacheid = 0;
 323                } else if ((cachetype & (7 << 29)) == 4 << 29) {
 324                        /* ARMv7 register format */
 325                        arch = CPU_ARCH_ARMv7;
 326                        cacheid = CACHEID_VIPT_NONALIASING;
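                             /*
                              * CTR.L1Ip (bits [15:14]) gives the L1 I-cache
                              * policy: 01 is ASID-tagged VIVT, 11 is PIPT,
                              * otherwise VIPT.
                              */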
 327                        switch (cachetype & (3 << 14)) {
 328                        case (1 << 14):
 329                                cacheid |= CACHEID_ASID_TAGGED;
 330                                break;
 331                        case (3 << 14):
 332                                cacheid |= CACHEID_PIPT;
 333                                break;
 334                        }
 335                } else {
 336                        arch = CPU_ARCH_ARMv6;
 337                        if (cachetype & (1 << 23))
 338                                cacheid = CACHEID_VIPT_ALIASING;
 339                        else
 340                                cacheid = CACHEID_VIPT_NONALIASING;
 341                }
 342                if (cpu_has_aliasing_icache(arch))
 343                        cacheid |= CACHEID_VIPT_I_ALIASING;
 344        } else {
 345                cacheid = CACHEID_VIVT;
 346        }
 347
 348        pr_info("CPU: %s data cache, %s instruction cache\n",
 349                cache_is_vivt() ? "VIVT" :
 350                cache_is_vipt_aliasing() ? "VIPT aliasing" :
 351                cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
 352                cache_is_vivt() ? "VIVT" :
 353                icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
 354                icache_is_vipt_aliasing() ? "VIPT aliasing" :
 355                icache_is_pipt() ? "PIPT" :
 356                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
 357}
 358
 359/*
 360 * These functions re-use the assembly code in head.S, which
  361 * already provides the required functionality.
 362 */
 363extern struct proc_info_list *lookup_processor_type(unsigned int);
 364
 365void __init early_print(const char *str, ...)
 366{
 367        extern void printascii(const char *);
 368        char buf[256];
 369        va_list ap;
 370
 371        va_start(ap, str);
 372        vsnprintf(buf, sizeof(buf), str, ap);
 373        va_end(ap);
 374
 375#ifdef CONFIG_DEBUG_LL
 376        printascii(buf);
 377#endif
 378        printk("%s", buf);
 379}
 380
 381#ifdef CONFIG_ARM_PATCH_IDIV
 382
 383static inline u32 __attribute_const__ sdiv_instruction(void)
 384{
 385        if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
 386                /* "sdiv r0, r0, r1" */
 387                u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
 388                return __opcode_to_mem_thumb32(insn);
 389        }
 390
 391        /* "sdiv r0, r0, r1" */
 392        return __opcode_to_mem_arm(0xe710f110);
 393}
 394
 395static inline u32 __attribute_const__ udiv_instruction(void)
 396{
 397        if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
 398                /* "udiv r0, r0, r1" */
 399                u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
 400                return __opcode_to_mem_thumb32(insn);
 401        }
 402
 403        /* "udiv r0, r0, r1" */
 404        return __opcode_to_mem_arm(0xe730f110);
 405}
 406
 407static inline u32 __attribute_const__ bx_lr_instruction(void)
 408{
 409        if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
 410                /* "bx lr; nop" */
 411                u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
 412                return __opcode_to_mem_thumb32(insn);
 413        }
 414
 415        /* "bx lr" */
 416        return __opcode_to_mem_arm(0xe12fff1e);
 417}
 418
 419static void __init patch_aeabi_idiv(void)
 420{
 421        extern void __aeabi_uidiv(void);
 422        extern void __aeabi_idiv(void);
 423        uintptr_t fn_addr;
 424        unsigned int mask;
 425
 426        mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
 427        if (!(elf_hwcap & mask))
 428                return;
 429
 430        pr_info("CPU: div instructions available: patching division code\n");
 431
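             /*
              * Overwrite the first two instruction slots of each AEABI helper
              * with a hardware divide followed by "bx lr", then flush so the
              * new code is visible to the I-cache.  Clearing bit 0 strips the
              * Thumb bit from the function address, and the empty asm serves
              * as an optimisation barrier on fn_addr.
              */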
 432        fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
 433        asm ("" : "+g" (fn_addr));
 434        ((u32 *)fn_addr)[0] = udiv_instruction();
 435        ((u32 *)fn_addr)[1] = bx_lr_instruction();
 436        flush_icache_range(fn_addr, fn_addr + 8);
 437
 438        fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
 439        asm ("" : "+g" (fn_addr));
 440        ((u32 *)fn_addr)[0] = sdiv_instruction();
 441        ((u32 *)fn_addr)[1] = bx_lr_instruction();
 442        flush_icache_range(fn_addr, fn_addr + 8);
 443}
 444
 445#else
 446static inline void patch_aeabi_idiv(void) { }
 447#endif
 448
 449static void __init cpuid_init_hwcaps(void)
 450{
 451        int block;
 452        u32 isar5;
 453
 454        if (cpu_architecture() < CPU_ARCH_ARMv7)
 455                return;
 456
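             /*
              * ID_ISAR0.Divide (bits [27:24]): 1 means SDIV/UDIV in Thumb only,
              * 2 means they are also available in the ARM instruction set.
              */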
 457        block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
 458        if (block >= 2)
 459                elf_hwcap |= HWCAP_IDIVA;
 460        if (block >= 1)
 461                elf_hwcap |= HWCAP_IDIVT;
 462
 463        /* LPAE implies atomic ldrd/strd instructions */
 464        block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
 465        if (block >= 5)
 466                elf_hwcap |= HWCAP_LPAE;
 467
 468        /* check for supported v8 Crypto instructions */
 469        isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);
 470
 471        block = cpuid_feature_extract_field(isar5, 4);
 472        if (block >= 2)
 473                elf_hwcap2 |= HWCAP2_PMULL;
 474        if (block >= 1)
 475                elf_hwcap2 |= HWCAP2_AES;
 476
 477        block = cpuid_feature_extract_field(isar5, 8);
 478        if (block >= 1)
 479                elf_hwcap2 |= HWCAP2_SHA1;
 480
 481        block = cpuid_feature_extract_field(isar5, 12);
 482        if (block >= 1)
 483                elf_hwcap2 |= HWCAP2_SHA2;
 484
 485        block = cpuid_feature_extract_field(isar5, 16);
 486        if (block >= 1)
 487                elf_hwcap2 |= HWCAP2_CRC32;
 488}
 489
 490static void __init elf_hwcap_fixup(void)
 491{
 492        unsigned id = read_cpuid_id();
 493
 494        /*
 495         * HWCAP_TLS is available only on 1136 r1p0 and later,
 496         * see also kuser_get_tls_init.
 497         */
 498        if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
 499            ((id >> 20) & 3) == 0) {
 500                elf_hwcap &= ~HWCAP_TLS;
 501                return;
 502        }
 503
 504        /* Verify if CPUID scheme is implemented */
 505        if ((id & 0x000f0000) != 0x000f0000)
 506                return;
 507
 508        /*
 509         * If the CPU supports LDREX/STREX and LDREXB/STREXB,
 510         * avoid advertising SWP; it may not be atomic with
 511         * multiprocessing cores.
 512         */
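             /* ID_ISAR3.SynchPrim and ID_ISAR4.SynchPrim_frac describe the exclusive-access instructions. */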
 513        if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
 514            (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
 515             cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
 516                elf_hwcap &= ~HWCAP_SWP;
 517}
 518
 519/*
 520 * cpu_init - initialise one CPU.
 521 *
 522 * cpu_init sets up the per-CPU stacks.
 523 */
 524void notrace cpu_init(void)
 525{
 526#ifndef CONFIG_CPU_V7M
 527        unsigned int cpu = smp_processor_id();
 528        struct stack *stk = &stacks[cpu];
 529
 530        if (cpu >= NR_CPUS) {
 531                pr_crit("CPU%u: bad primary CPU number\n", cpu);
 532                BUG();
 533        }
 534
 535        /*
 536         * This only works on resume and secondary cores. For booting on the
 537         * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
 538         */
 539        set_my_cpu_offset(per_cpu_offset(cpu));
 540
 541        cpu_proc_init();
 542
 543        /*
 544         * Define the placement constraint for the inline asm directive below.
 545         * In Thumb-2, msr with an immediate value is not allowed.
 546         */
 547#ifdef CONFIG_THUMB2_KERNEL
 548#define PLC_l   "l"
 549#define PLC_r   "r"
 550#else
 551#define PLC_l   "I"
 552#define PLC_r   "I"
 553#endif
 554
 555        /*
 556         * setup stacks for re-entrant exception handlers
 557         */
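             /*
              * Each "msr cpsr_c" below switches into IRQ, ABT, UND and FIQ mode
              * in turn (with IRQs and FIQs masked), points the banked SP at the
              * matching slot of this CPU's struct stack, and finally returns to
              * SVC mode.
              */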
 558        __asm__ (
 559        "msr    cpsr_c, %1\n\t"
 560        "add    r14, %0, %2\n\t"
 561        "mov    sp, r14\n\t"
 562        "msr    cpsr_c, %3\n\t"
 563        "add    r14, %0, %4\n\t"
 564        "mov    sp, r14\n\t"
 565        "msr    cpsr_c, %5\n\t"
 566        "add    r14, %0, %6\n\t"
 567        "mov    sp, r14\n\t"
 568        "msr    cpsr_c, %7\n\t"
 569        "add    r14, %0, %8\n\t"
 570        "mov    sp, r14\n\t"
 571        "msr    cpsr_c, %9"
 572            :
 573            : "r" (stk),
 574              PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
 575              "I" (offsetof(struct stack, irq[0])),
 576              PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
 577              "I" (offsetof(struct stack, abt[0])),
 578              PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
 579              "I" (offsetof(struct stack, und[0])),
 580              PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
 581              "I" (offsetof(struct stack, fiq[0])),
 582              PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
 583            : "r14");
 584#endif
 585}
 586
 587u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
 588
 589void __init smp_setup_processor_id(void)
 590{
 591        int i;
 592        u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
 593        u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 594
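             /*
              * Make the booting CPU logical CPU 0: store its affinity level 0
              * id in slot 0 and swap whichever logical slot that id would
              * normally occupy, so the map stays a permutation.
              */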
 595        cpu_logical_map(0) = cpu;
 596        for (i = 1; i < nr_cpu_ids; ++i)
 597                cpu_logical_map(i) = i == cpu ? 0 : i;
 598
 599        /*
 600         * clear __my_cpu_offset on boot CPU to avoid hang caused by
 601         * using percpu variable early, for example, lockdep will
 602         * access percpu variable inside lock_release
 603         */
 604        set_my_cpu_offset(0);
 605
 606        pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
 607}
 608
 609struct mpidr_hash mpidr_hash;
 610#ifdef CONFIG_SMP
 611/**
 612 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 613 *                        level in order to build a linear index from an
 614 *                        MPIDR value. Resulting algorithm is a collision
 615 *                        free hash carried out through shifting and ORing
 616 */
 617static void __init smp_build_mpidr_hash(void)
 618{
 619        u32 i, affinity;
 620        u32 fs[3], bits[3], ls, mask = 0;
 621        /*
 622         * Pre-scan the list of MPIDRS and filter out bits that do
 623         * not contribute to affinity levels, ie they never toggle.
 624         */
 625        for_each_possible_cpu(i)
 626                mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
 627        pr_debug("mask of set bits 0x%x\n", mask);
 628        /*
 629         * Find and stash the last and first bit set at all affinity levels to
 630         * check how many bits are required to represent them.
 631         */
 632        for (i = 0; i < 3; i++) {
 633                affinity = MPIDR_AFFINITY_LEVEL(mask, i);
 634                /*
 635                 * Find the MSB bit and LSB bits position
 636                 * to determine how many bits are required
 637                 * to express the affinity level.
 638                 */
 639                ls = fls(affinity);
 640                fs[i] = affinity ? ffs(affinity) - 1 : 0;
 641                bits[i] = ls - fs[i];
 642        }
 643        /*
 644         * An index can be created from the MPIDR by isolating the
 645         * significant bits at each affinity level and by shifting
 646         * them in order to compress the 24 bits values space to a
 647         * compressed set of values. This is equivalent to hashing
 648         * the MPIDR through shifting and ORing. It is a collision free
 649         * hash though not minimal since some levels might contain a number
 650         * of CPUs that is not an exact power of 2 and their bit
 651         * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
 652         */
 653        mpidr_hash.shift_aff[0] = fs[0];
 654        mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
 655        mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
 656                                                (bits[1] + bits[0]);
 657        mpidr_hash.mask = mask;
 658        mpidr_hash.bits = bits[2] + bits[1] + bits[0];
 659        pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
 660                                mpidr_hash.shift_aff[0],
 661                                mpidr_hash.shift_aff[1],
 662                                mpidr_hash.shift_aff[2],
 663                                mpidr_hash.mask,
 664                                mpidr_hash.bits);
 665        /*
 666         * 4x is an arbitrary value used to warn on a hash table much bigger
 667         * than expected on most systems.
 668         */
 669        if (mpidr_hash_size() > 4 * num_possible_cpus())
 670                pr_warn("Large number of MPIDR hash buckets detected\n");
 671        sync_cache_w(&mpidr_hash);
 672}
 673#endif
 674
 675/*
 676 * locate processor in the list of supported processor types.  The linker
 677 * builds this table for us from the entries in arch/arm/mm/proc-*.S
 678 */
 679struct proc_info_list *lookup_processor(u32 midr)
 680{
 681        struct proc_info_list *list = lookup_processor_type(midr);
 682
 683        if (!list) {
 684                pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
 685                       smp_processor_id(), midr);
 686                while (1)
 687                /* can't use cpu_relax() here as it may require MMU setup */;
 688        }
 689
 690        return list;
 691}
 692
 693static void __init setup_processor(void)
 694{
 695        unsigned int midr = read_cpuid_id();
 696        struct proc_info_list *list = lookup_processor(midr);
 697
 698        cpu_name = list->cpu_name;
 699        __cpu_architecture = __get_cpu_architecture();
 700
 701        init_proc_vtable(list->proc);
 702#ifdef MULTI_TLB
 703        cpu_tlb = *list->tlb;
 704#endif
 705#ifdef MULTI_USER
 706        cpu_user = *list->user;
 707#endif
 708#ifdef MULTI_CACHE
 709        cpu_cache = *list->cache;
 710#endif
 711
 712        pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
 713                list->cpu_name, midr, midr & 15,
 714                proc_arch[cpu_architecture()], get_cr());
 715
 716        snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
 717                 list->arch_name, ENDIANNESS);
 718        snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
 719                 list->elf_name, ENDIANNESS);
 720        elf_hwcap = list->elf_hwcap;
 721
 722        cpuid_init_hwcaps();
 723        patch_aeabi_idiv();
 724
 725#ifndef CONFIG_ARM_THUMB
 726        elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
 727#endif
 728#ifdef CONFIG_MMU
 729        init_default_cache_policy(list->__cpu_mm_mmu_flags);
 730#endif
 731        erratum_a15_798181_init();
 732
 733        elf_hwcap_fixup();
 734
 735        cacheid_init();
 736        cpu_init();
 737}
 738
 739void __init dump_machine_table(void)
 740{
 741        const struct machine_desc *p;
 742
 743        early_print("Available machine support:\n\nID (hex)\tNAME\n");
 744        for_each_machine_desc(p)
 745                early_print("%08x\t%s\n", p->nr, p->name);
 746
 747        early_print("\nPlease check your kernel config and/or bootloader.\n");
 748
 749        while (true)
 750                /* can't use cpu_relax() here as it may require MMU setup */;
 751}
 752
 753int __init arm_add_memory(u64 start, u64 size)
 754{
 755        u64 aligned_start;
 756
 757        /*
 758         * Ensure that start/size are aligned to a page boundary.
 759         * Size is rounded down, start is rounded up.
 760         */
 761        aligned_start = PAGE_ALIGN(start);
 762        if (aligned_start > start + size)
 763                size = 0;
 764        else
 765                size -= aligned_start - start;
 766
 767#ifndef CONFIG_PHYS_ADDR_T_64BIT
 768        if (aligned_start > ULONG_MAX) {
 769                pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
 770                        start);
 771                return -EINVAL;
 772        }
 773
 774        if (aligned_start + size > ULONG_MAX) {
 775                pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
 776                        (long long)start);
 777                /*
 778                 * To ensure bank->start + bank->size is representable in
 779                 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
 780                 * This means we lose a page after masking.
 781                 */
 782                size = ULONG_MAX - aligned_start;
 783        }
 784#endif
 785
 786        if (aligned_start < PHYS_OFFSET) {
 787                if (aligned_start + size <= PHYS_OFFSET) {
 788                        pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
 789                                aligned_start, aligned_start + size);
 790                        return -EINVAL;
 791                }
 792
 793                pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
 794                        aligned_start, (u64)PHYS_OFFSET);
 795
 796                size -= PHYS_OFFSET - aligned_start;
 797                aligned_start = PHYS_OFFSET;
 798        }
 799
 800        start = aligned_start;
 801        size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
 802
 803        /*
 804         * Check whether this memory region has non-zero size or
 805         * invalid node number.
 806         */
 807        if (size == 0)
 808                return -EINVAL;
 809
 810        memblock_add(start, size);
 811        return 0;
 812}
 813
 814/*
 815 * Pick out the memory size.  We look for mem=size@start,
 816 * where start and size are "size[KkMm]"
 817 */
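     /*
      * For example, "mem=64M@0x80000000" registers 64MB of RAM at that
      * (illustrative) physical address; if "@start" is omitted the region
      * starts at PHYS_OFFSET.
      */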
 818
 819static int __init early_mem(char *p)
 820{
 821        static int usermem __initdata = 0;
 822        u64 size;
 823        u64 start;
 824        char *endp;
 825
 826        /*
 827         * If the user specifies memory size, we
 828         * blow away any automatically generated
 829         * size.
 830         */
 831        if (usermem == 0) {
 832                usermem = 1;
 833                memblock_remove(memblock_start_of_DRAM(),
 834                        memblock_end_of_DRAM() - memblock_start_of_DRAM());
 835        }
 836
 837        start = PHYS_OFFSET;
 838        size  = memparse(p, &endp);
 839        if (*endp == '@')
 840                start = memparse(endp + 1, NULL);
 841
 842        arm_add_memory(start, size);
 843
 844        return 0;
 845}
 846early_param("mem", early_mem);
 847
 848static void __init request_standard_resources(const struct machine_desc *mdesc)
 849{
 850        phys_addr_t start, end, res_end;
 851        struct resource *res;
 852        u64 i;
 853
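             /* struct resource ranges are inclusive, hence the "- 1" on each end address. */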
 854        kernel_code.start   = virt_to_phys(_text);
 855        kernel_code.end     = virt_to_phys(__init_begin - 1);
 856        kernel_data.start   = virt_to_phys(_sdata);
 857        kernel_data.end     = virt_to_phys(_end - 1);
 858
 859        for_each_mem_range(i, &start, &end) {
 860                unsigned long boot_alias_start;
 861
 862                /*
 863                 * In memblock, end points to the first byte after the
  864 * range while in resources, end points to the last byte in
 865                 * the range.
 866                 */
 867                res_end = end - 1;
 868
 869                /*
 870                 * Some systems have a special memory alias which is only
 871                 * used for booting.  We need to advertise this region to
 872                 * kexec-tools so they know where bootable RAM is located.
 873                 */
 874                boot_alias_start = phys_to_idmap(start);
 875                if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
 876                        res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
 877                        if (!res)
 878                                panic("%s: Failed to allocate %zu bytes\n",
 879                                      __func__, sizeof(*res));
 880                        res->name = "System RAM (boot alias)";
 881                        res->start = boot_alias_start;
 882                        res->end = phys_to_idmap(res_end);
 883                        res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 884                        request_resource(&iomem_resource, res);
 885                }
 886
 887                res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
 888                if (!res)
 889                        panic("%s: Failed to allocate %zu bytes\n", __func__,
 890                              sizeof(*res));
 891                res->name  = "System RAM";
 892                res->start = start;
 893                res->end = res_end;
 894                res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 895
 896                request_resource(&iomem_resource, res);
 897
 898                if (kernel_code.start >= res->start &&
 899                    kernel_code.end <= res->end)
 900                        request_resource(res, &kernel_code);
 901                if (kernel_data.start >= res->start &&
 902                    kernel_data.end <= res->end)
 903                        request_resource(res, &kernel_data);
 904        }
 905
 906        if (mdesc->video_start) {
 907                video_ram.start = mdesc->video_start;
 908                video_ram.end   = mdesc->video_end;
 909                request_resource(&iomem_resource, &video_ram);
 910        }
 911
 912        /*
  913         * Some machines will never have lp0, lp1 or lp2, so they are
  914         * only claimed when the machine descriptor asks for them.
 915         */
 916        if (mdesc->reserve_lp0)
 917                request_resource(&ioport_resource, &lp0);
 918        if (mdesc->reserve_lp1)
 919                request_resource(&ioport_resource, &lp1);
 920        if (mdesc->reserve_lp2)
 921                request_resource(&ioport_resource, &lp2);
 922}
 923
 924#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
 925    defined(CONFIG_EFI)
 926struct screen_info screen_info = {
 927 .orig_video_lines      = 30,
 928 .orig_video_cols       = 80,
 929 .orig_video_mode       = 0,
 930 .orig_video_ega_bx     = 0,
 931 .orig_video_isVGA      = 1,
 932 .orig_video_points     = 8
 933};
 934#endif
 935
 936static int __init customize_machine(void)
 937{
 938        /*
 939         * customizes platform devices, or adds new ones
 940         * On DT based machines, we fall back to populating the
 941         * machine from the device tree, if no callback is provided,
 942         * otherwise we would always need an init_machine callback.
 943         */
 944        if (machine_desc->init_machine)
 945                machine_desc->init_machine();
 946
 947        return 0;
 948}
 949arch_initcall(customize_machine);
 950
 951static int __init init_machine_late(void)
 952{
 953        struct device_node *root;
 954        int ret;
 955
 956        if (machine_desc->init_late)
 957                machine_desc->init_late();
 958
 959        root = of_find_node_by_path("/");
 960        if (root) {
 961                ret = of_property_read_string(root, "serial-number",
 962                                              &system_serial);
 963                if (ret)
 964                        system_serial = NULL;
 965        }
 966
 967        if (!system_serial)
 968                system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
 969                                          system_serial_high,
 970                                          system_serial_low);
 971
 972        return 0;
 973}
 974late_initcall(init_machine_late);
 975
 976#ifdef CONFIG_KEXEC
 977/*
 978 * The crash region must be aligned to 128MB to avoid
 979 * zImage relocating below the reserved region.
 980 */
 981#define CRASH_ALIGN     (128 << 20)
 982
 983static inline unsigned long long get_total_mem(void)
 984{
 985        unsigned long total;
 986
 987        total = max_low_pfn - min_low_pfn;
 988        return total << PAGE_SHIFT;
 989}
 990
 991/**
  992 * reserve_crashkernel() - reserves memory area for crash kernel
 993 *
 994 * This function reserves memory area given in "crashkernel=" kernel command
 995 * line parameter. The memory reserved is used by a dump capture kernel when
 996 * primary kernel is crashing.
 997 */
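     /*
      * The reservation is driven by the usual "crashkernel=size[@offset]"
      * syntax, e.g. "crashkernel=64M" or "crashkernel=64M@0x60000000"
      * (illustrative values).
      */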
 998static void __init reserve_crashkernel(void)
 999{
1000        unsigned long long crash_size, crash_base;
1001        unsigned long long total_mem;
1002        int ret;
1003
1004        total_mem = get_total_mem();
1005        ret = parse_crashkernel(boot_command_line, total_mem,
1006                                &crash_size, &crash_base);
1007        if (ret)
1008                return;
1009
1010        if (crash_base <= 0) {
1011                unsigned long long crash_max = idmap_to_phys((u32)~0);
1012                unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
1013                if (crash_max > lowmem_max)
1014                        crash_max = lowmem_max;
1015
1016                crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
1017                                                       CRASH_ALIGN, crash_max);
1018                if (!crash_base) {
1019                        pr_err("crashkernel reservation failed - No suitable area found.\n");
1020                        return;
1021                }
1022        } else {
1023                unsigned long long crash_max = crash_base + crash_size;
1024                unsigned long long start;
1025
1026                start = memblock_phys_alloc_range(crash_size, SECTION_SIZE,
1027                                                  crash_base, crash_max);
1028                if (!start) {
1029                        pr_err("crashkernel reservation failed - memory is in use.\n");
1030                        return;
1031                }
1032        }
1033
1034        pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
1035                (unsigned long)(crash_size >> 20),
1036                (unsigned long)(crash_base >> 20),
1037                (unsigned long)(total_mem >> 20));
1038
1039        /* The crashk resource must always be located in normal mem */
1040        crashk_res.start = crash_base;
1041        crashk_res.end = crash_base + crash_size - 1;
1042        insert_resource(&iomem_resource, &crashk_res);
1043
1044        if (arm_has_idmap_alias()) {
1045                /*
1046                 * If we have a special RAM alias for use at boot, we
1047                 * need to advertise to kexec tools where the alias is.
1048                 */
1049                static struct resource crashk_boot_res = {
1050                        .name = "Crash kernel (boot alias)",
1051                        .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
1052                };
1053
1054                crashk_boot_res.start = phys_to_idmap(crash_base);
1055                crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
1056                insert_resource(&iomem_resource, &crashk_boot_res);
1057        }
1058}
1059#else
1060static inline void reserve_crashkernel(void) {}
1061#endif /* CONFIG_KEXEC */
1062
1063void __init hyp_mode_check(void)
1064{
1065#ifdef CONFIG_ARM_VIRT_EXT
1066        sync_boot_mode();
1067
1068        if (is_hyp_mode_available()) {
1069                pr_info("CPU: All CPU(s) started in HYP mode.\n");
1070                pr_info("CPU: Virtualization extensions available.\n");
1071        } else if (is_hyp_mode_mismatched()) {
1072                pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
1073                        __boot_cpu_mode & MODE_MASK);
1074                pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
1075        } else
1076                pr_info("CPU: All CPU(s) started in SVC mode.\n");
1077#endif
1078}
1079
1080static void (*__arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
1081
1082static int arm_restart(struct notifier_block *nb, unsigned long action,
1083                       void *data)
1084{
1085        __arm_pm_restart(action, data);
1086        return NOTIFY_DONE;
1087}
1088
1089static struct notifier_block arm_restart_nb = {
1090        .notifier_call = arm_restart,
1091        .priority = 128,
1092};
1093
1094void __init setup_arch(char **cmdline_p)
1095{
1096        const struct machine_desc *mdesc = NULL;
1097        void *atags_vaddr = NULL;
1098
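             /*
              * __atags_pointer is the physical address handed over by the boot
              * loader in r2 (a DTB or ATAGS list); convert it to the kernel's
              * fixed FDT mapping so it can be parsed before paging_init().
              */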
1099        if (__atags_pointer)
1100                atags_vaddr = FDT_VIRT_BASE(__atags_pointer);
1101
1102        setup_processor();
1103        if (atags_vaddr) {
1104                mdesc = setup_machine_fdt(atags_vaddr);
1105                if (mdesc)
1106                        memblock_reserve(__atags_pointer,
1107                                         fdt_totalsize(atags_vaddr));
1108        }
1109        if (!mdesc)
1110                mdesc = setup_machine_tags(atags_vaddr, __machine_arch_type);
1111        if (!mdesc) {
1112                early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
1113                early_print("  r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
1114                            __atags_pointer);
1115                if (__atags_pointer)
1116                        early_print("  r2[]=%*ph\n", 16, atags_vaddr);
1117                dump_machine_table();
1118        }
1119
1120        machine_desc = mdesc;
1121        machine_name = mdesc->name;
1122        dump_stack_set_arch_desc("%s", mdesc->name);
1123
1124        if (mdesc->reboot_mode != REBOOT_HARD)
1125                reboot_mode = mdesc->reboot_mode;
1126
1127        setup_initial_init_mm(_text, _etext, _edata, _end);
1128
1129        /* populate cmd_line too for later use, preserving boot_command_line */
1130        strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
1131        *cmdline_p = cmd_line;
1132
1133        early_fixmap_init();
1134        early_ioremap_init();
1135
1136        parse_early_param();
1137
1138#ifdef CONFIG_MMU
1139        early_mm_init(mdesc);
1140#endif
1141        setup_dma_zone(mdesc);
1142        xen_early_init();
1143        efi_init();
1144        /*
1145         * Make sure the calculation for lowmem/highmem is set appropriately
1146         * before reserving/allocating any memory
1147         */
1148        adjust_lowmem_bounds();
1149        arm_memblock_init(mdesc);
1150        /* Memory may have been removed so recalculate the bounds. */
1151        adjust_lowmem_bounds();
1152
1153        early_ioremap_reset();
1154
1155        paging_init(mdesc);
1156        kasan_init();
1157        request_standard_resources(mdesc);
1158
1159        if (mdesc->restart) {
1160                __arm_pm_restart = mdesc->restart;
1161                register_restart_handler(&arm_restart_nb);
1162        }
1163
1164        unflatten_device_tree();
1165
1166        arm_dt_init_cpu_maps();
1167        psci_dt_init();
1168#ifdef CONFIG_SMP
1169        if (is_smp()) {
1170                if (!mdesc->smp_init || !mdesc->smp_init()) {
1171                        if (psci_smp_available())
1172                                smp_set_ops(&psci_smp_ops);
1173                        else if (mdesc->smp)
1174                                smp_set_ops(mdesc->smp);
1175                }
1176                smp_init_cpus();
1177                smp_build_mpidr_hash();
1178        }
1179#endif
1180
1181        if (!is_smp())
1182                hyp_mode_check();
1183
1184        reserve_crashkernel();
1185
1186#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
1187        handle_arch_irq = mdesc->handle_irq;
1188#endif
1189
1190#ifdef CONFIG_VT
1191#if defined(CONFIG_VGA_CONSOLE)
1192        conswitchp = &vga_con;
1193#endif
1194#endif
1195
1196        if (mdesc->init_early)
1197                mdesc->init_early();
1198}
1199
1200
1201static int __init topology_init(void)
1202{
1203        int cpu;
1204
1205        for_each_possible_cpu(cpu) {
1206                struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
1207                cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
1208                register_cpu(&cpuinfo->cpu, cpu);
1209        }
1210
1211        return 0;
1212}
1213subsys_initcall(topology_init);
1214
1215#ifdef CONFIG_HAVE_PROC_CPU
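     /*
      * Create the empty /proc/cpu directory; other code (e.g. the alignment
      * trap handler) adds entries underneath it later.
      */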
1216static int __init proc_cpu_init(void)
1217{
1218        struct proc_dir_entry *res;
1219
1220        res = proc_mkdir("cpu", NULL);
1221        if (!res)
1222                return -ENOMEM;
1223        return 0;
1224}
1225fs_initcall(proc_cpu_init);
1226#endif
1227
1228static const char *hwcap_str[] = {
1229        "swp",
1230        "half",
1231        "thumb",
1232        "26bit",
1233        "fastmult",
1234        "fpa",
1235        "vfp",
1236        "edsp",
1237        "java",
1238        "iwmmxt",
1239        "crunch",
1240        "thumbee",
1241        "neon",
1242        "vfpv3",
1243        "vfpv3d16",
1244        "tls",
1245        "vfpv4",
1246        "idiva",
1247        "idivt",
1248        "vfpd32",
1249        "lpae",
1250        "evtstrm",
1251        NULL
1252};
1253
1254static const char *hwcap2_str[] = {
1255        "aes",
1256        "pmull",
1257        "sha1",
1258        "sha2",
1259        "crc32",
1260        NULL
1261};
1262
1263static int c_show(struct seq_file *m, void *v)
1264{
1265        int i, j;
1266        u32 cpuid;
1267
1268        for_each_online_cpu(i) {
1269                /*
1270                 * glibc reads /proc/cpuinfo to determine the number of
1271                 * online processors, looking for lines beginning with
1272                 * "processor".  Give glibc what it expects.
1273                 */
1274                seq_printf(m, "processor\t: %d\n", i);
1275                cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
1276                seq_printf(m, "model name\t: %s rev %d (%s)\n",
1277                           cpu_name, cpuid & 15, elf_platform);
1278
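                     /* BogoMIPS = loops_per_jiffy * HZ / 500000, shown with two decimal places. */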
1279#if defined(CONFIG_SMP)
1280                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1281                           per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
1282                           (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
1283#else
1284                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1285                           loops_per_jiffy / (500000/HZ),
1286                           (loops_per_jiffy / (5000/HZ)) % 100);
1287#endif
1288                /* dump out the processor features */
1289                seq_puts(m, "Features\t: ");
1290
1291                for (j = 0; hwcap_str[j]; j++)
1292                        if (elf_hwcap & (1 << j))
1293                                seq_printf(m, "%s ", hwcap_str[j]);
1294
1295                for (j = 0; hwcap2_str[j]; j++)
1296                        if (elf_hwcap2 & (1 << j))
1297                                seq_printf(m, "%s ", hwcap2_str[j]);
1298
1299                seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
1300                seq_printf(m, "CPU architecture: %s\n",
1301                           proc_arch[cpu_architecture()]);
1302
1303                if ((cpuid & 0x0008f000) == 0x00000000) {
1304                        /* pre-ARM7 */
1305                        seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
1306                } else {
1307                        if ((cpuid & 0x0008f000) == 0x00007000) {
1308                                /* ARM7 */
1309                                seq_printf(m, "CPU variant\t: 0x%02x\n",
1310                                           (cpuid >> 16) & 127);
1311                        } else {
1312                                /* post-ARM7 */
1313                                seq_printf(m, "CPU variant\t: 0x%x\n",
1314                                           (cpuid >> 20) & 15);
1315                        }
1316                        seq_printf(m, "CPU part\t: 0x%03x\n",
1317                                   (cpuid >> 4) & 0xfff);
1318                }
1319                seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
1320        }
1321
1322        seq_printf(m, "Hardware\t: %s\n", machine_name);
1323        seq_printf(m, "Revision\t: %04x\n", system_rev);
1324        seq_printf(m, "Serial\t\t: %s\n", system_serial);
1325
1326        return 0;
1327}
1328
1329static void *c_start(struct seq_file *m, loff_t *pos)
1330{
1331        return *pos < 1 ? (void *)1 : NULL;
1332}
1333
1334static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1335{
1336        ++*pos;
1337        return NULL;
1338}
1339
1340static void c_stop(struct seq_file *m, void *v)
1341{
1342}
1343
1344const struct seq_operations cpuinfo_op = {
1345        .start  = c_start,
1346        .next   = c_next,
1347        .stop   = c_stop,
1348        .show   = c_show
1349};
1350