linux/arch/metag/kernel/setup.c
/*
 * Copyright (C) 2005-2012 Imagination Technologies Ltd.
 *
 * This file contains the architecture-dependent parts of system setup.
 *
 */

#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/pfn.h>
#include <linux/root_dev.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/start_kernel.h>
#include <linux/string.h>

#include <asm/cachepart.h>
#include <asm/clock.h>
#include <asm/core_reg.h>
#include <asm/cpu.h>
#include <asm/da.h>
#include <asm/highmem.h>
#include <asm/hwthread.h>
#include <asm/l2cache.h>
#include <asm/mach/arch.h>
#include <asm/metag_mem.h>
#include <asm/metag_regs.h>
#include <asm/mmu.h>
#include <asm/mmzone.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/traps.h>

/* Priv protect as many registers as possible. */
#define DEFAULT_PRIV    (TXPRIVEXT_COPRO_BITS           | \
                         TXPRIVEXT_TXTRIGGER_BIT        | \
                         TXPRIVEXT_TXGBLCREG_BIT        | \
                         TXPRIVEXT_ILOCK_BIT            | \
                         TXPRIVEXT_TXITACCYC_BIT        | \
                         TXPRIVEXT_TXDIVTIME_BIT        | \
                         TXPRIVEXT_TXAMAREGX_BIT        | \
                         TXPRIVEXT_TXTIMERI_BIT         | \
                         TXPRIVEXT_TXSTATUS_BIT         | \
                         TXPRIVEXT_TXDISABLE_BIT)

/* Meta2 specific bits. */
#ifdef CONFIG_METAG_META12
#define META2_PRIV      0
#else
#define META2_PRIV      (TXPRIVEXT_TXTIMER_BIT          | \
                         TXPRIVEXT_TRACE_BIT)
#endif

/* Unaligned access checking bits. */
#ifdef CONFIG_METAG_UNALIGNED
#define UNALIGNED_PRIV  TXPRIVEXT_ALIGNREW_BIT
#else
#define UNALIGNED_PRIV  0
#endif

#define PRIV_BITS       (DEFAULT_PRIV                   | \
                         META2_PRIV                     | \
                         UNALIGNED_PRIV)

/*
 * Protect access to:
 * 0x06000000-0x07ffffff Direct mapped region
 * 0x05000000-0x05ffffff MMU table region (Meta1)
 * 0x04400000-0x047fffff Cache flush region
 * 0x84000000-0x87ffffff Core cache memory region (Meta2)
 *
 * Allow access to:
 * 0x80000000-0x81ffffff Core code memory region (Meta2)
 */
#ifdef CONFIG_METAG_META12
#define PRIVSYSR_BITS   TXPRIVSYSR_ALL_BITS
#else
#define PRIVSYSR_BITS   (TXPRIVSYSR_ALL_BITS & ~TXPRIVSYSR_CORECODE_BIT)
#endif

/* Protect all 0x02xxxxxx and 0x048xxxxx. */
#define PIOREG_BITS     0xffffffff

/*
 * Protect all 0x04000xx0 (system events)
 * except write combiner flush and write fence (system events 4 and 5).
 */
#define PSYREG_BITS     0xfffffffb

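/* Start of the kernel heap; expected to be provided by the link (see its use
 * in setup_arch() below).
 */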
extern char _heap_start[];

#ifdef CONFIG_METAG_BUILTIN_DTB
extern u32 __dtb_start[];
#endif

#ifdef CONFIG_DA_CONSOLE
/* Our early channel-based console driver */
extern struct console dash_console;
#endif

const struct machine_desc *machine_desc __initdata;

/*
 * Map a Linux CPU number to a hardware thread ID.
 * In SMP this will be set up with the correct mapping at startup; in UP this
 * will map to the HW thread on which we are running.
 */
u8 cpu_2_hwthread_id[NR_CPUS] __read_mostly = {
        [0 ... NR_CPUS-1] = BAD_HWTHREAD_ID
};
EXPORT_SYMBOL_GPL(cpu_2_hwthread_id);

/*
 * Map a hardware thread ID to a Linux CPU number.
 * In SMP this will be fleshed out with the correct CPU ID for a particular
 * hardware thread. In UP this will be initialised with the boot CPU ID.
 */
u8 hwthread_id_2_cpu[4] __read_mostly = {
        [0 ... 3] = BAD_CPU_ID
};

/* The relative offset of the MMU mapped memory (from ldlk or bootloader)
 * to the real physical memory.  This is needed as we have to use the
 * physical addresses in the MMU tables (pte entries), and not the virtual
 * addresses.
 * This variable is used in the __pa() and __va() macros, and should
 * probably only be used via them.
 */
unsigned int meta_memoffset;
EXPORT_SYMBOL(meta_memoffset);

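/* Argument passed to metag_start_kernel(): either a flattened device tree or
 * a boot command line (see setup_arch()).
 */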
static char __initdata *original_cmd_line;

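/* Per-CPU pointer to this thread's TBI (Thread Binary Interface) table,
 * filled in by setup_arch() and retrieved via pTBI_get().
 */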
DEFINE_PER_CPU(PTBI, pTBI);

/*
 * Mappings are specified as "CPU_ID:HWTHREAD_ID", e.g.
 *
 *      "hwthread_map=0:1,1:2,2:3,3:0"
 *
 *      Linux CPU ID    HWTHREAD_ID
 *      ---------------------------
 *          0                 1
 *          1                 2
 *          2                 3
 *          3                 0
 */
static int __init parse_hwthread_map(char *p)
{
        int cpu;

        while (*p) {
                cpu = (*p++) - '0';
                if (cpu < 0 || cpu > 9)
                        goto err_cpu;

                p++;            /* skip colon */
                cpu_2_hwthread_id[cpu] = (*p++) - '0';
                if (cpu_2_hwthread_id[cpu] >= 4)
                        goto err_thread;
                hwthread_id_2_cpu[cpu_2_hwthread_id[cpu]] = cpu;

                if (*p == ',')
                        p++;            /* skip comma */
        }

        return 0;
err_cpu:
        pr_err("%s: hwthread_map cpu argument out of range\n", __func__);
        return -EINVAL;
err_thread:
        pr_err("%s: hwthread_map thread argument out of range\n", __func__);
        return -EINVAL;
}
early_param("hwthread_map", parse_hwthread_map);

void __init dump_machine_table(void)
{
        struct machine_desc *p;
        const char **compat;

        pr_info("Available machine support:\n\tNAME\t\tCOMPATIBLE LIST\n");
        for_each_machine_desc(p) {
                pr_info("\t%s\t[", p->name);
                for (compat = p->dt_compat; compat && *compat; ++compat)
                        printk(" '%s'", *compat);
                printk(" ]\n");
        }

        pr_info("\nPlease check your kernel config and/or bootloader.\n");

        hard_processor_halt(HALT_PANIC);
}

#ifdef CONFIG_METAG_HALT_ON_PANIC
static int metag_panic_event(struct notifier_block *this, unsigned long event,
                             void *ptr)
{
        hard_processor_halt(HALT_PANIC);
        return NOTIFY_DONE;
}

static struct notifier_block metag_panic_block = {
        metag_panic_event,
        NULL,
        0
};
#endif

void __init setup_arch(char **cmdline_p)
{
        unsigned long start_pfn;
        unsigned long text_start = (unsigned long)(&_stext);
        unsigned long cpu = smp_processor_id();
        unsigned long heap_start, heap_end;
        unsigned long start_pte;
        PTBI _pTBI;
        PTBISEG p_heap;
        int heap_id, i;

        metag_cache_probe();

        metag_da_probe();
#ifdef CONFIG_DA_CONSOLE
        if (metag_da_enabled()) {
                /* An early channel-based console driver */
                register_console(&dash_console);
                add_preferred_console("ttyDA", 1, NULL);
        }
#endif

        /* try interpreting the argument as a device tree */
        machine_desc = setup_machine_fdt(original_cmd_line);
        /* if it doesn't look like a device tree it must be a command line */
        if (!machine_desc) {
#ifdef CONFIG_METAG_BUILTIN_DTB
                /* try the embedded device tree */
                machine_desc = setup_machine_fdt(__dtb_start);
                if (!machine_desc)
                        panic("Invalid embedded device tree.");
#else
                /* use the default machine description */
                machine_desc = default_machine_desc();
#endif
#ifndef CONFIG_CMDLINE_FORCE
                /* append the bootloader cmdline to any builtin fdt cmdline */
                if (boot_command_line[0] && original_cmd_line[0])
                        strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
                strlcat(boot_command_line, original_cmd_line,
                        COMMAND_LINE_SIZE);
#endif
        }
        setup_meta_clocks(machine_desc->clocks);

        *cmdline_p = boot_command_line;
        parse_early_param();

        /*
         * Make sure we don't alias in dcache or icache
         */
        check_for_cache_aliasing(cpu);

#ifdef CONFIG_METAG_HALT_ON_PANIC
        atomic_notifier_chain_register(&panic_notifier_list,
                                       &metag_panic_block);
#endif

#ifdef CONFIG_DUMMY_CONSOLE
        conswitchp = &dummy_con;
#endif

        if (!(__core_reg_get(TXSTATUS) & TXSTATUS_PSTAT_BIT))
                panic("Privilege must be enabled for this thread.");

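        /* Fetch this hardware thread's TBI (Thread Binary Interface) table. */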
        _pTBI = __TBI(TBID_ISTAT_BIT);

        per_cpu(pTBI, cpu) = _pTBI;

        if (!per_cpu(pTBI, cpu))
                panic("No TBI found!");

        /*
         * Initialize all interrupt vectors to our copy of __TBIUnExpXXX,
         * rather than the version from the bootloader. This makes call
         * stacks easier to understand and may allow us to unmap the
         * bootloader at some point.
         */
        for (i = 0; i <= TBID_SIGNUM_MAX; i++)
                _pTBI->fnSigs[i] = __TBIUnExpXXX;

        /* A Meta requirement is that the kernel is loaded (virtually)
         * at the PAGE_OFFSET.
         */
        if (PAGE_OFFSET != text_start)
                panic("Kernel not loaded at PAGE_OFFSET (%#x) but at %#lx.",
                      PAGE_OFFSET, text_start);

        start_pte = mmu_read_second_level_page(text_start);

        /*
         * Kernel pages should have the PRIV bit set by the bootloader.
         */
        if (!(start_pte & _PAGE_KERNEL))
                panic("kernel pte does not have PRIV set");

        /*
         * See __pa and __va in include/asm/page.h.
         * This value is negative when running in local space but the
         * calculations work anyway.
         */
        meta_memoffset = text_start - (start_pte & PAGE_MASK);

        /* Now let's look at the heap space */
        heap_id = (__TBIThreadId() & TBID_THREAD_BITS)
                + TBID_SEG(0, TBID_SEGSCOPE_LOCAL, TBID_SEGTYPE_HEAP);

        p_heap = __TBIFindSeg(NULL, heap_id);

        if (!p_heap)
                panic("Could not find heap from TBI!");

        /* The heap begins at the first full page after the kernel data. */
        heap_start = (unsigned long) &_heap_start;

        /* The heap ends at the end of the heap segment specified with
         * ldlk.
         */
        if (is_global_space(text_start)) {
                pr_debug("WARNING: running in global space!\n");
                heap_end = (unsigned long)p_heap->pGAddr + p_heap->Bytes;
        } else {
                heap_end = (unsigned long)p_heap->pLAddr + p_heap->Bytes;
        }

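        /* Default the root device to the first RAM disk. */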
        ROOT_DEV = Root_RAM0;

        /* init_mm is the mm struct used for the first task.  It is then
         * cloned for all other tasks spawned from that task.
         *
         * Note - we are using the virtual addresses here.
         */
        init_mm.start_code = (unsigned long)(&_stext);
        init_mm.end_code = (unsigned long)(&_etext);
        init_mm.end_data = (unsigned long)(&_edata);
        init_mm.brk = (unsigned long)heap_start;

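        /* Low memory spans from the start of the kernel image to the end of
         * the heap segment found above.
         */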
        min_low_pfn = PFN_UP(__pa(text_start));
        max_low_pfn = PFN_DOWN(__pa(heap_end));

        pfn_base = min_low_pfn;

        /* Round max_pfn up to a 4Mb boundary. The free_bootmem_node()
         * call later makes sure to keep the rounded up pages marked reserved.
         */
        max_pfn = max_low_pfn + ((1 << MAX_ORDER) - 1);
        max_pfn &= ~((1 << MAX_ORDER) - 1);

        start_pfn = PFN_UP(__pa(heap_start));

        if (min_low_pfn & ((1 << MAX_ORDER) - 1)) {
                /* Theoretically, we could expand the space that the
                 * bootmem allocator covers - much as we do for the
                 * 'high' address, and then tell the bootmem system
                 * that the lowest chunk is 'not available'.  Right
                 * now it is just much easier to constrain the
                 * user to always MAX_ORDER align their kernel space.
                 */

                panic("Kernel must be %d byte aligned, currently at %#lx.",
                      1 << (MAX_ORDER + PAGE_SHIFT),
                      min_low_pfn << PAGE_SHIFT);
        }

#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        high_memory = (void *) __va(PFN_PHYS(highstart_pfn));
#else
        high_memory = (void *)__va(PFN_PHYS(max_pfn));
#endif

        paging_init(heap_end);

        setup_priv();

        /* Set up the boot CPU's mapping. The rest will be set up below. */
        cpu_2_hwthread_id[smp_processor_id()] = hard_processor_id();
        hwthread_id_2_cpu[hard_processor_id()] = smp_processor_id();

        unflatten_and_copy_device_tree();

#ifdef CONFIG_SMP
        smp_init_cpus();
#endif

        if (machine_desc->init_early)
                machine_desc->init_early();
}

static int __init customize_machine(void)
{
        /* customizes platform devices, or adds new ones */
        if (machine_desc->init_machine)
                machine_desc->init_machine();
        else
                of_platform_populate(NULL, of_default_bus_match_table, NULL,
                                     NULL);
        return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
        if (machine_desc->init_late)
                machine_desc->init_late();
        return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_PROC_FS
/*
 *      Get CPU information for use by the procfs.
 */
static const char *get_cpu_capabilities(unsigned int txenable)
{
#ifdef CONFIG_METAG_META21
        /* See CORE_ID in META HTP.GP TRM - Architecture Overview 2.1.238 */
        int coreid = metag_in32(METAC_CORE_ID);
        unsigned int dsp_type = (coreid >> 3) & 7;
        unsigned int fpu_type = (coreid >> 7) & 3;

        switch (dsp_type | fpu_type << 3) {
        case (0x00): return "EDSP";
        case (0x01): return "DSP";
        case (0x08): return "EDSP+LFPU";
        case (0x09): return "DSP+LFPU";
        case (0x10): return "EDSP+FPU";
        case (0x11): return "DSP+FPU";
        }
        return "UNKNOWN";

#else
        if (!(txenable & TXENABLE_CLASS_BITS))
                return "DSP";
        else
                return "";
#endif
}

static int show_cpuinfo(struct seq_file *m, void *v)
{
        const char *cpu;
        unsigned int txenable, thread_id, major, minor;
        unsigned long clockfreq = get_coreclock();
#ifdef CONFIG_SMP
        int i;
        unsigned long lpj;
#endif

        cpu = "META";

        txenable = __core_reg_get(TXENABLE);
        major = (txenable & TXENABLE_MAJOR_REV_BITS) >> TXENABLE_MAJOR_REV_S;
        minor = (txenable & TXENABLE_MINOR_REV_BITS) >> TXENABLE_MINOR_REV_S;
        thread_id = (txenable >> 8) & 0x3;

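        /* With SMP, report each online CPU using the TXENABLE register and
         * loops_per_jiffy of its own hardware thread.
         */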
#ifdef CONFIG_SMP
        for_each_online_cpu(i) {
                lpj = per_cpu(cpu_data, i).loops_per_jiffy;
                txenable = core_reg_read(TXUCT_ID, TXENABLE_REGNUM,
                                                        cpu_2_hwthread_id[i]);

                seq_printf(m, "CPU:\t\t%s %d.%d (thread %d)\n"
                              "Clocking:\t%lu.%1luMHz\n"
                              "BogoMips:\t%lu.%02lu\n"
                              "Calibration:\t%lu loops\n"
                              "Capabilities:\t%s\n\n",
                              cpu, major, minor, i,
                              clockfreq / 1000000, (clockfreq / 100000) % 10,
                              lpj / (500000 / HZ), (lpj / (5000 / HZ)) % 100,
                              lpj,
                              get_cpu_capabilities(txenable));
        }
#else
        seq_printf(m, "CPU:\t\t%s %d.%d (thread %d)\n"
                   "Clocking:\t%lu.%1luMHz\n"
                   "BogoMips:\t%lu.%02lu\n"
                   "Calibration:\t%lu loops\n"
                   "Capabilities:\t%s\n",
                   cpu, major, minor, thread_id,
                   clockfreq / 1000000, (clockfreq / 100000) % 10,
                   loops_per_jiffy / (500000 / HZ),
                   (loops_per_jiffy / (5000 / HZ)) % 100,
                   loops_per_jiffy,
                   get_cpu_capabilities(txenable));
#endif /* CONFIG_SMP */

#ifdef CONFIG_METAG_L2C
        if (meta_l2c_is_present()) {
                seq_printf(m, "L2 cache:\t%s\n"
                              "L2 cache size:\t%d KB\n",
                              meta_l2c_is_enabled() ? "enabled" : "disabled",
                              meta_l2c_size() >> 10);
        }
#endif
        return 0;
}

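/* Single-record seq_file iterator: show_cpuinfo() prints everything in one pass. */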
static void *c_start(struct seq_file *m, loff_t *pos)
{
        return (void *)(*pos == 0);
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        return NULL;
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
        .start = c_start,
        .next  = c_next,
        .stop  = c_stop,
        .show  = show_cpuinfo,
};
#endif /* CONFIG_PROC_FS */

void __init metag_start_kernel(char *args)
{
        /* Zero the timer register so timestamps are from the point at
         * which the kernel started running.
         */
        __core_reg_set(TXTIMER, 0);

        /* Clear the bss. */
        memset(__bss_start, 0,
               (unsigned long)__bss_stop - (unsigned long)__bss_start);

        /* Remember where the boot arguments are for use in setup_arch */
        original_cmd_line = args;

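        /* Record which hardware thread the boot CPU is running on. */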
        current_thread_info()->cpu = hard_processor_id();

        start_kernel();
}

/**
 * setup_priv() - Set up privilege protection registers.
 *
 * Set up privilege protection registers such as TXPRIVEXT to prevent userland
 * from touching our precious registers and sensitive memory areas.
 */
void setup_priv(void)
{
        unsigned int offset = hard_processor_id() << TXPRIVREG_STRIDE_S;

        __core_reg_set(TXPRIVEXT, PRIV_BITS);

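        /* The per-thread protection registers sit at a fixed stride above the
         * thread 0 (T0*) register addresses.
         */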
        metag_out32(PRIVSYSR_BITS, T0PRIVSYSR + offset);
        metag_out32(PIOREG_BITS,   T0PIOREG   + offset);
        metag_out32(PSYREG_BITS,   T0PSYREG   + offset);
}

PTBI pTBI_get(unsigned int cpu)
{
        return per_cpu(pTBI, cpu);
}
EXPORT_SYMBOL(pTBI_get);

#if defined(CONFIG_METAG_DSP) && defined(CONFIG_METAG_FPU)
static char capabilities[] = "dsp fpu";
#elif defined(CONFIG_METAG_DSP)
static char capabilities[] = "dsp";
#elif defined(CONFIG_METAG_FPU)
static char capabilities[] = "fpu";
#else
static char capabilities[] = "";
#endif

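/* Expose the build-time capability string read-only as
 * /proc/sys/kernel/capabilities.
 */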
static struct ctl_table caps_kern_table[] = {
        {
                .procname       = "capabilities",
                .data           = capabilities,
                .maxlen         = sizeof(capabilities),
                .mode           = 0444,
                .proc_handler   = proc_dostring,
        },
        {}
};

static struct ctl_table caps_root_table[] = {
        {
                .procname       = "kernel",
                .mode           = 0555,
                .child          = caps_kern_table,
        },
        {}
};

static int __init capabilities_register_sysctl(void)
{
        struct ctl_table_header *caps_table_header;

        caps_table_header = register_sysctl_table(caps_root_table);
        if (!caps_table_header) {
                pr_err("Unable to register CAPABILITIES sysctl\n");
                return -ENOMEM;
        }

        return 0;
}

core_initcall(capabilities_register_sysctl);