linux/arch/c6x/kernel/setup.c
/*
 *  Port on Texas Instruments TMS320C6x architecture
 *
 *  Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
 *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/clkdev.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_fdt.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/of.h>

#include <asm/sections.h>
#include <asm/div64.h>
#include <asm/setup.h>
#include <asm/dscr.h>
#include <asm/clock.h>
#include <asm/soc.h>

static const char *c6x_soc_name;

int c6x_num_cores;
EXPORT_SYMBOL_GPL(c6x_num_cores);

unsigned int c6x_silicon_rev;
EXPORT_SYMBOL_GPL(c6x_silicon_rev);

/*
 * Device status register. This holds information
 * about device configuration needed by some drivers.
 */
unsigned int c6x_devstat;
EXPORT_SYMBOL_GPL(c6x_devstat);

/*
 * Some SoCs have fuse registers holding a unique MAC
 * address. This is parsed out of the device tree with
 * the resulting MAC being held here.
 */
unsigned char c6x_fuse_mac[6];

unsigned long memory_start;
unsigned long memory_end;

unsigned long ram_start;
unsigned long ram_end;

/* Uncached memory for DMA consistent use (memdma=) */
static unsigned long dma_start __initdata;
static unsigned long dma_size __initdata;

char c6x_command_line[COMMAND_LINE_SIZE];

#if defined(CONFIG_CMDLINE_BOOL)
static const char default_command_line[COMMAND_LINE_SIZE] __section(.cmdline) =
        CONFIG_CMDLINE;
#endif

struct cpuinfo_c6x {
        const char *cpu_name;
        const char *cpu_voltage;
        const char *mmu;
        const char *fpu;
        char *cpu_rev;
        unsigned int core_id;
        char __cpu_rev[5];
};

static DEFINE_PER_CPU(struct cpuinfo_c6x, cpu_data);

unsigned int ticks_per_ns_scaled;
EXPORT_SYMBOL(ticks_per_ns_scaled);

unsigned int c6x_core_freq;

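/*
 * Identify the CPU from the CSR control register, determine the core
 * clock frequency, and pick up the core count and SoC model from the
 * device tree. Called once from setup_arch().
 */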
static void __init get_cpuinfo(void)
{
        unsigned cpu_id, rev_id, csr;
        struct clk *coreclk = clk_get_sys(NULL, "core");
        unsigned long core_khz;
        u64 tmp;
        struct cpuinfo_c6x *p;
        struct device_node *node, *np;

        p = &per_cpu(cpu_data, smp_processor_id());

        if (!IS_ERR(coreclk))
                c6x_core_freq = clk_get_rate(coreclk);
        else {
                printk(KERN_WARNING
                       "Cannot find core clock frequency. Using 700MHz\n");
                c6x_core_freq = 700000000;
        }

        core_khz = c6x_core_freq / 1000;

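        /*
         * Precompute CPU ticks per nanosecond as a fixed-point value
         * (core_khz * 2^C6X_NDELAY_SCALE / 10^6), used by the
         * architecture's ndelay() implementation.
         */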
        tmp = (uint64_t)core_khz << C6X_NDELAY_SCALE;
        do_div(tmp, 1000000);
        ticks_per_ns_scaled = tmp;

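        /* CPU ID lives in CSR bits 31:24, silicon revision in bits 23:16 */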
        csr = get_creg(CSR);
        cpu_id = csr >> 24;
        rev_id = (csr >> 16) & 0xff;

        p->mmu = "none";
        p->fpu = "none";
        p->cpu_voltage = "unknown";

        switch (cpu_id) {
        case 0:
                p->cpu_name = "C67x";
                p->fpu = "yes";
                break;
        case 2:
                p->cpu_name = "C62x";
                break;
        case 8:
                p->cpu_name = "C64x";
                break;
        case 12:
                p->cpu_name = "C64x";
                break;
        case 16:
                p->cpu_name = "C64x+";
                p->cpu_voltage = "1.2";
                break;
        default:
                p->cpu_name = "unknown";
                break;
        }

        if (cpu_id < 16) {
                switch (rev_id) {
                case 0x1:
                        if (cpu_id > 8) {
                                p->cpu_rev = "DM640/DM641/DM642/DM643";
                                p->cpu_voltage = "1.2 - 1.4";
                        } else {
                                p->cpu_rev = "C6201";
                                p->cpu_voltage = "2.5";
                        }
                        break;
                case 0x2:
                        p->cpu_rev = "C6201B/C6202/C6211";
                        p->cpu_voltage = "1.8";
                        break;
                case 0x3:
                        p->cpu_rev = "C6202B/C6203/C6204/C6205";
                        p->cpu_voltage = "1.5";
                        break;
                case 0x201:
                        p->cpu_rev = "C6701 revision 0 (early CPU)";
                        p->cpu_voltage = "1.8";
                        break;
                case 0x202:
                        p->cpu_rev = "C6701/C6711/C6712";
                        p->cpu_voltage = "1.8";
                        break;
                case 0x801:
                        p->cpu_rev = "C64x";
                        p->cpu_voltage = "1.5";
                        break;
                default:
                        p->cpu_rev = "unknown";
                }
        } else {
                p->cpu_rev = p->__cpu_rev;
                snprintf(p->__cpu_rev, sizeof(p->__cpu_rev), "0x%x", cpu_id);
        }

        p->core_id = get_coreid();

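        /* Count the cores listed under the /cpus node in the device tree */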
        node = of_find_node_by_name(NULL, "cpus");
        if (node) {
                for_each_child_of_node(node, np)
                        if (!strcmp("cpu", np->name))
                                ++c6x_num_cores;
                of_node_put(node);
        }

        node = of_find_node_by_name(NULL, "soc");
        if (node) {
                if (of_property_read_string(node, "model", &c6x_soc_name))
                        c6x_soc_name = "unknown";
                of_node_put(node);
        } else {
                c6x_soc_name = "unknown";
        }

        printk(KERN_INFO "CPU%d: %s rev %s, %s volts, %uMHz\n",
               p->core_id, p->cpu_name, p->cpu_rev,
               p->cpu_voltage, c6x_core_freq / 1000000);
}

/*
 * Early parsing of the command line
 */
static u32 mem_size __initdata;

/* "mem=" parsing. */
static int __init early_mem(char *p)
{
        if (!p)
                return -EINVAL;

        mem_size = memparse(p, &p);
        /* don't remove all of memory when handling "mem={invalid}" */
        if (mem_size == 0)
                return -EINVAL;

        return 0;
}
early_param("mem", early_mem);

/* "memdma=<size>[@<address>]" parsing. */
static int __init early_memdma(char *p)
{
        if (!p)
                return -EINVAL;

        dma_size = memparse(p, &p);
        /* skip the '@' so memparse() sees the address itself */
        if (*p == '@')
                dma_start = memparse(p + 1, &p);

        return 0;
}
early_param("memdma", early_memdma);
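/*
 * Illustrative example: booting with "mem=32M memdma=2M" caps the memory
 * used by the kernel at 32MB from PAGE_OFFSET and carves a 2MB uncached
 * region for coherent DMA off the top of it (see setup_arch() below).
 * memparse() accepts the usual K/M/G suffixes.
 */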

int __init c6x_add_memory(phys_addr_t start, unsigned long size)
{
        static int ram_found __initdata;

        /* We only handle one bank (the one with PAGE_OFFSET) for now */
        if (ram_found)
                return -EINVAL;

        if (start > PAGE_OFFSET || PAGE_OFFSET >= (start + size))
                return 0;

        ram_start = start;
        ram_end = start + size;

        ram_found = 1;
        return 0;
}

/*
 * Do early machine setup and device tree parsing. This is called very
 * early in the boot process.
 */
notrace void __init machine_init(unsigned long dt_ptr)
{
        struct boot_param_header *dtb = __va(dt_ptr);
        struct boot_param_header *fdt = (struct boot_param_header *)_fdt_start;

        /* mask all maskable interrupts; leave only NMIE set in IER */
        set_creg(IER, 2);

        /*
         * Set the Interrupt Service Table (IST) to the beginning of the
         * vector table.
         */
        set_ist(_vectors_start);

        lockdep_init();

        /*
         * dtb is the device tree passed in by the bootloader; fdt is the
         * blob linked into the kernel image. Prefer the bootloader's tree
         * when one was provided.
         */
        if (dtb && dtb != fdt)
                fdt = dtb;

        /* Do some early initialization based on the flat device tree */
        early_init_devtree(fdt);

        /* parse_early_param needs a boot_command_line */
        strlcpy(boot_command_line, c6x_command_line, COMMAND_LINE_SIZE);
        parse_early_param();
}

void __init setup_arch(char **cmdline_p)
{
        int bootmap_size;
        struct memblock_region *reg;

        printk(KERN_INFO "Initializing kernel\n");

        /* Initialize command line */
        *cmdline_p = c6x_command_line;

        memory_end = ram_end;
        memory_end &= ~(PAGE_SIZE - 1);

        if (mem_size && (PAGE_OFFSET + PAGE_ALIGN(mem_size)) < memory_end)
                memory_end = PAGE_OFFSET + PAGE_ALIGN(mem_size);

        /* add block that this kernel can use */
        memblock_add(PAGE_OFFSET, memory_end - PAGE_OFFSET);

        /* reserve kernel text/data/bss */
        memblock_reserve(PAGE_OFFSET,
                         PAGE_ALIGN((unsigned long)&_end - PAGE_OFFSET));

        if (dma_size) {
                /* align to cacheability granularity */
                dma_size = CACHE_REGION_END(dma_size);

                if (!dma_start)
                        dma_start = memory_end - dma_size;

                /* align to cacheability granularity */
                dma_start = CACHE_REGION_START(dma_start);

                /* reserve DMA memory taken from kernel memory */
                if (memblock_is_region_memory(dma_start, dma_size))
                        memblock_reserve(dma_start, dma_size);
        }

        memory_start = PAGE_ALIGN((unsigned int) &_end);

        printk(KERN_INFO "Memory Start=%08lx, Memory End=%08lx\n",
               memory_start, memory_end);

#ifdef CONFIG_BLK_DEV_INITRD
        /*
         * Reserve initrd memory if in kernel memory.
         */
        if (initrd_start < initrd_end)
                if (memblock_is_region_memory(initrd_start,
                                              initrd_end - initrd_start))
                        memblock_reserve(initrd_start,
                                         initrd_end - initrd_start);
#endif

        init_mm.start_code = (unsigned long) &_stext;
        init_mm.end_code   = (unsigned long) &_etext;
        init_mm.end_data   = memory_start;
        init_mm.brk        = memory_start;

        /*
         * Give all the memory to the bootmap allocator, tell it to put the
         * boot mem_map at the start of memory
         */
        bootmap_size = init_bootmem_node(NODE_DATA(0),
                                         memory_start >> PAGE_SHIFT,
                                         PAGE_OFFSET >> PAGE_SHIFT,
                                         memory_end >> PAGE_SHIFT);
        memblock_reserve(memory_start, bootmap_size);

        unflatten_device_tree();

        c6x_cache_init();

        /* Set the whole external memory as non-cacheable */
        disable_caching(ram_start, ram_end - 1);

        /* Set caching of external RAM used by Linux */
        for_each_memblock(memory, reg)
                enable_caching(CACHE_REGION_START(reg->base),
                               CACHE_REGION_START(reg->base + reg->size - 1));

#ifdef CONFIG_BLK_DEV_INITRD
        /*
         * Enable caching for initrd which falls outside kernel memory.
         */
        if (initrd_start < initrd_end) {
                if (!memblock_is_region_memory(initrd_start,
                                               initrd_end - initrd_start))
                        enable_caching(CACHE_REGION_START(initrd_start),
                                       CACHE_REGION_START(initrd_end - 1));
        }
#endif

        /*
         * Disable caching for dma coherent memory taken from kernel memory.
         */
        if (dma_size && memblock_is_region_memory(dma_start, dma_size))
                disable_caching(dma_start,
                                CACHE_REGION_START(dma_start + dma_size - 1));

        /* Initialize the coherent memory allocator */
        coherent_mem_init(dma_start, dma_size);

        /*
         * Free all memory as a starting point.
         */
        free_bootmem(PAGE_OFFSET, memory_end - PAGE_OFFSET);

        /*
         * Then reserve memory which is already being used.
         */
        for_each_memblock(reserved, reg) {
                pr_debug("reserved - 0x%08x-0x%08x\n",
                         (u32) reg->base, (u32) reg->size);
                reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
        }

        max_low_pfn = PFN_DOWN(memory_end);
        min_low_pfn = PFN_UP(memory_start);
        max_mapnr = max_low_pfn - min_low_pfn;

        /* Get kmalloc into gear */
        paging_init();

        /*
         * Probe for Device State Configuration Registers.
         * We have to do this early in case the timer needs to be enabled
         * through the DSCR.
         */
        dscr_probe();

        /* We do this early for timer and core clock frequency */
        c64x_setup_clocks();

        /* Get CPU info */
        get_cpuinfo();

#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
}

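/*
 * seq_file iterators treat a NULL return from start()/next() as "end of
 * sequence", so encode CPU number n as the non-NULL pointer (n + 1).
 */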
#define cpu_to_ptr(n) ((void *)((long)(n)+1))
#define ptr_to_cpu(p) ((long)(p) - 1)

static int show_cpuinfo(struct seq_file *m, void *v)
{
        int n = ptr_to_cpu(v);
        struct cpuinfo_c6x *p = &per_cpu(cpu_data, n);

        if (n == 0) {
                seq_printf(m,
                           "soc\t\t: %s\n"
                           "soc revision\t: 0x%x\n"
                           "soc cores\t: %d\n",
                           c6x_soc_name, c6x_silicon_rev, c6x_num_cores);
        }

        seq_printf(m,
                   "\n"
                   "processor\t: %d\n"
                   "cpu\t\t: %s\n"
                   "core revision\t: %s\n"
                   "core voltage\t: %s\n"
                   "core id\t\t: %d\n"
                   "mmu\t\t: %s\n"
                   "fpu\t\t: %s\n"
                   "cpu MHz\t\t: %u\n"
                   "bogomips\t: %lu.%02lu\n\n",
                   n,
                   p->cpu_name, p->cpu_rev, p->cpu_voltage,
                   p->core_id, p->mmu, p->fpu,
                   (c6x_core_freq + 500000) / 1000000,
                   (loops_per_jiffy/(500000/HZ)),
                   (loops_per_jiffy/(5000/HZ))%100);

        return 0;
}

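/*
 * /proc/cpuinfo iteration: c_start() maps the current position to an
 * encoded CPU pointer; c_next() ends each pass, so seq_file restarts
 * with the advanced position until all possible CPUs have been shown.
 */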
static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}
static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start = c_start,
        .stop  = c_stop,
        .next  = c_next,
        .show  = show_cpuinfo,
};

static struct cpu cpu_devices[NR_CPUS];

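/*
 * Register a CPU device for each present core so it shows up under
 * /sys/devices/system/cpu/.
 */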
static int __init topology_init(void)
{
        int i;

        for_each_present_cpu(i)
                register_cpu(&cpu_devices[i], i);

        return 0;
}

subsys_initcall(topology_init);