linux/arch/blackfin/kernel/setup.c
/*
 * Copyright 2004-2010 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/delay.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/pfn.h>

#ifdef CONFIG_MTD_UCLINUX
#include <linux/mtd/map.h>
#include <linux/ext2_fs.h>
#include <uapi/linux/cramfs_fs.h>
#include <linux/romfs_fs.h>
#endif

#include <asm/cplb.h>
#include <asm/cacheflush.h>
#include <asm/blackfin.h>
#include <asm/cplbinit.h>
#include <asm/clocks.h>
#include <asm/div64.h>
#include <asm/cpu.h>
#include <asm/fixed_code.h>
#include <asm/early_printk.h>
#include <asm/irq_handler.h>
#include <asm/pda.h>
#ifdef CONFIG_BF60x
#include <mach/pm.h>
#endif
#ifdef CONFIG_SCB_PRIORITY
#include <asm/scb.h>
#endif

u16 _bfin_swrst;
EXPORT_SYMBOL(_bfin_swrst);

unsigned long memory_start, memory_end, physical_mem_end;
unsigned long _rambase, _ramstart, _ramend;
unsigned long reserved_mem_dcache_on;
unsigned long reserved_mem_icache_on;
EXPORT_SYMBOL(memory_start);
EXPORT_SYMBOL(memory_end);
EXPORT_SYMBOL(physical_mem_end);
EXPORT_SYMBOL(_ramend);
EXPORT_SYMBOL(reserved_mem_dcache_on);

#ifdef CONFIG_MTD_UCLINUX
extern struct map_info uclinux_ram_map;
unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
EXPORT_SYMBOL(memory_mtd_end);
EXPORT_SYMBOL(memory_mtd_start);
EXPORT_SYMBOL(mtd_size);
#endif

char __initdata command_line[COMMAND_LINE_SIZE];
struct blackfin_initial_pda __initdata initial_pda;

/* boot memmap, for parsing "memmap=" */
#define BFIN_MEMMAP_MAX         128 /* number of entries in bfin_memmap */
#define BFIN_MEMMAP_RAM         1
#define BFIN_MEMMAP_RESERVED    2
static struct bfin_memmap {
        int nr_map;
        struct bfin_memmap_entry {
                unsigned long long addr; /* start of memory segment */
                unsigned long long size;
                unsigned long type;
        } map[BFIN_MEMMAP_MAX];
} bfin_memmap __initdata;

/* for memmap sanitization */
struct change_member {
        struct bfin_memmap_entry *pentry; /* pointer to original entry */
        unsigned long long addr; /* address for this change point */
};
static struct change_member change_point_list[2*BFIN_MEMMAP_MAX] __initdata;
static struct change_member *change_point[2*BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata;

DEFINE_PER_CPU(struct blackfin_cpudata, cpu_data);

static int early_init_clkin_hz(char *buf);

#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
void __init generate_cplb_tables(void)
{
        unsigned int cpu;

        generate_cplb_tables_all();
        /* Generate per-CPU I&D CPLB tables */
        for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
                generate_cplb_tables_cpu(cpu);
}
#endif

void bfin_setup_caches(unsigned int cpu)
{
#ifdef CONFIG_BFIN_ICACHE
        bfin_icache_init(icplb_tbl[cpu]);
#endif

#ifdef CONFIG_BFIN_DCACHE
        bfin_dcache_init(dcplb_tbl[cpu]);
#endif

        bfin_setup_cpudata(cpu);

        /*
         * In cache coherence emulation mode, we need to have the
         * D-cache enabled before running any atomic operation which
         * might involve cache invalidation (i.e. spinlock, rwlock).
         * So printk's are deferred until then.
         */
#ifdef CONFIG_BFIN_ICACHE
        printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
        printk(KERN_INFO "  External memory:"
# ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
               " cacheable"
# else
               " uncacheable"
# endif
               " in instruction cache\n");
        if (L2_LENGTH)
                printk(KERN_INFO "  L2 SRAM        :"
# ifdef CONFIG_BFIN_L2_ICACHEABLE
                       " cacheable"
# else
                       " uncacheable"
# endif
                       " in instruction cache\n");

#else
        printk(KERN_INFO "Instruction Cache Disabled for CPU%u\n", cpu);
#endif

#ifdef CONFIG_BFIN_DCACHE
        printk(KERN_INFO "Data Cache Enabled for CPU%u\n", cpu);
        printk(KERN_INFO "  External memory:"
# if defined CONFIG_BFIN_EXTMEM_WRITEBACK
               " cacheable (write-back)"
# elif defined CONFIG_BFIN_EXTMEM_WRITETHROUGH
               " cacheable (write-through)"
# else
               " uncacheable"
# endif
               " in data cache\n");
        if (L2_LENGTH)
                printk(KERN_INFO "  L2 SRAM        :"
# if defined CONFIG_BFIN_L2_WRITEBACK
                       " cacheable (write-back)"
# elif defined CONFIG_BFIN_L2_WRITETHROUGH
                       " cacheable (write-through)"
# else
                       " uncacheable"
# endif
                       " in data cache\n");
#else
        printk(KERN_INFO "Data Cache Disabled for CPU%u\n", cpu);
#endif
}

void bfin_setup_cpudata(unsigned int cpu)
{
        struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);

        cpudata->imemctl = bfin_read_IMEM_CONTROL();
        cpudata->dmemctl = bfin_read_DMEM_CONTROL();
}

void __init bfin_cache_init(void)
{
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
        generate_cplb_tables();
#endif
        bfin_setup_caches(0);
}

void __init bfin_relocate_l1_mem(void)
{
        unsigned long text_l1_len = (unsigned long)_text_l1_len;
        unsigned long data_l1_len = (unsigned long)_data_l1_len;
        unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;
        unsigned long l2_len = (unsigned long)_l2_len;

        early_shadow_stamp();

        /*
         * Due to the ALIGN(4) in arch/blackfin/kernel/vmlinux.lds.S,
         * everything in L1 text/data is nicely aligned, so copy in
         * 4-byte chunks and don't worry about overlapping src/dest.
         *
         * We can't use the dma_memcpy functions, since they can call
         * scheduler functions which might be in L1 :( and core writes
         * into L1 instruction memory cause bad access errors, so we
         * are stuck: we are required to use DMA, but can't use the
         * common DMA functions. We can't use memcpy either, since
         * that might itself end up in the relocated L1.
         */

        blackfin_dma_early_init();

        /* if necessary, copy L1 text to L1 instruction SRAM */
        if (L1_CODE_LENGTH && text_l1_len)
                early_dma_memcpy(_stext_l1, _text_l1_lma, text_l1_len);

        /* if necessary, copy L1 data to L1 data bank A SRAM */
        if (L1_DATA_A_LENGTH && data_l1_len)
                early_dma_memcpy(_sdata_l1, _data_l1_lma, data_l1_len);

        /* if necessary, copy L1 data B to L1 data bank B SRAM */
        if (L1_DATA_B_LENGTH && data_b_l1_len)
                early_dma_memcpy(_sdata_b_l1, _data_b_l1_lma, data_b_l1_len);

        early_dma_memcpy_done();

#if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
        blackfin_iflush_l1_entry[0] = (unsigned long)blackfin_icache_flush_range_l1;
#endif

        /* if necessary, copy L2 text/data to L2 SRAM */
        if (L2_LENGTH && l2_len)
                memcpy(_stext_l2, _l2_lma, l2_len);
}

#ifdef CONFIG_SMP
void __init bfin_relocate_coreb_l1_mem(void)
{
        unsigned long text_l1_len = (unsigned long)_text_l1_len;
        unsigned long data_l1_len = (unsigned long)_data_l1_len;
        unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;

        blackfin_dma_early_init();

        /* if necessary, copy L1 text to L1 instruction SRAM */
        if (L1_CODE_LENGTH && text_l1_len)
                early_dma_memcpy((void *)COREB_L1_CODE_START, _text_l1_lma,
                                text_l1_len);

        /* if necessary, copy L1 data to L1 data bank A SRAM */
        if (L1_DATA_A_LENGTH && data_l1_len)
                early_dma_memcpy((void *)COREB_L1_DATA_A_START, _data_l1_lma,
                                data_l1_len);

        /* if necessary, copy L1 data B to L1 data bank B SRAM */
        if (L1_DATA_B_LENGTH && data_b_l1_len)
                early_dma_memcpy((void *)COREB_L1_DATA_B_START, _data_b_l1_lma,
                                data_b_l1_len);

        early_dma_memcpy_done();

#ifdef CONFIG_ICACHE_FLUSH_L1
        blackfin_iflush_l1_entry[1] = (unsigned long)blackfin_icache_flush_range_l1 -
                        (unsigned long)_stext_l1 + COREB_L1_CODE_START;
#endif
}
#endif

#ifdef CONFIG_ROMKERNEL
void __init bfin_relocate_xip_data(void)
{
        early_shadow_stamp();

        memcpy(_sdata, _data_lma, (unsigned long)_data_len - THREAD_SIZE + sizeof(struct thread_info));
        memcpy(_sinitdata, _init_data_lma, (unsigned long)_init_data_len);
}
#endif

/* add_memory_region to memmap */
static void __init add_memory_region(unsigned long long start,
                              unsigned long long size, int type)
{
        int i;

        i = bfin_memmap.nr_map;

        if (i == BFIN_MEMMAP_MAX) {
                printk(KERN_ERR "Oops! Too many entries in the memory map!\n");
                return;
        }

        bfin_memmap.map[i].addr = start;
        bfin_memmap.map[i].size = size;
        bfin_memmap.map[i].type = type;
        bfin_memmap.nr_map++;
}

/*
 * Sanitize the boot memmap, removing overlaps.
 */
static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
{
        struct change_member *change_tmp;
        unsigned long current_type, last_type;
        unsigned long long last_addr;
        int chgidx, still_changing;
        int overlap_entries;
        int new_entry;
        int old_nr, new_nr, chg_nr;
        int i;

        /*
                Visually we're performing the following (1,2,3,4 = memory types)

                Sample memory map (w/overlaps):
                   ____22__________________
                   ______________________4_
                   ____1111________________
                   _44_____________________
                   11111111________________
                   ____________________33__
                   ___________44___________
                   __________33333_________
                   ______________22________
                   ___________________2222_
                   _________111111111______
                   _____________________11_
                   _________________4______

                Sanitized equivalent (no overlap):
                   1_______________________
                   _44_____________________
                   ___1____________________
                   ____22__________________
                   ______11________________
                   _________1______________
                   __________3_____________
                   ___________44___________
                   _____________33_________
                   _______________2________
                   ________________1_______
                   _________________4______
                   ___________________2____
                   ____________________33__
                   ______________________4_
        */
        /* if there's only one memory region, don't bother */
        if (*pnr_map < 2)
                return -1;

        old_nr = *pnr_map;

        /* bail out if we find any unreasonable addresses in memmap */
        for (i = 0; i < old_nr; i++)
                if (map[i].addr + map[i].size < map[i].addr)
                        return -1;

        /* create pointers for initial change-point information (for sorting) */
        for (i = 0; i < 2*old_nr; i++)
                change_point[i] = &change_point_list[i];

        /* record all known change-points (starting and ending addresses),
           omitting those that are for empty memory regions */
        chgidx = 0;
        for (i = 0; i < old_nr; i++) {
                if (map[i].size != 0) {
                        change_point[chgidx]->addr = map[i].addr;
                        change_point[chgidx++]->pentry = &map[i];
                        change_point[chgidx]->addr = map[i].addr + map[i].size;
                        change_point[chgidx++]->pentry = &map[i];
                }
        }
        chg_nr = chgidx;        /* true number of change-points */

        /* sort change-point list by memory addresses (low -> high) */
        still_changing = 1;
        while (still_changing) {
                still_changing = 0;
                for (i = 1; i < chg_nr; i++) {
                        /* if <current_addr> > <last_addr>, swap */
                        /* or, if current=<start_addr> & last=<end_addr>, swap */
                        if ((change_point[i]->addr < change_point[i-1]->addr) ||
                                ((change_point[i]->addr == change_point[i-1]->addr) &&
                                 (change_point[i]->addr == change_point[i]->pentry->addr) &&
                                 (change_point[i-1]->addr != change_point[i-1]->pentry->addr))
                           ) {
                                change_tmp = change_point[i];
                                change_point[i] = change_point[i-1];
                                change_point[i-1] = change_tmp;
                                still_changing = 1;
                        }
                }
        }

        /* create a new memmap, removing overlaps */
        overlap_entries = 0;    /* number of entries in the overlap table */
        new_entry = 0;          /* index for creating new memmap entries */
        last_type = 0;          /* start with undefined memory type */
        last_addr = 0;          /* start with 0 as last starting address */
        /* loop through change-points, determining effect on the new memmap */
        for (chgidx = 0; chgidx < chg_nr; chgidx++) {
                /* keep track of all overlapping memmap entries */
                if (change_point[chgidx]->addr == change_point[chgidx]->pentry->addr) {
                        /* add map entry to overlap list (> 1 entry implies an overlap) */
                        overlap_list[overlap_entries++] = change_point[chgidx]->pentry;
                } else {
                        /* remove entry from list (order independent, so swap with last) */
                        for (i = 0; i < overlap_entries; i++) {
                                if (overlap_list[i] == change_point[chgidx]->pentry)
                                        overlap_list[i] = overlap_list[overlap_entries-1];
                        }
                        overlap_entries--;
                }
                /* if there are overlapping entries, decide which "type" to use */
                /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
                current_type = 0;
                for (i = 0; i < overlap_entries; i++)
                        if (overlap_list[i]->type > current_type)
                                current_type = overlap_list[i]->type;
                /* continue building up new memmap based on this information */
                if (current_type != last_type) {
                        if (last_type != 0) {
                                new_map[new_entry].size =
                                        change_point[chgidx]->addr - last_addr;
                                /* move forward only if the new size was non-zero */
                                if (new_map[new_entry].size != 0)
                                        if (++new_entry >= BFIN_MEMMAP_MAX)
                                                break;  /* no more space left for new entries */
                        }
                        if (current_type != 0) {
                                new_map[new_entry].addr = change_point[chgidx]->addr;
                                new_map[new_entry].type = current_type;
                                last_addr = change_point[chgidx]->addr;
                        }
                        last_type = current_type;
                }
        }
        new_nr = new_entry;     /* retain count for new entries */

        /* copy new mapping into original location */
        memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry));
        *pnr_map = new_nr;

        return 0;
}
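
/*
 * Worked example (illustrative values, not from any real board): given the
 * two overlapping input entries
 *
 *     { addr = 0x00000000, size = 0x04000000, type = BFIN_MEMMAP_RAM      }
 *     { addr = 0x03f00000, size = 0x00100000, type = BFIN_MEMMAP_RESERVED }
 *
 * the change points sort to 0x0, 0x3f00000, 0x4000000, and since the higher
 * type wins inside the overlap, the sanitized map comes out as
 *
 *     { addr = 0x00000000, size = 0x03f00000, type = BFIN_MEMMAP_RAM      }
 *     { addr = 0x03f00000, size = 0x00100000, type = BFIN_MEMMAP_RESERVED }
 */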

static void __init print_memory_map(char *who)
{
        int i;

        for (i = 0; i < bfin_memmap.nr_map; i++) {
                printk(KERN_DEBUG " %s: %016Lx - %016Lx ", who,
                        bfin_memmap.map[i].addr,
                        bfin_memmap.map[i].addr + bfin_memmap.map[i].size);
                switch (bfin_memmap.map[i].type) {
                case BFIN_MEMMAP_RAM:
                        printk(KERN_CONT "(usable)\n");
                        break;
                case BFIN_MEMMAP_RESERVED:
                        printk(KERN_CONT "(reserved)\n");
                        break;
                default:
                        printk(KERN_CONT "type %lu\n", bfin_memmap.map[i].type);
                        break;
                }
        }
}

static __init int parse_memmap(char *arg)
{
        unsigned long long start_at, mem_size;

        if (!arg)
                return -EINVAL;

        mem_size = memparse(arg, &arg);
        if (*arg == '@') {
                start_at = memparse(arg+1, &arg);
                add_memory_region(start_at, mem_size, BFIN_MEMMAP_RAM);
        } else if (*arg == '$') {
                start_at = memparse(arg+1, &arg);
                add_memory_region(start_at, mem_size, BFIN_MEMMAP_RESERVED);
        }

        return 0;
}
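
/*
 * Example (hypothetical boot arguments): "memmap=64M@0x0 memmap=1M$0x3f00000"
 * runs memparse() twice per option and yields two entries: 64 MB of RAM at
 * address 0 and 1 MB of RESERVED memory at 0x3f00000.  Any overlap between
 * such entries is resolved later by sanitize_memmap().
 */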

/*
 * Initial parsing of the command line.  Currently, we support:
 *  - Controlling the linux memory size: mem=xxx[KMG]
 *  - Controlling the physical memory size: max_mem=xxx[KMG][$][#]
 *       $ -> reserved memory is dcacheable
 *       # -> reserved memory is icacheable
 *  - Defining a memory region: memmap=<size>[KkmM]@<start>[KkmM] (type RAM)
 *                           or memmap=<size>[KkmM]$<start>[KkmM] (type RESERVED)
 */
static __init void parse_cmdline_early(char *cmdline_p)
{
        char c = ' ', *to = cmdline_p;
        unsigned int memsize;
        for (;;) {
                if (c == ' ') {
                        if (!memcmp(to, "mem=", 4)) {
                                to += 4;
                                memsize = memparse(to, &to);
                                if (memsize)
                                        _ramend = memsize;

                        } else if (!memcmp(to, "max_mem=", 8)) {
                                to += 8;
                                memsize = memparse(to, &to);
                                if (memsize) {
                                        physical_mem_end = memsize;
                                        if (*to != ' ') {
                                                if (*to == '$'
                                                    || *(to + 1) == '$')
                                                        reserved_mem_dcache_on = 1;
                                                if (*to == '#'
                                                    || *(to + 1) == '#')
                                                        reserved_mem_icache_on = 1;
                                        }
                                }
                        } else if (!memcmp(to, "clkin_hz=", 9)) {
                                to += 9;
                                early_init_clkin_hz(to);
#ifdef CONFIG_EARLY_PRINTK
                        } else if (!memcmp(to, "earlyprintk=", 12)) {
                                to += 12;
                                setup_early_printk(to);
#endif
                        } else if (!memcmp(to, "memmap=", 7)) {
                                to += 7;
                                parse_memmap(to);
                        }
                }
                c = *(to++);
                if (!c)
                        break;
        }
}

/*
 * Setup memory defaults from user config.
 * The physical memory layout looks like:
 *
 *  [_rambase, _ramstart]:              kernel image
 *  [memory_start, memory_end]:         dynamic memory managed by kernel
 *  [memory_end, _ramend]:              reserved memory
 *      [memory_mtd_start(memory_end),
 *              memory_mtd_start + mtd_size]:   rootfs (if any)
 *      [_ramend - DMA_UNCACHED_REGION,
 *              _ramend]:                       uncached DMA region
 *  [_ramend, physical_mem_end]:        memory not managed by kernel
 */
static __init void memory_setup(void)
{
#ifdef CONFIG_MTD_UCLINUX
        unsigned long mtd_phys = 0;
#endif
        unsigned long max_mem;

        _rambase = CONFIG_BOOT_LOAD;
        _ramstart = (unsigned long)_end;

        if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) {
                console_init();
                panic("DMA region exceeds memory limit: %lu.",
                        _ramend - _ramstart);
        }
        max_mem = memory_end = _ramend - DMA_UNCACHED_REGION;

#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
        /* Due to hardware anomaly 05000263 (hardware loop corrupted when
         * taking an ICPLB exception), we must limit the size of usable
         * instruction memory to at most 60MB (56MB if HUNT_FOR_ZERO is on).
         */
# if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO))
        if (max_mem >= 56 * 1024 * 1024)
                max_mem = 56 * 1024 * 1024;
# else
        if (max_mem >= 60 * 1024 * 1024)
                max_mem = 60 * 1024 * 1024;
# endif                         /* CONFIG_DEBUG_HUNT_FOR_ZERO */
#endif                          /* ANOMALY_05000263 */


#ifdef CONFIG_MPU
        /* Round up to multiple of 4MB */
        memory_start = (_ramstart + 0x3fffff) & ~0x3fffff;
#else
        memory_start = PAGE_ALIGN(_ramstart);
#endif

#if defined(CONFIG_MTD_UCLINUX)
        /* generic memory mapped MTD driver */
        memory_mtd_end = memory_end;

        mtd_phys = _ramstart;
        mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 8)));

# if defined(CONFIG_EXT2_FS) || defined(CONFIG_EXT3_FS)
        if (*((unsigned short *)(mtd_phys + 0x438)) == EXT2_SUPER_MAGIC)
                mtd_size =
                    PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x404)) << 10);
# endif

# if defined(CONFIG_CRAMFS)
        if (*((unsigned long *)(mtd_phys)) == CRAMFS_MAGIC)
                mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x4)));
# endif

# if defined(CONFIG_ROMFS_FS)
        if (((unsigned long *)mtd_phys)[0] == ROMSB_WORD0
            && ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1) {
                mtd_size =
                    PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2]));

                /* ROM_FS is XIP, so if we found it, we need to limit memory */
                if (memory_end > max_mem) {
                        pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n",
                                (max_mem - CONFIG_PHY_RAM_BASE_ADDRESS) >> 20);
                        memory_end = max_mem;
                }
        }
# endif                         /* CONFIG_ROMFS_FS */

        /* Since the default MTD_UCLINUX has no magic number, we just blindly
         * read 8 bytes past the end of the kernel image and look at it.
         * When no image is attached, mtd_size ends up as a garbage value,
         * so do some basic sanity checks before operating on it.
         */
        if (mtd_size == 0 || memory_end <= mtd_size) {
                pr_emerg("Could not find valid ram mtd attached.\n");
        } else {
                memory_end -= mtd_size;

                /* Relocate MTD image to the top of memory after the uncached memory area */
                uclinux_ram_map.phys = memory_mtd_start = memory_end;
                uclinux_ram_map.size = mtd_size;
                pr_info("Found mtd partition at 0x%p, (len=0x%lx), moving to 0x%p\n",
                        _end, mtd_size, (void *)memory_mtd_start);
                dma_memcpy((void *)uclinux_ram_map.phys, _end, uclinux_ram_map.size);
        }
#endif                          /* CONFIG_MTD_UCLINUX */

        /* We need to limit memory, since anything in it could contain a
         * userspace text section and expose anomaly 05000263. If the anomaly
         * doesn't exist, or we don't need to limit, then don't.
         */
        if (memory_end > max_mem) {
                pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n",
                                (max_mem - CONFIG_PHY_RAM_BASE_ADDRESS) >> 20);
                memory_end = max_mem;
        }

#ifdef CONFIG_MPU
#if defined(CONFIG_ROMFS_ON_MTD) && defined(CONFIG_MTD_ROM)
        page_mask_nelts = (((_ramend + ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE -
                                        ASYNC_BANK0_BASE) >> PAGE_SHIFT) + 31) / 32;
#else
        page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32;
#endif
        page_mask_order = get_order(3 * page_mask_nelts * sizeof(long));
#endif

        init_mm.start_code = (unsigned long)_stext;
        init_mm.end_code = (unsigned long)_etext;
        init_mm.end_data = (unsigned long)_edata;
        init_mm.brk = (unsigned long)0;

        printk(KERN_INFO "Board Memory: %ldMB\n", (physical_mem_end - CONFIG_PHY_RAM_BASE_ADDRESS) >> 20);
        printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", (_ramend - CONFIG_PHY_RAM_BASE_ADDRESS) >> 20);

        printk(KERN_INFO "Memory map:\n"
               "  fixedcode = 0x%p-0x%p\n"
               "  text      = 0x%p-0x%p\n"
               "  rodata    = 0x%p-0x%p\n"
               "  bss       = 0x%p-0x%p\n"
               "  data      = 0x%p-0x%p\n"
               "    stack   = 0x%p-0x%p\n"
               "  init      = 0x%p-0x%p\n"
               "  available = 0x%p-0x%p\n"
#ifdef CONFIG_MTD_UCLINUX
               "  rootfs    = 0x%p-0x%p\n"
#endif
#if DMA_UNCACHED_REGION > 0
               "  DMA Zone  = 0x%p-0x%p\n"
#endif
                , (void *)FIXED_CODE_START, (void *)FIXED_CODE_END,
                _stext, _etext,
                __start_rodata, __end_rodata,
                __bss_start, __bss_stop,
                _sdata, _edata,
                (void *)&init_thread_union,
                (void *)((int)(&init_thread_union) + THREAD_SIZE),
                __init_begin, __init_end,
                (void *)_ramstart, (void *)memory_end
#ifdef CONFIG_MTD_UCLINUX
                , (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size)
#endif
#if DMA_UNCACHED_REGION > 0
                , (void *)(_ramend - DMA_UNCACHED_REGION), (void *)(_ramend)
#endif
                );
}

/*
 * Find the lowest, highest page frame number we have available
 */
void __init find_min_max_pfn(void)
{
        int i;

        max_pfn = 0;
        min_low_pfn = PFN_DOWN(memory_end);

        for (i = 0; i < bfin_memmap.nr_map; i++) {
                unsigned long start, end;
                /* RAM? */
                if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
                        continue;
                start = PFN_UP(bfin_memmap.map[i].addr);
                end = PFN_DOWN(bfin_memmap.map[i].addr +
                                bfin_memmap.map[i].size);
                if (start >= end)
                        continue;
                if (end > max_pfn)
                        max_pfn = end;
                if (start < min_low_pfn)
                        min_low_pfn = start;
        }
}

static __init void setup_bootmem_allocator(void)
{
        int bootmap_size;
        int i;
        unsigned long start_pfn, end_pfn;
        unsigned long curr_pfn, last_pfn, size;

        /* mark memory between memory_start and memory_end usable */
        add_memory_region(memory_start,
                memory_end - memory_start, BFIN_MEMMAP_RAM);
        /* sanity check for overlap */
        sanitize_memmap(bfin_memmap.map, &bfin_memmap.nr_map);
        print_memory_map("boot memmap");

        /* initialize globals in linux/bootmem.h */
        find_min_max_pfn();
        /* pfn of the last usable page frame */
        if (max_pfn > memory_end >> PAGE_SHIFT)
                max_pfn = memory_end >> PAGE_SHIFT;
        /* pfn of last page frame directly mapped by kernel */
        max_low_pfn = max_pfn;
        /* pfn of the first usable page frame after kernel image */
        if (min_low_pfn < memory_start >> PAGE_SHIFT)
                min_low_pfn = memory_start >> PAGE_SHIFT;
        start_pfn = CONFIG_PHY_RAM_BASE_ADDRESS >> PAGE_SHIFT;
        end_pfn = memory_end >> PAGE_SHIFT;

        /*
         * give all the memory to the bootmap allocator, tell it to put the
         * boot mem_map at the start of memory.
         */
        bootmap_size = init_bootmem_node(NODE_DATA(0),
                        memory_start >> PAGE_SHIFT,     /* map goes here */
                        start_pfn, end_pfn);

        /* register the memmap regions with the bootmem allocator */
        for (i = 0; i < bfin_memmap.nr_map; i++) {
                /*
                 * Reserve usable memory
                 */
                if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
                        continue;
                /*
                 * We are rounding up the start address of usable memory:
                 */
                curr_pfn = PFN_UP(bfin_memmap.map[i].addr);
                if (curr_pfn >= end_pfn)
                        continue;
                /*
                 * ... and at the end of the usable range downwards:
                 */
                last_pfn = PFN_DOWN(bfin_memmap.map[i].addr +
                                         bfin_memmap.map[i].size);

                if (last_pfn > end_pfn)
                        last_pfn = end_pfn;

                /*
                 * .. finally, did all the rounding and playing
                 * around just make the area go away?
                 */
                if (last_pfn <= curr_pfn)
                        continue;

                size = last_pfn - curr_pfn;
                free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
        }

        /* reserve memory before memory_start, including bootmap */
        reserve_bootmem(CONFIG_PHY_RAM_BASE_ADDRESS,
                memory_start + bootmap_size + PAGE_SIZE - 1 - CONFIG_PHY_RAM_BASE_ADDRESS,
                BOOTMEM_DEFAULT);
}

#define EBSZ_TO_MEG(ebsz) \
({ \
        int meg = 0; \
        switch (ebsz & 0xf) { \
                case 0x1: meg =  16; break; \
                case 0x3: meg =  32; break; \
                case 0x5: meg =  64; break; \
                case 0x7: meg = 128; break; \
                case 0x9: meg = 256; break; \
                case 0xb: meg = 512; break; \
        } \
        meg; \
})
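/*
 * Example (illustrative register value, not from a real board): a bank
 * field of 0x5 decodes to 64 MB above, so a BF561 EBIU_SDBCTL with bank
 * fields 0x5/0x5/0x1/0x1 would make get_mem_size() below return
 * 64 + 64 + 16 + 16 = 160 MB.
 */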
static inline int __init get_mem_size(void)
{
#if defined(EBIU_SDBCTL)
# if defined(BF561_FAMILY)
        int ret = 0;
        u32 sdbctl = bfin_read_EBIU_SDBCTL();
        ret += EBSZ_TO_MEG(sdbctl >>  0);
        ret += EBSZ_TO_MEG(sdbctl >>  8);
        ret += EBSZ_TO_MEG(sdbctl >> 16);
        ret += EBSZ_TO_MEG(sdbctl >> 24);
        return ret;
# else
        return EBSZ_TO_MEG(bfin_read_EBIU_SDBCTL());
# endif
#elif defined(EBIU_DDRCTL1)
        u32 ddrctl = bfin_read_EBIU_DDRCTL1();
        int ret = 0;
        switch (ddrctl & 0xc0000) {
        case DEVSZ_64:
                ret = 64 / 8;
                break;
        case DEVSZ_128:
                ret = 128 / 8;
                break;
        case DEVSZ_256:
                ret = 256 / 8;
                break;
        case DEVSZ_512:
                ret = 512 / 8;
                break;
        }
        switch (ddrctl & 0x30000) {
        case DEVWD_4:
                ret *= 2;
                /* fallthrough */
        case DEVWD_8:
                ret *= 2;
                /* fallthrough */
        case DEVWD_16:
                break;
        }
        if ((ddrctl & 0xc000) == 0x4000)
                ret *= 2;
        return ret;
#elif defined(CONFIG_BF60x)
        u32 ddrctl = bfin_read_DMC0_CFG();
        int ret = 0;
        switch (ddrctl & 0xf00) {
        case DEVSZ_64:
                ret = 64 / 8;
                break;
        case DEVSZ_128:
                ret = 128 / 8;
                break;
        case DEVSZ_256:
                ret = 256 / 8;
                break;
        case DEVSZ_512:
                ret = 512 / 8;
                break;
        case DEVSZ_1G:
                ret = 1024 / 8;
                break;
        case DEVSZ_2G:
                ret = 2048 / 8;
                break;
        }
        return ret;
#endif
        BUG();
}
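
/*
 * Sketch of the DDR path above (our reading of the bit fields; treat the
 * numbers as illustrative, not authoritative): DEVSZ_* selects the
 * per-device density in megabytes (e.g. a 512 Mbit part -> 512 / 8 = 64 MB),
 * and the intentional DEVWD_* fallthroughs scale by device width: x4 parts
 * double twice, x8 parts double once, x16 parts not at all.  The final
 * (ddrctl & 0xc000) == 0x4000 test doubles the total once more.
 */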

__attribute__((weak))
void __init native_machine_early_platform_add_devices(void)
{
}

#ifdef CONFIG_BF60x
static inline u_long bfin_get_clk(char *name)
{
        struct clk *clk;
        u_long clk_rate;

        clk = clk_get(NULL, name);
        if (IS_ERR(clk))
                return 0;

        clk_rate = clk_get_rate(clk);
        clk_put(clk);
        return clk_rate;
}
#endif

void __init setup_arch(char **cmdline_p)
{
        u32 mmr;
        unsigned long sclk, cclk;

        native_machine_early_platform_add_devices();

        enable_shadow_console();

        /* Check to make sure we are running on the right processor */
        mmr = bfin_cpuid();
        if (unlikely(CPUID != bfin_cpuid()))
                printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
                        CPU, bfin_cpuid(), bfin_revid());

#ifdef CONFIG_DUMMY_CONSOLE
        conswitchp = &dummy_con;
#endif

#if defined(CONFIG_CMDLINE_BOOL)
        strncpy(&command_line[0], CONFIG_CMDLINE, sizeof(command_line));
        command_line[sizeof(command_line) - 1] = 0;
#endif

        /* Keep a copy of command line */
        *cmdline_p = &command_line[0];
        memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
        boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';

        memset(&bfin_memmap, 0, sizeof(bfin_memmap));

#ifdef CONFIG_BF60x
        /* The clock framework must be initialized before parsing the early command line */
        clk_init();
#endif
        /* If the user does not specify things on the command line, use
         * what the bootloader set up
         */
        physical_mem_end = 0;
        parse_cmdline_early(&command_line[0]);

        if (_ramend == 0)
                _ramend = get_mem_size() * 1024 * 1024;

        if (physical_mem_end == 0)
                physical_mem_end = _ramend;

        memory_setup();

#ifndef CONFIG_BF60x
        /* Initialize Async memory banks */
        bfin_write_EBIU_AMBCTL0(AMBCTL0VAL);
        bfin_write_EBIU_AMBCTL1(AMBCTL1VAL);
        bfin_write_EBIU_AMGCTL(AMGCTLVAL);
#ifdef CONFIG_EBIU_MBSCTLVAL
        bfin_write_EBIU_MBSCTL(CONFIG_EBIU_MBSCTLVAL);
        bfin_write_EBIU_MODE(CONFIG_EBIU_MODEVAL);
        bfin_write_EBIU_FCTL(CONFIG_EBIU_FCTLVAL);
#endif
#endif
#ifdef CONFIG_BFIN_HYSTERESIS_CONTROL
        bfin_write_PORTF_HYSTERESIS(HYST_PORTF_0_15);
        bfin_write_PORTG_HYSTERESIS(HYST_PORTG_0_15);
        bfin_write_PORTH_HYSTERESIS(HYST_PORTH_0_15);
        bfin_write_MISCPORT_HYSTERESIS((bfin_read_MISCPORT_HYSTERESIS() &
                                        ~HYST_NONEGPIO_MASK) | HYST_NONEGPIO);
#endif

        cclk = get_cclk();
        sclk = get_sclk();

        if ((ANOMALY_05000273 || ANOMALY_05000274) && (cclk >> 1) < sclk)
                panic("ANOMALY 05000273 or 05000274: CCLK must be >= 2*SCLK");

#ifdef BF561_FAMILY
        if (ANOMALY_05000266) {
                bfin_read_IMDMA_D0_IRQ_STATUS();
                bfin_read_IMDMA_D1_IRQ_STATUS();
        }
#endif

        mmr = bfin_read_TBUFCTL();
        printk(KERN_INFO "Hardware Trace %s and %sabled\n",
                (mmr & 0x1) ? "active" : "off",
                (mmr & 0x2) ? "en" : "dis");
#ifndef CONFIG_BF60x
        mmr = bfin_read_SYSCR();
        printk(KERN_INFO "Boot Mode: %i\n", mmr & 0xF);

        /* Newer parts mirror SWRST bits in SYSCR */
#if defined(CONFIG_BF53x) || defined(CONFIG_BF561) || \
    defined(CONFIG_BF538) || defined(CONFIG_BF539)
        _bfin_swrst = bfin_read_SWRST();
#else
        /* Clear boot mode field */
        _bfin_swrst = mmr & ~0xf;
#endif

#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
        bfin_write_SWRST(_bfin_swrst & ~DOUBLE_FAULT);
#endif
#ifdef CONFIG_DEBUG_DOUBLEFAULT_RESET
        bfin_write_SWRST(_bfin_swrst | DOUBLE_FAULT);
#endif

#ifdef CONFIG_SMP
        if (_bfin_swrst & SWRST_DBL_FAULT_A) {
#else
        if (_bfin_swrst & RESET_DOUBLE) {
#endif
                printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
                /* We assume the crashing kernel, and the current symbol table match */
                printk(KERN_EMERG " While handling exception (EXCAUSE = %#x) at %pF\n",
                        initial_pda.seqstat_doublefault & SEQSTAT_EXCAUSE,
                        initial_pda.retx_doublefault);
                printk(KERN_NOTICE "   DCPLB_FAULT_ADDR: %pF\n",
                        initial_pda.dcplb_doublefault_addr);
                printk(KERN_NOTICE "   ICPLB_FAULT_ADDR: %pF\n",
                        initial_pda.icplb_doublefault_addr);
#endif
                printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
                        initial_pda.retx);
        } else if (_bfin_swrst & RESET_WDOG)
                printk(KERN_INFO "Recovering from Watchdog event\n");
        else if (_bfin_swrst & RESET_SOFTWARE)
                printk(KERN_NOTICE "Reset caused by Software reset\n");
#endif
        printk(KERN_INFO "Blackfin support (C) 2004-2010 Analog Devices, Inc.\n");
        if (bfin_compiled_revid() == 0xffff)
                printk(KERN_INFO "Compiled for ADSP-%s Rev any, running on 0.%d\n", CPU, bfin_revid());
        else if (bfin_compiled_revid() == -1)
                printk(KERN_INFO "Compiled for ADSP-%s Rev none\n", CPU);
        else
                printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid());

        if (likely(CPUID == bfin_cpuid())) {
                if (bfin_revid() != bfin_compiled_revid()) {
                        if (bfin_compiled_revid() == -1)
                                printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
                                       bfin_revid());
                        else if (bfin_compiled_revid() != 0xffff) {
                                printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n",
                                       bfin_compiled_revid(), bfin_revid());
                                if (bfin_compiled_revid() > bfin_revid())
                                        panic("Error: you are missing anomaly workarounds for this rev");
                        }
                }
                if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX)
                        printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n",
                               CPU, bfin_revid());
        }

        printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");

#ifdef CONFIG_BF60x
        printk(KERN_INFO "Processor Speed: %lu MHz core clock, %lu MHz SCLK, %lu MHz SCLK0, %lu MHz SCLK1 and %lu MHz DCLK\n",
                cclk / 1000000, bfin_get_clk("SYSCLK") / 1000000, get_sclk0() / 1000000, get_sclk1() / 1000000, get_dclk() / 1000000);
#else
        printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
               cclk / 1000000, sclk / 1000000);
#endif

        setup_bootmem_allocator();

        paging_init();

        /* Copy atomic sequences to their fixed location, and sanity check that
           these locations are the ones that we advertise to userspace.  */
        memcpy((void *)FIXED_CODE_START, &fixed_code_start,
               FIXED_CODE_END - FIXED_CODE_START);
        BUG_ON((char *)&sigreturn_stub - (char *)&fixed_code_start
               != SIGRETURN_STUB - FIXED_CODE_START);
        BUG_ON((char *)&atomic_xchg32 - (char *)&fixed_code_start
               != ATOMIC_XCHG32 - FIXED_CODE_START);
        BUG_ON((char *)&atomic_cas32 - (char *)&fixed_code_start
               != ATOMIC_CAS32 - FIXED_CODE_START);
        BUG_ON((char *)&atomic_add32 - (char *)&fixed_code_start
               != ATOMIC_ADD32 - FIXED_CODE_START);
        BUG_ON((char *)&atomic_sub32 - (char *)&fixed_code_start
               != ATOMIC_SUB32 - FIXED_CODE_START);
        BUG_ON((char *)&atomic_ior32 - (char *)&fixed_code_start
               != ATOMIC_IOR32 - FIXED_CODE_START);
        BUG_ON((char *)&atomic_and32 - (char *)&fixed_code_start
               != ATOMIC_AND32 - FIXED_CODE_START);
        BUG_ON((char *)&atomic_xor32 - (char *)&fixed_code_start
               != ATOMIC_XOR32 - FIXED_CODE_START);
        BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start
                != SAFE_USER_INSTRUCTION - FIXED_CODE_START);

#ifdef CONFIG_SMP
        platform_init_cpus();
#endif
        init_exception_vectors();
        bfin_cache_init();      /* Initialize caches for the boot CPU */
#ifdef CONFIG_SCB_PRIORITY
        init_scb();
#endif
}

static int __init topology_init(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
        }

        return 0;
}

subsys_initcall(topology_init);

/* Get the input clock frequency */
static u_long cached_clkin_hz = CONFIG_CLKIN_HZ;
#ifndef CONFIG_BF60x
static u_long get_clkin_hz(void)
{
        return cached_clkin_hz;
}
#endif
static int __init early_init_clkin_hz(char *buf)
{
        cached_clkin_hz = simple_strtoul(buf, NULL, 0);
#ifdef BFIN_KERNEL_CLOCK
        if (cached_clkin_hz != CONFIG_CLKIN_HZ)
                panic("cannot change clkin_hz when reprogramming clocks");
#endif
        return 1;
}
early_param("clkin_hz=", early_init_clkin_hz);

#ifndef CONFIG_BF60x
/* Get the VCO frequency: the input clock, divided by DF, times the PLL multiplier */
static u_long get_vco(void)
{
        static u_long cached_vco;
        u_long msel, pll_ctl;

        /* The assumption here is that VCO never changes at runtime.
         * If, someday, we support that, then we'll have to change this.
         */
        if (cached_vco)
                return cached_vco;

        pll_ctl = bfin_read_PLL_CTL();
        msel = (pll_ctl >> 9) & 0x3F;
        if (0 == msel)
                msel = 64;

        cached_vco = get_clkin_hz();
        cached_vco >>= (1 & pll_ctl);   /* DF bit */
        cached_vco *= msel;
        return cached_vco;
}
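
/*
 * Worked example (illustrative values): with a 25 MHz CLKIN, DF = 0 and
 * MSEL = 16, the code above computes VCO = (25 MHz >> 0) * 16 = 400 MHz.
 * An MSEL field of 0 is special-cased to the maximum multiplier of 64.
 */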
#endif

/* Get the Core clock */
u_long get_cclk(void)
{
#ifdef CONFIG_BF60x
        return bfin_get_clk("CCLK");
#else
        static u_long cached_cclk_pll_div, cached_cclk;
        u_long csel, ssel;

        if (bfin_read_PLL_STAT() & 0x1)
                return get_clkin_hz();

        ssel = bfin_read_PLL_DIV();
        if (ssel == cached_cclk_pll_div)
                return cached_cclk;
        else
                cached_cclk_pll_div = ssel;

        csel = ((ssel >> 4) & 0x03);
        ssel &= 0xf;
        if (ssel && ssel < (1 << csel)) /* SCLK > CCLK */
                cached_cclk = get_vco() / ssel;
        else
                cached_cclk = get_vco() >> csel;
        return cached_cclk;
#endif
}
EXPORT_SYMBOL(get_cclk);
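
/*
 * Worked example (illustrative values): with VCO = 400 MHz and PLL_DIV
 * holding CSEL = 1 and SSEL = 5, CCLK = VCO >> CSEL = 200 MHz.  The
 * "ssel < (1 << csel)" guard covers the odd configuration where the
 * system clock divider would otherwise make SCLK faster than CCLK; in
 * that case CCLK is taken as VCO / SSEL instead.
 */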

#ifdef CONFIG_BF60x
/* Get the bf60x clock of SCLK0 domain */
u_long get_sclk0(void)
{
        return bfin_get_clk("SCLK0");
}
EXPORT_SYMBOL(get_sclk0);

/* Get the bf60x clock of SCLK1 domain */
u_long get_sclk1(void)
{
        return bfin_get_clk("SCLK1");
}
EXPORT_SYMBOL(get_sclk1);

/* Get the bf60x DRAM clock */
u_long get_dclk(void)
{
        return bfin_get_clk("DCLK");
}
EXPORT_SYMBOL(get_dclk);
#endif

/* Get the default system clock */
u_long get_sclk(void)
{
#ifdef CONFIG_BF60x
        return get_sclk0();
#else
        static u_long cached_sclk;
        u_long ssel;

        /* The assumption here is that SCLK never changes at runtime.
         * If, someday, we support that, then we'll have to change this.
         */
        if (cached_sclk)
                return cached_sclk;

        if (bfin_read_PLL_STAT() & 0x1)
                return get_clkin_hz();

        ssel = bfin_read_PLL_DIV() & 0xf;
        if (0 == ssel) {
                printk(KERN_WARNING "Invalid System Clock\n");
                ssel = 1;
        }

        cached_sclk = get_vco() / ssel;
        return cached_sclk;
#endif
}
EXPORT_SYMBOL(get_sclk);
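
/*
 * Worked example (illustrative values): continuing the 400 MHz VCO case
 * with SSEL = 5, get_sclk() returns 400 / 5 = 80 MHz.  An SSEL of 0 is
 * not a valid hardware setting, hence the warning and the fallback to 1.
 */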

unsigned long sclk_to_usecs(unsigned long sclk)
{
        u64 tmp = USEC_PER_SEC * (u64)sclk;
        do_div(tmp, get_sclk());
        return tmp;
}
EXPORT_SYMBOL(sclk_to_usecs);

unsigned long usecs_to_sclk(unsigned long usecs)
{
        u64 tmp = get_sclk() * (u64)usecs;
        do_div(tmp, USEC_PER_SEC);
        return tmp;
}
EXPORT_SYMBOL(usecs_to_sclk);
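
/*
 * Worked example (illustrative values): at SCLK = 80 MHz, usecs_to_sclk(10)
 * computes 80000000 * 10 / 1000000 = 800 system-clock cycles, and
 * sclk_to_usecs(800) maps those 800 cycles back to 10 us.
 */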

/*
 *      Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
        char *cpu, *mmu, *fpu, *vendor, *cache;
        uint32_t revid;
        int cpu_num = *(unsigned int *)v;
        u_long sclk, cclk;
        u_int icache_size = BFIN_ICACHESIZE / 1024, dcache_size = 0, dsup_banks = 0;
        struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu_num);

        cpu = CPU;
        mmu = "none";
        fpu = "none";
        revid = bfin_revid();

        sclk = get_sclk();
        cclk = get_cclk();

        switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) {
        case 0xca:
                vendor = "Analog Devices";
                break;
        default:
                vendor = "unknown";
                break;
        }

        seq_printf(m, "processor\t: %d\n" "vendor_id\t: %s\n", cpu_num, vendor);

        if (CPUID == bfin_cpuid())
                seq_printf(m, "cpu family\t: 0x%04x\n", CPUID);
        else
                seq_printf(m, "cpu family\t: Compiled for:0x%04x, running on:0x%04x\n",
                        CPUID, bfin_cpuid());

        seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
                "stepping\t: %d ",
                cpu, cclk/1000000, sclk/1000000,
#ifdef CONFIG_MPU
                "mpu on",
#else
                "mpu off",
#endif
                revid);

        if (bfin_revid() != bfin_compiled_revid()) {
                if (bfin_compiled_revid() == -1)
                        seq_printf(m, "(Compiled for Rev none)");
                else if (bfin_compiled_revid() == 0xffff)
                        seq_printf(m, "(Compiled for Rev any)");
                else
                        seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid());
        }

        seq_printf(m, "\ncpu MHz\t\t: %lu.%06lu/%lu.%06lu\n",
                cclk/1000000, cclk%1000000,
                sclk/1000000, sclk%1000000);
        seq_printf(m, "bogomips\t: %lu.%02lu\n"
                "Calibration\t: %lu loops\n",
                (loops_per_jiffy * HZ) / 500000,
                ((loops_per_jiffy * HZ) / 5000) % 100,
                (loops_per_jiffy * HZ));

        /* Check cache configuration */
        switch (cpudata->dmemctl & (1 << DMC0_P | 1 << DMC1_P)) {
        case ACACHE_BSRAM:
                cache = "dbank-A/B\t: cache/sram";
                dcache_size = 16;
                dsup_banks = 1;
                break;
        case ACACHE_BCACHE:
                cache = "dbank-A/B\t: cache/cache";
                dcache_size = 32;
                dsup_banks = 2;
                break;
        case ASRAM_BSRAM:
                cache = "dbank-A/B\t: sram/sram";
                dcache_size = 0;
                dsup_banks = 0;
                break;
        default:
                cache = "unknown";
                dcache_size = 0;
                dsup_banks = 0;
                break;
        }

        /* Is it turned on? */
        if ((cpudata->dmemctl & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE))
                dcache_size = 0;

        if ((cpudata->imemctl & (IMC | ENICPLB)) != (IMC | ENICPLB))
                icache_size = 0;

        seq_printf(m, "cache size\t: %d KB(L1 icache) "
                "%d KB(L1 dcache) %d KB(L2 cache)\n",
                icache_size, dcache_size, 0);
        seq_printf(m, "%s\n", cache);
        seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE)
                   "cacheable"
#else
                   "uncacheable"
#endif
                   " in instruction cache\n");
        seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK)
                      "cacheable (write-back)"
#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH)
                      "cacheable (write-through)"
#else
                      "uncacheable"
#endif
                      " in data cache\n");

        if (icache_size)
                seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n",
                           BFIN_ISUBBANKS, BFIN_IWAYS, BFIN_ILINES);
        else
                seq_printf(m, "icache setup\t: off\n");

        seq_printf(m,
                   "dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n",
                   dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
                   BFIN_DLINES);
#ifdef __ARCH_SYNC_CORE_DCACHE
        seq_printf(m, "dcache flushes\t: %lu\n", dcache_invld_count[cpu_num]);
#endif
#ifdef __ARCH_SYNC_CORE_ICACHE
        seq_printf(m, "icache flushes\t: %lu\n", icache_invld_count[cpu_num]);
#endif

        seq_printf(m, "\n");

        if (cpu_num != num_possible_cpus() - 1)
                return 0;

        if (L2_LENGTH) {
                seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
                seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_ICACHEABLE)
                              "cacheable"
#else
                              "uncacheable"
#endif
                              " in instruction cache\n");
                seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_WRITEBACK)
                              "cacheable (write-back)"
#elif defined(CONFIG_BFIN_L2_WRITETHROUGH)
                              "cacheable (write-through)"
#else
                              "uncacheable"
#endif
                              " in data cache\n");
        }
        seq_printf(m, "board name\t: %s\n", bfin_board_name);
        seq_printf(m, "board memory\t: %ld kB (0x%08lx -> 0x%08lx)\n",
                physical_mem_end >> 10, 0ul, physical_mem_end);
        seq_printf(m, "kernel memory\t: %d kB (0x%08lx -> 0x%08lx)\n",
                ((int)memory_end - (int)_rambase) >> 10,
                _rambase, memory_end);

        return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
        if (*pos == 0)
                *pos = cpumask_first(cpu_online_mask);
        if (*pos >= num_online_cpus())
                return NULL;

        return pos;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        *pos = cpumask_next(*pos, cpu_online_mask);

        return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start = c_start,
        .next = c_next,
        .stop = c_stop,
        .show = show_cpuinfo,
};

void __init cmdline_init(const char *r0)
{
        early_shadow_stamp();
        if (r0)
                strlcpy(command_line, r0, COMMAND_LINE_SIZE);
}